xref: /openbmc/linux/mm/mempolicy.c (revision 46aeb7e6c17d8caa6828b864eb5b0423f0401e9d)
1*46aeb7e6SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds  * Simple NUMA memory policy for the Linux kernel.
41da177e4SLinus Torvalds  *
51da177e4SLinus Torvalds  * Copyright 2003,2004 Andi Kleen, SuSE Labs.
68bccd85fSChristoph Lameter  * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
71da177e4SLinus Torvalds  *
81da177e4SLinus Torvalds  * NUMA policy allows the user to give hints in which node(s) memory should
91da177e4SLinus Torvalds  * be allocated.
101da177e4SLinus Torvalds  *
111da177e4SLinus Torvalds  * Support four policies per VMA and per process:
121da177e4SLinus Torvalds  *
131da177e4SLinus Torvalds  * The VMA policy has priority over the process policy for a page fault.
141da177e4SLinus Torvalds  *
151da177e4SLinus Torvalds  * interleave     Allocate memory interleaved over a set of nodes,
161da177e4SLinus Torvalds  *                with normal fallback if it fails.
171da177e4SLinus Torvalds  *                For VMA based allocations this interleaves based on the
181da177e4SLinus Torvalds  *                offset into the backing object or offset into the mapping
191da177e4SLinus Torvalds  *                for anonymous memory. For process policy a per-process counter
201da177e4SLinus Torvalds  *                is used.
218bccd85fSChristoph Lameter  *
221da177e4SLinus Torvalds  * bind           Only allocate memory on a specific set of nodes,
231da177e4SLinus Torvalds  *                no fallback.
248bccd85fSChristoph Lameter  *                FIXME: memory is allocated starting with the first node
258bccd85fSChristoph Lameter  *                to the last. It would be better if bind would truly restrict
268bccd85fSChristoph Lameter  *                the allocation to memory nodes instead
278bccd85fSChristoph Lameter  *
281da177e4SLinus Torvalds  * preferred       Try a specific node first before normal fallback.
2900ef2d2fSDavid Rientjes  *                As a special case NUMA_NO_NODE here means do the allocation
301da177e4SLinus Torvalds  *                on the local CPU. This is normally identical to default,
311da177e4SLinus Torvalds  *                but useful to set in a VMA when you have a non-default
321da177e4SLinus Torvalds  *                process policy.
338bccd85fSChristoph Lameter  *
341da177e4SLinus Torvalds  * default        Allocate on the local node first, or when on a VMA
351da177e4SLinus Torvalds  *                use the process policy. This is what Linux always did
361da177e4SLinus Torvalds  *		  in a NUMA aware kernel and still does by, ahem, default.
371da177e4SLinus Torvalds  *
381da177e4SLinus Torvalds  * The process policy is applied for most non-interrupt memory allocations
391da177e4SLinus Torvalds  * in that process' context. Interrupts ignore the policies and always
401da177e4SLinus Torvalds  * try to allocate on the local CPU. The VMA policy is only applied for memory
411da177e4SLinus Torvalds  * allocations for a VMA in the VM.
421da177e4SLinus Torvalds  *
431da177e4SLinus Torvalds  * Currently there are a few corner cases in swapping where the policy
441da177e4SLinus Torvalds  * is not applied, but the majority should be handled. When process policy
451da177e4SLinus Torvalds  * is used it is not remembered over swap outs/swap ins.
461da177e4SLinus Torvalds  *
471da177e4SLinus Torvalds  * Only the highest zone in the zone hierarchy gets policied. Allocations
481da177e4SLinus Torvalds  * requesting a lower zone just use default policy. This implies that
491da177e4SLinus Torvalds  * on systems with highmem, kernel lowmem allocations don't get policied.
501da177e4SLinus Torvalds  * Same with GFP_DMA allocations.
511da177e4SLinus Torvalds  *
521da177e4SLinus Torvalds  * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
531da177e4SLinus Torvalds  * all users and remembered even when nobody has memory mapped.
541da177e4SLinus Torvalds  */
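/*
 * Illustrative sketch (not part of the original file): the policies above
 * are normally requested from userspace via the set_mempolicy() and mbind()
 * system calls (e.g. through libnuma's <numaif.h> wrappers).  Roughly:
 *
 *	unsigned long nodes = (1UL << 0) | (1UL << 1);	  nodes 0 and 1
 *
 *	set_mempolicy(MPOL_INTERLEAVE, &nodes, 8 * sizeof(nodes));
 *		interleave all further allocations of the calling task
 *	mbind(addr, len, MPOL_BIND, &nodes, 8 * sizeof(nodes), MPOL_MF_MOVE);
 *		bind one mapping and migrate its existing pages
 *	set_mempolicy(MPOL_DEFAULT, NULL, 0);
 *		revert to the default policy
 *
 * Error handling and the exact maxnode convention are omitted; see the
 * set_mempolicy(2) and mbind(2) man pages for the authoritative interface.
 */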
551da177e4SLinus Torvalds 
561da177e4SLinus Torvalds /* Notebook:
571da177e4SLinus Torvalds    fix mmap readahead to honour policy and enable policy for any page cache
581da177e4SLinus Torvalds    object
591da177e4SLinus Torvalds    statistics for bigpages
601da177e4SLinus Torvalds    global policy for page cache? currently it uses process policy. Requires
611da177e4SLinus Torvalds    first item above.
621da177e4SLinus Torvalds    handle mremap for shared memory (currently ignored for the policy)
631da177e4SLinus Torvalds    grows down?
641da177e4SLinus Torvalds    make bind policy root only? It can trigger oom much faster and the
651da177e4SLinus Torvalds    kernel is not always grateful with that.
661da177e4SLinus Torvalds */
671da177e4SLinus Torvalds 
68b1de0d13SMitchel Humpherys #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
69b1de0d13SMitchel Humpherys 
701da177e4SLinus Torvalds #include <linux/mempolicy.h>
711da177e4SLinus Torvalds #include <linux/mm.h>
721da177e4SLinus Torvalds #include <linux/highmem.h>
731da177e4SLinus Torvalds #include <linux/hugetlb.h>
741da177e4SLinus Torvalds #include <linux/kernel.h>
751da177e4SLinus Torvalds #include <linux/sched.h>
766e84f315SIngo Molnar #include <linux/sched/mm.h>
776a3827d7SIngo Molnar #include <linux/sched/numa_balancing.h>
78f719ff9bSIngo Molnar #include <linux/sched/task.h>
791da177e4SLinus Torvalds #include <linux/nodemask.h>
801da177e4SLinus Torvalds #include <linux/cpuset.h>
811da177e4SLinus Torvalds #include <linux/slab.h>
821da177e4SLinus Torvalds #include <linux/string.h>
83b95f1b31SPaul Gortmaker #include <linux/export.h>
84b488893aSPavel Emelyanov #include <linux/nsproxy.h>
851da177e4SLinus Torvalds #include <linux/interrupt.h>
861da177e4SLinus Torvalds #include <linux/init.h>
871da177e4SLinus Torvalds #include <linux/compat.h>
8831367466SOtto Ebeling #include <linux/ptrace.h>
89dc9aa5b9SChristoph Lameter #include <linux/swap.h>
901a75a6c8SChristoph Lameter #include <linux/seq_file.h>
911a75a6c8SChristoph Lameter #include <linux/proc_fs.h>
92b20a3503SChristoph Lameter #include <linux/migrate.h>
9362b61f61SHugh Dickins #include <linux/ksm.h>
9495a402c3SChristoph Lameter #include <linux/rmap.h>
9586c3a764SDavid Quigley #include <linux/security.h>
96dbcb0f19SAdrian Bunk #include <linux/syscalls.h>
97095f1fc4SLee Schermerhorn #include <linux/ctype.h>
986d9c285aSKOSAKI Motohiro #include <linux/mm_inline.h>
99b24f53a0SLee Schermerhorn #include <linux/mmu_notifier.h>
100b1de0d13SMitchel Humpherys #include <linux/printk.h>
101c8633798SNaoya Horiguchi #include <linux/swapops.h>
102dc9aa5b9SChristoph Lameter 
1031da177e4SLinus Torvalds #include <asm/tlbflush.h>
1047c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
1051da177e4SLinus Torvalds 
10662695a84SNick Piggin #include "internal.h"
10762695a84SNick Piggin 
10838e35860SChristoph Lameter /* Internal flags */
109dc9aa5b9SChristoph Lameter #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for contiguous vmas */
11038e35860SChristoph Lameter #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
111dc9aa5b9SChristoph Lameter 
112fcc234f8SPekka Enberg static struct kmem_cache *policy_cache;
113fcc234f8SPekka Enberg static struct kmem_cache *sn_cache;
1141da177e4SLinus Torvalds 
1151da177e4SLinus Torvalds /* Highest zone. A specific allocation for a zone below that is not
1161da177e4SLinus Torvalds    policied. */
1176267276fSChristoph Lameter enum zone_type policy_zone = 0;
1181da177e4SLinus Torvalds 
119bea904d5SLee Schermerhorn /*
120bea904d5SLee Schermerhorn  * run-time system-wide default policy => local allocation
121bea904d5SLee Schermerhorn  */
122e754d79dSH Hartley Sweeten static struct mempolicy default_policy = {
1231da177e4SLinus Torvalds 	.refcnt = ATOMIC_INIT(1), /* never free it */
124bea904d5SLee Schermerhorn 	.mode = MPOL_PREFERRED,
125fc36b8d3SLee Schermerhorn 	.flags = MPOL_F_LOCAL,
1261da177e4SLinus Torvalds };
1271da177e4SLinus Torvalds 
1285606e387SMel Gorman static struct mempolicy preferred_node_policy[MAX_NUMNODES];
1295606e387SMel Gorman 
13074d2c3a0SOleg Nesterov struct mempolicy *get_task_policy(struct task_struct *p)
1315606e387SMel Gorman {
1325606e387SMel Gorman 	struct mempolicy *pol = p->mempolicy;
133f15ca78eSOleg Nesterov 	int node;
1345606e387SMel Gorman 
135f15ca78eSOleg Nesterov 	if (pol)
136f15ca78eSOleg Nesterov 		return pol;
1375606e387SMel Gorman 
138f15ca78eSOleg Nesterov 	node = numa_node_id();
1391da6f0e1SJianguo Wu 	if (node != NUMA_NO_NODE) {
1401da6f0e1SJianguo Wu 		pol = &preferred_node_policy[node];
141f15ca78eSOleg Nesterov 		/* preferred_node_policy is not initialised early in boot */
142f15ca78eSOleg Nesterov 		if (pol->mode)
143f15ca78eSOleg Nesterov 			return pol;
1441da6f0e1SJianguo Wu 	}
1455606e387SMel Gorman 
146f15ca78eSOleg Nesterov 	return &default_policy;
1475606e387SMel Gorman }
1485606e387SMel Gorman 
14937012946SDavid Rientjes static const struct mempolicy_operations {
15037012946SDavid Rientjes 	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
151213980c0SVlastimil Babka 	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
15237012946SDavid Rientjes } mpol_ops[MPOL_MAX];
15337012946SDavid Rientjes 
154f5b087b5SDavid Rientjes static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
155f5b087b5SDavid Rientjes {
1566d556294SBob Liu 	return pol->flags & MPOL_MODE_FLAGS;
1574c50bc01SDavid Rientjes }
1584c50bc01SDavid Rientjes 
1594c50bc01SDavid Rientjes static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
1604c50bc01SDavid Rientjes 				   const nodemask_t *rel)
1614c50bc01SDavid Rientjes {
1624c50bc01SDavid Rientjes 	nodemask_t tmp;
1634c50bc01SDavid Rientjes 	nodes_fold(tmp, *orig, nodes_weight(*rel));
1644c50bc01SDavid Rientjes 	nodes_onto(*ret, tmp, *rel);
165f5b087b5SDavid Rientjes }
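/*
 * Worked example (illustrative, not part of the original source): with
 * MPOL_F_RELATIVE_NODES the user's nodemask is interpreted as node
 * *positions* relative to the currently allowed set.  If @rel = {4,5,6}
 * (weight 3) and @orig = {0,2,8}, nodes_fold() reduces @orig modulo 3 to
 * {0,2}, and nodes_onto() maps position 0 to node 4 and position 2 to
 * node 6, giving @ret = {4,6}.
 */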
166f5b087b5SDavid Rientjes 
16737012946SDavid Rientjes static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
16837012946SDavid Rientjes {
16937012946SDavid Rientjes 	if (nodes_empty(*nodes))
17037012946SDavid Rientjes 		return -EINVAL;
17137012946SDavid Rientjes 	pol->v.nodes = *nodes;
17237012946SDavid Rientjes 	return 0;
17337012946SDavid Rientjes }
17437012946SDavid Rientjes 
17537012946SDavid Rientjes static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
17637012946SDavid Rientjes {
17737012946SDavid Rientjes 	if (!nodes)
178fc36b8d3SLee Schermerhorn 		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
17937012946SDavid Rientjes 	else if (nodes_empty(*nodes))
18037012946SDavid Rientjes 		return -EINVAL;			/*  no allowed nodes */
18137012946SDavid Rientjes 	else
18237012946SDavid Rientjes 		pol->v.preferred_node = first_node(*nodes);
18337012946SDavid Rientjes 	return 0;
18437012946SDavid Rientjes }
18537012946SDavid Rientjes 
18637012946SDavid Rientjes static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
18737012946SDavid Rientjes {
188859f7ef1SZhihui Zhang 	if (nodes_empty(*nodes))
18937012946SDavid Rientjes 		return -EINVAL;
19037012946SDavid Rientjes 	pol->v.nodes = *nodes;
19137012946SDavid Rientjes 	return 0;
19237012946SDavid Rientjes }
19337012946SDavid Rientjes 
19458568d2aSMiao Xie /*
19558568d2aSMiao Xie  * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
19658568d2aSMiao Xie  * any, for the new policy.  mpol_new() has already validated the nodes
19758568d2aSMiao Xie  * parameter with respect to the policy mode and flags.  But, we need to
19858568d2aSMiao Xie  * handle an empty nodemask with MPOL_PREFERRED here.
19958568d2aSMiao Xie  *
20058568d2aSMiao Xie  * Must be called holding task's alloc_lock to protect task's mems_allowed
20158568d2aSMiao Xie  * and mempolicy.  May also be called holding mmap_sem for write.
20258568d2aSMiao Xie  */
2034bfc4495SKAMEZAWA Hiroyuki static int mpol_set_nodemask(struct mempolicy *pol,
2044bfc4495SKAMEZAWA Hiroyuki 		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
20558568d2aSMiao Xie {
20658568d2aSMiao Xie 	int ret;
20758568d2aSMiao Xie 
20858568d2aSMiao Xie 	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
20958568d2aSMiao Xie 	if (pol == NULL)
21058568d2aSMiao Xie 		return 0;
21101f13bd6SLai Jiangshan 	/* Check N_MEMORY */
2124bfc4495SKAMEZAWA Hiroyuki 	nodes_and(nsc->mask1,
21301f13bd6SLai Jiangshan 		  cpuset_current_mems_allowed, node_states[N_MEMORY]);
21458568d2aSMiao Xie 
21558568d2aSMiao Xie 	VM_BUG_ON(!nodes);
21658568d2aSMiao Xie 	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
21758568d2aSMiao Xie 		nodes = NULL;	/* explicit local allocation */
21858568d2aSMiao Xie 	else {
21958568d2aSMiao Xie 		if (pol->flags & MPOL_F_RELATIVE_NODES)
2204bfc4495SKAMEZAWA Hiroyuki 			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
22158568d2aSMiao Xie 		else
2224bfc4495SKAMEZAWA Hiroyuki 			nodes_and(nsc->mask2, *nodes, nsc->mask1);
2234bfc4495SKAMEZAWA Hiroyuki 
22458568d2aSMiao Xie 		if (mpol_store_user_nodemask(pol))
22558568d2aSMiao Xie 			pol->w.user_nodemask = *nodes;
22658568d2aSMiao Xie 		else
22758568d2aSMiao Xie 			pol->w.cpuset_mems_allowed =
22858568d2aSMiao Xie 						cpuset_current_mems_allowed;
22958568d2aSMiao Xie 	}
23058568d2aSMiao Xie 
2314bfc4495SKAMEZAWA Hiroyuki 	if (nodes)
2324bfc4495SKAMEZAWA Hiroyuki 		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
2334bfc4495SKAMEZAWA Hiroyuki 	else
2344bfc4495SKAMEZAWA Hiroyuki 		ret = mpol_ops[pol->mode].create(pol, NULL);
23558568d2aSMiao Xie 	return ret;
23658568d2aSMiao Xie }
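/*
 * Caller sketch (for illustration; do_set_mempolicy() later in this file is
 * the real in-tree user): the two-step construction looks roughly like
 *
 *	NODEMASK_SCRATCH(scratch);
 *	struct mempolicy *new = mpol_new(mode, flags, nodes);
 *
 *	if (!IS_ERR(new)) {
 *		task_lock(current);
 *		err = mpol_set_nodemask(new, nodes, scratch);
 *		task_unlock(current);
 *	}
 *	NODEMASK_SCRATCH_FREE(scratch);
 *
 * mpol_new() validates mode/flags/nodes and allocates the policy, while
 * mpol_set_nodemask() resolves the nodemask against the cpuset context
 * under task_lock().
 */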
23758568d2aSMiao Xie 
23858568d2aSMiao Xie /*
23958568d2aSMiao Xie  * This function just creates a new policy, does some checks and simple
24058568d2aSMiao Xie  * initialization. You must invoke mpol_set_nodemask() to set nodes.
24158568d2aSMiao Xie  */
242028fec41SDavid Rientjes static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
243028fec41SDavid Rientjes 				  nodemask_t *nodes)
2441da177e4SLinus Torvalds {
2451da177e4SLinus Torvalds 	struct mempolicy *policy;
2461da177e4SLinus Torvalds 
247028fec41SDavid Rientjes 	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
24800ef2d2fSDavid Rientjes 		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
249140d5a49SPaul Mundt 
2503e1f0645SDavid Rientjes 	if (mode == MPOL_DEFAULT) {
2513e1f0645SDavid Rientjes 		if (nodes && !nodes_empty(*nodes))
25237012946SDavid Rientjes 			return ERR_PTR(-EINVAL);
253d3a71033SLee Schermerhorn 		return NULL;
25437012946SDavid Rientjes 	}
2553e1f0645SDavid Rientjes 	VM_BUG_ON(!nodes);
2563e1f0645SDavid Rientjes 
2573e1f0645SDavid Rientjes 	/*
2583e1f0645SDavid Rientjes 	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
2593e1f0645SDavid Rientjes 	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
2603e1f0645SDavid Rientjes 	 * All other modes require a valid pointer to a non-empty nodemask.
2613e1f0645SDavid Rientjes 	 */
2623e1f0645SDavid Rientjes 	if (mode == MPOL_PREFERRED) {
2633e1f0645SDavid Rientjes 		if (nodes_empty(*nodes)) {
2643e1f0645SDavid Rientjes 			if (((flags & MPOL_F_STATIC_NODES) ||
2653e1f0645SDavid Rientjes 			     (flags & MPOL_F_RELATIVE_NODES)))
2663e1f0645SDavid Rientjes 				return ERR_PTR(-EINVAL);
2673e1f0645SDavid Rientjes 		}
268479e2802SPeter Zijlstra 	} else if (mode == MPOL_LOCAL) {
2698d303e44SPiotr Kwapulinski 		if (!nodes_empty(*nodes) ||
2708d303e44SPiotr Kwapulinski 		    (flags & MPOL_F_STATIC_NODES) ||
2718d303e44SPiotr Kwapulinski 		    (flags & MPOL_F_RELATIVE_NODES))
272479e2802SPeter Zijlstra 			return ERR_PTR(-EINVAL);
273479e2802SPeter Zijlstra 		mode = MPOL_PREFERRED;
2743e1f0645SDavid Rientjes 	} else if (nodes_empty(*nodes))
2753e1f0645SDavid Rientjes 		return ERR_PTR(-EINVAL);
2761da177e4SLinus Torvalds 	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2771da177e4SLinus Torvalds 	if (!policy)
2781da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
2791da177e4SLinus Torvalds 	atomic_set(&policy->refcnt, 1);
28045c4745aSLee Schermerhorn 	policy->mode = mode;
28137012946SDavid Rientjes 	policy->flags = flags;
2823e1f0645SDavid Rientjes 
28337012946SDavid Rientjes 	return policy;
28437012946SDavid Rientjes }
28537012946SDavid Rientjes 
28652cd3b07SLee Schermerhorn /* Slow path of a mpol destructor. */
28752cd3b07SLee Schermerhorn void __mpol_put(struct mempolicy *p)
28852cd3b07SLee Schermerhorn {
28952cd3b07SLee Schermerhorn 	if (!atomic_dec_and_test(&p->refcnt))
29052cd3b07SLee Schermerhorn 		return;
29152cd3b07SLee Schermerhorn 	kmem_cache_free(policy_cache, p);
29252cd3b07SLee Schermerhorn }
29352cd3b07SLee Schermerhorn 
294213980c0SVlastimil Babka static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
29537012946SDavid Rientjes {
29637012946SDavid Rientjes }
29737012946SDavid Rientjes 
298213980c0SVlastimil Babka static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
2991d0d2680SDavid Rientjes {
3001d0d2680SDavid Rientjes 	nodemask_t tmp;
3011d0d2680SDavid Rientjes 
30237012946SDavid Rientjes 	if (pol->flags & MPOL_F_STATIC_NODES)
30337012946SDavid Rientjes 		nodes_and(tmp, pol->w.user_nodemask, *nodes);
30437012946SDavid Rientjes 	else if (pol->flags & MPOL_F_RELATIVE_NODES)
30537012946SDavid Rientjes 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
3061d0d2680SDavid Rientjes 	else {
307213980c0SVlastimil Babka 		nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed,
308213980c0SVlastimil Babka 								*nodes);
309213980c0SVlastimil Babka 		pol->w.cpuset_mems_allowed = tmp;
3101d0d2680SDavid Rientjes 	}
31137012946SDavid Rientjes 
312708c1bbcSMiao Xie 	if (nodes_empty(tmp))
313708c1bbcSMiao Xie 		tmp = *nodes;
314708c1bbcSMiao Xie 
3151d0d2680SDavid Rientjes 	pol->v.nodes = tmp;
31637012946SDavid Rientjes }
31737012946SDavid Rientjes 
31837012946SDavid Rientjes static void mpol_rebind_preferred(struct mempolicy *pol,
319213980c0SVlastimil Babka 						const nodemask_t *nodes)
32037012946SDavid Rientjes {
32137012946SDavid Rientjes 	nodemask_t tmp;
32237012946SDavid Rientjes 
32337012946SDavid Rientjes 	if (pol->flags & MPOL_F_STATIC_NODES) {
3241d0d2680SDavid Rientjes 		int node = first_node(pol->w.user_nodemask);
3251d0d2680SDavid Rientjes 
326fc36b8d3SLee Schermerhorn 		if (node_isset(node, *nodes)) {
3271d0d2680SDavid Rientjes 			pol->v.preferred_node = node;
328fc36b8d3SLee Schermerhorn 			pol->flags &= ~MPOL_F_LOCAL;
329fc36b8d3SLee Schermerhorn 		} else
330fc36b8d3SLee Schermerhorn 			pol->flags |= MPOL_F_LOCAL;
33137012946SDavid Rientjes 	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
33237012946SDavid Rientjes 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
3331d0d2680SDavid Rientjes 		pol->v.preferred_node = first_node(tmp);
334fc36b8d3SLee Schermerhorn 	} else if (!(pol->flags & MPOL_F_LOCAL)) {
3351d0d2680SDavid Rientjes 		pol->v.preferred_node = node_remap(pol->v.preferred_node,
33637012946SDavid Rientjes 						   pol->w.cpuset_mems_allowed,
33737012946SDavid Rientjes 						   *nodes);
33837012946SDavid Rientjes 		pol->w.cpuset_mems_allowed = *nodes;
3391d0d2680SDavid Rientjes 	}
3401d0d2680SDavid Rientjes }
34137012946SDavid Rientjes 
342708c1bbcSMiao Xie /*
343708c1bbcSMiao Xie  * mpol_rebind_policy - Migrate a policy to a different set of nodes
344708c1bbcSMiao Xie  *
345213980c0SVlastimil Babka  * Per-vma policies are protected by mmap_sem. Allocations using per-task
346213980c0SVlastimil Babka  * policies are protected by task->mems_allowed_seq to prevent a premature
347213980c0SVlastimil Babka  * OOM/allocation failure due to parallel nodemask modification.
348708c1bbcSMiao Xie  */
349213980c0SVlastimil Babka static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
35037012946SDavid Rientjes {
35137012946SDavid Rientjes 	if (!pol)
35237012946SDavid Rientjes 		return;
3532e25644eSVlastimil Babka 	if (!mpol_store_user_nodemask(pol) && !(pol->flags & MPOL_F_LOCAL) &&
35437012946SDavid Rientjes 	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
35537012946SDavid Rientjes 		return;
356708c1bbcSMiao Xie 
357213980c0SVlastimil Babka 	mpol_ops[pol->mode].rebind(pol, newmask);
3581d0d2680SDavid Rientjes }
3591d0d2680SDavid Rientjes 
3601d0d2680SDavid Rientjes /*
3611d0d2680SDavid Rientjes  * Wrapper for mpol_rebind_policy() that just requires task
3621d0d2680SDavid Rientjes  * pointer, and updates task mempolicy.
36358568d2aSMiao Xie  *
36458568d2aSMiao Xie  * Called with task's alloc_lock held.
3651d0d2680SDavid Rientjes  */
3661d0d2680SDavid Rientjes 
367213980c0SVlastimil Babka void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
3681d0d2680SDavid Rientjes {
369213980c0SVlastimil Babka 	mpol_rebind_policy(tsk->mempolicy, new);
3701d0d2680SDavid Rientjes }
3711d0d2680SDavid Rientjes 
3721d0d2680SDavid Rientjes /*
3731d0d2680SDavid Rientjes  * Rebind each vma in mm to new nodemask.
3741d0d2680SDavid Rientjes  *
3751d0d2680SDavid Rientjes  * Call holding a reference to mm.  Takes mm->mmap_sem during call.
3761d0d2680SDavid Rientjes  */
3771d0d2680SDavid Rientjes 
3781d0d2680SDavid Rientjes void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
3791d0d2680SDavid Rientjes {
3801d0d2680SDavid Rientjes 	struct vm_area_struct *vma;
3811d0d2680SDavid Rientjes 
3821d0d2680SDavid Rientjes 	down_write(&mm->mmap_sem);
3831d0d2680SDavid Rientjes 	for (vma = mm->mmap; vma; vma = vma->vm_next)
384213980c0SVlastimil Babka 		mpol_rebind_policy(vma->vm_policy, new);
3851d0d2680SDavid Rientjes 	up_write(&mm->mmap_sem);
3861d0d2680SDavid Rientjes }
3871d0d2680SDavid Rientjes 
38837012946SDavid Rientjes static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
38937012946SDavid Rientjes 	[MPOL_DEFAULT] = {
39037012946SDavid Rientjes 		.rebind = mpol_rebind_default,
39137012946SDavid Rientjes 	},
39237012946SDavid Rientjes 	[MPOL_INTERLEAVE] = {
39337012946SDavid Rientjes 		.create = mpol_new_interleave,
39437012946SDavid Rientjes 		.rebind = mpol_rebind_nodemask,
39537012946SDavid Rientjes 	},
39637012946SDavid Rientjes 	[MPOL_PREFERRED] = {
39737012946SDavid Rientjes 		.create = mpol_new_preferred,
39837012946SDavid Rientjes 		.rebind = mpol_rebind_preferred,
39937012946SDavid Rientjes 	},
40037012946SDavid Rientjes 	[MPOL_BIND] = {
40137012946SDavid Rientjes 		.create = mpol_new_bind,
40237012946SDavid Rientjes 		.rebind = mpol_rebind_nodemask,
40337012946SDavid Rientjes 	},
40437012946SDavid Rientjes };
40537012946SDavid Rientjes 
406fc301289SChristoph Lameter static void migrate_page_add(struct page *page, struct list_head *pagelist,
407fc301289SChristoph Lameter 				unsigned long flags);
4081a75a6c8SChristoph Lameter 
4096f4576e3SNaoya Horiguchi struct queue_pages {
4106f4576e3SNaoya Horiguchi 	struct list_head *pagelist;
4116f4576e3SNaoya Horiguchi 	unsigned long flags;
4126f4576e3SNaoya Horiguchi 	nodemask_t *nmask;
4136f4576e3SNaoya Horiguchi 	struct vm_area_struct *prev;
4146f4576e3SNaoya Horiguchi };
4156f4576e3SNaoya Horiguchi 
41698094945SNaoya Horiguchi /*
41788aaa2a1SNaoya Horiguchi  * Check if the page's nid is in qp->nmask.
41888aaa2a1SNaoya Horiguchi  *
41988aaa2a1SNaoya Horiguchi  * If MPOL_MF_INVERT is set in qp->flags, the check is inverted: the nid
42088aaa2a1SNaoya Horiguchi  * must *not* be in qp->nmask.
42188aaa2a1SNaoya Horiguchi  */
42288aaa2a1SNaoya Horiguchi static inline bool queue_pages_required(struct page *page,
42388aaa2a1SNaoya Horiguchi 					struct queue_pages *qp)
42488aaa2a1SNaoya Horiguchi {
42588aaa2a1SNaoya Horiguchi 	int nid = page_to_nid(page);
42688aaa2a1SNaoya Horiguchi 	unsigned long flags = qp->flags;
42788aaa2a1SNaoya Horiguchi 
42888aaa2a1SNaoya Horiguchi 	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
42988aaa2a1SNaoya Horiguchi }
43088aaa2a1SNaoya Horiguchi 
431a7f40cfeSYang Shi /*
432a7f40cfeSYang Shi  * queue_pages_pmd() has three possible return values:
433a7f40cfeSYang Shi  * 1 - pages are placed on the right node or queued successfully.
434a7f40cfeSYang Shi  * 0 - THP was split.
435a7f40cfeSYang Shi  * -EIO - the PMD is a migration entry, or MPOL_MF_STRICT was specified and
436a7f40cfeSYang Shi  *        an existing page was already on a node that does not follow the policy.
437a7f40cfeSYang Shi  */
438c8633798SNaoya Horiguchi static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
439c8633798SNaoya Horiguchi 				unsigned long end, struct mm_walk *walk)
440c8633798SNaoya Horiguchi {
441c8633798SNaoya Horiguchi 	int ret = 0;
442c8633798SNaoya Horiguchi 	struct page *page;
443c8633798SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
444c8633798SNaoya Horiguchi 	unsigned long flags;
445c8633798SNaoya Horiguchi 
446c8633798SNaoya Horiguchi 	if (unlikely(is_pmd_migration_entry(*pmd))) {
447a7f40cfeSYang Shi 		ret = -EIO;
448c8633798SNaoya Horiguchi 		goto unlock;
449c8633798SNaoya Horiguchi 	}
450c8633798SNaoya Horiguchi 	page = pmd_page(*pmd);
451c8633798SNaoya Horiguchi 	if (is_huge_zero_page(page)) {
452c8633798SNaoya Horiguchi 		spin_unlock(ptl);
453c8633798SNaoya Horiguchi 		__split_huge_pmd(walk->vma, pmd, addr, false, NULL);
454c8633798SNaoya Horiguchi 		goto out;
455c8633798SNaoya Horiguchi 	}
456c8633798SNaoya Horiguchi 	if (!queue_pages_required(page, qp)) {
457c8633798SNaoya Horiguchi 		ret = 1;
458c8633798SNaoya Horiguchi 		goto unlock;
459c8633798SNaoya Horiguchi 	}
460c8633798SNaoya Horiguchi 
461c8633798SNaoya Horiguchi 	ret = 1;
462c8633798SNaoya Horiguchi 	flags = qp->flags;
463c8633798SNaoya Horiguchi 	/* go to thp migration */
464a7f40cfeSYang Shi 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
465a7f40cfeSYang Shi 		if (!vma_migratable(walk->vma)) {
466a7f40cfeSYang Shi 			ret = -EIO;
467a7f40cfeSYang Shi 			goto unlock;
468a7f40cfeSYang Shi 		}
469a7f40cfeSYang Shi 
470c8633798SNaoya Horiguchi 		migrate_page_add(page, qp->pagelist, flags);
471a7f40cfeSYang Shi 	} else
472a7f40cfeSYang Shi 		ret = -EIO;
473c8633798SNaoya Horiguchi unlock:
474c8633798SNaoya Horiguchi 	spin_unlock(ptl);
475c8633798SNaoya Horiguchi out:
476c8633798SNaoya Horiguchi 	return ret;
477c8633798SNaoya Horiguchi }
478c8633798SNaoya Horiguchi 
47988aaa2a1SNaoya Horiguchi /*
48098094945SNaoya Horiguchi  * Scan through the pages, checking whether they satisfy the given conditions
48198094945SNaoya Horiguchi  * (nodemask and flags), and move them to the pagelist if they do.
48298094945SNaoya Horiguchi  */
4836f4576e3SNaoya Horiguchi static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
4846f4576e3SNaoya Horiguchi 			unsigned long end, struct mm_walk *walk)
4851da177e4SLinus Torvalds {
4866f4576e3SNaoya Horiguchi 	struct vm_area_struct *vma = walk->vma;
4876f4576e3SNaoya Horiguchi 	struct page *page;
4886f4576e3SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
4896f4576e3SNaoya Horiguchi 	unsigned long flags = qp->flags;
490c8633798SNaoya Horiguchi 	int ret;
49191612e0dSHugh Dickins 	pte_t *pte;
492705e87c0SHugh Dickins 	spinlock_t *ptl;
493941150a3SHugh Dickins 
494c8633798SNaoya Horiguchi 	ptl = pmd_trans_huge_lock(pmd, vma);
495c8633798SNaoya Horiguchi 	if (ptl) {
496c8633798SNaoya Horiguchi 		ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
497a7f40cfeSYang Shi 		if (ret > 0)
4986f4576e3SNaoya Horiguchi 			return 0;
499a7f40cfeSYang Shi 		else if (ret < 0)
500a7f40cfeSYang Shi 			return ret;
501248db92dSKirill A. Shutemov 	}
50291612e0dSHugh Dickins 
503337d9abfSNaoya Horiguchi 	if (pmd_trans_unstable(pmd))
504337d9abfSNaoya Horiguchi 		return 0;
50594723aafSMichal Hocko 
5066f4576e3SNaoya Horiguchi 	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
5076f4576e3SNaoya Horiguchi 	for (; addr != end; pte++, addr += PAGE_SIZE) {
50891612e0dSHugh Dickins 		if (!pte_present(*pte))
50991612e0dSHugh Dickins 			continue;
5106aab341eSLinus Torvalds 		page = vm_normal_page(vma, addr, *pte);
5116aab341eSLinus Torvalds 		if (!page)
51291612e0dSHugh Dickins 			continue;
513053837fcSNick Piggin 		/*
51462b61f61SHugh Dickins 		 * vm_normal_page() filters out zero pages, but there might
51562b61f61SHugh Dickins 		 * still be PageReserved pages to skip, perhaps in a VDSO.
516053837fcSNick Piggin 		 */
517b79bc0a0SHugh Dickins 		if (PageReserved(page))
518f4598c8bSChristoph Lameter 			continue;
51988aaa2a1SNaoya Horiguchi 		if (!queue_pages_required(page, qp))
52038e35860SChristoph Lameter 			continue;
521a7f40cfeSYang Shi 		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
522a7f40cfeSYang Shi 			if (!vma_migratable(vma))
523a7f40cfeSYang Shi 				break;
5246f4576e3SNaoya Horiguchi 			migrate_page_add(page, qp->pagelist, flags);
525a7f40cfeSYang Shi 		} else
526a7f40cfeSYang Shi 			break;
5276f4576e3SNaoya Horiguchi 	}
5286f4576e3SNaoya Horiguchi 	pte_unmap_unlock(pte - 1, ptl);
5296f4576e3SNaoya Horiguchi 	cond_resched();
530a7f40cfeSYang Shi 	return addr != end ? -EIO : 0;
53191612e0dSHugh Dickins }
53291612e0dSHugh Dickins 
5336f4576e3SNaoya Horiguchi static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
5346f4576e3SNaoya Horiguchi 			       unsigned long addr, unsigned long end,
5356f4576e3SNaoya Horiguchi 			       struct mm_walk *walk)
536e2d8cf40SNaoya Horiguchi {
537e2d8cf40SNaoya Horiguchi #ifdef CONFIG_HUGETLB_PAGE
5386f4576e3SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
5396f4576e3SNaoya Horiguchi 	unsigned long flags = qp->flags;
540e2d8cf40SNaoya Horiguchi 	struct page *page;
541cb900f41SKirill A. Shutemov 	spinlock_t *ptl;
542d4c54919SNaoya Horiguchi 	pte_t entry;
543e2d8cf40SNaoya Horiguchi 
5446f4576e3SNaoya Horiguchi 	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
5456f4576e3SNaoya Horiguchi 	entry = huge_ptep_get(pte);
546d4c54919SNaoya Horiguchi 	if (!pte_present(entry))
547d4c54919SNaoya Horiguchi 		goto unlock;
548d4c54919SNaoya Horiguchi 	page = pte_page(entry);
54988aaa2a1SNaoya Horiguchi 	if (!queue_pages_required(page, qp))
550e2d8cf40SNaoya Horiguchi 		goto unlock;
551e2d8cf40SNaoya Horiguchi 	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
552e2d8cf40SNaoya Horiguchi 	if (flags & (MPOL_MF_MOVE_ALL) ||
553e2d8cf40SNaoya Horiguchi 	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
5546f4576e3SNaoya Horiguchi 		isolate_huge_page(page, qp->pagelist);
555e2d8cf40SNaoya Horiguchi unlock:
556cb900f41SKirill A. Shutemov 	spin_unlock(ptl);
557e2d8cf40SNaoya Horiguchi #else
558e2d8cf40SNaoya Horiguchi 	BUG();
559e2d8cf40SNaoya Horiguchi #endif
56091612e0dSHugh Dickins 	return 0;
5611da177e4SLinus Torvalds }
5621da177e4SLinus Torvalds 
5635877231fSAneesh Kumar K.V #ifdef CONFIG_NUMA_BALANCING
564b24f53a0SLee Schermerhorn /*
5654b10e7d5SMel Gorman  * This is used to mark a range of virtual addresses as inaccessible.
5664b10e7d5SMel Gorman  * These are later cleared by a NUMA hinting fault. Depending on these
5674b10e7d5SMel Gorman  * faults, pages may be migrated for better NUMA placement.
5684b10e7d5SMel Gorman  *
5694b10e7d5SMel Gorman  * This is assuming that NUMA faults are handled using PROT_NONE. If
5704b10e7d5SMel Gorman  * an architecture makes a different choice, it will need further
5714b10e7d5SMel Gorman  * changes to the core.
572b24f53a0SLee Schermerhorn  */
5734b10e7d5SMel Gorman unsigned long change_prot_numa(struct vm_area_struct *vma,
5744b10e7d5SMel Gorman 			unsigned long addr, unsigned long end)
575b24f53a0SLee Schermerhorn {
5764b10e7d5SMel Gorman 	int nr_updated;
577b24f53a0SLee Schermerhorn 
5784d942466SMel Gorman 	nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
57903c5a6e1SMel Gorman 	if (nr_updated)
58003c5a6e1SMel Gorman 		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
581b24f53a0SLee Schermerhorn 
5824b10e7d5SMel Gorman 	return nr_updated;
583b24f53a0SLee Schermerhorn }
584b24f53a0SLee Schermerhorn #else
585b24f53a0SLee Schermerhorn static unsigned long change_prot_numa(struct vm_area_struct *vma,
586b24f53a0SLee Schermerhorn 			unsigned long addr, unsigned long end)
587b24f53a0SLee Schermerhorn {
588b24f53a0SLee Schermerhorn 	return 0;
589b24f53a0SLee Schermerhorn }
5905877231fSAneesh Kumar K.V #endif /* CONFIG_NUMA_BALANCING */
591b24f53a0SLee Schermerhorn 
5926f4576e3SNaoya Horiguchi static int queue_pages_test_walk(unsigned long start, unsigned long end,
5936f4576e3SNaoya Horiguchi 				struct mm_walk *walk)
5941da177e4SLinus Torvalds {
5956f4576e3SNaoya Horiguchi 	struct vm_area_struct *vma = walk->vma;
5966f4576e3SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
5975b952b3cSAndi Kleen 	unsigned long endvma = vma->vm_end;
5986f4576e3SNaoya Horiguchi 	unsigned long flags = qp->flags;
599dc9aa5b9SChristoph Lameter 
600a7f40cfeSYang Shi 	/*
601a7f40cfeSYang Shi 	 * We need to check MPOL_MF_STRICT so that -EIO can be returned
602a7f40cfeSYang Shi 	 * regardless of whether the vma is migratable.
603a7f40cfeSYang Shi 	 */
604a7f40cfeSYang Shi 	if (!vma_migratable(vma) &&
605a7f40cfeSYang Shi 	    !(flags & MPOL_MF_STRICT))
60648684a65SNaoya Horiguchi 		return 1;
60748684a65SNaoya Horiguchi 
6085b952b3cSAndi Kleen 	if (endvma > end)
6095b952b3cSAndi Kleen 		endvma = end;
6105b952b3cSAndi Kleen 	if (vma->vm_start > start)
6115b952b3cSAndi Kleen 		start = vma->vm_start;
612b24f53a0SLee Schermerhorn 
613b24f53a0SLee Schermerhorn 	if (!(flags & MPOL_MF_DISCONTIG_OK)) {
614b24f53a0SLee Schermerhorn 		if (!vma->vm_next && vma->vm_end < end)
615d05f0cdcSHugh Dickins 			return -EFAULT;
6166f4576e3SNaoya Horiguchi 		if (qp->prev && qp->prev->vm_end < vma->vm_start)
617d05f0cdcSHugh Dickins 			return -EFAULT;
618b24f53a0SLee Schermerhorn 	}
619b24f53a0SLee Schermerhorn 
6206f4576e3SNaoya Horiguchi 	qp->prev = vma;
6216f4576e3SNaoya Horiguchi 
622b24f53a0SLee Schermerhorn 	if (flags & MPOL_MF_LAZY) {
6232c0346a3SMel Gorman 		/* Similar to task_numa_work, skip inaccessible VMAs */
6244355c018SLiang Chen 		if (!is_vm_hugetlb_page(vma) &&
6254355c018SLiang Chen 			(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) &&
6264355c018SLiang Chen 			!(vma->vm_flags & VM_MIXEDMAP))
627b24f53a0SLee Schermerhorn 			change_prot_numa(vma, start, endvma);
6286f4576e3SNaoya Horiguchi 		return 1;
629b24f53a0SLee Schermerhorn 	}
630b24f53a0SLee Schermerhorn 
6316f4576e3SNaoya Horiguchi 	/* queue pages from current vma */
632a7f40cfeSYang Shi 	if (flags & MPOL_MF_VALID)
6336f4576e3SNaoya Horiguchi 		return 0;
6346f4576e3SNaoya Horiguchi 	return 1;
6356f4576e3SNaoya Horiguchi }
636b24f53a0SLee Schermerhorn 
6376f4576e3SNaoya Horiguchi /*
6386f4576e3SNaoya Horiguchi  * Walk through page tables and collect pages to be migrated.
6396f4576e3SNaoya Horiguchi  *
6406f4576e3SNaoya Horiguchi  * If pages found in a given range are on a set of nodes (determined by
6416f4576e3SNaoya Horiguchi  * @nodes and @flags), they are isolated and queued to the pagelist which is
6426f4576e3SNaoya Horiguchi  * passed via @private.
6436f4576e3SNaoya Horiguchi  */
6446f4576e3SNaoya Horiguchi static int
6456f4576e3SNaoya Horiguchi queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
6466f4576e3SNaoya Horiguchi 		nodemask_t *nodes, unsigned long flags,
6476f4576e3SNaoya Horiguchi 		struct list_head *pagelist)
6486f4576e3SNaoya Horiguchi {
6496f4576e3SNaoya Horiguchi 	struct queue_pages qp = {
6506f4576e3SNaoya Horiguchi 		.pagelist = pagelist,
6516f4576e3SNaoya Horiguchi 		.flags = flags,
6526f4576e3SNaoya Horiguchi 		.nmask = nodes,
6536f4576e3SNaoya Horiguchi 		.prev = NULL,
6546f4576e3SNaoya Horiguchi 	};
6556f4576e3SNaoya Horiguchi 	struct mm_walk queue_pages_walk = {
6566f4576e3SNaoya Horiguchi 		.hugetlb_entry = queue_pages_hugetlb,
6576f4576e3SNaoya Horiguchi 		.pmd_entry = queue_pages_pte_range,
6586f4576e3SNaoya Horiguchi 		.test_walk = queue_pages_test_walk,
6596f4576e3SNaoya Horiguchi 		.mm = mm,
6606f4576e3SNaoya Horiguchi 		.private = &qp,
6616f4576e3SNaoya Horiguchi 	};
6626f4576e3SNaoya Horiguchi 
6636f4576e3SNaoya Horiguchi 	return walk_page_range(start, end, &queue_pages_walk);
6641da177e4SLinus Torvalds }
6651da177e4SLinus Torvalds 
666869833f2SKOSAKI Motohiro /*
667869833f2SKOSAKI Motohiro  * Apply policy to a single VMA
668869833f2SKOSAKI Motohiro  * This must be called with the mmap_sem held for writing.
669869833f2SKOSAKI Motohiro  */
670869833f2SKOSAKI Motohiro static int vma_replace_policy(struct vm_area_struct *vma,
671869833f2SKOSAKI Motohiro 						struct mempolicy *pol)
6728d34694cSKOSAKI Motohiro {
673869833f2SKOSAKI Motohiro 	int err;
674869833f2SKOSAKI Motohiro 	struct mempolicy *old;
675869833f2SKOSAKI Motohiro 	struct mempolicy *new;
6768d34694cSKOSAKI Motohiro 
6778d34694cSKOSAKI Motohiro 	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
6788d34694cSKOSAKI Motohiro 		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
6798d34694cSKOSAKI Motohiro 		 vma->vm_ops, vma->vm_file,
6808d34694cSKOSAKI Motohiro 		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
6818d34694cSKOSAKI Motohiro 
682869833f2SKOSAKI Motohiro 	new = mpol_dup(pol);
683869833f2SKOSAKI Motohiro 	if (IS_ERR(new))
684869833f2SKOSAKI Motohiro 		return PTR_ERR(new);
685869833f2SKOSAKI Motohiro 
686869833f2SKOSAKI Motohiro 	if (vma->vm_ops && vma->vm_ops->set_policy) {
6878d34694cSKOSAKI Motohiro 		err = vma->vm_ops->set_policy(vma, new);
688869833f2SKOSAKI Motohiro 		if (err)
689869833f2SKOSAKI Motohiro 			goto err_out;
6908d34694cSKOSAKI Motohiro 	}
691869833f2SKOSAKI Motohiro 
692869833f2SKOSAKI Motohiro 	old = vma->vm_policy;
693869833f2SKOSAKI Motohiro 	vma->vm_policy = new; /* protected by mmap_sem */
694869833f2SKOSAKI Motohiro 	mpol_put(old);
695869833f2SKOSAKI Motohiro 
696869833f2SKOSAKI Motohiro 	return 0;
697869833f2SKOSAKI Motohiro  err_out:
698869833f2SKOSAKI Motohiro 	mpol_put(new);
6998d34694cSKOSAKI Motohiro 	return err;
7008d34694cSKOSAKI Motohiro }
7018d34694cSKOSAKI Motohiro 
7021da177e4SLinus Torvalds /* Step 2: apply policy to a range and do splits. */
7039d8cebd4SKOSAKI Motohiro static int mbind_range(struct mm_struct *mm, unsigned long start,
7049d8cebd4SKOSAKI Motohiro 		       unsigned long end, struct mempolicy *new_pol)
7051da177e4SLinus Torvalds {
7061da177e4SLinus Torvalds 	struct vm_area_struct *next;
7079d8cebd4SKOSAKI Motohiro 	struct vm_area_struct *prev;
7089d8cebd4SKOSAKI Motohiro 	struct vm_area_struct *vma;
7099d8cebd4SKOSAKI Motohiro 	int err = 0;
710e26a5114SKOSAKI Motohiro 	pgoff_t pgoff;
7119d8cebd4SKOSAKI Motohiro 	unsigned long vmstart;
7129d8cebd4SKOSAKI Motohiro 	unsigned long vmend;
7131da177e4SLinus Torvalds 
714097d5910SLinus Torvalds 	vma = find_vma(mm, start);
7159d8cebd4SKOSAKI Motohiro 	if (!vma || vma->vm_start > start)
7169d8cebd4SKOSAKI Motohiro 		return -EFAULT;
7179d8cebd4SKOSAKI Motohiro 
718097d5910SLinus Torvalds 	prev = vma->vm_prev;
719e26a5114SKOSAKI Motohiro 	if (start > vma->vm_start)
720e26a5114SKOSAKI Motohiro 		prev = vma;
721e26a5114SKOSAKI Motohiro 
7229d8cebd4SKOSAKI Motohiro 	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
7231da177e4SLinus Torvalds 		next = vma->vm_next;
7249d8cebd4SKOSAKI Motohiro 		vmstart = max(start, vma->vm_start);
7259d8cebd4SKOSAKI Motohiro 		vmend   = min(end, vma->vm_end);
7269d8cebd4SKOSAKI Motohiro 
727e26a5114SKOSAKI Motohiro 		if (mpol_equal(vma_policy(vma), new_pol))
728e26a5114SKOSAKI Motohiro 			continue;
729e26a5114SKOSAKI Motohiro 
730e26a5114SKOSAKI Motohiro 		pgoff = vma->vm_pgoff +
731e26a5114SKOSAKI Motohiro 			((vmstart - vma->vm_start) >> PAGE_SHIFT);
7329d8cebd4SKOSAKI Motohiro 		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
733e26a5114SKOSAKI Motohiro 				 vma->anon_vma, vma->vm_file, pgoff,
73419a809afSAndrea Arcangeli 				 new_pol, vma->vm_userfaultfd_ctx);
7359d8cebd4SKOSAKI Motohiro 		if (prev) {
7369d8cebd4SKOSAKI Motohiro 			vma = prev;
7379d8cebd4SKOSAKI Motohiro 			next = vma->vm_next;
7383964acd0SOleg Nesterov 			if (mpol_equal(vma_policy(vma), new_pol))
7399d8cebd4SKOSAKI Motohiro 				continue;
7403964acd0SOleg Nesterov 			/* vma_merge() joined vma && vma->next, case 8 */
7413964acd0SOleg Nesterov 			goto replace;
7421da177e4SLinus Torvalds 		}
7439d8cebd4SKOSAKI Motohiro 		if (vma->vm_start != vmstart) {
7449d8cebd4SKOSAKI Motohiro 			err = split_vma(vma->vm_mm, vma, vmstart, 1);
7459d8cebd4SKOSAKI Motohiro 			if (err)
7469d8cebd4SKOSAKI Motohiro 				goto out;
7479d8cebd4SKOSAKI Motohiro 		}
7489d8cebd4SKOSAKI Motohiro 		if (vma->vm_end != vmend) {
7499d8cebd4SKOSAKI Motohiro 			err = split_vma(vma->vm_mm, vma, vmend, 0);
7509d8cebd4SKOSAKI Motohiro 			if (err)
7519d8cebd4SKOSAKI Motohiro 				goto out;
7529d8cebd4SKOSAKI Motohiro 		}
7533964acd0SOleg Nesterov  replace:
754869833f2SKOSAKI Motohiro 		err = vma_replace_policy(vma, new_pol);
7559d8cebd4SKOSAKI Motohiro 		if (err)
7569d8cebd4SKOSAKI Motohiro 			goto out;
7579d8cebd4SKOSAKI Motohiro 	}
7589d8cebd4SKOSAKI Motohiro 
7599d8cebd4SKOSAKI Motohiro  out:
7601da177e4SLinus Torvalds 	return err;
7611da177e4SLinus Torvalds }
7621da177e4SLinus Torvalds 
7631da177e4SLinus Torvalds /* Set the process memory policy */
764028fec41SDavid Rientjes static long do_set_mempolicy(unsigned short mode, unsigned short flags,
765028fec41SDavid Rientjes 			     nodemask_t *nodes)
7661da177e4SLinus Torvalds {
76758568d2aSMiao Xie 	struct mempolicy *new, *old;
7684bfc4495SKAMEZAWA Hiroyuki 	NODEMASK_SCRATCH(scratch);
76958568d2aSMiao Xie 	int ret;
7701da177e4SLinus Torvalds 
7714bfc4495SKAMEZAWA Hiroyuki 	if (!scratch)
7724bfc4495SKAMEZAWA Hiroyuki 		return -ENOMEM;
773f4e53d91SLee Schermerhorn 
7744bfc4495SKAMEZAWA Hiroyuki 	new = mpol_new(mode, flags, nodes);
7754bfc4495SKAMEZAWA Hiroyuki 	if (IS_ERR(new)) {
7764bfc4495SKAMEZAWA Hiroyuki 		ret = PTR_ERR(new);
7774bfc4495SKAMEZAWA Hiroyuki 		goto out;
7784bfc4495SKAMEZAWA Hiroyuki 	}
7792c7c3a7dSOleg Nesterov 
78058568d2aSMiao Xie 	task_lock(current);
7814bfc4495SKAMEZAWA Hiroyuki 	ret = mpol_set_nodemask(new, nodes, scratch);
78258568d2aSMiao Xie 	if (ret) {
78358568d2aSMiao Xie 		task_unlock(current);
78458568d2aSMiao Xie 		mpol_put(new);
7854bfc4495SKAMEZAWA Hiroyuki 		goto out;
78658568d2aSMiao Xie 	}
78758568d2aSMiao Xie 	old = current->mempolicy;
7881da177e4SLinus Torvalds 	current->mempolicy = new;
78945816682SVlastimil Babka 	if (new && new->mode == MPOL_INTERLEAVE)
79045816682SVlastimil Babka 		current->il_prev = MAX_NUMNODES-1;
79158568d2aSMiao Xie 	task_unlock(current);
79258568d2aSMiao Xie 	mpol_put(old);
7934bfc4495SKAMEZAWA Hiroyuki 	ret = 0;
7944bfc4495SKAMEZAWA Hiroyuki out:
7954bfc4495SKAMEZAWA Hiroyuki 	NODEMASK_SCRATCH_FREE(scratch);
7964bfc4495SKAMEZAWA Hiroyuki 	return ret;
7971da177e4SLinus Torvalds }
7981da177e4SLinus Torvalds 
799bea904d5SLee Schermerhorn /*
800bea904d5SLee Schermerhorn  * Return nodemask for policy for get_mempolicy() query
80158568d2aSMiao Xie  *
80258568d2aSMiao Xie  * Called with task's alloc_lock held
803bea904d5SLee Schermerhorn  */
804bea904d5SLee Schermerhorn static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
8051da177e4SLinus Torvalds {
806dfcd3c0dSAndi Kleen 	nodes_clear(*nodes);
807bea904d5SLee Schermerhorn 	if (p == &default_policy)
808bea904d5SLee Schermerhorn 		return;
809bea904d5SLee Schermerhorn 
81045c4745aSLee Schermerhorn 	switch (p->mode) {
81119770b32SMel Gorman 	case MPOL_BIND:
81219770b32SMel Gorman 		/* Fall through */
8131da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
814dfcd3c0dSAndi Kleen 		*nodes = p->v.nodes;
8151da177e4SLinus Torvalds 		break;
8161da177e4SLinus Torvalds 	case MPOL_PREFERRED:
817fc36b8d3SLee Schermerhorn 		if (!(p->flags & MPOL_F_LOCAL))
818dfcd3c0dSAndi Kleen 			node_set(p->v.preferred_node, *nodes);
81953f2556bSLee Schermerhorn 		/* else return empty node mask for local allocation */
8201da177e4SLinus Torvalds 		break;
8211da177e4SLinus Torvalds 	default:
8221da177e4SLinus Torvalds 		BUG();
8231da177e4SLinus Torvalds 	}
8241da177e4SLinus Torvalds }
8251da177e4SLinus Torvalds 
8263b9aadf7SAndrea Arcangeli static int lookup_node(struct mm_struct *mm, unsigned long addr)
8271da177e4SLinus Torvalds {
8281da177e4SLinus Torvalds 	struct page *p;
8291da177e4SLinus Torvalds 	int err;
8301da177e4SLinus Torvalds 
8313b9aadf7SAndrea Arcangeli 	int locked = 1;
8323b9aadf7SAndrea Arcangeli 	err = get_user_pages_locked(addr & PAGE_MASK, 1, 0, &p, &locked);
8331da177e4SLinus Torvalds 	if (err >= 0) {
8341da177e4SLinus Torvalds 		err = page_to_nid(p);
8351da177e4SLinus Torvalds 		put_page(p);
8361da177e4SLinus Torvalds 	}
8373b9aadf7SAndrea Arcangeli 	if (locked)
8383b9aadf7SAndrea Arcangeli 		up_read(&mm->mmap_sem);
8391da177e4SLinus Torvalds 	return err;
8401da177e4SLinus Torvalds }
8411da177e4SLinus Torvalds 
8421da177e4SLinus Torvalds /* Retrieve NUMA policy */
843dbcb0f19SAdrian Bunk static long do_get_mempolicy(int *policy, nodemask_t *nmask,
8441da177e4SLinus Torvalds 			     unsigned long addr, unsigned long flags)
8451da177e4SLinus Torvalds {
8468bccd85fSChristoph Lameter 	int err;
8471da177e4SLinus Torvalds 	struct mm_struct *mm = current->mm;
8481da177e4SLinus Torvalds 	struct vm_area_struct *vma = NULL;
8493b9aadf7SAndrea Arcangeli 	struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;
8501da177e4SLinus Torvalds 
851754af6f5SLee Schermerhorn 	if (flags &
852754af6f5SLee Schermerhorn 		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
8531da177e4SLinus Torvalds 		return -EINVAL;
854754af6f5SLee Schermerhorn 
855754af6f5SLee Schermerhorn 	if (flags & MPOL_F_MEMS_ALLOWED) {
856754af6f5SLee Schermerhorn 		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
857754af6f5SLee Schermerhorn 			return -EINVAL;
858754af6f5SLee Schermerhorn 		*policy = 0;	/* just so it's initialized */
85958568d2aSMiao Xie 		task_lock(current);
860754af6f5SLee Schermerhorn 		*nmask  = cpuset_current_mems_allowed;
86158568d2aSMiao Xie 		task_unlock(current);
862754af6f5SLee Schermerhorn 		return 0;
863754af6f5SLee Schermerhorn 	}
864754af6f5SLee Schermerhorn 
8651da177e4SLinus Torvalds 	if (flags & MPOL_F_ADDR) {
866bea904d5SLee Schermerhorn 		/*
867bea904d5SLee Schermerhorn 		 * Do NOT fall back to task policy if the
868bea904d5SLee Schermerhorn 		 * vma/shared policy at addr is NULL.  We
869bea904d5SLee Schermerhorn 		 * want to return MPOL_DEFAULT in this case.
870bea904d5SLee Schermerhorn 		 */
8711da177e4SLinus Torvalds 		down_read(&mm->mmap_sem);
8721da177e4SLinus Torvalds 		vma = find_vma_intersection(mm, addr, addr+1);
8731da177e4SLinus Torvalds 		if (!vma) {
8741da177e4SLinus Torvalds 			up_read(&mm->mmap_sem);
8751da177e4SLinus Torvalds 			return -EFAULT;
8761da177e4SLinus Torvalds 		}
8771da177e4SLinus Torvalds 		if (vma->vm_ops && vma->vm_ops->get_policy)
8781da177e4SLinus Torvalds 			pol = vma->vm_ops->get_policy(vma, addr);
8791da177e4SLinus Torvalds 		else
8801da177e4SLinus Torvalds 			pol = vma->vm_policy;
8811da177e4SLinus Torvalds 	} else if (addr)
8821da177e4SLinus Torvalds 		return -EINVAL;
8831da177e4SLinus Torvalds 
8841da177e4SLinus Torvalds 	if (!pol)
885bea904d5SLee Schermerhorn 		pol = &default_policy;	/* indicates default behavior */
8861da177e4SLinus Torvalds 
8871da177e4SLinus Torvalds 	if (flags & MPOL_F_NODE) {
8881da177e4SLinus Torvalds 		if (flags & MPOL_F_ADDR) {
8893b9aadf7SAndrea Arcangeli 			/*
8903b9aadf7SAndrea Arcangeli 			 * Take a refcount on the mpol, lookup_node()
8913b9aadf7SAndrea Arcangeli 			 * will drop the mmap_sem, so after calling
8923b9aadf7SAndrea Arcangeli 			 * lookup_node() only "pol" remains valid, "vma"
8933b9aadf7SAndrea Arcangeli 			 * is stale.
8943b9aadf7SAndrea Arcangeli 			 */
8953b9aadf7SAndrea Arcangeli 			pol_refcount = pol;
8963b9aadf7SAndrea Arcangeli 			vma = NULL;
8973b9aadf7SAndrea Arcangeli 			mpol_get(pol);
8983b9aadf7SAndrea Arcangeli 			err = lookup_node(mm, addr);
8991da177e4SLinus Torvalds 			if (err < 0)
9001da177e4SLinus Torvalds 				goto out;
9018bccd85fSChristoph Lameter 			*policy = err;
9021da177e4SLinus Torvalds 		} else if (pol == current->mempolicy &&
90345c4745aSLee Schermerhorn 				pol->mode == MPOL_INTERLEAVE) {
90445816682SVlastimil Babka 			*policy = next_node_in(current->il_prev, pol->v.nodes);
9051da177e4SLinus Torvalds 		} else {
9061da177e4SLinus Torvalds 			err = -EINVAL;
9071da177e4SLinus Torvalds 			goto out;
9081da177e4SLinus Torvalds 		}
909bea904d5SLee Schermerhorn 	} else {
910bea904d5SLee Schermerhorn 		*policy = pol == &default_policy ? MPOL_DEFAULT :
911bea904d5SLee Schermerhorn 						pol->mode;
912d79df630SDavid Rientjes 		/*
913d79df630SDavid Rientjes 		 * Internal mempolicy flags must be masked off before exposing
914d79df630SDavid Rientjes 		 * the policy to userspace.
915d79df630SDavid Rientjes 		 */
916d79df630SDavid Rientjes 		*policy |= (pol->flags & MPOL_MODE_FLAGS);
917bea904d5SLee Schermerhorn 	}
9181da177e4SLinus Torvalds 
9191da177e4SLinus Torvalds 	err = 0;
92058568d2aSMiao Xie 	if (nmask) {
921c6b6ef8bSLee Schermerhorn 		if (mpol_store_user_nodemask(pol)) {
922c6b6ef8bSLee Schermerhorn 			*nmask = pol->w.user_nodemask;
923c6b6ef8bSLee Schermerhorn 		} else {
92458568d2aSMiao Xie 			task_lock(current);
925bea904d5SLee Schermerhorn 			get_policy_nodemask(pol, nmask);
92658568d2aSMiao Xie 			task_unlock(current);
92758568d2aSMiao Xie 		}
928c6b6ef8bSLee Schermerhorn 	}
9291da177e4SLinus Torvalds 
9301da177e4SLinus Torvalds  out:
93152cd3b07SLee Schermerhorn 	mpol_cond_put(pol);
9321da177e4SLinus Torvalds 	if (vma)
9333b9aadf7SAndrea Arcangeli 		up_read(&mm->mmap_sem);
9343b9aadf7SAndrea Arcangeli 	if (pol_refcount)
9353b9aadf7SAndrea Arcangeli 		mpol_put(pol_refcount);
9361da177e4SLinus Torvalds 	return err;
9371da177e4SLinus Torvalds }
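/*
 * Illustrative userspace counterpart (not part of this file): the flag
 * handling above backs the get_mempolicy(2) syscall.  For example, to ask
 * which node currently backs an address:
 *
 *	int node;
 *	get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR);
 *
 * whereas get_mempolicy(&mode, &mask, maxnode, NULL, 0) returns the calling
 * task's policy and nodemask, and MPOL_F_MEMS_ALLOWED returns the cpuset
 * mems_allowed instead.  Error handling omitted.
 */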
9381da177e4SLinus Torvalds 
939b20a3503SChristoph Lameter #ifdef CONFIG_MIGRATION
9408bccd85fSChristoph Lameter /*
941c8633798SNaoya Horiguchi  * page migration, thp tail pages can be passed.
9426ce3c4c0SChristoph Lameter  */
943fc301289SChristoph Lameter static void migrate_page_add(struct page *page, struct list_head *pagelist,
944fc301289SChristoph Lameter 				unsigned long flags)
9456ce3c4c0SChristoph Lameter {
946c8633798SNaoya Horiguchi 	struct page *head = compound_head(page);
9476ce3c4c0SChristoph Lameter 	/*
948fc301289SChristoph Lameter 	 * Avoid migrating a page that is shared with others.
9496ce3c4c0SChristoph Lameter 	 */
950c8633798SNaoya Horiguchi 	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
951c8633798SNaoya Horiguchi 		if (!isolate_lru_page(head)) {
952c8633798SNaoya Horiguchi 			list_add_tail(&head->lru, pagelist);
953c8633798SNaoya Horiguchi 			mod_node_page_state(page_pgdat(head),
954c8633798SNaoya Horiguchi 				NR_ISOLATED_ANON + page_is_file_cache(head),
955c8633798SNaoya Horiguchi 				hpage_nr_pages(head));
95662695a84SNick Piggin 		}
95762695a84SNick Piggin 	}
9586ce3c4c0SChristoph Lameter }
9596ce3c4c0SChristoph Lameter 
960a49bd4d7SMichal Hocko /* page allocation callback for NUMA node migration */
961666feb21SMichal Hocko struct page *alloc_new_node_page(struct page *page, unsigned long node)
96295a402c3SChristoph Lameter {
963e2d8cf40SNaoya Horiguchi 	if (PageHuge(page))
964e2d8cf40SNaoya Horiguchi 		return alloc_huge_page_node(page_hstate(compound_head(page)),
965e2d8cf40SNaoya Horiguchi 					node);
96694723aafSMichal Hocko 	else if (PageTransHuge(page)) {
967c8633798SNaoya Horiguchi 		struct page *thp;
968c8633798SNaoya Horiguchi 
969c8633798SNaoya Horiguchi 		thp = alloc_pages_node(node,
970c8633798SNaoya Horiguchi 			(GFP_TRANSHUGE | __GFP_THISNODE),
971c8633798SNaoya Horiguchi 			HPAGE_PMD_ORDER);
972c8633798SNaoya Horiguchi 		if (!thp)
973c8633798SNaoya Horiguchi 			return NULL;
974c8633798SNaoya Horiguchi 		prep_transhuge_page(thp);
975c8633798SNaoya Horiguchi 		return thp;
976c8633798SNaoya Horiguchi 	} else
97796db800fSVlastimil Babka 		return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
978b360edb4SDavid Rientjes 						    __GFP_THISNODE, 0);
97995a402c3SChristoph Lameter }
98095a402c3SChristoph Lameter 
9816ce3c4c0SChristoph Lameter /*
9827e2ab150SChristoph Lameter  * Migrate pages from one node to a target node.
9837e2ab150SChristoph Lameter  * Returns error or the number of pages not migrated.
9847e2ab150SChristoph Lameter  */
985dbcb0f19SAdrian Bunk static int migrate_to_node(struct mm_struct *mm, int source, int dest,
986dbcb0f19SAdrian Bunk 			   int flags)
9877e2ab150SChristoph Lameter {
9887e2ab150SChristoph Lameter 	nodemask_t nmask;
9897e2ab150SChristoph Lameter 	LIST_HEAD(pagelist);
9907e2ab150SChristoph Lameter 	int err = 0;
9917e2ab150SChristoph Lameter 
9927e2ab150SChristoph Lameter 	nodes_clear(nmask);
9937e2ab150SChristoph Lameter 	node_set(source, nmask);
9947e2ab150SChristoph Lameter 
99508270807SMinchan Kim 	/*
99608270807SMinchan Kim 	 * This does not "check" the range but isolates all pages that
99708270807SMinchan Kim 	 * need migration.  Between passing in the full user address
99808270807SMinchan Kim 	 * space range and MPOL_MF_DISCONTIG_OK, this call cannot fail.
99908270807SMinchan Kim 	 */
100008270807SMinchan Kim 	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
100198094945SNaoya Horiguchi 	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
10027e2ab150SChristoph Lameter 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
10037e2ab150SChristoph Lameter 
1004cf608ac1SMinchan Kim 	if (!list_empty(&pagelist)) {
1005a49bd4d7SMichal Hocko 		err = migrate_pages(&pagelist, alloc_new_node_page, NULL, dest,
10069c620e2bSHugh Dickins 					MIGRATE_SYNC, MR_SYSCALL);
1007cf608ac1SMinchan Kim 		if (err)
1008e2d8cf40SNaoya Horiguchi 			putback_movable_pages(&pagelist);
1009cf608ac1SMinchan Kim 	}
101095a402c3SChristoph Lameter 
10117e2ab150SChristoph Lameter 	return err;
10127e2ab150SChristoph Lameter }
10137e2ab150SChristoph Lameter 
10147e2ab150SChristoph Lameter /*
10157e2ab150SChristoph Lameter  * Move pages between the two nodesets so as to preserve the physical
10167e2ab150SChristoph Lameter  * layout as much as possible.
101739743889SChristoph Lameter  *
101839743889SChristoph Lameter  * Returns the number of pages that could not be moved.
101939743889SChristoph Lameter  */
10200ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
10210ce72d4fSAndrew Morton 		     const nodemask_t *to, int flags)
102239743889SChristoph Lameter {
10237e2ab150SChristoph Lameter 	int busy = 0;
10240aedadf9SChristoph Lameter 	int err;
10257e2ab150SChristoph Lameter 	nodemask_t tmp;
102639743889SChristoph Lameter 
10270aedadf9SChristoph Lameter 	err = migrate_prep();
10280aedadf9SChristoph Lameter 	if (err)
10290aedadf9SChristoph Lameter 		return err;
10300aedadf9SChristoph Lameter 
103139743889SChristoph Lameter 	down_read(&mm->mmap_sem);
1032d4984711SChristoph Lameter 
10337e2ab150SChristoph Lameter 	/*
10347e2ab150SChristoph Lameter 	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
10357e2ab150SChristoph Lameter 	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
10367e2ab150SChristoph Lameter 	 * bit in 'tmp', and return that <source, dest> pair for migration.
10377e2ab150SChristoph Lameter 	 * The pair of nodemasks 'to' and 'from' define the map.
10387e2ab150SChristoph Lameter 	 *
10397e2ab150SChristoph Lameter 	 * If no pair of bits is found that way, fallback to picking some
10407e2ab150SChristoph Lameter 	 * pair of 'source' and 'dest' bits that are not the same.  If the
10417e2ab150SChristoph Lameter 	 * 'source' and 'dest' bits are the same, this represents a node
10427e2ab150SChristoph Lameter 	 * that will be migrating to itself, so no pages need move.
10437e2ab150SChristoph Lameter 	 *
10447e2ab150SChristoph Lameter 	 * If no bits are left in 'tmp', or if all remaining bits left
10457e2ab150SChristoph Lameter 	 * in 'tmp' correspond to the same bit in 'to', return false
10467e2ab150SChristoph Lameter 	 * (nothing left to migrate).
10477e2ab150SChristoph Lameter 	 *
10487e2ab150SChristoph Lameter 	 * This lets us pick a pair of nodes to migrate between, such that
10497e2ab150SChristoph Lameter 	 * if possible the dest node is not already occupied by some other
10507e2ab150SChristoph Lameter 	 * source node, minimizing the risk of overloading the memory on a
10517e2ab150SChristoph Lameter 	 * node that would happen if we migrated incoming memory to a node
10527e2ab150SChristoph Lameter 	 * before migrating outgoing memory source that same node.
10537e2ab150SChristoph Lameter 	 *
10547e2ab150SChristoph Lameter 	 * A single scan of tmp is sufficient.  As we go, we remember the
10557e2ab150SChristoph Lameter 	 * most recent <s, d> pair that moved (s != d).  If we find a pair
10567e2ab150SChristoph Lameter 	 * that not only moved, but what's better, moved to an empty slot
10577e2ab150SChristoph Lameter 	 * (d is not set in tmp), we break out right away with that pair.
1058ae0e47f0SJustin P. Mattock 	 * Otherwise when we finish scanning from_tmp, we at least have the
10597e2ab150SChristoph Lameter 	 * most recent <s, d> pair that moved.  If we get all the way through
10607e2ab150SChristoph Lameter 	 * the scan of tmp without finding any node that moved, much less
10617e2ab150SChristoph Lameter 	 * moved to an empty node, then there is nothing left worth migrating.
10627e2ab150SChristoph Lameter 	 */
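	/*
	 * Worked example (illustrative note, not part of the original
	 * comment): with from = {0,1} and to = {1,2}, the first scan maps
	 * s=0 to d=1 via node_remap(), but node 1 is still a pending source
	 * (set in tmp), so the scan continues and settles on <1,2> because
	 * node 2 is not in tmp.  Once 1 -> 2 has been migrated and bit 1
	 * cleared, the next scan picks <0,1>; each destination is drained
	 * before new pages are moved onto it.
	 */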
10637e2ab150SChristoph Lameter 
10640ce72d4fSAndrew Morton 	tmp = *from;
10657e2ab150SChristoph Lameter 	while (!nodes_empty(tmp)) {
10667e2ab150SChristoph Lameter 		int s, d;
1067b76ac7e7SJianguo Wu 		int source = NUMA_NO_NODE;
10687e2ab150SChristoph Lameter 		int dest = 0;
10697e2ab150SChristoph Lameter 
10707e2ab150SChristoph Lameter 		for_each_node_mask(s, tmp) {
10714a5b18ccSLarry Woodman 
10724a5b18ccSLarry Woodman 			/*
10734a5b18ccSLarry Woodman 			 * do_migrate_pages() tries to maintain the relative
10744a5b18ccSLarry Woodman 			 * node relationship of the pages established between
10754a5b18ccSLarry Woodman 			 * threads and memory areas.
10764a5b18ccSLarry Woodman 			 *
10774a5b18ccSLarry Woodman 			 * However if the number of source nodes is not equal to
10784a5b18ccSLarry Woodman 			 * the number of destination nodes we can not preserve
10794a5b18ccSLarry Woodman 			 * this node relative relationship.  In that case, skip
10804a5b18ccSLarry Woodman 			 * copying memory from a node that is in the destination
10814a5b18ccSLarry Woodman 			 * mask.
10824a5b18ccSLarry Woodman 			 *
10834a5b18ccSLarry Woodman 			 * Example: [2,3,4] -> [3,4,5] moves everything.
10844a5b18ccSLarry Woodman 			 *          [0-7] -> [3,4,5] moves only 0,1,2,6,7.
10854a5b18ccSLarry Woodman 			 */
10864a5b18ccSLarry Woodman 
10870ce72d4fSAndrew Morton 			if ((nodes_weight(*from) != nodes_weight(*to)) &&
10880ce72d4fSAndrew Morton 						(node_isset(s, *to)))
10894a5b18ccSLarry Woodman 				continue;
10904a5b18ccSLarry Woodman 
10910ce72d4fSAndrew Morton 			d = node_remap(s, *from, *to);
10927e2ab150SChristoph Lameter 			if (s == d)
10937e2ab150SChristoph Lameter 				continue;
10947e2ab150SChristoph Lameter 
10957e2ab150SChristoph Lameter 			source = s;	/* Node moved. Memorize */
10967e2ab150SChristoph Lameter 			dest = d;
10977e2ab150SChristoph Lameter 
10987e2ab150SChristoph Lameter 			/* dest not in remaining from nodes? */
10997e2ab150SChristoph Lameter 			if (!node_isset(dest, tmp))
11007e2ab150SChristoph Lameter 				break;
11017e2ab150SChristoph Lameter 		}
1102b76ac7e7SJianguo Wu 		if (source == NUMA_NO_NODE)
11037e2ab150SChristoph Lameter 			break;
11047e2ab150SChristoph Lameter 
11057e2ab150SChristoph Lameter 		node_clear(source, tmp);
11067e2ab150SChristoph Lameter 		err = migrate_to_node(mm, source, dest, flags);
11077e2ab150SChristoph Lameter 		if (err > 0)
11087e2ab150SChristoph Lameter 			busy += err;
11097e2ab150SChristoph Lameter 		if (err < 0)
11107e2ab150SChristoph Lameter 			break;
111139743889SChristoph Lameter 	}
111239743889SChristoph Lameter 	up_read(&mm->mmap_sem);
11137e2ab150SChristoph Lameter 	if (err < 0)
11147e2ab150SChristoph Lameter 		return err;
11157e2ab150SChristoph Lameter 	return busy;
1116b20a3503SChristoph Lameter 
111739743889SChristoph Lameter }
111839743889SChristoph Lameter 
11193ad33b24SLee Schermerhorn /*
11203ad33b24SLee Schermerhorn  * Allocate a new page for page migration based on vma policy.
1121d05f0cdcSHugh Dickins  * Start by assuming the page is mapped by the same vma as contains @start.
11223ad33b24SLee Schermerhorn  * Search forward from there, if not.  N.B., this assumes that the
11233ad33b24SLee Schermerhorn  * list of pages handed to migrate_pages()--which is how we get here--
11243ad33b24SLee Schermerhorn  * is in virtual address order.
11253ad33b24SLee Schermerhorn  */
1126666feb21SMichal Hocko static struct page *new_page(struct page *page, unsigned long start)
112795a402c3SChristoph Lameter {
1128d05f0cdcSHugh Dickins 	struct vm_area_struct *vma;
11293ad33b24SLee Schermerhorn 	unsigned long uninitialized_var(address);
113095a402c3SChristoph Lameter 
1131d05f0cdcSHugh Dickins 	vma = find_vma(current->mm, start);
11323ad33b24SLee Schermerhorn 	while (vma) {
11333ad33b24SLee Schermerhorn 		address = page_address_in_vma(page, vma);
11343ad33b24SLee Schermerhorn 		if (address != -EFAULT)
11353ad33b24SLee Schermerhorn 			break;
11363ad33b24SLee Schermerhorn 		vma = vma->vm_next;
11373ad33b24SLee Schermerhorn 	}
11383ad33b24SLee Schermerhorn 
113911c731e8SWanpeng Li 	if (PageHuge(page)) {
1140389c8178SMichal Hocko 		return alloc_huge_page_vma(page_hstate(compound_head(page)),
1141389c8178SMichal Hocko 				vma, address);
114294723aafSMichal Hocko 	} else if (PageTransHuge(page)) {
1143c8633798SNaoya Horiguchi 		struct page *thp;
1144c8633798SNaoya Horiguchi 
1145356ff8a9SDavid Rientjes 		thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
1146356ff8a9SDavid Rientjes 					 HPAGE_PMD_ORDER);
1147c8633798SNaoya Horiguchi 		if (!thp)
1148c8633798SNaoya Horiguchi 			return NULL;
1149c8633798SNaoya Horiguchi 		prep_transhuge_page(thp);
1150c8633798SNaoya Horiguchi 		return thp;
115111c731e8SWanpeng Li 	}
115211c731e8SWanpeng Li 	/*
115311c731e8SWanpeng Li 	 * if !vma, alloc_page_vma() will use task or system default policy
115411c731e8SWanpeng Li 	 */
11550f556856SMichal Hocko 	return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
11560f556856SMichal Hocko 			vma, address);
115795a402c3SChristoph Lameter }
1158b20a3503SChristoph Lameter #else
1159b20a3503SChristoph Lameter 
1160b20a3503SChristoph Lameter static void migrate_page_add(struct page *page, struct list_head *pagelist,
1161b20a3503SChristoph Lameter 				unsigned long flags)
1162b20a3503SChristoph Lameter {
1163b20a3503SChristoph Lameter }
1164b20a3503SChristoph Lameter 
11650ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
11660ce72d4fSAndrew Morton 		     const nodemask_t *to, int flags)
1167b20a3503SChristoph Lameter {
1168b20a3503SChristoph Lameter 	return -ENOSYS;
1169b20a3503SChristoph Lameter }
117095a402c3SChristoph Lameter 
1171666feb21SMichal Hocko static struct page *new_page(struct page *page, unsigned long start)
117295a402c3SChristoph Lameter {
117395a402c3SChristoph Lameter 	return NULL;
117495a402c3SChristoph Lameter }
1175b20a3503SChristoph Lameter #endif
1176b20a3503SChristoph Lameter 
1177dbcb0f19SAdrian Bunk static long do_mbind(unsigned long start, unsigned long len,
1178028fec41SDavid Rientjes 		     unsigned short mode, unsigned short mode_flags,
1179028fec41SDavid Rientjes 		     nodemask_t *nmask, unsigned long flags)
11806ce3c4c0SChristoph Lameter {
11816ce3c4c0SChristoph Lameter 	struct mm_struct *mm = current->mm;
11826ce3c4c0SChristoph Lameter 	struct mempolicy *new;
11836ce3c4c0SChristoph Lameter 	unsigned long end;
11846ce3c4c0SChristoph Lameter 	int err;
11856ce3c4c0SChristoph Lameter 	LIST_HEAD(pagelist);
11866ce3c4c0SChristoph Lameter 
1187b24f53a0SLee Schermerhorn 	if (flags & ~(unsigned long)MPOL_MF_VALID)
11886ce3c4c0SChristoph Lameter 		return -EINVAL;
118974c00241SChristoph Lameter 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
11906ce3c4c0SChristoph Lameter 		return -EPERM;
11916ce3c4c0SChristoph Lameter 
11926ce3c4c0SChristoph Lameter 	if (start & ~PAGE_MASK)
11936ce3c4c0SChristoph Lameter 		return -EINVAL;
11946ce3c4c0SChristoph Lameter 
11956ce3c4c0SChristoph Lameter 	if (mode == MPOL_DEFAULT)
11966ce3c4c0SChristoph Lameter 		flags &= ~MPOL_MF_STRICT;
11976ce3c4c0SChristoph Lameter 
11986ce3c4c0SChristoph Lameter 	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
11996ce3c4c0SChristoph Lameter 	end = start + len;
12006ce3c4c0SChristoph Lameter 
12016ce3c4c0SChristoph Lameter 	if (end < start)
12026ce3c4c0SChristoph Lameter 		return -EINVAL;
12036ce3c4c0SChristoph Lameter 	if (end == start)
12046ce3c4c0SChristoph Lameter 		return 0;
12056ce3c4c0SChristoph Lameter 
1206028fec41SDavid Rientjes 	new = mpol_new(mode, mode_flags, nmask);
12076ce3c4c0SChristoph Lameter 	if (IS_ERR(new))
12086ce3c4c0SChristoph Lameter 		return PTR_ERR(new);
12096ce3c4c0SChristoph Lameter 
1210b24f53a0SLee Schermerhorn 	if (flags & MPOL_MF_LAZY)
1211b24f53a0SLee Schermerhorn 		new->flags |= MPOL_F_MOF;
1212b24f53a0SLee Schermerhorn 
12136ce3c4c0SChristoph Lameter 	/*
12146ce3c4c0SChristoph Lameter 	 * If we are using the default policy then operation
12156ce3c4c0SChristoph Lameter 	 * on discontinuous address spaces is okay after all
12166ce3c4c0SChristoph Lameter 	 */
12176ce3c4c0SChristoph Lameter 	if (!new)
12186ce3c4c0SChristoph Lameter 		flags |= MPOL_MF_DISCONTIG_OK;
12196ce3c4c0SChristoph Lameter 
1220028fec41SDavid Rientjes 	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1221028fec41SDavid Rientjes 		 start, start + len, mode, mode_flags,
122200ef2d2fSDavid Rientjes 		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
12236ce3c4c0SChristoph Lameter 
12240aedadf9SChristoph Lameter 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
12250aedadf9SChristoph Lameter 
12260aedadf9SChristoph Lameter 		err = migrate_prep();
12270aedadf9SChristoph Lameter 		if (err)
1228b05ca738SKOSAKI Motohiro 			goto mpol_out;
12290aedadf9SChristoph Lameter 	}
12304bfc4495SKAMEZAWA Hiroyuki 	{
12314bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
12324bfc4495SKAMEZAWA Hiroyuki 		if (scratch) {
12336ce3c4c0SChristoph Lameter 			down_write(&mm->mmap_sem);
123458568d2aSMiao Xie 			task_lock(current);
12354bfc4495SKAMEZAWA Hiroyuki 			err = mpol_set_nodemask(new, nmask, scratch);
123658568d2aSMiao Xie 			task_unlock(current);
12374bfc4495SKAMEZAWA Hiroyuki 			if (err)
123858568d2aSMiao Xie 				up_write(&mm->mmap_sem);
12394bfc4495SKAMEZAWA Hiroyuki 		} else
12404bfc4495SKAMEZAWA Hiroyuki 			err = -ENOMEM;
12414bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
12424bfc4495SKAMEZAWA Hiroyuki 	}
1243b05ca738SKOSAKI Motohiro 	if (err)
1244b05ca738SKOSAKI Motohiro 		goto mpol_out;
1245b05ca738SKOSAKI Motohiro 
1246d05f0cdcSHugh Dickins 	err = queue_pages_range(mm, start, end, nmask,
12476ce3c4c0SChristoph Lameter 			  flags | MPOL_MF_INVERT, &pagelist);
1248d05f0cdcSHugh Dickins 	if (!err)
12499d8cebd4SKOSAKI Motohiro 		err = mbind_range(mm, start, end, new);
12507e2ab150SChristoph Lameter 
1251b24f53a0SLee Schermerhorn 	if (!err) {
1252b24f53a0SLee Schermerhorn 		int nr_failed = 0;
1253b24f53a0SLee Schermerhorn 
1254cf608ac1SMinchan Kim 		if (!list_empty(&pagelist)) {
1255b24f53a0SLee Schermerhorn 			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1256d05f0cdcSHugh Dickins 			nr_failed = migrate_pages(&pagelist, new_page, NULL,
1257d05f0cdcSHugh Dickins 				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
1258cf608ac1SMinchan Kim 			if (nr_failed)
125974060e4dSNaoya Horiguchi 				putback_movable_pages(&pagelist);
1260cf608ac1SMinchan Kim 		}
12616ce3c4c0SChristoph Lameter 
1262b24f53a0SLee Schermerhorn 		if (nr_failed && (flags & MPOL_MF_STRICT))
12636ce3c4c0SChristoph Lameter 			err = -EIO;
1264ab8a3e14SKOSAKI Motohiro 	} else
1265b0e5fd73SJoonsoo Kim 		putback_movable_pages(&pagelist);
1266b20a3503SChristoph Lameter 
12676ce3c4c0SChristoph Lameter 	up_write(&mm->mmap_sem);
1268b05ca738SKOSAKI Motohiro  mpol_out:
1269f0be3d32SLee Schermerhorn 	mpol_put(new);
12706ce3c4c0SChristoph Lameter 	return err;
12716ce3c4c0SChristoph Lameter }
12726ce3c4c0SChristoph Lameter 
127339743889SChristoph Lameter /*
12748bccd85fSChristoph Lameter  * User space interface with variable sized bitmaps for nodelists.
12758bccd85fSChristoph Lameter  */
12768bccd85fSChristoph Lameter 
12778bccd85fSChristoph Lameter /* Copy a node mask from user space. */
127839743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
12798bccd85fSChristoph Lameter 		     unsigned long maxnode)
12808bccd85fSChristoph Lameter {
12818bccd85fSChristoph Lameter 	unsigned long k;
128256521e7aSYisheng Xie 	unsigned long t;
12838bccd85fSChristoph Lameter 	unsigned long nlongs;
12848bccd85fSChristoph Lameter 	unsigned long endmask;
12858bccd85fSChristoph Lameter 
12868bccd85fSChristoph Lameter 	--maxnode;
12878bccd85fSChristoph Lameter 	nodes_clear(*nodes);
12888bccd85fSChristoph Lameter 	if (maxnode == 0 || !nmask)
12898bccd85fSChristoph Lameter 		return 0;
1290a9c930baSAndi Kleen 	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1291636f13c1SChris Wright 		return -EINVAL;
12928bccd85fSChristoph Lameter 
12938bccd85fSChristoph Lameter 	nlongs = BITS_TO_LONGS(maxnode);
12948bccd85fSChristoph Lameter 	if ((maxnode % BITS_PER_LONG) == 0)
12958bccd85fSChristoph Lameter 		endmask = ~0UL;
12968bccd85fSChristoph Lameter 	else
12978bccd85fSChristoph Lameter 		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
12988bccd85fSChristoph Lameter 
129956521e7aSYisheng Xie 	/*
130056521e7aSYisheng Xie 	 * When the user specified more nodes than supported, just check
130156521e7aSYisheng Xie 	 * whether the unsupported part is all zero.
130256521e7aSYisheng Xie 	 *
130356521e7aSYisheng Xie 	 * If maxnode spans more longs than MAX_NUMNODES, check
130456521e7aSYisheng Xie 	 * the bits in that area first, and then go through the
130556521e7aSYisheng Xie 	 * remaining bits, which are at or above MAX_NUMNODES.
130656521e7aSYisheng Xie 	 * Otherwise, just check bits [MAX_NUMNODES, maxnode).
130756521e7aSYisheng Xie 	 */
13088bccd85fSChristoph Lameter 	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
13098bccd85fSChristoph Lameter 		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
13108bccd85fSChristoph Lameter 			if (get_user(t, nmask + k))
13118bccd85fSChristoph Lameter 				return -EFAULT;
13128bccd85fSChristoph Lameter 			if (k == nlongs - 1) {
13138bccd85fSChristoph Lameter 				if (t & endmask)
13148bccd85fSChristoph Lameter 					return -EINVAL;
13158bccd85fSChristoph Lameter 			} else if (t)
13168bccd85fSChristoph Lameter 				return -EINVAL;
13178bccd85fSChristoph Lameter 		}
13188bccd85fSChristoph Lameter 		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
13198bccd85fSChristoph Lameter 		endmask = ~0UL;
13208bccd85fSChristoph Lameter 	}
13218bccd85fSChristoph Lameter 
132256521e7aSYisheng Xie 	if (maxnode > MAX_NUMNODES && MAX_NUMNODES % BITS_PER_LONG != 0) {
132356521e7aSYisheng Xie 		unsigned long valid_mask = endmask;
132456521e7aSYisheng Xie 
132556521e7aSYisheng Xie 		valid_mask &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
132656521e7aSYisheng Xie 		if (get_user(t, nmask + nlongs - 1))
132756521e7aSYisheng Xie 			return -EFAULT;
132856521e7aSYisheng Xie 		if (t & valid_mask)
132956521e7aSYisheng Xie 			return -EINVAL;
133056521e7aSYisheng Xie 	}
133156521e7aSYisheng Xie 
13328bccd85fSChristoph Lameter 	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
13338bccd85fSChristoph Lameter 		return -EFAULT;
13348bccd85fSChristoph Lameter 	nodes_addr(*nodes)[nlongs-1] &= endmask;
13358bccd85fSChristoph Lameter 	return 0;
13368bccd85fSChristoph Lameter }
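/*
 * Worked example (illustrative note added here): a user bitmap with bits 0
 * and 2 set and maxnode = 4 selects nodes {0,2}; after the --maxnode above
 * the kernel examines 3 bits, so endmask = 0x7 and both requested bits are
 * copied into *nodes.
 */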
13378bccd85fSChristoph Lameter 
13388bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */
13398bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
13408bccd85fSChristoph Lameter 			      nodemask_t *nodes)
13418bccd85fSChristoph Lameter {
13428bccd85fSChristoph Lameter 	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1343050c17f2SRalph Campbell 	unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
13448bccd85fSChristoph Lameter 
13458bccd85fSChristoph Lameter 	if (copy > nbytes) {
13468bccd85fSChristoph Lameter 		if (copy > PAGE_SIZE)
13478bccd85fSChristoph Lameter 			return -EINVAL;
13488bccd85fSChristoph Lameter 		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
13498bccd85fSChristoph Lameter 			return -EFAULT;
13508bccd85fSChristoph Lameter 		copy = nbytes;
13518bccd85fSChristoph Lameter 	}
13528bccd85fSChristoph Lameter 	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
13538bccd85fSChristoph Lameter }
13548bccd85fSChristoph Lameter 
1355e7dc9ad6SDominik Brodowski static long kernel_mbind(unsigned long start, unsigned long len,
1356e7dc9ad6SDominik Brodowski 			 unsigned long mode, const unsigned long __user *nmask,
1357e7dc9ad6SDominik Brodowski 			 unsigned long maxnode, unsigned int flags)
13588bccd85fSChristoph Lameter {
13598bccd85fSChristoph Lameter 	nodemask_t nodes;
13608bccd85fSChristoph Lameter 	int err;
1361028fec41SDavid Rientjes 	unsigned short mode_flags;
13628bccd85fSChristoph Lameter 
1363028fec41SDavid Rientjes 	mode_flags = mode & MPOL_MODE_FLAGS;
1364028fec41SDavid Rientjes 	mode &= ~MPOL_MODE_FLAGS;
1365a3b51e01SDavid Rientjes 	if (mode >= MPOL_MAX)
1366a3b51e01SDavid Rientjes 		return -EINVAL;
13674c50bc01SDavid Rientjes 	if ((mode_flags & MPOL_F_STATIC_NODES) &&
13684c50bc01SDavid Rientjes 	    (mode_flags & MPOL_F_RELATIVE_NODES))
13694c50bc01SDavid Rientjes 		return -EINVAL;
13708bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
13718bccd85fSChristoph Lameter 	if (err)
13728bccd85fSChristoph Lameter 		return err;
1373028fec41SDavid Rientjes 	return do_mbind(start, len, mode, mode_flags, &nodes, flags);
13748bccd85fSChristoph Lameter }
13758bccd85fSChristoph Lameter 
1376e7dc9ad6SDominik Brodowski SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1377e7dc9ad6SDominik Brodowski 		unsigned long, mode, const unsigned long __user *, nmask,
1378e7dc9ad6SDominik Brodowski 		unsigned long, maxnode, unsigned int, flags)
1379e7dc9ad6SDominik Brodowski {
1380e7dc9ad6SDominik Brodowski 	return kernel_mbind(start, len, mode, nmask, maxnode, flags);
1381e7dc9ad6SDominik Brodowski }
1382e7dc9ad6SDominik Brodowski 
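/*
 * Illustrative user-space sketch (added note, not part of this file): the
 * raw mbind(2) call that lands in kernel_mbind() above.  It assumes the
 * MPOL_* constants from the uapi header and uses syscall(2) directly;
 * libnuma's <numaif.h> mbind() wrapper is equivalent.  maxnode is passed as
 * the size of the bitmap in bits.
 *
 *	#include <linux/mempolicy.h>
 *	#include <sys/mman.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	void *buf = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	unsigned long nodes = (1UL << 0) | (1UL << 1);	  interleave on 0,1
 *	long rc = syscall(SYS_mbind, buf, 1UL << 20, MPOL_INTERLEAVE,
 *			  &nodes, 8 * sizeof(nodes), 0);
 */
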
13838bccd85fSChristoph Lameter /* Set the process memory policy */
1384af03c4acSDominik Brodowski static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
1385af03c4acSDominik Brodowski 				 unsigned long maxnode)
13868bccd85fSChristoph Lameter {
13878bccd85fSChristoph Lameter 	int err;
13888bccd85fSChristoph Lameter 	nodemask_t nodes;
1389028fec41SDavid Rientjes 	unsigned short flags;
13908bccd85fSChristoph Lameter 
1391028fec41SDavid Rientjes 	flags = mode & MPOL_MODE_FLAGS;
1392028fec41SDavid Rientjes 	mode &= ~MPOL_MODE_FLAGS;
1393028fec41SDavid Rientjes 	if ((unsigned int)mode >= MPOL_MAX)
13948bccd85fSChristoph Lameter 		return -EINVAL;
13954c50bc01SDavid Rientjes 	if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
13964c50bc01SDavid Rientjes 		return -EINVAL;
13978bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
13988bccd85fSChristoph Lameter 	if (err)
13998bccd85fSChristoph Lameter 		return err;
1400028fec41SDavid Rientjes 	return do_set_mempolicy(mode, flags, &nodes);
14018bccd85fSChristoph Lameter }
14028bccd85fSChristoph Lameter 
1403af03c4acSDominik Brodowski SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1404af03c4acSDominik Brodowski 		unsigned long, maxnode)
1405af03c4acSDominik Brodowski {
1406af03c4acSDominik Brodowski 	return kernel_set_mempolicy(mode, nmask, maxnode);
1407af03c4acSDominik Brodowski }
1408af03c4acSDominik Brodowski 
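/*
 * Illustrative user-space sketch (added note): set_mempolicy(2), which
 * reaches kernel_set_mempolicy() above, binding all future allocations of
 * the calling task to node 0.  Raw syscall shown; libnuma provides an
 * equivalent wrapper.
 *
 *	unsigned long node0 = 1UL << 0;
 *	long rc = syscall(SYS_set_mempolicy, MPOL_BIND, &node0,
 *			  8 * sizeof(node0));
 */
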
1409b6e9b0baSDominik Brodowski static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
1410b6e9b0baSDominik Brodowski 				const unsigned long __user *old_nodes,
1411b6e9b0baSDominik Brodowski 				const unsigned long __user *new_nodes)
141239743889SChristoph Lameter {
1413596d7cfaSKOSAKI Motohiro 	struct mm_struct *mm = NULL;
141439743889SChristoph Lameter 	struct task_struct *task;
141539743889SChristoph Lameter 	nodemask_t task_nodes;
141639743889SChristoph Lameter 	int err;
1417596d7cfaSKOSAKI Motohiro 	nodemask_t *old;
1418596d7cfaSKOSAKI Motohiro 	nodemask_t *new;
1419596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH(scratch);
142039743889SChristoph Lameter 
1421596d7cfaSKOSAKI Motohiro 	if (!scratch)
1422596d7cfaSKOSAKI Motohiro 		return -ENOMEM;
142339743889SChristoph Lameter 
1424596d7cfaSKOSAKI Motohiro 	old = &scratch->mask1;
1425596d7cfaSKOSAKI Motohiro 	new = &scratch->mask2;
1426596d7cfaSKOSAKI Motohiro 
1427596d7cfaSKOSAKI Motohiro 	err = get_nodes(old, old_nodes, maxnode);
142839743889SChristoph Lameter 	if (err)
1429596d7cfaSKOSAKI Motohiro 		goto out;
1430596d7cfaSKOSAKI Motohiro 
1431596d7cfaSKOSAKI Motohiro 	err = get_nodes(new, new_nodes, maxnode);
1432596d7cfaSKOSAKI Motohiro 	if (err)
1433596d7cfaSKOSAKI Motohiro 		goto out;
143439743889SChristoph Lameter 
143539743889SChristoph Lameter 	/* Find the mm_struct */
143655cfaa3cSZeng Zhaoming 	rcu_read_lock();
1437228ebcbeSPavel Emelyanov 	task = pid ? find_task_by_vpid(pid) : current;
143839743889SChristoph Lameter 	if (!task) {
143955cfaa3cSZeng Zhaoming 		rcu_read_unlock();
1440596d7cfaSKOSAKI Motohiro 		err = -ESRCH;
1441596d7cfaSKOSAKI Motohiro 		goto out;
144239743889SChristoph Lameter 	}
14433268c63eSChristoph Lameter 	get_task_struct(task);
144439743889SChristoph Lameter 
1445596d7cfaSKOSAKI Motohiro 	err = -EINVAL;
144639743889SChristoph Lameter 
144739743889SChristoph Lameter 	/*
144831367466SOtto Ebeling 	 * Check if this process has the right to modify the specified process.
144931367466SOtto Ebeling 	 * Use the regular "ptrace_may_access()" checks.
145039743889SChristoph Lameter 	 */
145131367466SOtto Ebeling 	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
1452c69e8d9cSDavid Howells 		rcu_read_unlock();
145339743889SChristoph Lameter 		err = -EPERM;
14543268c63eSChristoph Lameter 		goto out_put;
145539743889SChristoph Lameter 	}
1456c69e8d9cSDavid Howells 	rcu_read_unlock();
145739743889SChristoph Lameter 
145839743889SChristoph Lameter 	task_nodes = cpuset_mems_allowed(task);
145939743889SChristoph Lameter 	/* Is the user allowed to access the target nodes? */
1460596d7cfaSKOSAKI Motohiro 	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
146139743889SChristoph Lameter 		err = -EPERM;
14623268c63eSChristoph Lameter 		goto out_put;
146339743889SChristoph Lameter 	}
146439743889SChristoph Lameter 
14650486a38bSYisheng Xie 	task_nodes = cpuset_mems_allowed(current);
14660486a38bSYisheng Xie 	nodes_and(*new, *new, task_nodes);
14670486a38bSYisheng Xie 	if (nodes_empty(*new))
14683268c63eSChristoph Lameter 		goto out_put;
14690486a38bSYisheng Xie 
14700486a38bSYisheng Xie 	nodes_and(*new, *new, node_states[N_MEMORY]);
14710486a38bSYisheng Xie 	if (nodes_empty(*new))
14720486a38bSYisheng Xie 		goto out_put;
14733b42d28bSChristoph Lameter 
147486c3a764SDavid Quigley 	err = security_task_movememory(task);
147586c3a764SDavid Quigley 	if (err)
14763268c63eSChristoph Lameter 		goto out_put;
147786c3a764SDavid Quigley 
14783268c63eSChristoph Lameter 	mm = get_task_mm(task);
14793268c63eSChristoph Lameter 	put_task_struct(task);
1480f2a9ef88SSasha Levin 
1481f2a9ef88SSasha Levin 	if (!mm) {
1482f2a9ef88SSasha Levin 		err = -EINVAL;
1483f2a9ef88SSasha Levin 		goto out;
1484f2a9ef88SSasha Levin 	}
1485f2a9ef88SSasha Levin 
1486596d7cfaSKOSAKI Motohiro 	err = do_migrate_pages(mm, old, new,
148774c00241SChristoph Lameter 		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
14883268c63eSChristoph Lameter 
148939743889SChristoph Lameter 	mmput(mm);
14903268c63eSChristoph Lameter out:
1491596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH_FREE(scratch);
1492596d7cfaSKOSAKI Motohiro 
149339743889SChristoph Lameter 	return err;
14943268c63eSChristoph Lameter 
14953268c63eSChristoph Lameter out_put:
14963268c63eSChristoph Lameter 	put_task_struct(task);
14973268c63eSChristoph Lameter 	goto out;
14983268c63eSChristoph Lameter 
149939743889SChristoph Lameter }
150039743889SChristoph Lameter 
1501b6e9b0baSDominik Brodowski SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1502b6e9b0baSDominik Brodowski 		const unsigned long __user *, old_nodes,
1503b6e9b0baSDominik Brodowski 		const unsigned long __user *, new_nodes)
1504b6e9b0baSDominik Brodowski {
1505b6e9b0baSDominik Brodowski 	return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
1506b6e9b0baSDominik Brodowski }
1507b6e9b0baSDominik Brodowski 
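/*
 * Illustrative user-space sketch (added note): migrate_pages(2), the entry
 * point into kernel_migrate_pages()/do_migrate_pages() above, moving the
 * pages of process pid from node 0 to node 1.  A negative return is an
 * errno-style error; a non-negative return is the number of pages that
 * could not be moved.
 *
 *	unsigned long old = 1UL << 0, new = 1UL << 1;
 *	long left = syscall(SYS_migrate_pages, pid,
 *			    8 * sizeof(unsigned long), &old, &new);
 */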
150839743889SChristoph Lameter 
15098bccd85fSChristoph Lameter /* Retrieve NUMA policy */
1510af03c4acSDominik Brodowski static int kernel_get_mempolicy(int __user *policy,
1511af03c4acSDominik Brodowski 				unsigned long __user *nmask,
1512af03c4acSDominik Brodowski 				unsigned long maxnode,
1513af03c4acSDominik Brodowski 				unsigned long addr,
1514af03c4acSDominik Brodowski 				unsigned long flags)
15158bccd85fSChristoph Lameter {
1516dbcb0f19SAdrian Bunk 	int err;
1517dbcb0f19SAdrian Bunk 	int uninitialized_var(pval);
15188bccd85fSChristoph Lameter 	nodemask_t nodes;
15198bccd85fSChristoph Lameter 
1520050c17f2SRalph Campbell 	if (nmask != NULL && maxnode < nr_node_ids)
15218bccd85fSChristoph Lameter 		return -EINVAL;
15228bccd85fSChristoph Lameter 
15238bccd85fSChristoph Lameter 	err = do_get_mempolicy(&pval, &nodes, addr, flags);
15248bccd85fSChristoph Lameter 
15258bccd85fSChristoph Lameter 	if (err)
15268bccd85fSChristoph Lameter 		return err;
15278bccd85fSChristoph Lameter 
15288bccd85fSChristoph Lameter 	if (policy && put_user(pval, policy))
15298bccd85fSChristoph Lameter 		return -EFAULT;
15308bccd85fSChristoph Lameter 
15318bccd85fSChristoph Lameter 	if (nmask)
15328bccd85fSChristoph Lameter 		err = copy_nodes_to_user(nmask, maxnode, &nodes);
15338bccd85fSChristoph Lameter 
15348bccd85fSChristoph Lameter 	return err;
15358bccd85fSChristoph Lameter }
15368bccd85fSChristoph Lameter 
1537af03c4acSDominik Brodowski SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1538af03c4acSDominik Brodowski 		unsigned long __user *, nmask, unsigned long, maxnode,
1539af03c4acSDominik Brodowski 		unsigned long, addr, unsigned long, flags)
1540af03c4acSDominik Brodowski {
1541af03c4acSDominik Brodowski 	return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
1542af03c4acSDominik Brodowski }
1543af03c4acSDominik Brodowski 
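/*
 * Illustrative user-space sketch (added note): get_mempolicy(2) queries, as
 * handled by kernel_get_mempolicy() above.  The mask array is sized for up
 * to 1024 node bits so that maxnode >= nr_node_ids on common configs.
 *
 *	int mode, node;
 *	unsigned long mask[16] = { 0 };
 *
 *	query the calling thread's policy and allowed nodes:
 *	syscall(SYS_get_mempolicy, &mode, mask, 8 * sizeof(mask), NULL, 0);
 *
 *	query which node backs a given address:
 *	syscall(SYS_get_mempolicy, &node, NULL, 0, addr,
 *		MPOL_F_NODE | MPOL_F_ADDR);
 */
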
15441da177e4SLinus Torvalds #ifdef CONFIG_COMPAT
15451da177e4SLinus Torvalds 
1546c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1547c93e0f6cSHeiko Carstens 		       compat_ulong_t __user *, nmask,
1548c93e0f6cSHeiko Carstens 		       compat_ulong_t, maxnode,
1549c93e0f6cSHeiko Carstens 		       compat_ulong_t, addr, compat_ulong_t, flags)
15501da177e4SLinus Torvalds {
15511da177e4SLinus Torvalds 	long err;
15521da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
15531da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
15541da177e4SLinus Torvalds 	DECLARE_BITMAP(bm, MAX_NUMNODES);
15551da177e4SLinus Torvalds 
1556050c17f2SRalph Campbell 	nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids);
15571da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
15581da177e4SLinus Torvalds 
15591da177e4SLinus Torvalds 	if (nmask)
15601da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
15611da177e4SLinus Torvalds 
1562af03c4acSDominik Brodowski 	err = kernel_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
15631da177e4SLinus Torvalds 
15641da177e4SLinus Torvalds 	if (!err && nmask) {
15652bbff6c7SKAMEZAWA Hiroyuki 		unsigned long copy_size;
15662bbff6c7SKAMEZAWA Hiroyuki 		copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
15672bbff6c7SKAMEZAWA Hiroyuki 		err = copy_from_user(bm, nm, copy_size);
15681da177e4SLinus Torvalds 		/* ensure entire bitmap is zeroed */
15691da177e4SLinus Torvalds 		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
15701da177e4SLinus Torvalds 		err |= compat_put_bitmap(nmask, bm, nr_bits);
15711da177e4SLinus Torvalds 	}
15721da177e4SLinus Torvalds 
15731da177e4SLinus Torvalds 	return err;
15741da177e4SLinus Torvalds }
15751da177e4SLinus Torvalds 
1576c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1577c93e0f6cSHeiko Carstens 		       compat_ulong_t, maxnode)
15781da177e4SLinus Torvalds {
15791da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
15801da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
15811da177e4SLinus Torvalds 	DECLARE_BITMAP(bm, MAX_NUMNODES);
15821da177e4SLinus Torvalds 
15831da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
15841da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
15851da177e4SLinus Torvalds 
15861da177e4SLinus Torvalds 	if (nmask) {
1587cf01fb99SChris Salls 		if (compat_get_bitmap(bm, nmask, nr_bits))
15881da177e4SLinus Torvalds 			return -EFAULT;
1589cf01fb99SChris Salls 		nm = compat_alloc_user_space(alloc_size);
1590cf01fb99SChris Salls 		if (copy_to_user(nm, bm, alloc_size))
1591cf01fb99SChris Salls 			return -EFAULT;
1592cf01fb99SChris Salls 	}
15931da177e4SLinus Torvalds 
1594af03c4acSDominik Brodowski 	return kernel_set_mempolicy(mode, nm, nr_bits+1);
15951da177e4SLinus Torvalds }
15961da177e4SLinus Torvalds 
1597c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1598c93e0f6cSHeiko Carstens 		       compat_ulong_t, mode, compat_ulong_t __user *, nmask,
1599c93e0f6cSHeiko Carstens 		       compat_ulong_t, maxnode, compat_ulong_t, flags)
16001da177e4SLinus Torvalds {
16011da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
16021da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
1603dfcd3c0dSAndi Kleen 	nodemask_t bm;
16041da177e4SLinus Torvalds 
16051da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
16061da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
16071da177e4SLinus Torvalds 
16081da177e4SLinus Torvalds 	if (nmask) {
1609cf01fb99SChris Salls 		if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
16101da177e4SLinus Torvalds 			return -EFAULT;
1611cf01fb99SChris Salls 		nm = compat_alloc_user_space(alloc_size);
1612cf01fb99SChris Salls 		if (copy_to_user(nm, nodes_addr(bm), alloc_size))
1613cf01fb99SChris Salls 			return -EFAULT;
1614cf01fb99SChris Salls 	}
16151da177e4SLinus Torvalds 
1616e7dc9ad6SDominik Brodowski 	return kernel_mbind(start, len, mode, nm, nr_bits+1, flags);
16171da177e4SLinus Torvalds }
16181da177e4SLinus Torvalds 
1619b6e9b0baSDominik Brodowski COMPAT_SYSCALL_DEFINE4(migrate_pages, compat_pid_t, pid,
1620b6e9b0baSDominik Brodowski 		       compat_ulong_t, maxnode,
1621b6e9b0baSDominik Brodowski 		       const compat_ulong_t __user *, old_nodes,
1622b6e9b0baSDominik Brodowski 		       const compat_ulong_t __user *, new_nodes)
1623b6e9b0baSDominik Brodowski {
1624b6e9b0baSDominik Brodowski 	unsigned long __user *old = NULL;
1625b6e9b0baSDominik Brodowski 	unsigned long __user *new = NULL;
1626b6e9b0baSDominik Brodowski 	nodemask_t tmp_mask;
1627b6e9b0baSDominik Brodowski 	unsigned long nr_bits;
1628b6e9b0baSDominik Brodowski 	unsigned long size;
1629b6e9b0baSDominik Brodowski 
1630b6e9b0baSDominik Brodowski 	nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES);
1631b6e9b0baSDominik Brodowski 	size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1632b6e9b0baSDominik Brodowski 	if (old_nodes) {
1633b6e9b0baSDominik Brodowski 		if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits))
1634b6e9b0baSDominik Brodowski 			return -EFAULT;
1635b6e9b0baSDominik Brodowski 		old = compat_alloc_user_space(new_nodes ? size * 2 : size);
1636b6e9b0baSDominik Brodowski 		if (new_nodes)
1637b6e9b0baSDominik Brodowski 			new = old + size / sizeof(unsigned long);
1638b6e9b0baSDominik Brodowski 		if (copy_to_user(old, nodes_addr(tmp_mask), size))
1639b6e9b0baSDominik Brodowski 			return -EFAULT;
1640b6e9b0baSDominik Brodowski 	}
1641b6e9b0baSDominik Brodowski 	if (new_nodes) {
1642b6e9b0baSDominik Brodowski 		if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits))
1643b6e9b0baSDominik Brodowski 			return -EFAULT;
1644b6e9b0baSDominik Brodowski 		if (new == NULL)
1645b6e9b0baSDominik Brodowski 			new = compat_alloc_user_space(size);
1646b6e9b0baSDominik Brodowski 		if (copy_to_user(new, nodes_addr(tmp_mask), size))
1647b6e9b0baSDominik Brodowski 			return -EFAULT;
1648b6e9b0baSDominik Brodowski 	}
1649b6e9b0baSDominik Brodowski 	return kernel_migrate_pages(pid, nr_bits + 1, old, new);
1650b6e9b0baSDominik Brodowski }
1651b6e9b0baSDominik Brodowski 
1652b6e9b0baSDominik Brodowski #endif /* CONFIG_COMPAT */
16531da177e4SLinus Torvalds 
165474d2c3a0SOleg Nesterov struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
165574d2c3a0SOleg Nesterov 						unsigned long addr)
16561da177e4SLinus Torvalds {
16578d90274bSOleg Nesterov 	struct mempolicy *pol = NULL;
16581da177e4SLinus Torvalds 
16591da177e4SLinus Torvalds 	if (vma) {
1660480eccf9SLee Schermerhorn 		if (vma->vm_ops && vma->vm_ops->get_policy) {
16618d90274bSOleg Nesterov 			pol = vma->vm_ops->get_policy(vma, addr);
166200442ad0SMel Gorman 		} else if (vma->vm_policy) {
16631da177e4SLinus Torvalds 			pol = vma->vm_policy;
166400442ad0SMel Gorman 
166500442ad0SMel Gorman 			/*
166600442ad0SMel Gorman 			 * shmem_alloc_page() passes MPOL_F_SHARED policy with
166700442ad0SMel Gorman 			 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
166800442ad0SMel Gorman 			 * count on these policies which will be dropped by
166900442ad0SMel Gorman 			 * mpol_cond_put() later
167000442ad0SMel Gorman 			 */
167100442ad0SMel Gorman 			if (mpol_needs_cond_ref(pol))
167200442ad0SMel Gorman 				mpol_get(pol);
167300442ad0SMel Gorman 		}
16741da177e4SLinus Torvalds 	}
1675f15ca78eSOleg Nesterov 
167674d2c3a0SOleg Nesterov 	return pol;
167774d2c3a0SOleg Nesterov }
167874d2c3a0SOleg Nesterov 
167974d2c3a0SOleg Nesterov /*
1680dd6eecb9SOleg Nesterov  * get_vma_policy(@vma, @addr)
168174d2c3a0SOleg Nesterov  * @vma: virtual memory area whose policy is sought
168274d2c3a0SOleg Nesterov  * @addr: address in @vma for shared policy lookup
168374d2c3a0SOleg Nesterov  *
168474d2c3a0SOleg Nesterov  * Returns effective policy for a VMA at specified address.
1685dd6eecb9SOleg Nesterov  * Falls back to current->mempolicy or system default policy, as necessary.
168674d2c3a0SOleg Nesterov  * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
168774d2c3a0SOleg Nesterov  * count--added by the get_policy() vm_op, as appropriate--to protect against
168874d2c3a0SOleg Nesterov  * freeing by another task.  It is the caller's responsibility to free the
168974d2c3a0SOleg Nesterov  * extra reference for shared policies.
169074d2c3a0SOleg Nesterov  */
16912f0799a0SDavid Rientjes static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1692dd6eecb9SOleg Nesterov 						unsigned long addr)
169374d2c3a0SOleg Nesterov {
169474d2c3a0SOleg Nesterov 	struct mempolicy *pol = __get_vma_policy(vma, addr);
169574d2c3a0SOleg Nesterov 
16968d90274bSOleg Nesterov 	if (!pol)
1697dd6eecb9SOleg Nesterov 		pol = get_task_policy(current);
16988d90274bSOleg Nesterov 
16991da177e4SLinus Torvalds 	return pol;
17001da177e4SLinus Torvalds }
17011da177e4SLinus Torvalds 
17026b6482bbSOleg Nesterov bool vma_policy_mof(struct vm_area_struct *vma)
1703fc314724SMel Gorman {
17046b6482bbSOleg Nesterov 	struct mempolicy *pol;
1705f15ca78eSOleg Nesterov 
1706fc314724SMel Gorman 	if (vma->vm_ops && vma->vm_ops->get_policy) {
1707fc314724SMel Gorman 		bool ret = false;
1708fc314724SMel Gorman 
1709fc314724SMel Gorman 		pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1710fc314724SMel Gorman 		if (pol && (pol->flags & MPOL_F_MOF))
1711fc314724SMel Gorman 			ret = true;
1712fc314724SMel Gorman 		mpol_cond_put(pol);
1713fc314724SMel Gorman 
1714fc314724SMel Gorman 		return ret;
17158d90274bSOleg Nesterov 	}
17168d90274bSOleg Nesterov 
1717fc314724SMel Gorman 	pol = vma->vm_policy;
17188d90274bSOleg Nesterov 	if (!pol)
17196b6482bbSOleg Nesterov 		pol = get_task_policy(current);
1720fc314724SMel Gorman 
1721fc314724SMel Gorman 	return pol->flags & MPOL_F_MOF;
1722fc314724SMel Gorman }
1723fc314724SMel Gorman 
1724d3eb1570SLai Jiangshan static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1725d3eb1570SLai Jiangshan {
1726d3eb1570SLai Jiangshan 	enum zone_type dynamic_policy_zone = policy_zone;
1727d3eb1570SLai Jiangshan 
1728d3eb1570SLai Jiangshan 	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1729d3eb1570SLai Jiangshan 
1730d3eb1570SLai Jiangshan 	/*
1731d3eb1570SLai Jiangshan 	 * if policy->v.nodes has movable memory only,
1732d3eb1570SLai Jiangshan 	 * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
1733d3eb1570SLai Jiangshan 	 *
1734d3eb1570SLai Jiangshan 	 * policy->v.nodes is intersected with node_states[N_MEMORY],
1735d3eb1570SLai Jiangshan 	 * so if the following test fails, it implies
1736d3eb1570SLai Jiangshan 	 * policy->v.nodes has movable memory only.
1737d3eb1570SLai Jiangshan 	 */
1738d3eb1570SLai Jiangshan 	if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1739d3eb1570SLai Jiangshan 		dynamic_policy_zone = ZONE_MOVABLE;
1740d3eb1570SLai Jiangshan 
1741d3eb1570SLai Jiangshan 	return zone >= dynamic_policy_zone;
1742d3eb1570SLai Jiangshan }
1743d3eb1570SLai Jiangshan 
174452cd3b07SLee Schermerhorn /*
174552cd3b07SLee Schermerhorn  * Return a nodemask representing a mempolicy for filtering nodes for
174652cd3b07SLee Schermerhorn  * page allocation
174752cd3b07SLee Schermerhorn  */
174852cd3b07SLee Schermerhorn static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
174919770b32SMel Gorman {
175019770b32SMel Gorman 	/* Lower zones don't get a nodemask applied for MPOL_BIND */
175145c4745aSLee Schermerhorn 	if (unlikely(policy->mode == MPOL_BIND) &&
1752d3eb1570SLai Jiangshan 			apply_policy_zone(policy, gfp_zone(gfp)) &&
175319770b32SMel Gorman 			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
175419770b32SMel Gorman 		return &policy->v.nodes;
175519770b32SMel Gorman 
175619770b32SMel Gorman 	return NULL;
175719770b32SMel Gorman }
175819770b32SMel Gorman 
175904ec6264SVlastimil Babka /* Return the node id preferred by the given mempolicy, or the given id */
176004ec6264SVlastimil Babka static int policy_node(gfp_t gfp, struct mempolicy *policy,
17612f5f9486SAndi Kleen 								int nd)
17621da177e4SLinus Torvalds {
17636d840958SMichal Hocko 	if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL))
17641da177e4SLinus Torvalds 		nd = policy->v.preferred_node;
17656d840958SMichal Hocko 	else {
176619770b32SMel Gorman 		/*
17676d840958SMichal Hocko 		 * __GFP_THISNODE shouldn't even be used with the bind policy
17686d840958SMichal Hocko 		 * because we might easily break the expectation to stay on the
17696d840958SMichal Hocko 		 * requested node and not break the policy.
177019770b32SMel Gorman 		 */
17716d840958SMichal Hocko 		WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
17721da177e4SLinus Torvalds 	}
17736d840958SMichal Hocko 
177404ec6264SVlastimil Babka 	return nd;
17751da177e4SLinus Torvalds }
17761da177e4SLinus Torvalds 
17771da177e4SLinus Torvalds /* Do dynamic interleaving for a process */
17781da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy)
17791da177e4SLinus Torvalds {
178045816682SVlastimil Babka 	unsigned next;
17811da177e4SLinus Torvalds 	struct task_struct *me = current;
17821da177e4SLinus Torvalds 
178345816682SVlastimil Babka 	next = next_node_in(me->il_prev, policy->v.nodes);
1784f5b087b5SDavid Rientjes 	if (next < MAX_NUMNODES)
178545816682SVlastimil Babka 		me->il_prev = next;
178645816682SVlastimil Babka 	return next;
17871da177e4SLinus Torvalds }
17881da177e4SLinus Torvalds 
1789dc85da15SChristoph Lameter /*
1790dc85da15SChristoph Lameter  * Depending on the memory policy provide a node from which to allocate the
1791dc85da15SChristoph Lameter  * next slab entry.
1792dc85da15SChristoph Lameter  */
17932a389610SDavid Rientjes unsigned int mempolicy_slab_node(void)
1794dc85da15SChristoph Lameter {
1795e7b691b0SAndi Kleen 	struct mempolicy *policy;
17962a389610SDavid Rientjes 	int node = numa_mem_id();
1797e7b691b0SAndi Kleen 
1798e7b691b0SAndi Kleen 	if (in_interrupt())
17992a389610SDavid Rientjes 		return node;
1800e7b691b0SAndi Kleen 
1801e7b691b0SAndi Kleen 	policy = current->mempolicy;
1802fc36b8d3SLee Schermerhorn 	if (!policy || policy->flags & MPOL_F_LOCAL)
18032a389610SDavid Rientjes 		return node;
1804765c4507SChristoph Lameter 
1805bea904d5SLee Schermerhorn 	switch (policy->mode) {
1806bea904d5SLee Schermerhorn 	case MPOL_PREFERRED:
1807fc36b8d3SLee Schermerhorn 		/*
1808fc36b8d3SLee Schermerhorn 		 * handled MPOL_F_LOCAL above
1809fc36b8d3SLee Schermerhorn 		 */
1810bea904d5SLee Schermerhorn 		return policy->v.preferred_node;
1811bea904d5SLee Schermerhorn 
1812dc85da15SChristoph Lameter 	case MPOL_INTERLEAVE:
1813dc85da15SChristoph Lameter 		return interleave_nodes(policy);
1814dc85da15SChristoph Lameter 
1815dd1a239fSMel Gorman 	case MPOL_BIND: {
1816c33d6c06SMel Gorman 		struct zoneref *z;
1817c33d6c06SMel Gorman 
1818dc85da15SChristoph Lameter 		/*
1819dc85da15SChristoph Lameter 		 * Follow bind policy behavior and start allocation at the
1820dc85da15SChristoph Lameter 		 * first node.
1821dc85da15SChristoph Lameter 		 */
182219770b32SMel Gorman 		struct zonelist *zonelist;
182319770b32SMel Gorman 		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1824c9634cf0SAneesh Kumar K.V 		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
1825c33d6c06SMel Gorman 		z = first_zones_zonelist(zonelist, highest_zoneidx,
1826c33d6c06SMel Gorman 							&policy->v.nodes);
1827c1093b74SPavel Tatashin 		return z->zone ? zone_to_nid(z->zone) : node;
1828dd1a239fSMel Gorman 	}
1829dc85da15SChristoph Lameter 
1830dc85da15SChristoph Lameter 	default:
1831bea904d5SLee Schermerhorn 		BUG();
1832dc85da15SChristoph Lameter 	}
1833dc85da15SChristoph Lameter }
1834dc85da15SChristoph Lameter 
1835fee83b3aSAndrew Morton /*
1836fee83b3aSAndrew Morton  * Do static interleaving for a VMA with known offset @n.  Returns the n'th
1837fee83b3aSAndrew Morton  * node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the
1838fee83b3aSAndrew Morton  * number of present nodes.
1839fee83b3aSAndrew Morton  */
184098c70baaSLaurent Dufour static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
18411da177e4SLinus Torvalds {
1842dfcd3c0dSAndi Kleen 	unsigned nnodes = nodes_weight(pol->v.nodes);
1843f5b087b5SDavid Rientjes 	unsigned target;
1844fee83b3aSAndrew Morton 	int i;
1845fee83b3aSAndrew Morton 	int nid;
18461da177e4SLinus Torvalds 
1847f5b087b5SDavid Rientjes 	if (!nnodes)
1848f5b087b5SDavid Rientjes 		return numa_node_id();
1849fee83b3aSAndrew Morton 	target = (unsigned int)n % nnodes;
1850fee83b3aSAndrew Morton 	nid = first_node(pol->v.nodes);
1851fee83b3aSAndrew Morton 	for (i = 0; i < target; i++)
1852dfcd3c0dSAndi Kleen 		nid = next_node(nid, pol->v.nodes);
18531da177e4SLinus Torvalds 	return nid;
18541da177e4SLinus Torvalds }
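/*
 * Worked example (illustrative note): with pol->v.nodes = {0,2,5} and n = 4,
 * nnodes = 3 and target = 4 % 3 = 1, so the walk starts at node 0 and steps
 * once, landing on node 2.  Offsets 0..5 therefore map to 0,2,5,0,2,5.
 */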
18551da177e4SLinus Torvalds 
18565da7ca86SChristoph Lameter /* Determine a node number for interleave */
18575da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol,
18585da7ca86SChristoph Lameter 		 struct vm_area_struct *vma, unsigned long addr, int shift)
18595da7ca86SChristoph Lameter {
18605da7ca86SChristoph Lameter 	if (vma) {
18615da7ca86SChristoph Lameter 		unsigned long off;
18625da7ca86SChristoph Lameter 
18633b98b087SNishanth Aravamudan 		/*
18643b98b087SNishanth Aravamudan 		 * for small pages, there is no difference between
18653b98b087SNishanth Aravamudan 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
18663b98b087SNishanth Aravamudan 		 * for huge pages, since vm_pgoff is in units of small
18673b98b087SNishanth Aravamudan 		 * pages, we need to shift off the always 0 bits to get
18683b98b087SNishanth Aravamudan 		 * a useful offset.
18693b98b087SNishanth Aravamudan 		 */
18703b98b087SNishanth Aravamudan 		BUG_ON(shift < PAGE_SHIFT);
18713b98b087SNishanth Aravamudan 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
18725da7ca86SChristoph Lameter 		off += (addr - vma->vm_start) >> shift;
187398c70baaSLaurent Dufour 		return offset_il_node(pol, off);
18745da7ca86SChristoph Lameter 	} else
18755da7ca86SChristoph Lameter 		return interleave_nodes(pol);
18765da7ca86SChristoph Lameter }
18775da7ca86SChristoph Lameter 
187800ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS
1879480eccf9SLee Schermerhorn /*
188004ec6264SVlastimil Babka  * huge_node(@vma, @addr, @gfp_flags, @mpol)
1881b46e14acSFabian Frederick  * @vma: virtual memory area whose policy is sought
1882b46e14acSFabian Frederick  * @addr: address in @vma for shared policy lookup and interleave policy
1883b46e14acSFabian Frederick  * @gfp_flags: for requested zone
1884b46e14acSFabian Frederick  * @mpol: pointer to mempolicy pointer for reference counted mempolicy
1885b46e14acSFabian Frederick  * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
1886480eccf9SLee Schermerhorn  *
188704ec6264SVlastimil Babka  * Returns a nid suitable for a huge page allocation and a pointer
188852cd3b07SLee Schermerhorn  * to the struct mempolicy for conditional unref after allocation.
188952cd3b07SLee Schermerhorn  * If the effective policy is 'BIND', returns a pointer to the mempolicy's
189052cd3b07SLee Schermerhorn  * @nodemask for filtering the zonelist.
1891c0ff7453SMiao Xie  *
1892d26914d1SMel Gorman  * Must be protected by read_mems_allowed_begin()
1893480eccf9SLee Schermerhorn  */
189404ec6264SVlastimil Babka int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
189504ec6264SVlastimil Babka 				struct mempolicy **mpol, nodemask_t **nodemask)
18965da7ca86SChristoph Lameter {
189704ec6264SVlastimil Babka 	int nid;
18985da7ca86SChristoph Lameter 
1899dd6eecb9SOleg Nesterov 	*mpol = get_vma_policy(vma, addr);
190019770b32SMel Gorman 	*nodemask = NULL;	/* assume !MPOL_BIND */
19015da7ca86SChristoph Lameter 
190252cd3b07SLee Schermerhorn 	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
190304ec6264SVlastimil Babka 		nid = interleave_nid(*mpol, vma, addr,
190404ec6264SVlastimil Babka 					huge_page_shift(hstate_vma(vma)));
190552cd3b07SLee Schermerhorn 	} else {
190604ec6264SVlastimil Babka 		nid = policy_node(gfp_flags, *mpol, numa_node_id());
190752cd3b07SLee Schermerhorn 		if ((*mpol)->mode == MPOL_BIND)
190852cd3b07SLee Schermerhorn 			*nodemask = &(*mpol)->v.nodes;
1909480eccf9SLee Schermerhorn 	}
191004ec6264SVlastimil Babka 	return nid;
19115da7ca86SChristoph Lameter }
191206808b08SLee Schermerhorn 
191306808b08SLee Schermerhorn /*
191406808b08SLee Schermerhorn  * init_nodemask_of_mempolicy
191506808b08SLee Schermerhorn  *
191606808b08SLee Schermerhorn  * If the current task's mempolicy is "default" [NULL], return 'false'
191706808b08SLee Schermerhorn  * to indicate default policy.  Otherwise, extract the policy nodemask
191806808b08SLee Schermerhorn  * for 'bind' or 'interleave' policy into the argument nodemask, or
191906808b08SLee Schermerhorn  * initialize the argument nodemask to contain the single node for
192006808b08SLee Schermerhorn  * 'preferred' or 'local' policy and return 'true' to indicate presence
192106808b08SLee Schermerhorn  * of non-default mempolicy.
192206808b08SLee Schermerhorn  *
192306808b08SLee Schermerhorn  * We don't bother with reference counting the mempolicy [mpol_get/put]
192406808b08SLee Schermerhorn  * because the current task is examining its own mempolicy and a task's
192506808b08SLee Schermerhorn  * mempolicy is only ever changed by the task itself.
192606808b08SLee Schermerhorn  *
192706808b08SLee Schermerhorn  * N.B., it is the caller's responsibility to free a returned nodemask.
192806808b08SLee Schermerhorn  */
192906808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask)
193006808b08SLee Schermerhorn {
193106808b08SLee Schermerhorn 	struct mempolicy *mempolicy;
193206808b08SLee Schermerhorn 	int nid;
193306808b08SLee Schermerhorn 
193406808b08SLee Schermerhorn 	if (!(mask && current->mempolicy))
193506808b08SLee Schermerhorn 		return false;
193606808b08SLee Schermerhorn 
1937c0ff7453SMiao Xie 	task_lock(current);
193806808b08SLee Schermerhorn 	mempolicy = current->mempolicy;
193906808b08SLee Schermerhorn 	switch (mempolicy->mode) {
194006808b08SLee Schermerhorn 	case MPOL_PREFERRED:
194106808b08SLee Schermerhorn 		if (mempolicy->flags & MPOL_F_LOCAL)
194206808b08SLee Schermerhorn 			nid = numa_node_id();
194306808b08SLee Schermerhorn 		else
194406808b08SLee Schermerhorn 			nid = mempolicy->v.preferred_node;
194506808b08SLee Schermerhorn 		init_nodemask_of_node(mask, nid);
194606808b08SLee Schermerhorn 		break;
194706808b08SLee Schermerhorn 
194806808b08SLee Schermerhorn 	case MPOL_BIND:
194906808b08SLee Schermerhorn 		/* Fall through */
195006808b08SLee Schermerhorn 	case MPOL_INTERLEAVE:
195106808b08SLee Schermerhorn 		*mask =  mempolicy->v.nodes;
195206808b08SLee Schermerhorn 		break;
195306808b08SLee Schermerhorn 
195406808b08SLee Schermerhorn 	default:
195506808b08SLee Schermerhorn 		BUG();
195606808b08SLee Schermerhorn 	}
1957c0ff7453SMiao Xie 	task_unlock(current);
195806808b08SLee Schermerhorn 
195906808b08SLee Schermerhorn 	return true;
196006808b08SLee Schermerhorn }
196100ac59adSChen, Kenneth W #endif
19625da7ca86SChristoph Lameter 
19636f48d0ebSDavid Rientjes /*
19646f48d0ebSDavid Rientjes  * mempolicy_nodemask_intersects
19656f48d0ebSDavid Rientjes  *
19666f48d0ebSDavid Rientjes  * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
19676f48d0ebSDavid Rientjes  * policy.  Otherwise, check for intersection between mask and the policy
19686f48d0ebSDavid Rientjes  * nodemask for 'bind' or 'interleave' policy.  For 'preferred' or 'local'
19696f48d0ebSDavid Rientjes  * policy, always return true since it may allocate elsewhere on fallback.
19706f48d0ebSDavid Rientjes  *
19716f48d0ebSDavid Rientjes  * Takes task_lock(tsk) to prevent freeing of its mempolicy.
19726f48d0ebSDavid Rientjes  */
19736f48d0ebSDavid Rientjes bool mempolicy_nodemask_intersects(struct task_struct *tsk,
19746f48d0ebSDavid Rientjes 					const nodemask_t *mask)
19756f48d0ebSDavid Rientjes {
19766f48d0ebSDavid Rientjes 	struct mempolicy *mempolicy;
19776f48d0ebSDavid Rientjes 	bool ret = true;
19786f48d0ebSDavid Rientjes 
19796f48d0ebSDavid Rientjes 	if (!mask)
19806f48d0ebSDavid Rientjes 		return ret;
19816f48d0ebSDavid Rientjes 	task_lock(tsk);
19826f48d0ebSDavid Rientjes 	mempolicy = tsk->mempolicy;
19836f48d0ebSDavid Rientjes 	if (!mempolicy)
19846f48d0ebSDavid Rientjes 		goto out;
19856f48d0ebSDavid Rientjes 
19866f48d0ebSDavid Rientjes 	switch (mempolicy->mode) {
19876f48d0ebSDavid Rientjes 	case MPOL_PREFERRED:
19886f48d0ebSDavid Rientjes 		/*
19896f48d0ebSDavid Rientjes 		 * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to
19906f48d0ebSDavid Rientjes 		 * allocate from; they may fall back to other nodes when OOM.
19916f48d0ebSDavid Rientjes 		 * Thus, it's possible for tsk to have allocated memory from
19926f48d0ebSDavid Rientjes 		 * nodes in mask.
19936f48d0ebSDavid Rientjes 		 */
19946f48d0ebSDavid Rientjes 		break;
19956f48d0ebSDavid Rientjes 	case MPOL_BIND:
19966f48d0ebSDavid Rientjes 	case MPOL_INTERLEAVE:
19976f48d0ebSDavid Rientjes 		ret = nodes_intersects(mempolicy->v.nodes, *mask);
19986f48d0ebSDavid Rientjes 		break;
19996f48d0ebSDavid Rientjes 	default:
20006f48d0ebSDavid Rientjes 		BUG();
20016f48d0ebSDavid Rientjes 	}
20026f48d0ebSDavid Rientjes out:
20036f48d0ebSDavid Rientjes 	task_unlock(tsk);
20046f48d0ebSDavid Rientjes 	return ret;
20056f48d0ebSDavid Rientjes }
20066f48d0ebSDavid Rientjes 
20071da177e4SLinus Torvalds /* Allocate a page in interleaved policy.
20081da177e4SLinus Torvalds    Own path because it needs to do special accounting. */
2009662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2010662f3a0bSAndi Kleen 					unsigned nid)
20111da177e4SLinus Torvalds {
20121da177e4SLinus Torvalds 	struct page *page;
20131da177e4SLinus Torvalds 
201404ec6264SVlastimil Babka 	page = __alloc_pages(gfp, order, nid);
20154518085eSKemi Wang 	/* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
20164518085eSKemi Wang 	if (!static_branch_likely(&vm_numa_stat_key))
20174518085eSKemi Wang 		return page;
2018de55c8b2SAndrey Ryabinin 	if (page && page_to_nid(page) == nid) {
2019de55c8b2SAndrey Ryabinin 		preempt_disable();
2020de55c8b2SAndrey Ryabinin 		__inc_numa_state(page_zone(page), NUMA_INTERLEAVE_HIT);
2021de55c8b2SAndrey Ryabinin 		preempt_enable();
2022de55c8b2SAndrey Ryabinin 	}
20231da177e4SLinus Torvalds 	return page;
20241da177e4SLinus Torvalds }
20251da177e4SLinus Torvalds 
20261da177e4SLinus Torvalds /**
20270bbbc0b3SAndrea Arcangeli  * 	alloc_pages_vma	- Allocate a page for a VMA.
20281da177e4SLinus Torvalds  *
20291da177e4SLinus Torvalds  * 	@gfp:
20301da177e4SLinus Torvalds  *      %GFP_USER    user allocation.
20311da177e4SLinus Torvalds  *      %GFP_KERNEL  kernel allocations,
20321da177e4SLinus Torvalds  *      %GFP_HIGHMEM highmem/user allocations,
20331da177e4SLinus Torvalds  *      %GFP_FS      allocation should not call back into a file system.
20341da177e4SLinus Torvalds  *      %GFP_ATOMIC  don't sleep.
20351da177e4SLinus Torvalds  *
20360bbbc0b3SAndrea Arcangeli  *	@order: Order of the GFP allocation.
20371da177e4SLinus Torvalds  * 	@vma:  Pointer to VMA or NULL if not available.
20381da177e4SLinus Torvalds  *	@addr: Virtual Address of the allocation. Must be inside the VMA.
2039be97a41bSVlastimil Babka  *	@node: Which node to prefer for allocation (modulo policy).
2040356ff8a9SDavid Rientjes  *	@hugepage: for hugepages try only the preferred node if possible
20411da177e4SLinus Torvalds  *
20421da177e4SLinus Torvalds  * 	This function allocates a page from the kernel page pool and applies
20431da177e4SLinus Torvalds  *	a NUMA policy associated with the VMA or the current process.
20441da177e4SLinus Torvalds  *	When VMA is not NULL, the caller must hold down_read on the mmap_sem of the
20451da177e4SLinus Torvalds  *	mm_struct of the VMA to prevent it from going away. Should be used for
2046be97a41bSVlastimil Babka  *	all allocations for pages that will be mapped into user space. Returns
2047be97a41bSVlastimil Babka  *	NULL when no page can be allocated.
20481da177e4SLinus Torvalds  */
20491da177e4SLinus Torvalds struct page *
20500bbbc0b3SAndrea Arcangeli alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
2051356ff8a9SDavid Rientjes 		unsigned long addr, int node, bool hugepage)
20521da177e4SLinus Torvalds {
2053cc9a6c87SMel Gorman 	struct mempolicy *pol;
2054c0ff7453SMiao Xie 	struct page *page;
205504ec6264SVlastimil Babka 	int preferred_nid;
2056be97a41bSVlastimil Babka 	nodemask_t *nmask;
20571da177e4SLinus Torvalds 
2058dd6eecb9SOleg Nesterov 	pol = get_vma_policy(vma, addr);
2059cc9a6c87SMel Gorman 
2060be97a41bSVlastimil Babka 	if (pol->mode == MPOL_INTERLEAVE) {
20611da177e4SLinus Torvalds 		unsigned nid;
20625da7ca86SChristoph Lameter 
20638eac563cSAndi Kleen 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
206452cd3b07SLee Schermerhorn 		mpol_cond_put(pol);
20650bbbc0b3SAndrea Arcangeli 		page = alloc_page_interleave(gfp, order, nid);
2066be97a41bSVlastimil Babka 		goto out;
20671da177e4SLinus Torvalds 	}
20681da177e4SLinus Torvalds 
2069356ff8a9SDavid Rientjes 	if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
2070356ff8a9SDavid Rientjes 		int hpage_node = node;
2071356ff8a9SDavid Rientjes 
2072356ff8a9SDavid Rientjes 		/*
2073356ff8a9SDavid Rientjes 		 * For hugepage allocation and non-interleave policy which
2074356ff8a9SDavid Rientjes 		 * allows the current node (or other explicitly preferred
2075356ff8a9SDavid Rientjes 		 * node) we only try to allocate from the current/preferred
2076356ff8a9SDavid Rientjes 		 * node and don't fall back to other nodes, as the cost of
2077356ff8a9SDavid Rientjes 		 * remote accesses would likely offset THP benefits.
2078356ff8a9SDavid Rientjes 		 *
2079356ff8a9SDavid Rientjes 		 * If the policy is interleave, or does not allow the current
2080356ff8a9SDavid Rientjes 		 * node in its nodemask, we allocate the standard way.
2081356ff8a9SDavid Rientjes 		 */
2082356ff8a9SDavid Rientjes 		if (pol->mode == MPOL_PREFERRED && !(pol->flags & MPOL_F_LOCAL))
2083356ff8a9SDavid Rientjes 			hpage_node = pol->v.preferred_node;
2084356ff8a9SDavid Rientjes 
2085356ff8a9SDavid Rientjes 		nmask = policy_nodemask(gfp, pol);
2086356ff8a9SDavid Rientjes 		if (!nmask || node_isset(hpage_node, *nmask)) {
2087356ff8a9SDavid Rientjes 			mpol_cond_put(pol);
2088356ff8a9SDavid Rientjes 			page = __alloc_pages_node(hpage_node,
2089356ff8a9SDavid Rientjes 						gfp | __GFP_THISNODE, order);
2090356ff8a9SDavid Rientjes 			goto out;
2091356ff8a9SDavid Rientjes 		}
2092356ff8a9SDavid Rientjes 	}
2093356ff8a9SDavid Rientjes 
2094077fcf11SAneesh Kumar K.V 	nmask = policy_nodemask(gfp, pol);
209504ec6264SVlastimil Babka 	preferred_nid = policy_node(gfp, pol, node);
209604ec6264SVlastimil Babka 	page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
2097d51e9894SVlastimil Babka 	mpol_cond_put(pol);
2098be97a41bSVlastimil Babka out:
2099077fcf11SAneesh Kumar K.V 	return page;
2100077fcf11SAneesh Kumar K.V }
2101077fcf11SAneesh Kumar K.V 
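/*
 * Illustrative caller sketch (added note; the real call sites live elsewhere
 * in the tree): a fault path allocating one movable page for a user mapping
 * would typically do
 *
 *	page = alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, address,
 *			       numa_node_id(), false);
 *
 * and, per the policy resolved above, either get a page from the interleave
 * path or fall through to __alloc_pages_nodemask() with the policy's
 * preferred node and nodemask.
 */
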
21021da177e4SLinus Torvalds /**
21031da177e4SLinus Torvalds  * 	alloc_pages_current - Allocate pages.
21041da177e4SLinus Torvalds  *
21051da177e4SLinus Torvalds  *	@gfp:
21061da177e4SLinus Torvalds  *		%GFP_USER   user allocation,
21071da177e4SLinus Torvalds  *      	%GFP_KERNEL kernel allocation,
21081da177e4SLinus Torvalds  *      	%GFP_HIGHMEM highmem allocation,
21091da177e4SLinus Torvalds  *      	%GFP_FS     don't call back into a file system.
21101da177e4SLinus Torvalds  *      	%GFP_ATOMIC don't sleep.
21111da177e4SLinus Torvalds  *	@order: Power of two of allocation size in pages. 0 is a single page.
21121da177e4SLinus Torvalds  *
21131da177e4SLinus Torvalds  *	Allocate a page from the kernel page pool.  When not in
21141da177e4SLinus Torvalds  *	interrupt context, apply the current process NUMA policy.
21151da177e4SLinus Torvalds  *	Returns NULL when no page can be allocated.
21161da177e4SLinus Torvalds  */
2117dd0fc66fSAl Viro struct page *alloc_pages_current(gfp_t gfp, unsigned order)
21181da177e4SLinus Torvalds {
21198d90274bSOleg Nesterov 	struct mempolicy *pol = &default_policy;
2120c0ff7453SMiao Xie 	struct page *page;
21211da177e4SLinus Torvalds 
21228d90274bSOleg Nesterov 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
21238d90274bSOleg Nesterov 		pol = get_task_policy(current);
212452cd3b07SLee Schermerhorn 
212552cd3b07SLee Schermerhorn 	/*
212652cd3b07SLee Schermerhorn 	 * No reference counting needed for current->mempolicy
212752cd3b07SLee Schermerhorn 	 * nor system default_policy
212852cd3b07SLee Schermerhorn 	 * or the system default_policy
212945c4745aSLee Schermerhorn 	if (pol->mode == MPOL_INTERLEAVE)
2130c0ff7453SMiao Xie 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2131c0ff7453SMiao Xie 	else
2132c0ff7453SMiao Xie 		page = __alloc_pages_nodemask(gfp, order,
213304ec6264SVlastimil Babka 				policy_node(gfp, pol, numa_node_id()),
21345c4b4be3SAndi Kleen 				policy_nodemask(gfp, pol));
2135cc9a6c87SMel Gorman 
2136c0ff7453SMiao Xie 	return page;
21371da177e4SLinus Torvalds }
21381da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_pages_current);
21391da177e4SLinus Torvalds 
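/*
 * Illustrative note: with CONFIG_NUMA, the generic alloc_pages() helper in
 * include/linux/gfp.h resolves to alloc_pages_current(), so an ordinary
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 0);
 *
 * already honours the calling task's mempolicy when one is installed.
 */
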
2140ef0855d3SOleg Nesterov int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2141ef0855d3SOleg Nesterov {
2142ef0855d3SOleg Nesterov 	struct mempolicy *pol = mpol_dup(vma_policy(src));
2143ef0855d3SOleg Nesterov 
2144ef0855d3SOleg Nesterov 	if (IS_ERR(pol))
2145ef0855d3SOleg Nesterov 		return PTR_ERR(pol);
2146ef0855d3SOleg Nesterov 	dst->vm_policy = pol;
2147ef0855d3SOleg Nesterov 	return 0;
2148ef0855d3SOleg Nesterov }
2149ef0855d3SOleg Nesterov 
21504225399aSPaul Jackson /*
2151846a16bfSLee Schermerhorn  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
21524225399aSPaul Jackson  * rebinds the mempolicy it's copying by calling mpol_rebind_policy()
21534225399aSPaul Jackson  * with the mems_allowed returned by cpuset_mems_allowed().  This
21544225399aSPaul Jackson  * keeps mempolicies cpuset relative after its cpuset moves.  See
21554225399aSPaul Jackson  * further kernel/cpuset.c update_nodemask().
2156708c1bbcSMiao Xie  *
2157708c1bbcSMiao Xie  * current's mempolicy may be rebound by another task (the task that changes
2158708c1bbcSMiao Xie  * the cpuset's mems), so we needn't do rebind work for the current task.
21594225399aSPaul Jackson  */
21604225399aSPaul Jackson 
2161846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */
2162846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old)
21631da177e4SLinus Torvalds {
21641da177e4SLinus Torvalds 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
21651da177e4SLinus Torvalds 
21661da177e4SLinus Torvalds 	if (!new)
21671da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
2168708c1bbcSMiao Xie 
2169708c1bbcSMiao Xie 	/* task's mempolicy is protected by alloc_lock */
2170708c1bbcSMiao Xie 	if (old == current->mempolicy) {
2171708c1bbcSMiao Xie 		task_lock(current);
2172708c1bbcSMiao Xie 		*new = *old;
2173708c1bbcSMiao Xie 		task_unlock(current);
2174708c1bbcSMiao Xie 	} else
2175708c1bbcSMiao Xie 		*new = *old;
2176708c1bbcSMiao Xie 
21774225399aSPaul Jackson 	if (current_cpuset_is_being_rebound()) {
21784225399aSPaul Jackson 		nodemask_t mems = cpuset_mems_allowed(current);
2179213980c0SVlastimil Babka 		mpol_rebind_policy(new, &mems);
21804225399aSPaul Jackson 	}
21811da177e4SLinus Torvalds 	atomic_set(&new->refcnt, 1);
21821da177e4SLinus Torvalds 	return new;
21831da177e4SLinus Torvalds }
21841da177e4SLinus Torvalds 
21851da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */
2186fcfb4dccSKOSAKI Motohiro bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
21871da177e4SLinus Torvalds {
21881da177e4SLinus Torvalds 	if (!a || !b)
2189fcfb4dccSKOSAKI Motohiro 		return false;
219045c4745aSLee Schermerhorn 	if (a->mode != b->mode)
2191fcfb4dccSKOSAKI Motohiro 		return false;
219219800502SBob Liu 	if (a->flags != b->flags)
2193fcfb4dccSKOSAKI Motohiro 		return false;
219419800502SBob Liu 	if (mpol_store_user_nodemask(a))
219519800502SBob Liu 		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2196fcfb4dccSKOSAKI Motohiro 			return false;
219719800502SBob Liu 
219845c4745aSLee Schermerhorn 	switch (a->mode) {
219919770b32SMel Gorman 	case MPOL_BIND:
220019770b32SMel Gorman 		/* Fall through */
22011da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
2202fcfb4dccSKOSAKI Motohiro 		return !!nodes_equal(a->v.nodes, b->v.nodes);
22031da177e4SLinus Torvalds 	case MPOL_PREFERRED:
22048970a63eSYisheng Xie 		/* a's ->flags is the same as b's */
22058970a63eSYisheng Xie 		if (a->flags & MPOL_F_LOCAL)
22068970a63eSYisheng Xie 			return true;
220775719661SNamhyung Kim 		return a->v.preferred_node == b->v.preferred_node;
22081da177e4SLinus Torvalds 	default:
22091da177e4SLinus Torvalds 		BUG();
2210fcfb4dccSKOSAKI Motohiro 		return false;
22111da177e4SLinus Torvalds 	}
22121da177e4SLinus Torvalds }
22131da177e4SLinus Torvalds 
22141da177e4SLinus Torvalds /*
22151da177e4SLinus Torvalds  * Shared memory backing store policy support.
22161da177e4SLinus Torvalds  *
22171da177e4SLinus Torvalds  * Remember policies even when nobody has shared memory mapped.
22181da177e4SLinus Torvalds  * The policies are kept in a red-black tree linked from the inode.
22194a8c7bb5SNathan Zimmer  * They are protected by the sp->lock rwlock, which should be held
22201da177e4SLinus Torvalds  * for any accesses to the tree.
22211da177e4SLinus Torvalds  */
22221da177e4SLinus Torvalds 
22234a8c7bb5SNathan Zimmer /*
22244a8c7bb5SNathan Zimmer  * Look up the first element intersecting start-end.  Caller holds sp->lock for
22254a8c7bb5SNathan Zimmer  * reading or for writing
22264a8c7bb5SNathan Zimmer  */
22271da177e4SLinus Torvalds static struct sp_node *
22281da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
22291da177e4SLinus Torvalds {
22301da177e4SLinus Torvalds 	struct rb_node *n = sp->root.rb_node;
22311da177e4SLinus Torvalds 
22321da177e4SLinus Torvalds 	while (n) {
22331da177e4SLinus Torvalds 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
22341da177e4SLinus Torvalds 
22351da177e4SLinus Torvalds 		if (start >= p->end)
22361da177e4SLinus Torvalds 			n = n->rb_right;
22371da177e4SLinus Torvalds 		else if (end <= p->start)
22381da177e4SLinus Torvalds 			n = n->rb_left;
22391da177e4SLinus Torvalds 		else
22401da177e4SLinus Torvalds 			break;
22411da177e4SLinus Torvalds 	}
22421da177e4SLinus Torvalds 	if (!n)
22431da177e4SLinus Torvalds 		return NULL;
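	/*
	 * An intersecting node was found; walk back through predecessors
	 * that still overlap [start, end) so the first (lowest) intersecting
	 * range is returned.
	 */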
22441da177e4SLinus Torvalds 	for (;;) {
22451da177e4SLinus Torvalds 		struct sp_node *w = NULL;
22461da177e4SLinus Torvalds 		struct rb_node *prev = rb_prev(n);
22471da177e4SLinus Torvalds 		if (!prev)
22481da177e4SLinus Torvalds 			break;
22491da177e4SLinus Torvalds 		w = rb_entry(prev, struct sp_node, nd);
22501da177e4SLinus Torvalds 		if (w->end <= start)
22511da177e4SLinus Torvalds 			break;
22521da177e4SLinus Torvalds 		n = prev;
22531da177e4SLinus Torvalds 	}
22541da177e4SLinus Torvalds 	return rb_entry(n, struct sp_node, nd);
22551da177e4SLinus Torvalds }
22561da177e4SLinus Torvalds 
22574a8c7bb5SNathan Zimmer /*
22584a8c7bb5SNathan Zimmer  * Insert a new shared policy into the list.  Caller holds sp->lock for
22594a8c7bb5SNathan Zimmer  * writing.
22604a8c7bb5SNathan Zimmer  */
22611da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new)
22621da177e4SLinus Torvalds {
22631da177e4SLinus Torvalds 	struct rb_node **p = &sp->root.rb_node;
22641da177e4SLinus Torvalds 	struct rb_node *parent = NULL;
22651da177e4SLinus Torvalds 	struct sp_node *nd;
22661da177e4SLinus Torvalds 
22671da177e4SLinus Torvalds 	while (*p) {
22681da177e4SLinus Torvalds 		parent = *p;
22691da177e4SLinus Torvalds 		nd = rb_entry(parent, struct sp_node, nd);
22701da177e4SLinus Torvalds 		if (new->start < nd->start)
22711da177e4SLinus Torvalds 			p = &(*p)->rb_left;
22721da177e4SLinus Torvalds 		else if (new->end > nd->end)
22731da177e4SLinus Torvalds 			p = &(*p)->rb_right;
22741da177e4SLinus Torvalds 		else
22751da177e4SLinus Torvalds 			BUG();
22761da177e4SLinus Torvalds 	}
22771da177e4SLinus Torvalds 	rb_link_node(&new->nd, parent, p);
22781da177e4SLinus Torvalds 	rb_insert_color(&new->nd, &sp->root);
2279140d5a49SPaul Mundt 	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
228045c4745aSLee Schermerhorn 		 new->policy ? new->policy->mode : 0);
22811da177e4SLinus Torvalds }
22821da177e4SLinus Torvalds 
22831da177e4SLinus Torvalds /* Find shared policy intersecting idx */
22841da177e4SLinus Torvalds struct mempolicy *
22851da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
22861da177e4SLinus Torvalds {
22871da177e4SLinus Torvalds 	struct mempolicy *pol = NULL;
22881da177e4SLinus Torvalds 	struct sp_node *sn;
22891da177e4SLinus Torvalds 
22901da177e4SLinus Torvalds 	if (!sp->root.rb_node)
22911da177e4SLinus Torvalds 		return NULL;
22924a8c7bb5SNathan Zimmer 	read_lock(&sp->lock);
22931da177e4SLinus Torvalds 	sn = sp_lookup(sp, idx, idx+1);
22941da177e4SLinus Torvalds 	if (sn) {
22951da177e4SLinus Torvalds 		mpol_get(sn->policy);
22961da177e4SLinus Torvalds 		pol = sn->policy;
22971da177e4SLinus Torvalds 	}
22984a8c7bb5SNathan Zimmer 	read_unlock(&sp->lock);
22991da177e4SLinus Torvalds 	return pol;
23001da177e4SLinus Torvalds }
23011da177e4SLinus Torvalds 
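/*
 * Illustrative sketch, names simplified: a shared-memory filesystem such as
 * shmem would fetch the per-index policy for a page allocation and drop the
 * reference afterwards:
 *
 *	struct mempolicy *mpol;
 *
 *	mpol = mpol_shared_policy_lookup(&info->policy, index);
 *	... allocate the page under mpol ...
 *	mpol_cond_put(mpol);
 */
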
230263f74ca2SKOSAKI Motohiro static void sp_free(struct sp_node *n)
230363f74ca2SKOSAKI Motohiro {
230463f74ca2SKOSAKI Motohiro 	mpol_put(n->policy);
230563f74ca2SKOSAKI Motohiro 	kmem_cache_free(sn_cache, n);
230663f74ca2SKOSAKI Motohiro }
230763f74ca2SKOSAKI Motohiro 
2308771fb4d8SLee Schermerhorn /**
2309771fb4d8SLee Schermerhorn  * mpol_misplaced - check whether current page node is valid in policy
2310771fb4d8SLee Schermerhorn  *
2311b46e14acSFabian Frederick  * @page: page to be checked
2312b46e14acSFabian Frederick  * @vma: vm area where page mapped
2313b46e14acSFabian Frederick  * @addr: virtual address where page mapped
2314771fb4d8SLee Schermerhorn  *
2315771fb4d8SLee Schermerhorn  * Look up the current policy node id for vma,addr and "compare to" the page's
2316771fb4d8SLee Schermerhorn  * node id.
2317771fb4d8SLee Schermerhorn  *
2318771fb4d8SLee Schermerhorn  * Returns:
2319771fb4d8SLee Schermerhorn  *	-1	- not misplaced, page is in the right node
2320771fb4d8SLee Schermerhorn  *	node	- node id where the page should be
2321771fb4d8SLee Schermerhorn  *
2322771fb4d8SLee Schermerhorn  * Policy determination "mimics" alloc_page_vma().
2323771fb4d8SLee Schermerhorn  * Called from fault path where we know the vma and faulting address.
2324771fb4d8SLee Schermerhorn  */
2325771fb4d8SLee Schermerhorn int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2326771fb4d8SLee Schermerhorn {
2327771fb4d8SLee Schermerhorn 	struct mempolicy *pol;
2328c33d6c06SMel Gorman 	struct zoneref *z;
2329771fb4d8SLee Schermerhorn 	int curnid = page_to_nid(page);
2330771fb4d8SLee Schermerhorn 	unsigned long pgoff;
233190572890SPeter Zijlstra 	int thiscpu = raw_smp_processor_id();
233290572890SPeter Zijlstra 	int thisnid = cpu_to_node(thiscpu);
233398fa15f3SAnshuman Khandual 	int polnid = NUMA_NO_NODE;
2334771fb4d8SLee Schermerhorn 	int ret = -1;
2335771fb4d8SLee Schermerhorn 
2336dd6eecb9SOleg Nesterov 	pol = get_vma_policy(vma, addr);
2337771fb4d8SLee Schermerhorn 	if (!(pol->flags & MPOL_F_MOF))
2338771fb4d8SLee Schermerhorn 		goto out;
2339771fb4d8SLee Schermerhorn 
2340771fb4d8SLee Schermerhorn 	switch (pol->mode) {
2341771fb4d8SLee Schermerhorn 	case MPOL_INTERLEAVE:
2342771fb4d8SLee Schermerhorn 		pgoff = vma->vm_pgoff;
2343771fb4d8SLee Schermerhorn 		pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
234498c70baaSLaurent Dufour 		polnid = offset_il_node(pol, pgoff);
2345771fb4d8SLee Schermerhorn 		break;
2346771fb4d8SLee Schermerhorn 
2347771fb4d8SLee Schermerhorn 	case MPOL_PREFERRED:
2348771fb4d8SLee Schermerhorn 		if (pol->flags & MPOL_F_LOCAL)
2349771fb4d8SLee Schermerhorn 			polnid = numa_node_id();
2350771fb4d8SLee Schermerhorn 		else
2351771fb4d8SLee Schermerhorn 			polnid = pol->v.preferred_node;
2352771fb4d8SLee Schermerhorn 		break;
2353771fb4d8SLee Schermerhorn 
2354771fb4d8SLee Schermerhorn 	case MPOL_BIND:
2355c33d6c06SMel Gorman 
2356771fb4d8SLee Schermerhorn 		/*
2357771fb4d8SLee Schermerhorn 		 * MPOL_BIND allows binding to multiple nodes.
2358771fb4d8SLee Schermerhorn 		 * Use the current page's node if it is in the policy nodemask,
2359771fb4d8SLee Schermerhorn 		 * else select the nearest allowed node, if any.
2360771fb4d8SLee Schermerhorn 		 * If there are no allowed nodes, use the current node [!misplaced].
2361771fb4d8SLee Schermerhorn 		 */
2362771fb4d8SLee Schermerhorn 		if (node_isset(curnid, pol->v.nodes))
2363771fb4d8SLee Schermerhorn 			goto out;
2364c33d6c06SMel Gorman 		z = first_zones_zonelist(
2365771fb4d8SLee Schermerhorn 				node_zonelist(numa_node_id(), GFP_HIGHUSER),
2366771fb4d8SLee Schermerhorn 				gfp_zone(GFP_HIGHUSER),
2367c33d6c06SMel Gorman 				&pol->v.nodes);
2368c1093b74SPavel Tatashin 		polnid = zone_to_nid(z->zone);
2369771fb4d8SLee Schermerhorn 		break;
2370771fb4d8SLee Schermerhorn 
2371771fb4d8SLee Schermerhorn 	default:
2372771fb4d8SLee Schermerhorn 		BUG();
2373771fb4d8SLee Schermerhorn 	}
23745606e387SMel Gorman 
23755606e387SMel Gorman 	/* Migrate the page towards the node whose CPU is referencing it */
2376e42c8ff2SMel Gorman 	if (pol->flags & MPOL_F_MORON) {
237790572890SPeter Zijlstra 		polnid = thisnid;
23785606e387SMel Gorman 
237910f39042SRik van Riel 		if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2380de1c9ce6SRik van Riel 			goto out;
2381de1c9ce6SRik van Riel 	}
2382e42c8ff2SMel Gorman 
2383771fb4d8SLee Schermerhorn 	if (curnid != polnid)
2384771fb4d8SLee Schermerhorn 		ret = polnid;
2385771fb4d8SLee Schermerhorn out:
2386771fb4d8SLee Schermerhorn 	mpol_cond_put(pol);
2387771fb4d8SLee Schermerhorn 
2388771fb4d8SLee Schermerhorn 	return ret;
2389771fb4d8SLee Schermerhorn }
2390771fb4d8SLee Schermerhorn 
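/*
 * Illustrative sketch, simplified from the NUMA hinting fault path: the
 * caller migrates the page when mpol_misplaced() names a better node
 * (migrate_misplaced_page() lives in mm/migrate.c):
 *
 *	target_nid = mpol_misplaced(page, vma, addr);
 *	if (target_nid != -1)
 *		migrate_misplaced_page(page, vma, target_nid);
 */
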
2391c11600e4SDavid Rientjes /*
2392c11600e4SDavid Rientjes  * Drop the (possibly final) reference to task->mempolicy.  It needs to be
2393c11600e4SDavid Rientjes  * dropped after task->mempolicy is set to NULL so that any allocation done as
2394c11600e4SDavid Rientjes  * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2395c11600e4SDavid Rientjes  * policy.
2396c11600e4SDavid Rientjes  */
2397c11600e4SDavid Rientjes void mpol_put_task_policy(struct task_struct *task)
2398c11600e4SDavid Rientjes {
2399c11600e4SDavid Rientjes 	struct mempolicy *pol;
2400c11600e4SDavid Rientjes 
2401c11600e4SDavid Rientjes 	task_lock(task);
2402c11600e4SDavid Rientjes 	pol = task->mempolicy;
2403c11600e4SDavid Rientjes 	task->mempolicy = NULL;
2404c11600e4SDavid Rientjes 	task_unlock(task);
2405c11600e4SDavid Rientjes 	mpol_put(pol);
2406c11600e4SDavid Rientjes }
2407c11600e4SDavid Rientjes 
24081da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n)
24091da177e4SLinus Torvalds {
2410140d5a49SPaul Mundt 	pr_debug("deleting %lx-%lx\n", n->start, n->end);
24111da177e4SLinus Torvalds 	rb_erase(&n->nd, &sp->root);
241263f74ca2SKOSAKI Motohiro 	sp_free(n);
24131da177e4SLinus Torvalds }
24141da177e4SLinus Torvalds 
241542288fe3SMel Gorman static void sp_node_init(struct sp_node *node, unsigned long start,
241642288fe3SMel Gorman 			unsigned long end, struct mempolicy *pol)
241742288fe3SMel Gorman {
241842288fe3SMel Gorman 	node->start = start;
241942288fe3SMel Gorman 	node->end = end;
242042288fe3SMel Gorman 	node->policy = pol;
242142288fe3SMel Gorman }
242242288fe3SMel Gorman 
2423dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2424dbcb0f19SAdrian Bunk 				struct mempolicy *pol)
24251da177e4SLinus Torvalds {
2426869833f2SKOSAKI Motohiro 	struct sp_node *n;
2427869833f2SKOSAKI Motohiro 	struct mempolicy *newpol;
24281da177e4SLinus Torvalds 
2429869833f2SKOSAKI Motohiro 	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
24301da177e4SLinus Torvalds 	if (!n)
24311da177e4SLinus Torvalds 		return NULL;
2432869833f2SKOSAKI Motohiro 
2433869833f2SKOSAKI Motohiro 	newpol = mpol_dup(pol);
2434869833f2SKOSAKI Motohiro 	if (IS_ERR(newpol)) {
2435869833f2SKOSAKI Motohiro 		kmem_cache_free(sn_cache, n);
2436869833f2SKOSAKI Motohiro 		return NULL;
2437869833f2SKOSAKI Motohiro 	}
2438869833f2SKOSAKI Motohiro 	newpol->flags |= MPOL_F_SHARED;
243942288fe3SMel Gorman 	sp_node_init(n, start, end, newpol);
2440869833f2SKOSAKI Motohiro 
24411da177e4SLinus Torvalds 	return n;
24421da177e4SLinus Torvalds }
24431da177e4SLinus Torvalds 
24441da177e4SLinus Torvalds /* Replace a policy range. */
24451da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
24461da177e4SLinus Torvalds 				 unsigned long end, struct sp_node *new)
24471da177e4SLinus Torvalds {
2448b22d127aSMel Gorman 	struct sp_node *n;
244942288fe3SMel Gorman 	struct sp_node *n_new = NULL;
245042288fe3SMel Gorman 	struct mempolicy *mpol_new = NULL;
2451b22d127aSMel Gorman 	int ret = 0;
24521da177e4SLinus Torvalds 
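	/*
	 * When splitting an existing range requires new nodes, they are
	 * allocated with sp->lock dropped (see alloc_new below) and the
	 * whole lookup is restarted, since the tree may have changed in
	 * the meantime.
	 */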
245342288fe3SMel Gorman restart:
24544a8c7bb5SNathan Zimmer 	write_lock(&sp->lock);
24551da177e4SLinus Torvalds 	n = sp_lookup(sp, start, end);
24561da177e4SLinus Torvalds 	/* Take care of old policies in the same range. */
24571da177e4SLinus Torvalds 	while (n && n->start < end) {
24581da177e4SLinus Torvalds 		struct rb_node *next = rb_next(&n->nd);
24591da177e4SLinus Torvalds 		if (n->start >= start) {
24601da177e4SLinus Torvalds 			if (n->end <= end)
24611da177e4SLinus Torvalds 				sp_delete(sp, n);
24621da177e4SLinus Torvalds 			else
24631da177e4SLinus Torvalds 				n->start = end;
24641da177e4SLinus Torvalds 		} else {
24651da177e4SLinus Torvalds 			/* Old policy spanning whole new range. */
24661da177e4SLinus Torvalds 			if (n->end > end) {
246742288fe3SMel Gorman 				if (!n_new)
246842288fe3SMel Gorman 					goto alloc_new;
246942288fe3SMel Gorman 
247042288fe3SMel Gorman 				*mpol_new = *n->policy;
247142288fe3SMel Gorman 				atomic_set(&mpol_new->refcnt, 1);
24727880639cSKOSAKI Motohiro 				sp_node_init(n_new, end, n->end, mpol_new);
24731da177e4SLinus Torvalds 				n->end = start;
24745ca39575SHillf Danton 				sp_insert(sp, n_new);
247542288fe3SMel Gorman 				n_new = NULL;
247642288fe3SMel Gorman 				mpol_new = NULL;
24771da177e4SLinus Torvalds 				break;
24781da177e4SLinus Torvalds 			} else
24791da177e4SLinus Torvalds 				n->end = start;
24801da177e4SLinus Torvalds 		}
24811da177e4SLinus Torvalds 		if (!next)
24821da177e4SLinus Torvalds 			break;
24831da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
24841da177e4SLinus Torvalds 	}
24851da177e4SLinus Torvalds 	if (new)
24861da177e4SLinus Torvalds 		sp_insert(sp, new);
24874a8c7bb5SNathan Zimmer 	write_unlock(&sp->lock);
248842288fe3SMel Gorman 	ret = 0;
248942288fe3SMel Gorman 
249042288fe3SMel Gorman err_out:
249142288fe3SMel Gorman 	if (mpol_new)
249242288fe3SMel Gorman 		mpol_put(mpol_new);
249342288fe3SMel Gorman 	if (n_new)
249442288fe3SMel Gorman 		kmem_cache_free(sn_cache, n_new);
249542288fe3SMel Gorman 
2496b22d127aSMel Gorman 	return ret;
249742288fe3SMel Gorman 
249842288fe3SMel Gorman alloc_new:
24994a8c7bb5SNathan Zimmer 	write_unlock(&sp->lock);
250042288fe3SMel Gorman 	ret = -ENOMEM;
250142288fe3SMel Gorman 	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
250242288fe3SMel Gorman 	if (!n_new)
250342288fe3SMel Gorman 		goto err_out;
250442288fe3SMel Gorman 	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
250542288fe3SMel Gorman 	if (!mpol_new)
250642288fe3SMel Gorman 		goto err_out;
250742288fe3SMel Gorman 	goto restart;
25081da177e4SLinus Torvalds }
25091da177e4SLinus Torvalds 
251071fe804bSLee Schermerhorn /**
251171fe804bSLee Schermerhorn  * mpol_shared_policy_init - initialize shared policy for inode
251271fe804bSLee Schermerhorn  * @sp: pointer to inode shared policy
251371fe804bSLee Schermerhorn  * @mpol:  struct mempolicy to install
251471fe804bSLee Schermerhorn  *
251571fe804bSLee Schermerhorn  * Install non-NULL @mpol in inode's shared policy rb-tree.
251671fe804bSLee Schermerhorn  * On entry, the current task has a reference on a non-NULL @mpol.
251771fe804bSLee Schermerhorn  * This must be released on exit.
25184bfc4495SKAMEZAWA Hiroyuki  * This is called during get_inode(), so GFP_KERNEL can be used.
251971fe804bSLee Schermerhorn  */
252071fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
25217339ff83SRobin Holt {
252258568d2aSMiao Xie 	int ret;
252358568d2aSMiao Xie 
252471fe804bSLee Schermerhorn 	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
25254a8c7bb5SNathan Zimmer 	rwlock_init(&sp->lock);
25267339ff83SRobin Holt 
252771fe804bSLee Schermerhorn 	if (mpol) {
25287339ff83SRobin Holt 		struct vm_area_struct pvma;
252971fe804bSLee Schermerhorn 		struct mempolicy *new;
25304bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
25317339ff83SRobin Holt 
25324bfc4495SKAMEZAWA Hiroyuki 		if (!scratch)
25335c0c1654SLee Schermerhorn 			goto put_mpol;
253471fe804bSLee Schermerhorn 		/* contextualize the tmpfs mount point mempolicy */
253571fe804bSLee Schermerhorn 		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
253615d77835SLee Schermerhorn 		if (IS_ERR(new))
25370cae3457SDan Carpenter 			goto free_scratch; /* no valid nodemask intersection */
253858568d2aSMiao Xie 
253958568d2aSMiao Xie 		task_lock(current);
25404bfc4495SKAMEZAWA Hiroyuki 		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
254158568d2aSMiao Xie 		task_unlock(current);
254215d77835SLee Schermerhorn 		if (ret)
25435c0c1654SLee Schermerhorn 			goto put_new;
254471fe804bSLee Schermerhorn 
254571fe804bSLee Schermerhorn 		/* Create pseudo-vma that contains just the policy */
25462c4541e2SKirill A. Shutemov 		vma_init(&pvma, NULL);
254771fe804bSLee Schermerhorn 		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
254871fe804bSLee Schermerhorn 		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
254915d77835SLee Schermerhorn 
25505c0c1654SLee Schermerhorn put_new:
255171fe804bSLee Schermerhorn 		mpol_put(new);			/* drop initial ref */
25520cae3457SDan Carpenter free_scratch:
25534bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
25545c0c1654SLee Schermerhorn put_mpol:
25555c0c1654SLee Schermerhorn 		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
25567339ff83SRobin Holt 	}
25577339ff83SRobin Holt }
25587339ff83SRobin Holt 
25591da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info,
25601da177e4SLinus Torvalds 			struct vm_area_struct *vma, struct mempolicy *npol)
25611da177e4SLinus Torvalds {
25621da177e4SLinus Torvalds 	int err;
25631da177e4SLinus Torvalds 	struct sp_node *new = NULL;
25641da177e4SLinus Torvalds 	unsigned long sz = vma_pages(vma);
25651da177e4SLinus Torvalds 
2566028fec41SDavid Rientjes 	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
25671da177e4SLinus Torvalds 		 vma->vm_pgoff,
256845c4745aSLee Schermerhorn 		 sz, npol ? npol->mode : -1,
2569028fec41SDavid Rientjes 		 npol ? npol->flags : -1,
257000ef2d2fSDavid Rientjes 		 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
25711da177e4SLinus Torvalds 
25721da177e4SLinus Torvalds 	if (npol) {
25731da177e4SLinus Torvalds 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
25741da177e4SLinus Torvalds 		if (!new)
25751da177e4SLinus Torvalds 			return -ENOMEM;
25761da177e4SLinus Torvalds 	}
25771da177e4SLinus Torvalds 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
25781da177e4SLinus Torvalds 	if (err && new)
257963f74ca2SKOSAKI Motohiro 		sp_free(new);
25801da177e4SLinus Torvalds 	return err;
25811da177e4SLinus Torvalds }
25821da177e4SLinus Torvalds 
25831da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */
25841da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p)
25851da177e4SLinus Torvalds {
25861da177e4SLinus Torvalds 	struct sp_node *n;
25871da177e4SLinus Torvalds 	struct rb_node *next;
25881da177e4SLinus Torvalds 
25891da177e4SLinus Torvalds 	if (!p->root.rb_node)
25901da177e4SLinus Torvalds 		return;
25914a8c7bb5SNathan Zimmer 	write_lock(&p->lock);
25921da177e4SLinus Torvalds 	next = rb_first(&p->root);
25931da177e4SLinus Torvalds 	while (next) {
25941da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
25951da177e4SLinus Torvalds 		next = rb_next(&n->nd);
259663f74ca2SKOSAKI Motohiro 		sp_delete(p, n);
25971da177e4SLinus Torvalds 	}
25984a8c7bb5SNathan Zimmer 	write_unlock(&p->lock);
25991da177e4SLinus Torvalds }
26001da177e4SLinus Torvalds 
26011a687c2eSMel Gorman #ifdef CONFIG_NUMA_BALANCING
2602c297663cSMel Gorman static int __initdata numabalancing_override;
26031a687c2eSMel Gorman 
26041a687c2eSMel Gorman static void __init check_numabalancing_enable(void)
26051a687c2eSMel Gorman {
26061a687c2eSMel Gorman 	bool numabalancing_default = false;
26071a687c2eSMel Gorman 
26081a687c2eSMel Gorman 	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
26091a687c2eSMel Gorman 		numabalancing_default = true;
26101a687c2eSMel Gorman 
2611c297663cSMel Gorman 	/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2612c297663cSMel Gorman 	if (numabalancing_override)
2613c297663cSMel Gorman 		set_numabalancing_state(numabalancing_override == 1);
2614c297663cSMel Gorman 
2615b0dc2b9bSMel Gorman 	if (num_online_nodes() > 1 && !numabalancing_override) {
2616756a025fSJoe Perches 		pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
2617c297663cSMel Gorman 			numabalancing_default ? "Enabling" : "Disabling");
26181a687c2eSMel Gorman 		set_numabalancing_state(numabalancing_default);
26191a687c2eSMel Gorman 	}
26201a687c2eSMel Gorman }
26211a687c2eSMel Gorman 
26221a687c2eSMel Gorman static int __init setup_numabalancing(char *str)
26231a687c2eSMel Gorman {
26241a687c2eSMel Gorman 	int ret = 0;
26251a687c2eSMel Gorman 	if (!str)
26261a687c2eSMel Gorman 		goto out;
26271a687c2eSMel Gorman 
26281a687c2eSMel Gorman 	if (!strcmp(str, "enable")) {
2629c297663cSMel Gorman 		numabalancing_override = 1;
26301a687c2eSMel Gorman 		ret = 1;
26311a687c2eSMel Gorman 	} else if (!strcmp(str, "disable")) {
2632c297663cSMel Gorman 		numabalancing_override = -1;
26331a687c2eSMel Gorman 		ret = 1;
26341a687c2eSMel Gorman 	}
26351a687c2eSMel Gorman out:
26361a687c2eSMel Gorman 	if (!ret)
26374a404beaSAndrew Morton 		pr_warn("Unable to parse numa_balancing=\n");
26381a687c2eSMel Gorman 
26391a687c2eSMel Gorman 	return ret;
26401a687c2eSMel Gorman }
26411a687c2eSMel Gorman __setup("numa_balancing=", setup_numabalancing);
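/*
 * Illustrative: booting with "numa_balancing=disable" forces automatic NUMA
 * balancing off even if CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is set, while
 * "numa_balancing=enable" forces it on; without either, the Kconfig default
 * is applied on multi-node machines by check_numabalancing_enable().
 */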
26421a687c2eSMel Gorman #else
26431a687c2eSMel Gorman static inline void __init check_numabalancing_enable(void)
26441a687c2eSMel Gorman {
26451a687c2eSMel Gorman }
26461a687c2eSMel Gorman #endif /* CONFIG_NUMA_BALANCING */
26471a687c2eSMel Gorman 
26481da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */
26491da177e4SLinus Torvalds void __init numa_policy_init(void)
26501da177e4SLinus Torvalds {
2651b71636e2SPaul Mundt 	nodemask_t interleave_nodes;
2652b71636e2SPaul Mundt 	unsigned long largest = 0;
2653b71636e2SPaul Mundt 	int nid, prefer = 0;
2654b71636e2SPaul Mundt 
26551da177e4SLinus Torvalds 	policy_cache = kmem_cache_create("numa_policy",
26561da177e4SLinus Torvalds 					 sizeof(struct mempolicy),
265720c2df83SPaul Mundt 					 0, SLAB_PANIC, NULL);
26581da177e4SLinus Torvalds 
26591da177e4SLinus Torvalds 	sn_cache = kmem_cache_create("shared_policy_node",
26601da177e4SLinus Torvalds 				     sizeof(struct sp_node),
266120c2df83SPaul Mundt 				     0, SLAB_PANIC, NULL);
26621da177e4SLinus Torvalds 
26635606e387SMel Gorman 	for_each_node(nid) {
26645606e387SMel Gorman 		preferred_node_policy[nid] = (struct mempolicy) {
26655606e387SMel Gorman 			.refcnt = ATOMIC_INIT(1),
26665606e387SMel Gorman 			.mode = MPOL_PREFERRED,
26675606e387SMel Gorman 			.flags = MPOL_F_MOF | MPOL_F_MORON,
26685606e387SMel Gorman 			.v = { .preferred_node = nid, },
26695606e387SMel Gorman 		};
26705606e387SMel Gorman 	}
26715606e387SMel Gorman 
2672b71636e2SPaul Mundt 	/*
2673b71636e2SPaul Mundt 	 * Set interleaving policy for system init. Interleaving is only
2674b71636e2SPaul Mundt 	 * enabled across suitably sized nodes (default is >= 16MB); otherwise we
2675b71636e2SPaul Mundt 	 * fall back to the largest node if they're all smaller.
2676b71636e2SPaul Mundt 	 */
2677b71636e2SPaul Mundt 	nodes_clear(interleave_nodes);
267801f13bd6SLai Jiangshan 	for_each_node_state(nid, N_MEMORY) {
2679b71636e2SPaul Mundt 		unsigned long total_pages = node_present_pages(nid);
26801da177e4SLinus Torvalds 
2681b71636e2SPaul Mundt 		/* Preserve the largest node */
2682b71636e2SPaul Mundt 		if (largest < total_pages) {
2683b71636e2SPaul Mundt 			largest = total_pages;
2684b71636e2SPaul Mundt 			prefer = nid;
2685b71636e2SPaul Mundt 		}
2686b71636e2SPaul Mundt 
2687b71636e2SPaul Mundt 		/* Interleave this node? */
2688b71636e2SPaul Mundt 		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2689b71636e2SPaul Mundt 			node_set(nid, interleave_nodes);
2690b71636e2SPaul Mundt 	}
2691b71636e2SPaul Mundt 
2692b71636e2SPaul Mundt 	/* All too small, use the largest */
2693b71636e2SPaul Mundt 	if (unlikely(nodes_empty(interleave_nodes)))
2694b71636e2SPaul Mundt 		node_set(prefer, interleave_nodes);
2695b71636e2SPaul Mundt 
2696028fec41SDavid Rientjes 	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2697b1de0d13SMitchel Humpherys 		pr_err("%s: interleaving failed\n", __func__);
26981a687c2eSMel Gorman 
26991a687c2eSMel Gorman 	check_numabalancing_enable();
27001da177e4SLinus Torvalds }
27011da177e4SLinus Torvalds 
27028bccd85fSChristoph Lameter /* Reset policy of current process to default */
27031da177e4SLinus Torvalds void numa_default_policy(void)
27041da177e4SLinus Torvalds {
2705028fec41SDavid Rientjes 	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
27061da177e4SLinus Torvalds }
270768860ec1SPaul Jackson 
27084225399aSPaul Jackson /*
2709095f1fc4SLee Schermerhorn  * Parse and format mempolicy from/to strings
2710095f1fc4SLee Schermerhorn  */
2711095f1fc4SLee Schermerhorn 
2712095f1fc4SLee Schermerhorn /*
2713f2a07f40SHugh Dickins  * "local" is implemented internally by MPOL_PREFERRED with the MPOL_F_LOCAL flag.
27141a75a6c8SChristoph Lameter  */
2715345ace9cSLee Schermerhorn static const char * const policy_modes[] =
2716345ace9cSLee Schermerhorn {
2717345ace9cSLee Schermerhorn 	[MPOL_DEFAULT]    = "default",
2718345ace9cSLee Schermerhorn 	[MPOL_PREFERRED]  = "prefer",
2719345ace9cSLee Schermerhorn 	[MPOL_BIND]       = "bind",
2720345ace9cSLee Schermerhorn 	[MPOL_INTERLEAVE] = "interleave",
2721d3a71033SLee Schermerhorn 	[MPOL_LOCAL]      = "local",
2722345ace9cSLee Schermerhorn };
27231a75a6c8SChristoph Lameter 
2724095f1fc4SLee Schermerhorn 
2725095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS
2726095f1fc4SLee Schermerhorn /**
2727f2a07f40SHugh Dickins  * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2728095f1fc4SLee Schermerhorn  * @str:  string containing mempolicy to parse
272971fe804bSLee Schermerhorn  * @mpol:  pointer to struct mempolicy pointer, returned on success.
2730095f1fc4SLee Schermerhorn  *
2731095f1fc4SLee Schermerhorn  * Format of input:
2732095f1fc4SLee Schermerhorn  *	<mode>[=<flags>][:<nodelist>]
2733095f1fc4SLee Schermerhorn  *
273471fe804bSLee Schermerhorn  * On success, returns 0, else 1
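 *
 * Example (illustrative) strings accepted here, e.g. via the tmpfs "mpol="
 * mount option:
 *	"interleave:0-3"	MPOL_INTERLEAVE over nodes 0-3
 *	"prefer=static:1"	MPOL_PREFERRED on node 1, MPOL_F_STATIC_NODES
 *	"bind=relative:0,2"	MPOL_BIND over nodes 0 and 2, relative flag
 *	"local"			MPOL_PREFERRED with MPOL_F_LOCAL (no nodelist)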
2735095f1fc4SLee Schermerhorn  */
2736a7a88b23SHugh Dickins int mpol_parse_str(char *str, struct mempolicy **mpol)
2737095f1fc4SLee Schermerhorn {
273871fe804bSLee Schermerhorn 	struct mempolicy *new = NULL;
2739f2a07f40SHugh Dickins 	unsigned short mode_flags;
274071fe804bSLee Schermerhorn 	nodemask_t nodes;
2741095f1fc4SLee Schermerhorn 	char *nodelist = strchr(str, ':');
2742095f1fc4SLee Schermerhorn 	char *flags = strchr(str, '=');
2743dedf2c73Szhong jiang 	int err = 1, mode;
2744095f1fc4SLee Schermerhorn 
2745095f1fc4SLee Schermerhorn 	if (nodelist) {
2746095f1fc4SLee Schermerhorn 		/* NUL-terminate mode or flags string */
2747095f1fc4SLee Schermerhorn 		*nodelist++ = '\0';
274871fe804bSLee Schermerhorn 		if (nodelist_parse(nodelist, nodes))
2749095f1fc4SLee Schermerhorn 			goto out;
275001f13bd6SLai Jiangshan 		if (!nodes_subset(nodes, node_states[N_MEMORY]))
2751095f1fc4SLee Schermerhorn 			goto out;
275271fe804bSLee Schermerhorn 	} else
275371fe804bSLee Schermerhorn 		nodes_clear(nodes);
275471fe804bSLee Schermerhorn 
2755095f1fc4SLee Schermerhorn 	if (flags)
2756095f1fc4SLee Schermerhorn 		*flags++ = '\0';	/* terminate mode string */
2757095f1fc4SLee Schermerhorn 
2758dedf2c73Szhong jiang 	mode = match_string(policy_modes, MPOL_MAX, str);
2759dedf2c73Szhong jiang 	if (mode < 0)
2760095f1fc4SLee Schermerhorn 		goto out;
2761095f1fc4SLee Schermerhorn 
276271fe804bSLee Schermerhorn 	switch (mode) {
2763095f1fc4SLee Schermerhorn 	case MPOL_PREFERRED:
276471fe804bSLee Schermerhorn 		/*
276571fe804bSLee Schermerhorn 		 * Insist on a nodelist of one node only
276671fe804bSLee Schermerhorn 		 */
2767095f1fc4SLee Schermerhorn 		if (nodelist) {
2768095f1fc4SLee Schermerhorn 			char *rest = nodelist;
2769095f1fc4SLee Schermerhorn 			while (isdigit(*rest))
2770095f1fc4SLee Schermerhorn 				rest++;
2771926f2ae0SKOSAKI Motohiro 			if (*rest)
2772926f2ae0SKOSAKI Motohiro 				goto out;
2773095f1fc4SLee Schermerhorn 		}
2774095f1fc4SLee Schermerhorn 		break;
2775095f1fc4SLee Schermerhorn 	case MPOL_INTERLEAVE:
2776095f1fc4SLee Schermerhorn 		/*
2777095f1fc4SLee Schermerhorn 		 * Default to online nodes with memory if no nodelist
2778095f1fc4SLee Schermerhorn 		 */
2779095f1fc4SLee Schermerhorn 		if (!nodelist)
278001f13bd6SLai Jiangshan 			nodes = node_states[N_MEMORY];
27813f226aa1SLee Schermerhorn 		break;
278271fe804bSLee Schermerhorn 	case MPOL_LOCAL:
27833f226aa1SLee Schermerhorn 		/*
278471fe804bSLee Schermerhorn 		 * Don't allow a nodelist;  mpol_new() checks flags
27853f226aa1SLee Schermerhorn 		 */
278671fe804bSLee Schermerhorn 		if (nodelist)
27873f226aa1SLee Schermerhorn 			goto out;
278871fe804bSLee Schermerhorn 		mode = MPOL_PREFERRED;
27893f226aa1SLee Schermerhorn 		break;
2790413b43deSRavikiran G Thirumalai 	case MPOL_DEFAULT:
2791413b43deSRavikiran G Thirumalai 		/*
2792413b43deSRavikiran G Thirumalai 		 * Insist on an empty nodelist
2793413b43deSRavikiran G Thirumalai 		 */
2794413b43deSRavikiran G Thirumalai 		if (!nodelist)
2795413b43deSRavikiran G Thirumalai 			err = 0;
2796413b43deSRavikiran G Thirumalai 		goto out;
2797d69b2e63SKOSAKI Motohiro 	case MPOL_BIND:
279871fe804bSLee Schermerhorn 		/*
2799d69b2e63SKOSAKI Motohiro 		 * Insist on a nodelist
280071fe804bSLee Schermerhorn 		 */
2801d69b2e63SKOSAKI Motohiro 		if (!nodelist)
2802d69b2e63SKOSAKI Motohiro 			goto out;
2803095f1fc4SLee Schermerhorn 	}
2804095f1fc4SLee Schermerhorn 
280571fe804bSLee Schermerhorn 	mode_flags = 0;
2806095f1fc4SLee Schermerhorn 	if (flags) {
2807095f1fc4SLee Schermerhorn 		/*
2808095f1fc4SLee Schermerhorn 		 * Currently, we only support two mutually exclusive
2809095f1fc4SLee Schermerhorn 		 * mode flags.
2810095f1fc4SLee Schermerhorn 		 */
2811095f1fc4SLee Schermerhorn 		if (!strcmp(flags, "static"))
281271fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_STATIC_NODES;
2813095f1fc4SLee Schermerhorn 		else if (!strcmp(flags, "relative"))
281471fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_RELATIVE_NODES;
2815095f1fc4SLee Schermerhorn 		else
2816926f2ae0SKOSAKI Motohiro 			goto out;
2817095f1fc4SLee Schermerhorn 	}
281871fe804bSLee Schermerhorn 
281971fe804bSLee Schermerhorn 	new = mpol_new(mode, mode_flags, &nodes);
282071fe804bSLee Schermerhorn 	if (IS_ERR(new))
2821926f2ae0SKOSAKI Motohiro 		goto out;
2822926f2ae0SKOSAKI Motohiro 
2823f2a07f40SHugh Dickins 	/*
2824f2a07f40SHugh Dickins 	 * Save nodes for mpol_to_str() to show the tmpfs mount options
2825f2a07f40SHugh Dickins 	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2826f2a07f40SHugh Dickins 	 */
2827f2a07f40SHugh Dickins 	if (mode != MPOL_PREFERRED)
2828f2a07f40SHugh Dickins 		new->v.nodes = nodes;
2829f2a07f40SHugh Dickins 	else if (nodelist)
2830f2a07f40SHugh Dickins 		new->v.preferred_node = first_node(nodes);
2831f2a07f40SHugh Dickins 	else
2832f2a07f40SHugh Dickins 		new->flags |= MPOL_F_LOCAL;
2833f2a07f40SHugh Dickins 
2834f2a07f40SHugh Dickins 	/*
2835f2a07f40SHugh Dickins 	 * Save nodes for contextualization: this will be used to "clone"
2836f2a07f40SHugh Dickins 	 * the mempolicy in a specific context [cpuset] at a later time.
2837f2a07f40SHugh Dickins 	 */
2838e17f74afSLee Schermerhorn 	new->w.user_nodemask = nodes;
2839f2a07f40SHugh Dickins 
2840926f2ae0SKOSAKI Motohiro 	err = 0;
284171fe804bSLee Schermerhorn 
2842095f1fc4SLee Schermerhorn out:
2843095f1fc4SLee Schermerhorn 	/* Restore string for error message */
2844095f1fc4SLee Schermerhorn 	if (nodelist)
2845095f1fc4SLee Schermerhorn 		*--nodelist = ':';
2846095f1fc4SLee Schermerhorn 	if (flags)
2847095f1fc4SLee Schermerhorn 		*--flags = '=';
284871fe804bSLee Schermerhorn 	if (!err)
284971fe804bSLee Schermerhorn 		*mpol = new;
2850095f1fc4SLee Schermerhorn 	return err;
2851095f1fc4SLee Schermerhorn }
2852095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */
2853095f1fc4SLee Schermerhorn 
285471fe804bSLee Schermerhorn /**
285571fe804bSLee Schermerhorn  * mpol_to_str - format a mempolicy structure for printing
285671fe804bSLee Schermerhorn  * @buffer:  to contain formatted mempolicy string
285771fe804bSLee Schermerhorn  * @maxlen:  length of @buffer
285871fe804bSLee Schermerhorn  * @pol:  pointer to mempolicy to be formatted
285971fe804bSLee Schermerhorn  *
2860948927eeSDavid Rientjes  * Convert @pol into a string.  If @buffer is too short, truncate the string.
2861948927eeSDavid Rientjes  * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
2862948927eeSDavid Rientjes  * longest flag, "relative", and to display at least a few node ids.
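 *
 * Example (illustrative): an interleave policy over nodes 0-3 carrying
 * MPOL_F_STATIC_NODES formats as "interleave=static:0-3".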
28631a75a6c8SChristoph Lameter  */
2864948927eeSDavid Rientjes void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
28651a75a6c8SChristoph Lameter {
28661a75a6c8SChristoph Lameter 	char *p = buffer;
2867948927eeSDavid Rientjes 	nodemask_t nodes = NODE_MASK_NONE;
2868948927eeSDavid Rientjes 	unsigned short mode = MPOL_DEFAULT;
2869948927eeSDavid Rientjes 	unsigned short flags = 0;
28701a75a6c8SChristoph Lameter 
28718790c71aSDavid Rientjes 	if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
2872bea904d5SLee Schermerhorn 		mode = pol->mode;
2873948927eeSDavid Rientjes 		flags = pol->flags;
2874948927eeSDavid Rientjes 	}
2875bea904d5SLee Schermerhorn 
28761a75a6c8SChristoph Lameter 	switch (mode) {
28771a75a6c8SChristoph Lameter 	case MPOL_DEFAULT:
28781a75a6c8SChristoph Lameter 		break;
28791a75a6c8SChristoph Lameter 	case MPOL_PREFERRED:
2880fc36b8d3SLee Schermerhorn 		if (flags & MPOL_F_LOCAL)
2881f2a07f40SHugh Dickins 			mode = MPOL_LOCAL;
288253f2556bSLee Schermerhorn 		else
2883fc36b8d3SLee Schermerhorn 			node_set(pol->v.preferred_node, nodes);
28841a75a6c8SChristoph Lameter 		break;
28851a75a6c8SChristoph Lameter 	case MPOL_BIND:
28861a75a6c8SChristoph Lameter 	case MPOL_INTERLEAVE:
28871a75a6c8SChristoph Lameter 		nodes = pol->v.nodes;
28881a75a6c8SChristoph Lameter 		break;
28891a75a6c8SChristoph Lameter 	default:
2890948927eeSDavid Rientjes 		WARN_ON_ONCE(1);
2891948927eeSDavid Rientjes 		snprintf(p, maxlen, "unknown");
2892948927eeSDavid Rientjes 		return;
28931a75a6c8SChristoph Lameter 	}
28941a75a6c8SChristoph Lameter 
2895b7a9f420SDavid Rientjes 	p += snprintf(p, maxlen, "%s", policy_modes[mode]);
28961a75a6c8SChristoph Lameter 
2897fc36b8d3SLee Schermerhorn 	if (flags & MPOL_MODE_FLAGS) {
2898948927eeSDavid Rientjes 		p += snprintf(p, buffer + maxlen - p, "=");
2899f5b087b5SDavid Rientjes 
29002291990aSLee Schermerhorn 		/*
29012291990aSLee Schermerhorn 		 * Currently, the only defined flags are mutually exclusive
29022291990aSLee Schermerhorn 		 */
2903f5b087b5SDavid Rientjes 		if (flags & MPOL_F_STATIC_NODES)
29042291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "static");
29052291990aSLee Schermerhorn 		else if (flags & MPOL_F_RELATIVE_NODES)
29062291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "relative");
2907f5b087b5SDavid Rientjes 	}
2908f5b087b5SDavid Rientjes 
29099e763e0fSTejun Heo 	if (!nodes_empty(nodes))
29109e763e0fSTejun Heo 		p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
29119e763e0fSTejun Heo 			       nodemask_pr_args(&nodes));
29121a75a6c8SChristoph Lameter }
2913