xref: /openbmc/linux/mm/mempolicy.c (revision 04ec6264f28793e56114d0a367bb4d3af667ab6a)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  * Simple NUMA memory policy for the Linux kernel.
31da177e4SLinus Torvalds  *
41da177e4SLinus Torvalds  * Copyright 2003,2004 Andi Kleen, SuSE Labs.
58bccd85fSChristoph Lameter  * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
61da177e4SLinus Torvalds  * Subject to the GNU Public License, version 2.
71da177e4SLinus Torvalds  *
 81da177e4SLinus Torvalds  * NUMA policy allows the user to give hints about which node(s) memory
 91da177e4SLinus Torvalds  * should be allocated on.
101da177e4SLinus Torvalds  *
111da177e4SLinus Torvalds  * Support four policies per VMA and per process:
121da177e4SLinus Torvalds  *
131da177e4SLinus Torvalds  * The VMA policy has priority over the process policy for a page fault.
141da177e4SLinus Torvalds  *
151da177e4SLinus Torvalds  * interleave     Allocate memory interleaved over a set of nodes,
161da177e4SLinus Torvalds  *                with normal fallback if it fails.
171da177e4SLinus Torvalds  *                For VMA based allocations this interleaves based on the
181da177e4SLinus Torvalds  *                offset into the backing object or offset into the mapping
191da177e4SLinus Torvalds  *                for anonymous memory. For the process policy a per-process
201da177e4SLinus Torvalds  *                counter is used.
218bccd85fSChristoph Lameter  *
221da177e4SLinus Torvalds  * bind           Only allocate memory on a specific set of nodes,
231da177e4SLinus Torvalds  *                no fallback.
248bccd85fSChristoph Lameter  *                FIXME: memory is allocated starting with the first node
258bccd85fSChristoph Lameter  *                and proceeding to the last. It would be better if bind truly
268bccd85fSChristoph Lameter  *                restricted the allocation to the specified memory nodes.
278bccd85fSChristoph Lameter  *
281da177e4SLinus Torvalds  * preferred      Try a specific node first before normal fallback.
2900ef2d2fSDavid Rientjes  *                As a special case NUMA_NO_NODE here means do the allocation
301da177e4SLinus Torvalds  *                on the local CPU. This is normally identical to default,
311da177e4SLinus Torvalds  *                but useful to set in a VMA when you have a non-default
321da177e4SLinus Torvalds  *                process policy.
338bccd85fSChristoph Lameter  *
341da177e4SLinus Torvalds  * default        Allocate on the local node first, or when on a VMA
351da177e4SLinus Torvalds  *                use the process policy. This is what Linux always did
361da177e4SLinus Torvalds  *		  in a NUMA-aware kernel and still does by, ahem, default.
371da177e4SLinus Torvalds  *
381da177e4SLinus Torvalds  * The process policy is applied for most non-interrupt memory allocations
391da177e4SLinus Torvalds  * in that process' context. Interrupts ignore the policies and always
401da177e4SLinus Torvalds  * try to allocate on the local CPU. The VMA policy is only applied to memory
411da177e4SLinus Torvalds  * allocations backed by that VMA.
421da177e4SLinus Torvalds  *
431da177e4SLinus Torvalds  * Currently there are a few corner cases in swapping where the policy
441da177e4SLinus Torvalds  * is not applied, but the majority should be handled. When a process policy
451da177e4SLinus Torvalds  * is used it is not remembered across swap-out/swap-in.
461da177e4SLinus Torvalds  *
471da177e4SLinus Torvalds  * Only the highest zone in the zone hierarchy gets policied. Allocations
481da177e4SLinus Torvalds  * requesting a lower zone just use the default policy. This implies that
491da177e4SLinus Torvalds  * on systems with highmem, kernel lowmem allocations don't get policied.
501da177e4SLinus Torvalds  * The same goes for GFP_DMA allocations.
511da177e4SLinus Torvalds  *
521da177e4SLinus Torvalds  * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
531da177e4SLinus Torvalds  * all users and remembered even when nobody has the memory mapped.
541da177e4SLinus Torvalds  */
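/*
 * Illustrative userspace sketch (not part of this file): how the modes
 * described above are typically selected via the set_mempolicy()/mbind()
 * syscalls.  This assumes libnuma's <numaif.h> prototypes are available;
 * 'addr' and 'length' are placeholders for an existing mapping.
 *
 *	#include <numaif.h>
 *
 *	unsigned long nodes01 = (1UL << 0) | (1UL << 1);
 *	unsigned long node0   = 1UL << 0;
 *
 *	// Process policy: interleave new allocations across nodes 0 and 1.
 *	set_mempolicy(MPOL_INTERLEAVE, &nodes01, sizeof(nodes01) * 8);
 *
 *	// VMA policy: bind one mapping to node 0, overriding the process
 *	// policy for faults in that range.
 *	mbind(addr, length, MPOL_BIND, &node0, sizeof(node0) * 8, 0);
 */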
551da177e4SLinus Torvalds 
561da177e4SLinus Torvalds /* Notebook:
571da177e4SLinus Torvalds    fix mmap readahead to honour policy and enable policy for any page cache
581da177e4SLinus Torvalds    object
591da177e4SLinus Torvalds    statistics for bigpages
601da177e4SLinus Torvalds    global policy for page cache? currently it uses process policy. Requires
611da177e4SLinus Torvalds    first item above.
621da177e4SLinus Torvalds    handle mremap for shared memory (currently ignored for the policy)
631da177e4SLinus Torvalds    grows down?
641da177e4SLinus Torvalds    make bind policy root only? It can trigger the OOM killer much faster and
651da177e4SLinus Torvalds    the kernel is not always graceful about that.
661da177e4SLinus Torvalds */
671da177e4SLinus Torvalds 
68b1de0d13SMitchel Humpherys #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
69b1de0d13SMitchel Humpherys 
701da177e4SLinus Torvalds #include <linux/mempolicy.h>
711da177e4SLinus Torvalds #include <linux/mm.h>
721da177e4SLinus Torvalds #include <linux/highmem.h>
731da177e4SLinus Torvalds #include <linux/hugetlb.h>
741da177e4SLinus Torvalds #include <linux/kernel.h>
751da177e4SLinus Torvalds #include <linux/sched.h>
766e84f315SIngo Molnar #include <linux/sched/mm.h>
776a3827d7SIngo Molnar #include <linux/sched/numa_balancing.h>
78f719ff9bSIngo Molnar #include <linux/sched/task.h>
791da177e4SLinus Torvalds #include <linux/nodemask.h>
801da177e4SLinus Torvalds #include <linux/cpuset.h>
811da177e4SLinus Torvalds #include <linux/slab.h>
821da177e4SLinus Torvalds #include <linux/string.h>
83b95f1b31SPaul Gortmaker #include <linux/export.h>
84b488893aSPavel Emelyanov #include <linux/nsproxy.h>
851da177e4SLinus Torvalds #include <linux/interrupt.h>
861da177e4SLinus Torvalds #include <linux/init.h>
871da177e4SLinus Torvalds #include <linux/compat.h>
88dc9aa5b9SChristoph Lameter #include <linux/swap.h>
891a75a6c8SChristoph Lameter #include <linux/seq_file.h>
901a75a6c8SChristoph Lameter #include <linux/proc_fs.h>
91b20a3503SChristoph Lameter #include <linux/migrate.h>
9262b61f61SHugh Dickins #include <linux/ksm.h>
9395a402c3SChristoph Lameter #include <linux/rmap.h>
9486c3a764SDavid Quigley #include <linux/security.h>
95dbcb0f19SAdrian Bunk #include <linux/syscalls.h>
96095f1fc4SLee Schermerhorn #include <linux/ctype.h>
976d9c285aSKOSAKI Motohiro #include <linux/mm_inline.h>
98b24f53a0SLee Schermerhorn #include <linux/mmu_notifier.h>
99b1de0d13SMitchel Humpherys #include <linux/printk.h>
100dc9aa5b9SChristoph Lameter 
1011da177e4SLinus Torvalds #include <asm/tlbflush.h>
1027c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
1031da177e4SLinus Torvalds 
10462695a84SNick Piggin #include "internal.h"
10562695a84SNick Piggin 
10638e35860SChristoph Lameter /* Internal flags */
107dc9aa5b9SChristoph Lameter #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for contiguous vmas */
10838e35860SChristoph Lameter #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
109dc9aa5b9SChristoph Lameter 
110fcc234f8SPekka Enberg static struct kmem_cache *policy_cache;
111fcc234f8SPekka Enberg static struct kmem_cache *sn_cache;
1121da177e4SLinus Torvalds 
1131da177e4SLinus Torvalds /* Highest zone. A specific allocation for a zone below that is not
1141da177e4SLinus Torvalds    policied. */
1156267276fSChristoph Lameter enum zone_type policy_zone = 0;
1161da177e4SLinus Torvalds 
117bea904d5SLee Schermerhorn /*
118bea904d5SLee Schermerhorn  * run-time system-wide default policy => local allocation
119bea904d5SLee Schermerhorn  */
120e754d79dSH Hartley Sweeten static struct mempolicy default_policy = {
1211da177e4SLinus Torvalds 	.refcnt = ATOMIC_INIT(1), /* never free it */
122bea904d5SLee Schermerhorn 	.mode = MPOL_PREFERRED,
123fc36b8d3SLee Schermerhorn 	.flags = MPOL_F_LOCAL,
1241da177e4SLinus Torvalds };
1251da177e4SLinus Torvalds 
1265606e387SMel Gorman static struct mempolicy preferred_node_policy[MAX_NUMNODES];
1275606e387SMel Gorman 
12874d2c3a0SOleg Nesterov struct mempolicy *get_task_policy(struct task_struct *p)
1295606e387SMel Gorman {
1305606e387SMel Gorman 	struct mempolicy *pol = p->mempolicy;
131f15ca78eSOleg Nesterov 	int node;
1325606e387SMel Gorman 
133f15ca78eSOleg Nesterov 	if (pol)
134f15ca78eSOleg Nesterov 		return pol;
1355606e387SMel Gorman 
136f15ca78eSOleg Nesterov 	node = numa_node_id();
1371da6f0e1SJianguo Wu 	if (node != NUMA_NO_NODE) {
1381da6f0e1SJianguo Wu 		pol = &preferred_node_policy[node];
139f15ca78eSOleg Nesterov 		/* preferred_node_policy is not initialised early in boot */
140f15ca78eSOleg Nesterov 		if (pol->mode)
141f15ca78eSOleg Nesterov 			return pol;
1421da6f0e1SJianguo Wu 	}
1435606e387SMel Gorman 
144f15ca78eSOleg Nesterov 	return &default_policy;
1455606e387SMel Gorman }
1465606e387SMel Gorman 
14737012946SDavid Rientjes static const struct mempolicy_operations {
14837012946SDavid Rientjes 	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
149708c1bbcSMiao Xie 	/*
150708c1bbcSMiao Xie 	 * If the read-side task has no lock protecting task->mempolicy, the
151708c1bbcSMiao Xie 	 * write-side task rebinds task->mempolicy in two steps. The first step
152708c1bbcSMiao Xie 	 * sets all the newly allowed nodes, and the second step clears all the
153708c1bbcSMiao Xie 	 * now-disallowed nodes. This avoids a window in which no node is
154708c1bbcSMiao Xie 	 * allowed for an allocation.
155708c1bbcSMiao Xie 	 * If we have a lock protecting task->mempolicy on the read side, we
156708c1bbcSMiao Xie 	 * rebind directly.
157708c1bbcSMiao Xie 	 *
158708c1bbcSMiao Xie 	 * step:
159708c1bbcSMiao Xie 	 * 	MPOL_REBIND_ONCE  - do the rebind work at once
160708c1bbcSMiao Xie 	 * 	MPOL_REBIND_STEP1 - set all the newly allowed nodes
161708c1bbcSMiao Xie 	 * 	MPOL_REBIND_STEP2 - clear all the now-disallowed nodes
162708c1bbcSMiao Xie 	 */
163708c1bbcSMiao Xie 	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
164708c1bbcSMiao Xie 			enum mpol_rebind_step step);
16537012946SDavid Rientjes } mpol_ops[MPOL_MAX];
16637012946SDavid Rientjes 
167f5b087b5SDavid Rientjes static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
168f5b087b5SDavid Rientjes {
1696d556294SBob Liu 	return pol->flags & MPOL_MODE_FLAGS;
1704c50bc01SDavid Rientjes }
1714c50bc01SDavid Rientjes 
1724c50bc01SDavid Rientjes static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
1734c50bc01SDavid Rientjes 				   const nodemask_t *rel)
1744c50bc01SDavid Rientjes {
1754c50bc01SDavid Rientjes 	nodemask_t tmp;
1764c50bc01SDavid Rientjes 	nodes_fold(tmp, *orig, nodes_weight(*rel));
1774c50bc01SDavid Rientjes 	nodes_onto(*ret, tmp, *rel);
178f5b087b5SDavid Rientjes }
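/*
 * Worked example (illustrative only): with MPOL_F_RELATIVE_NODES, a user
 * nodemask of {0,2} taken relative to an allowed set of {4,5,6} is first
 * folded modulo nodes_weight({4,5,6}) == 3, leaving {0,2}, and then mapped
 * onto the allowed set: relative bit 0 -> node 4, relative bit 2 -> node 6,
 * so the resulting nodemask is {4,6}.
 */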
179f5b087b5SDavid Rientjes 
18037012946SDavid Rientjes static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
18137012946SDavid Rientjes {
18237012946SDavid Rientjes 	if (nodes_empty(*nodes))
18337012946SDavid Rientjes 		return -EINVAL;
18437012946SDavid Rientjes 	pol->v.nodes = *nodes;
18537012946SDavid Rientjes 	return 0;
18637012946SDavid Rientjes }
18737012946SDavid Rientjes 
18837012946SDavid Rientjes static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
18937012946SDavid Rientjes {
19037012946SDavid Rientjes 	if (!nodes)
191fc36b8d3SLee Schermerhorn 		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
19237012946SDavid Rientjes 	else if (nodes_empty(*nodes))
19337012946SDavid Rientjes 		return -EINVAL;			/*  no allowed nodes */
19437012946SDavid Rientjes 	else
19537012946SDavid Rientjes 		pol->v.preferred_node = first_node(*nodes);
19637012946SDavid Rientjes 	return 0;
19737012946SDavid Rientjes }
19837012946SDavid Rientjes 
19937012946SDavid Rientjes static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
20037012946SDavid Rientjes {
201859f7ef1SZhihui Zhang 	if (nodes_empty(*nodes))
20237012946SDavid Rientjes 		return -EINVAL;
20337012946SDavid Rientjes 	pol->v.nodes = *nodes;
20437012946SDavid Rientjes 	return 0;
20537012946SDavid Rientjes }
20637012946SDavid Rientjes 
20758568d2aSMiao Xie /*
20858568d2aSMiao Xie  * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
20958568d2aSMiao Xie  * any, for the new policy.  mpol_new() has already validated the nodes
21058568d2aSMiao Xie  * parameter with respect to the policy mode and flags.  But, we need to
21158568d2aSMiao Xie  * handle an empty nodemask with MPOL_PREFERRED here.
21258568d2aSMiao Xie  *
21358568d2aSMiao Xie  * Must be called holding task's alloc_lock to protect task's mems_allowed
21458568d2aSMiao Xie  * and mempolicy.  May also be called holding the mmap_semaphore for write.
21558568d2aSMiao Xie  */
2164bfc4495SKAMEZAWA Hiroyuki static int mpol_set_nodemask(struct mempolicy *pol,
2174bfc4495SKAMEZAWA Hiroyuki 		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
21858568d2aSMiao Xie {
21958568d2aSMiao Xie 	int ret;
22058568d2aSMiao Xie 
22158568d2aSMiao Xie 	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
22258568d2aSMiao Xie 	if (pol == NULL)
22358568d2aSMiao Xie 		return 0;
22401f13bd6SLai Jiangshan 	/* Check N_MEMORY */
2254bfc4495SKAMEZAWA Hiroyuki 	nodes_and(nsc->mask1,
22601f13bd6SLai Jiangshan 		  cpuset_current_mems_allowed, node_states[N_MEMORY]);
22758568d2aSMiao Xie 
22858568d2aSMiao Xie 	VM_BUG_ON(!nodes);
22958568d2aSMiao Xie 	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
23058568d2aSMiao Xie 		nodes = NULL;	/* explicit local allocation */
23158568d2aSMiao Xie 	else {
23258568d2aSMiao Xie 		if (pol->flags & MPOL_F_RELATIVE_NODES)
2334bfc4495SKAMEZAWA Hiroyuki 			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
23458568d2aSMiao Xie 		else
2354bfc4495SKAMEZAWA Hiroyuki 			nodes_and(nsc->mask2, *nodes, nsc->mask1);
2364bfc4495SKAMEZAWA Hiroyuki 
23758568d2aSMiao Xie 		if (mpol_store_user_nodemask(pol))
23858568d2aSMiao Xie 			pol->w.user_nodemask = *nodes;
23958568d2aSMiao Xie 		else
24058568d2aSMiao Xie 			pol->w.cpuset_mems_allowed =
24158568d2aSMiao Xie 						cpuset_current_mems_allowed;
24258568d2aSMiao Xie 	}
24358568d2aSMiao Xie 
2444bfc4495SKAMEZAWA Hiroyuki 	if (nodes)
2454bfc4495SKAMEZAWA Hiroyuki 		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
2464bfc4495SKAMEZAWA Hiroyuki 	else
2474bfc4495SKAMEZAWA Hiroyuki 		ret = mpol_ops[pol->mode].create(pol, NULL);
24858568d2aSMiao Xie 	return ret;
24958568d2aSMiao Xie }
25058568d2aSMiao Xie 
25158568d2aSMiao Xie /*
25258568d2aSMiao Xie  * This function just creates a new policy, does some checking and simple
25358568d2aSMiao Xie  * initialization. The caller must invoke mpol_set_nodemask() to set the nodes.
25458568d2aSMiao Xie  */
255028fec41SDavid Rientjes static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
256028fec41SDavid Rientjes 				  nodemask_t *nodes)
2571da177e4SLinus Torvalds {
2581da177e4SLinus Torvalds 	struct mempolicy *policy;
2591da177e4SLinus Torvalds 
260028fec41SDavid Rientjes 	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
26100ef2d2fSDavid Rientjes 		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
262140d5a49SPaul Mundt 
2633e1f0645SDavid Rientjes 	if (mode == MPOL_DEFAULT) {
2643e1f0645SDavid Rientjes 		if (nodes && !nodes_empty(*nodes))
26537012946SDavid Rientjes 			return ERR_PTR(-EINVAL);
266d3a71033SLee Schermerhorn 		return NULL;
26737012946SDavid Rientjes 	}
2683e1f0645SDavid Rientjes 	VM_BUG_ON(!nodes);
2693e1f0645SDavid Rientjes 
2703e1f0645SDavid Rientjes 	/*
2713e1f0645SDavid Rientjes 	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
2723e1f0645SDavid Rientjes 	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
2733e1f0645SDavid Rientjes 	 * All other modes require a valid pointer to a non-empty nodemask.
2743e1f0645SDavid Rientjes 	 */
2753e1f0645SDavid Rientjes 	if (mode == MPOL_PREFERRED) {
2763e1f0645SDavid Rientjes 		if (nodes_empty(*nodes)) {
2773e1f0645SDavid Rientjes 			if (((flags & MPOL_F_STATIC_NODES) ||
2783e1f0645SDavid Rientjes 			     (flags & MPOL_F_RELATIVE_NODES)))
2793e1f0645SDavid Rientjes 				return ERR_PTR(-EINVAL);
2803e1f0645SDavid Rientjes 		}
281479e2802SPeter Zijlstra 	} else if (mode == MPOL_LOCAL) {
2828d303e44SPiotr Kwapulinski 		if (!nodes_empty(*nodes) ||
2838d303e44SPiotr Kwapulinski 		    (flags & MPOL_F_STATIC_NODES) ||
2848d303e44SPiotr Kwapulinski 		    (flags & MPOL_F_RELATIVE_NODES))
285479e2802SPeter Zijlstra 			return ERR_PTR(-EINVAL);
286479e2802SPeter Zijlstra 		mode = MPOL_PREFERRED;
2873e1f0645SDavid Rientjes 	} else if (nodes_empty(*nodes))
2883e1f0645SDavid Rientjes 		return ERR_PTR(-EINVAL);
2891da177e4SLinus Torvalds 	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2901da177e4SLinus Torvalds 	if (!policy)
2911da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
2921da177e4SLinus Torvalds 	atomic_set(&policy->refcnt, 1);
29345c4745aSLee Schermerhorn 	policy->mode = mode;
29437012946SDavid Rientjes 	policy->flags = flags;
2953e1f0645SDavid Rientjes 
29637012946SDavid Rientjes 	return policy;
29737012946SDavid Rientjes }
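/*
 * Typical call sequence (sketch only; do_set_mempolicy() below is the
 * authoritative caller, including the error handling omitted here):
 *
 *	NODEMASK_SCRATCH(scratch);
 *	struct mempolicy *new = mpol_new(mode, flags, nodes);
 *
 *	if (!IS_ERR(new)) {
 *		task_lock(current);
 *		ret = mpol_set_nodemask(new, nodes, scratch);
 *		task_unlock(current);
 *	}
 *	NODEMASK_SCRATCH_FREE(scratch);
 */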
29837012946SDavid Rientjes 
29952cd3b07SLee Schermerhorn /* Slow path of a mpol destructor. */
30052cd3b07SLee Schermerhorn void __mpol_put(struct mempolicy *p)
30152cd3b07SLee Schermerhorn {
30252cd3b07SLee Schermerhorn 	if (!atomic_dec_and_test(&p->refcnt))
30352cd3b07SLee Schermerhorn 		return;
30452cd3b07SLee Schermerhorn 	kmem_cache_free(policy_cache, p);
30552cd3b07SLee Schermerhorn }
30652cd3b07SLee Schermerhorn 
307708c1bbcSMiao Xie static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
308708c1bbcSMiao Xie 				enum mpol_rebind_step step)
30937012946SDavid Rientjes {
31037012946SDavid Rientjes }
31137012946SDavid Rientjes 
312708c1bbcSMiao Xie /*
313708c1bbcSMiao Xie  * step:
314708c1bbcSMiao Xie  * 	MPOL_REBIND_ONCE  - do rebind work at once
315708c1bbcSMiao Xie  * 	MPOL_REBIND_STEP1 - set all the newly allowed nodes
316708c1bbcSMiao Xie  * 	MPOL_REBIND_STEP2 - clear all the now-disallowed nodes
317708c1bbcSMiao Xie  */
318708c1bbcSMiao Xie static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
319708c1bbcSMiao Xie 				 enum mpol_rebind_step step)
3201d0d2680SDavid Rientjes {
3211d0d2680SDavid Rientjes 	nodemask_t tmp;
3221d0d2680SDavid Rientjes 
32337012946SDavid Rientjes 	if (pol->flags & MPOL_F_STATIC_NODES)
32437012946SDavid Rientjes 		nodes_and(tmp, pol->w.user_nodemask, *nodes);
32537012946SDavid Rientjes 	else if (pol->flags & MPOL_F_RELATIVE_NODES)
32637012946SDavid Rientjes 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
3271d0d2680SDavid Rientjes 	else {
328708c1bbcSMiao Xie 		/*
329708c1bbcSMiao Xie 		 * for MPOL_REBIND_STEP1, use ->w.cpuset_mems_allowed to cache
330708c1bbcSMiao Xie 		 * the intermediate result
331708c1bbcSMiao Xie 		 */
332708c1bbcSMiao Xie 		if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
333708c1bbcSMiao Xie 			nodes_remap(tmp, pol->v.nodes,
334708c1bbcSMiao Xie 					pol->w.cpuset_mems_allowed, *nodes);
335708c1bbcSMiao Xie 			pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
336708c1bbcSMiao Xie 		} else if (step == MPOL_REBIND_STEP2) {
337708c1bbcSMiao Xie 			tmp = pol->w.cpuset_mems_allowed;
33837012946SDavid Rientjes 			pol->w.cpuset_mems_allowed = *nodes;
339708c1bbcSMiao Xie 		} else
340708c1bbcSMiao Xie 			BUG();
3411d0d2680SDavid Rientjes 	}
34237012946SDavid Rientjes 
343708c1bbcSMiao Xie 	if (nodes_empty(tmp))
344708c1bbcSMiao Xie 		tmp = *nodes;
345708c1bbcSMiao Xie 
346708c1bbcSMiao Xie 	if (step == MPOL_REBIND_STEP1)
347708c1bbcSMiao Xie 		nodes_or(pol->v.nodes, pol->v.nodes, tmp);
348708c1bbcSMiao Xie 	else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
3491d0d2680SDavid Rientjes 		pol->v.nodes = tmp;
350708c1bbcSMiao Xie 	else
351708c1bbcSMiao Xie 		BUG();
35237012946SDavid Rientjes }
35337012946SDavid Rientjes 
35437012946SDavid Rientjes static void mpol_rebind_preferred(struct mempolicy *pol,
355708c1bbcSMiao Xie 				  const nodemask_t *nodes,
356708c1bbcSMiao Xie 				  enum mpol_rebind_step step)
35737012946SDavid Rientjes {
35837012946SDavid Rientjes 	nodemask_t tmp;
35937012946SDavid Rientjes 
36037012946SDavid Rientjes 	if (pol->flags & MPOL_F_STATIC_NODES) {
3611d0d2680SDavid Rientjes 		int node = first_node(pol->w.user_nodemask);
3621d0d2680SDavid Rientjes 
363fc36b8d3SLee Schermerhorn 		if (node_isset(node, *nodes)) {
3641d0d2680SDavid Rientjes 			pol->v.preferred_node = node;
365fc36b8d3SLee Schermerhorn 			pol->flags &= ~MPOL_F_LOCAL;
366fc36b8d3SLee Schermerhorn 		} else
367fc36b8d3SLee Schermerhorn 			pol->flags |= MPOL_F_LOCAL;
36837012946SDavid Rientjes 	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
36937012946SDavid Rientjes 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
3701d0d2680SDavid Rientjes 		pol->v.preferred_node = first_node(tmp);
371fc36b8d3SLee Schermerhorn 	} else if (!(pol->flags & MPOL_F_LOCAL)) {
3721d0d2680SDavid Rientjes 		pol->v.preferred_node = node_remap(pol->v.preferred_node,
37337012946SDavid Rientjes 						   pol->w.cpuset_mems_allowed,
37437012946SDavid Rientjes 						   *nodes);
37537012946SDavid Rientjes 		pol->w.cpuset_mems_allowed = *nodes;
3761d0d2680SDavid Rientjes 	}
3771d0d2680SDavid Rientjes }
37837012946SDavid Rientjes 
379708c1bbcSMiao Xie /*
380708c1bbcSMiao Xie  * mpol_rebind_policy - Migrate a policy to a different set of nodes
381708c1bbcSMiao Xie  *
382708c1bbcSMiao Xie  * If the read-side task has no lock protecting task->mempolicy, the
383708c1bbcSMiao Xie  * write-side task rebinds task->mempolicy in two steps. The first step
384708c1bbcSMiao Xie  * sets all the newly allowed nodes, and the second step clears all the
385708c1bbcSMiao Xie  * now-disallowed nodes. This avoids a window in which no node is
386708c1bbcSMiao Xie  * allowed for an allocation.
387708c1bbcSMiao Xie  * If we have a lock protecting task->mempolicy on the read side, we
388708c1bbcSMiao Xie  * rebind directly.
389708c1bbcSMiao Xie  *
390708c1bbcSMiao Xie  * step:
391708c1bbcSMiao Xie  * 	MPOL_REBIND_ONCE  - do the rebind work at once
392708c1bbcSMiao Xie  * 	MPOL_REBIND_STEP1 - set all the newly allowed nodes
393708c1bbcSMiao Xie  * 	MPOL_REBIND_STEP2 - clear all the now-disallowed nodes
394708c1bbcSMiao Xie  */
395708c1bbcSMiao Xie static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
396708c1bbcSMiao Xie 				enum mpol_rebind_step step)
39737012946SDavid Rientjes {
39837012946SDavid Rientjes 	if (!pol)
39937012946SDavid Rientjes 		return;
40089c522c7SWang Sheng-Hui 	if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE &&
40137012946SDavid Rientjes 	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
40237012946SDavid Rientjes 		return;
403708c1bbcSMiao Xie 
404708c1bbcSMiao Xie 	if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
405708c1bbcSMiao Xie 		return;
406708c1bbcSMiao Xie 
407708c1bbcSMiao Xie 	if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
408708c1bbcSMiao Xie 		BUG();
409708c1bbcSMiao Xie 
410708c1bbcSMiao Xie 	if (step == MPOL_REBIND_STEP1)
411708c1bbcSMiao Xie 		pol->flags |= MPOL_F_REBINDING;
412708c1bbcSMiao Xie 	else if (step == MPOL_REBIND_STEP2)
413708c1bbcSMiao Xie 		pol->flags &= ~MPOL_F_REBINDING;
414708c1bbcSMiao Xie 	else if (step >= MPOL_REBIND_NSTEP)
415708c1bbcSMiao Xie 		BUG();
416708c1bbcSMiao Xie 
417708c1bbcSMiao Xie 	mpol_ops[pol->mode].rebind(pol, newmask, step);
4181d0d2680SDavid Rientjes }
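/*
 * Worked example (illustrative only): a cpuset's mems change from {0,1} to
 * {2,3} for a task using MPOL_INTERLEAVE over {0,1} with neither
 * MPOL_F_STATIC_NODES nor MPOL_F_RELATIVE_NODES set.  A lock-less rebind
 * runs MPOL_REBIND_STEP1 first, remapping {0,1} onto {2,3} and OR-ing the
 * result in, so pol->v.nodes briefly becomes {0,1,2,3} and concurrent
 * readers always find an allowed node.  MPOL_REBIND_STEP2 then installs
 * the remapped set, leaving pol->v.nodes == {2,3}.
 */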
4191d0d2680SDavid Rientjes 
4201d0d2680SDavid Rientjes /*
4211d0d2680SDavid Rientjes  * Wrapper for mpol_rebind_policy() that just requires task
4221d0d2680SDavid Rientjes  * pointer, and updates task mempolicy.
42358568d2aSMiao Xie  *
42458568d2aSMiao Xie  * Called with task's alloc_lock held.
4251d0d2680SDavid Rientjes  */
4261d0d2680SDavid Rientjes 
427708c1bbcSMiao Xie void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
428708c1bbcSMiao Xie 			enum mpol_rebind_step step)
4291d0d2680SDavid Rientjes {
430708c1bbcSMiao Xie 	mpol_rebind_policy(tsk->mempolicy, new, step);
4311d0d2680SDavid Rientjes }
4321d0d2680SDavid Rientjes 
4331d0d2680SDavid Rientjes /*
4341d0d2680SDavid Rientjes  * Rebind each vma in mm to new nodemask.
4351d0d2680SDavid Rientjes  *
4361d0d2680SDavid Rientjes  * Call holding a reference to mm.  Takes mm->mmap_sem during call.
4371d0d2680SDavid Rientjes  */
4381d0d2680SDavid Rientjes 
4391d0d2680SDavid Rientjes void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
4401d0d2680SDavid Rientjes {
4411d0d2680SDavid Rientjes 	struct vm_area_struct *vma;
4421d0d2680SDavid Rientjes 
4431d0d2680SDavid Rientjes 	down_write(&mm->mmap_sem);
4441d0d2680SDavid Rientjes 	for (vma = mm->mmap; vma; vma = vma->vm_next)
445708c1bbcSMiao Xie 		mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
4461d0d2680SDavid Rientjes 	up_write(&mm->mmap_sem);
4471d0d2680SDavid Rientjes }
4481d0d2680SDavid Rientjes 
44937012946SDavid Rientjes static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
45037012946SDavid Rientjes 	[MPOL_DEFAULT] = {
45137012946SDavid Rientjes 		.rebind = mpol_rebind_default,
45237012946SDavid Rientjes 	},
45337012946SDavid Rientjes 	[MPOL_INTERLEAVE] = {
45437012946SDavid Rientjes 		.create = mpol_new_interleave,
45537012946SDavid Rientjes 		.rebind = mpol_rebind_nodemask,
45637012946SDavid Rientjes 	},
45737012946SDavid Rientjes 	[MPOL_PREFERRED] = {
45837012946SDavid Rientjes 		.create = mpol_new_preferred,
45937012946SDavid Rientjes 		.rebind = mpol_rebind_preferred,
46037012946SDavid Rientjes 	},
46137012946SDavid Rientjes 	[MPOL_BIND] = {
46237012946SDavid Rientjes 		.create = mpol_new_bind,
46337012946SDavid Rientjes 		.rebind = mpol_rebind_nodemask,
46437012946SDavid Rientjes 	},
46537012946SDavid Rientjes };
46637012946SDavid Rientjes 
467fc301289SChristoph Lameter static void migrate_page_add(struct page *page, struct list_head *pagelist,
468fc301289SChristoph Lameter 				unsigned long flags);
4691a75a6c8SChristoph Lameter 
4706f4576e3SNaoya Horiguchi struct queue_pages {
4716f4576e3SNaoya Horiguchi 	struct list_head *pagelist;
4726f4576e3SNaoya Horiguchi 	unsigned long flags;
4736f4576e3SNaoya Horiguchi 	nodemask_t *nmask;
4746f4576e3SNaoya Horiguchi 	struct vm_area_struct *prev;
4756f4576e3SNaoya Horiguchi };
4766f4576e3SNaoya Horiguchi 
47798094945SNaoya Horiguchi /*
47898094945SNaoya Horiguchi  * Scan through the page tables, checking whether pages match the given
47998094945SNaoya Horiguchi  * conditions, and move them to the pagelist if they do.
48098094945SNaoya Horiguchi  */
4816f4576e3SNaoya Horiguchi static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
4826f4576e3SNaoya Horiguchi 			unsigned long end, struct mm_walk *walk)
4831da177e4SLinus Torvalds {
4846f4576e3SNaoya Horiguchi 	struct vm_area_struct *vma = walk->vma;
4856f4576e3SNaoya Horiguchi 	struct page *page;
4866f4576e3SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
4876f4576e3SNaoya Horiguchi 	unsigned long flags = qp->flags;
488248db92dSKirill A. Shutemov 	int nid, ret;
48991612e0dSHugh Dickins 	pte_t *pte;
490705e87c0SHugh Dickins 	spinlock_t *ptl;
491941150a3SHugh Dickins 
492248db92dSKirill A. Shutemov 	if (pmd_trans_huge(*pmd)) {
493248db92dSKirill A. Shutemov 		ptl = pmd_lock(walk->mm, pmd);
494248db92dSKirill A. Shutemov 		if (pmd_trans_huge(*pmd)) {
495248db92dSKirill A. Shutemov 			page = pmd_page(*pmd);
496248db92dSKirill A. Shutemov 			if (is_huge_zero_page(page)) {
497248db92dSKirill A. Shutemov 				spin_unlock(ptl);
498fd60775aSDavid Rientjes 				__split_huge_pmd(vma, pmd, addr, false, NULL);
499248db92dSKirill A. Shutemov 			} else {
500248db92dSKirill A. Shutemov 				get_page(page);
501248db92dSKirill A. Shutemov 				spin_unlock(ptl);
502248db92dSKirill A. Shutemov 				lock_page(page);
503248db92dSKirill A. Shutemov 				ret = split_huge_page(page);
504248db92dSKirill A. Shutemov 				unlock_page(page);
505248db92dSKirill A. Shutemov 				put_page(page);
506248db92dSKirill A. Shutemov 				if (ret)
5076f4576e3SNaoya Horiguchi 					return 0;
508248db92dSKirill A. Shutemov 			}
509248db92dSKirill A. Shutemov 		} else {
510248db92dSKirill A. Shutemov 			spin_unlock(ptl);
511248db92dSKirill A. Shutemov 		}
512248db92dSKirill A. Shutemov 	}
51391612e0dSHugh Dickins 
514337d9abfSNaoya Horiguchi 	if (pmd_trans_unstable(pmd))
515337d9abfSNaoya Horiguchi 		return 0;
516248db92dSKirill A. Shutemov retry:
5176f4576e3SNaoya Horiguchi 	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
5186f4576e3SNaoya Horiguchi 	for (; addr != end; pte++, addr += PAGE_SIZE) {
51991612e0dSHugh Dickins 		if (!pte_present(*pte))
52091612e0dSHugh Dickins 			continue;
5216aab341eSLinus Torvalds 		page = vm_normal_page(vma, addr, *pte);
5226aab341eSLinus Torvalds 		if (!page)
52391612e0dSHugh Dickins 			continue;
524053837fcSNick Piggin 		/*
52562b61f61SHugh Dickins 		 * vm_normal_page() filters out zero pages, but there might
52662b61f61SHugh Dickins 		 * still be PageReserved pages to skip, perhaps in a VDSO.
527053837fcSNick Piggin 		 */
528b79bc0a0SHugh Dickins 		if (PageReserved(page))
529f4598c8bSChristoph Lameter 			continue;
5306aab341eSLinus Torvalds 		nid = page_to_nid(page);
5316f4576e3SNaoya Horiguchi 		if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
53238e35860SChristoph Lameter 			continue;
533800d8c63SKirill A. Shutemov 		if (PageTransCompound(page)) {
534248db92dSKirill A. Shutemov 			get_page(page);
535248db92dSKirill A. Shutemov 			pte_unmap_unlock(pte, ptl);
536248db92dSKirill A. Shutemov 			lock_page(page);
537248db92dSKirill A. Shutemov 			ret = split_huge_page(page);
538248db92dSKirill A. Shutemov 			unlock_page(page);
539248db92dSKirill A. Shutemov 			put_page(page);
540248db92dSKirill A. Shutemov 			/* Failed to split -- skip. */
541248db92dSKirill A. Shutemov 			if (ret) {
542248db92dSKirill A. Shutemov 				pte = pte_offset_map_lock(walk->mm, pmd,
543248db92dSKirill A. Shutemov 						addr, &ptl);
544248db92dSKirill A. Shutemov 				continue;
545248db92dSKirill A. Shutemov 			}
546248db92dSKirill A. Shutemov 			goto retry;
547248db92dSKirill A. Shutemov 		}
54838e35860SChristoph Lameter 
5496f4576e3SNaoya Horiguchi 		migrate_page_add(page, qp->pagelist, flags);
5506f4576e3SNaoya Horiguchi 	}
5516f4576e3SNaoya Horiguchi 	pte_unmap_unlock(pte - 1, ptl);
5526f4576e3SNaoya Horiguchi 	cond_resched();
5536f4576e3SNaoya Horiguchi 	return 0;
55491612e0dSHugh Dickins }
55591612e0dSHugh Dickins 
5566f4576e3SNaoya Horiguchi static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
5576f4576e3SNaoya Horiguchi 			       unsigned long addr, unsigned long end,
5586f4576e3SNaoya Horiguchi 			       struct mm_walk *walk)
559e2d8cf40SNaoya Horiguchi {
560e2d8cf40SNaoya Horiguchi #ifdef CONFIG_HUGETLB_PAGE
5616f4576e3SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
5626f4576e3SNaoya Horiguchi 	unsigned long flags = qp->flags;
563e2d8cf40SNaoya Horiguchi 	int nid;
564e2d8cf40SNaoya Horiguchi 	struct page *page;
565cb900f41SKirill A. Shutemov 	spinlock_t *ptl;
566d4c54919SNaoya Horiguchi 	pte_t entry;
567e2d8cf40SNaoya Horiguchi 
5686f4576e3SNaoya Horiguchi 	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
5696f4576e3SNaoya Horiguchi 	entry = huge_ptep_get(pte);
570d4c54919SNaoya Horiguchi 	if (!pte_present(entry))
571d4c54919SNaoya Horiguchi 		goto unlock;
572d4c54919SNaoya Horiguchi 	page = pte_page(entry);
573e2d8cf40SNaoya Horiguchi 	nid = page_to_nid(page);
5746f4576e3SNaoya Horiguchi 	if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
575e2d8cf40SNaoya Horiguchi 		goto unlock;
576e2d8cf40SNaoya Horiguchi 	/* With MPOL_MF_MOVE, we migrate only unshared hugepages. */
577e2d8cf40SNaoya Horiguchi 	if (flags & (MPOL_MF_MOVE_ALL) ||
578e2d8cf40SNaoya Horiguchi 	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
5796f4576e3SNaoya Horiguchi 		isolate_huge_page(page, qp->pagelist);
580e2d8cf40SNaoya Horiguchi unlock:
581cb900f41SKirill A. Shutemov 	spin_unlock(ptl);
582e2d8cf40SNaoya Horiguchi #else
583e2d8cf40SNaoya Horiguchi 	BUG();
584e2d8cf40SNaoya Horiguchi #endif
58591612e0dSHugh Dickins 	return 0;
5861da177e4SLinus Torvalds }
5871da177e4SLinus Torvalds 
5885877231fSAneesh Kumar K.V #ifdef CONFIG_NUMA_BALANCING
589b24f53a0SLee Schermerhorn /*
5904b10e7d5SMel Gorman  * This is used to mark a range of virtual addresses to be inaccessible.
5914b10e7d5SMel Gorman  * These are later cleared by a NUMA hinting fault. Depending on these
5924b10e7d5SMel Gorman  * faults, pages may be migrated for better NUMA placement.
5934b10e7d5SMel Gorman  *
5944b10e7d5SMel Gorman  * This is assuming that NUMA faults are handled using PROT_NONE. If
5954b10e7d5SMel Gorman  * an architecture makes a different choice, it will need further
5964b10e7d5SMel Gorman  * changes to the core.
597b24f53a0SLee Schermerhorn  */
5984b10e7d5SMel Gorman unsigned long change_prot_numa(struct vm_area_struct *vma,
5994b10e7d5SMel Gorman 			unsigned long addr, unsigned long end)
600b24f53a0SLee Schermerhorn {
6014b10e7d5SMel Gorman 	int nr_updated;
602b24f53a0SLee Schermerhorn 
6034d942466SMel Gorman 	nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
60403c5a6e1SMel Gorman 	if (nr_updated)
60503c5a6e1SMel Gorman 		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
606b24f53a0SLee Schermerhorn 
6074b10e7d5SMel Gorman 	return nr_updated;
608b24f53a0SLee Schermerhorn }
609b24f53a0SLee Schermerhorn #else
610b24f53a0SLee Schermerhorn static unsigned long change_prot_numa(struct vm_area_struct *vma,
611b24f53a0SLee Schermerhorn 			unsigned long addr, unsigned long end)
612b24f53a0SLee Schermerhorn {
613b24f53a0SLee Schermerhorn 	return 0;
614b24f53a0SLee Schermerhorn }
6155877231fSAneesh Kumar K.V #endif /* CONFIG_NUMA_BALANCING */
616b24f53a0SLee Schermerhorn 
6176f4576e3SNaoya Horiguchi static int queue_pages_test_walk(unsigned long start, unsigned long end,
6186f4576e3SNaoya Horiguchi 				struct mm_walk *walk)
6191da177e4SLinus Torvalds {
6206f4576e3SNaoya Horiguchi 	struct vm_area_struct *vma = walk->vma;
6216f4576e3SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
6225b952b3cSAndi Kleen 	unsigned long endvma = vma->vm_end;
6236f4576e3SNaoya Horiguchi 	unsigned long flags = qp->flags;
624dc9aa5b9SChristoph Lameter 
62577bf45e7SKirill A. Shutemov 	if (!vma_migratable(vma))
62648684a65SNaoya Horiguchi 		return 1;
62748684a65SNaoya Horiguchi 
6285b952b3cSAndi Kleen 	if (endvma > end)
6295b952b3cSAndi Kleen 		endvma = end;
6305b952b3cSAndi Kleen 	if (vma->vm_start > start)
6315b952b3cSAndi Kleen 		start = vma->vm_start;
632b24f53a0SLee Schermerhorn 
633b24f53a0SLee Schermerhorn 	if (!(flags & MPOL_MF_DISCONTIG_OK)) {
634b24f53a0SLee Schermerhorn 		if (!vma->vm_next && vma->vm_end < end)
635d05f0cdcSHugh Dickins 			return -EFAULT;
6366f4576e3SNaoya Horiguchi 		if (qp->prev && qp->prev->vm_end < vma->vm_start)
637d05f0cdcSHugh Dickins 			return -EFAULT;
638b24f53a0SLee Schermerhorn 	}
639b24f53a0SLee Schermerhorn 
6406f4576e3SNaoya Horiguchi 	qp->prev = vma;
6416f4576e3SNaoya Horiguchi 
642b24f53a0SLee Schermerhorn 	if (flags & MPOL_MF_LAZY) {
6432c0346a3SMel Gorman 		/* Similar to task_numa_work, skip inaccessible VMAs */
6444355c018SLiang Chen 		if (!is_vm_hugetlb_page(vma) &&
6454355c018SLiang Chen 			(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) &&
6464355c018SLiang Chen 			!(vma->vm_flags & VM_MIXEDMAP))
647b24f53a0SLee Schermerhorn 			change_prot_numa(vma, start, endvma);
6486f4576e3SNaoya Horiguchi 		return 1;
649b24f53a0SLee Schermerhorn 	}
650b24f53a0SLee Schermerhorn 
6516f4576e3SNaoya Horiguchi 	/* queue pages from current vma */
65277bf45e7SKirill A. Shutemov 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
6536f4576e3SNaoya Horiguchi 		return 0;
6546f4576e3SNaoya Horiguchi 	return 1;
6556f4576e3SNaoya Horiguchi }
656b24f53a0SLee Schermerhorn 
6576f4576e3SNaoya Horiguchi /*
6586f4576e3SNaoya Horiguchi  * Walk through page tables and collect pages to be migrated.
6596f4576e3SNaoya Horiguchi  *
6606f4576e3SNaoya Horiguchi  * If pages found in a given range are on a set of nodes (determined by
6616f4576e3SNaoya Horiguchi  * @nodes and @flags), they are isolated and queued to the pagelist which
6626f4576e3SNaoya Horiguchi  * is passed via @private.
6636f4576e3SNaoya Horiguchi  */
6646f4576e3SNaoya Horiguchi static int
6656f4576e3SNaoya Horiguchi queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
6666f4576e3SNaoya Horiguchi 		nodemask_t *nodes, unsigned long flags,
6676f4576e3SNaoya Horiguchi 		struct list_head *pagelist)
6686f4576e3SNaoya Horiguchi {
6696f4576e3SNaoya Horiguchi 	struct queue_pages qp = {
6706f4576e3SNaoya Horiguchi 		.pagelist = pagelist,
6716f4576e3SNaoya Horiguchi 		.flags = flags,
6726f4576e3SNaoya Horiguchi 		.nmask = nodes,
6736f4576e3SNaoya Horiguchi 		.prev = NULL,
6746f4576e3SNaoya Horiguchi 	};
6756f4576e3SNaoya Horiguchi 	struct mm_walk queue_pages_walk = {
6766f4576e3SNaoya Horiguchi 		.hugetlb_entry = queue_pages_hugetlb,
6776f4576e3SNaoya Horiguchi 		.pmd_entry = queue_pages_pte_range,
6786f4576e3SNaoya Horiguchi 		.test_walk = queue_pages_test_walk,
6796f4576e3SNaoya Horiguchi 		.mm = mm,
6806f4576e3SNaoya Horiguchi 		.private = &qp,
6816f4576e3SNaoya Horiguchi 	};
6826f4576e3SNaoya Horiguchi 
6836f4576e3SNaoya Horiguchi 	return walk_page_range(start, end, &queue_pages_walk);
6841da177e4SLinus Torvalds }
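/*
 * Usage sketch (illustrative only; migrate_to_node() below is the real
 * caller and also puts back any pages that could not be migrated):
 *
 *	LIST_HEAD(pagelist);
 *	nodemask_t nmask = nodemask_of_node(source);
 *
 *	queue_pages_range(mm, start, end, &nmask,
 *			  MPOL_MF_MOVE | MPOL_MF_DISCONTIG_OK, &pagelist);
 *	if (!list_empty(&pagelist))
 *		err = migrate_pages(&pagelist, new_node_page, NULL, dest,
 *				    MIGRATE_SYNC, MR_SYSCALL);
 */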
6851da177e4SLinus Torvalds 
686869833f2SKOSAKI Motohiro /*
687869833f2SKOSAKI Motohiro  * Apply policy to a single VMA
688869833f2SKOSAKI Motohiro  * This must be called with the mmap_sem held for writing.
689869833f2SKOSAKI Motohiro  */
690869833f2SKOSAKI Motohiro static int vma_replace_policy(struct vm_area_struct *vma,
691869833f2SKOSAKI Motohiro 						struct mempolicy *pol)
6928d34694cSKOSAKI Motohiro {
693869833f2SKOSAKI Motohiro 	int err;
694869833f2SKOSAKI Motohiro 	struct mempolicy *old;
695869833f2SKOSAKI Motohiro 	struct mempolicy *new;
6968d34694cSKOSAKI Motohiro 
6978d34694cSKOSAKI Motohiro 	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
6988d34694cSKOSAKI Motohiro 		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
6998d34694cSKOSAKI Motohiro 		 vma->vm_ops, vma->vm_file,
7008d34694cSKOSAKI Motohiro 		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
7018d34694cSKOSAKI Motohiro 
702869833f2SKOSAKI Motohiro 	new = mpol_dup(pol);
703869833f2SKOSAKI Motohiro 	if (IS_ERR(new))
704869833f2SKOSAKI Motohiro 		return PTR_ERR(new);
705869833f2SKOSAKI Motohiro 
706869833f2SKOSAKI Motohiro 	if (vma->vm_ops && vma->vm_ops->set_policy) {
7078d34694cSKOSAKI Motohiro 		err = vma->vm_ops->set_policy(vma, new);
708869833f2SKOSAKI Motohiro 		if (err)
709869833f2SKOSAKI Motohiro 			goto err_out;
7108d34694cSKOSAKI Motohiro 	}
711869833f2SKOSAKI Motohiro 
712869833f2SKOSAKI Motohiro 	old = vma->vm_policy;
713869833f2SKOSAKI Motohiro 	vma->vm_policy = new; /* protected by mmap_sem */
714869833f2SKOSAKI Motohiro 	mpol_put(old);
715869833f2SKOSAKI Motohiro 
716869833f2SKOSAKI Motohiro 	return 0;
717869833f2SKOSAKI Motohiro  err_out:
718869833f2SKOSAKI Motohiro 	mpol_put(new);
7198d34694cSKOSAKI Motohiro 	return err;
7208d34694cSKOSAKI Motohiro }
7218d34694cSKOSAKI Motohiro 
7221da177e4SLinus Torvalds /* Step 2: apply policy to a range and do splits. */
7239d8cebd4SKOSAKI Motohiro static int mbind_range(struct mm_struct *mm, unsigned long start,
7249d8cebd4SKOSAKI Motohiro 		       unsigned long end, struct mempolicy *new_pol)
7251da177e4SLinus Torvalds {
7261da177e4SLinus Torvalds 	struct vm_area_struct *next;
7279d8cebd4SKOSAKI Motohiro 	struct vm_area_struct *prev;
7289d8cebd4SKOSAKI Motohiro 	struct vm_area_struct *vma;
7299d8cebd4SKOSAKI Motohiro 	int err = 0;
730e26a5114SKOSAKI Motohiro 	pgoff_t pgoff;
7319d8cebd4SKOSAKI Motohiro 	unsigned long vmstart;
7329d8cebd4SKOSAKI Motohiro 	unsigned long vmend;
7331da177e4SLinus Torvalds 
734097d5910SLinus Torvalds 	vma = find_vma(mm, start);
7359d8cebd4SKOSAKI Motohiro 	if (!vma || vma->vm_start > start)
7369d8cebd4SKOSAKI Motohiro 		return -EFAULT;
7379d8cebd4SKOSAKI Motohiro 
738097d5910SLinus Torvalds 	prev = vma->vm_prev;
739e26a5114SKOSAKI Motohiro 	if (start > vma->vm_start)
740e26a5114SKOSAKI Motohiro 		prev = vma;
741e26a5114SKOSAKI Motohiro 
7429d8cebd4SKOSAKI Motohiro 	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
7431da177e4SLinus Torvalds 		next = vma->vm_next;
7449d8cebd4SKOSAKI Motohiro 		vmstart = max(start, vma->vm_start);
7459d8cebd4SKOSAKI Motohiro 		vmend   = min(end, vma->vm_end);
7469d8cebd4SKOSAKI Motohiro 
747e26a5114SKOSAKI Motohiro 		if (mpol_equal(vma_policy(vma), new_pol))
748e26a5114SKOSAKI Motohiro 			continue;
749e26a5114SKOSAKI Motohiro 
750e26a5114SKOSAKI Motohiro 		pgoff = vma->vm_pgoff +
751e26a5114SKOSAKI Motohiro 			((vmstart - vma->vm_start) >> PAGE_SHIFT);
7529d8cebd4SKOSAKI Motohiro 		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
753e26a5114SKOSAKI Motohiro 				 vma->anon_vma, vma->vm_file, pgoff,
75419a809afSAndrea Arcangeli 				 new_pol, vma->vm_userfaultfd_ctx);
7559d8cebd4SKOSAKI Motohiro 		if (prev) {
7569d8cebd4SKOSAKI Motohiro 			vma = prev;
7579d8cebd4SKOSAKI Motohiro 			next = vma->vm_next;
7583964acd0SOleg Nesterov 			if (mpol_equal(vma_policy(vma), new_pol))
7599d8cebd4SKOSAKI Motohiro 				continue;
7603964acd0SOleg Nesterov 			/* vma_merge() joined vma && vma->next, case 8 */
7613964acd0SOleg Nesterov 			goto replace;
7621da177e4SLinus Torvalds 		}
7639d8cebd4SKOSAKI Motohiro 		if (vma->vm_start != vmstart) {
7649d8cebd4SKOSAKI Motohiro 			err = split_vma(vma->vm_mm, vma, vmstart, 1);
7659d8cebd4SKOSAKI Motohiro 			if (err)
7669d8cebd4SKOSAKI Motohiro 				goto out;
7679d8cebd4SKOSAKI Motohiro 		}
7689d8cebd4SKOSAKI Motohiro 		if (vma->vm_end != vmend) {
7699d8cebd4SKOSAKI Motohiro 			err = split_vma(vma->vm_mm, vma, vmend, 0);
7709d8cebd4SKOSAKI Motohiro 			if (err)
7719d8cebd4SKOSAKI Motohiro 				goto out;
7729d8cebd4SKOSAKI Motohiro 		}
7733964acd0SOleg Nesterov  replace:
774869833f2SKOSAKI Motohiro 		err = vma_replace_policy(vma, new_pol);
7759d8cebd4SKOSAKI Motohiro 		if (err)
7769d8cebd4SKOSAKI Motohiro 			goto out;
7779d8cebd4SKOSAKI Motohiro 	}
7789d8cebd4SKOSAKI Motohiro 
7799d8cebd4SKOSAKI Motohiro  out:
7801da177e4SLinus Torvalds 	return err;
7811da177e4SLinus Torvalds }
7821da177e4SLinus Torvalds 
7831da177e4SLinus Torvalds /* Set the process memory policy */
784028fec41SDavid Rientjes static long do_set_mempolicy(unsigned short mode, unsigned short flags,
785028fec41SDavid Rientjes 			     nodemask_t *nodes)
7861da177e4SLinus Torvalds {
78758568d2aSMiao Xie 	struct mempolicy *new, *old;
7884bfc4495SKAMEZAWA Hiroyuki 	NODEMASK_SCRATCH(scratch);
78958568d2aSMiao Xie 	int ret;
7901da177e4SLinus Torvalds 
7914bfc4495SKAMEZAWA Hiroyuki 	if (!scratch)
7924bfc4495SKAMEZAWA Hiroyuki 		return -ENOMEM;
793f4e53d91SLee Schermerhorn 
7944bfc4495SKAMEZAWA Hiroyuki 	new = mpol_new(mode, flags, nodes);
7954bfc4495SKAMEZAWA Hiroyuki 	if (IS_ERR(new)) {
7964bfc4495SKAMEZAWA Hiroyuki 		ret = PTR_ERR(new);
7974bfc4495SKAMEZAWA Hiroyuki 		goto out;
7984bfc4495SKAMEZAWA Hiroyuki 	}
7992c7c3a7dSOleg Nesterov 
80058568d2aSMiao Xie 	task_lock(current);
8014bfc4495SKAMEZAWA Hiroyuki 	ret = mpol_set_nodemask(new, nodes, scratch);
80258568d2aSMiao Xie 	if (ret) {
80358568d2aSMiao Xie 		task_unlock(current);
80458568d2aSMiao Xie 		mpol_put(new);
8054bfc4495SKAMEZAWA Hiroyuki 		goto out;
80658568d2aSMiao Xie 	}
80758568d2aSMiao Xie 	old = current->mempolicy;
8081da177e4SLinus Torvalds 	current->mempolicy = new;
80945816682SVlastimil Babka 	if (new && new->mode == MPOL_INTERLEAVE)
81045816682SVlastimil Babka 		current->il_prev = MAX_NUMNODES-1;
81158568d2aSMiao Xie 	task_unlock(current);
81258568d2aSMiao Xie 	mpol_put(old);
8134bfc4495SKAMEZAWA Hiroyuki 	ret = 0;
8144bfc4495SKAMEZAWA Hiroyuki out:
8154bfc4495SKAMEZAWA Hiroyuki 	NODEMASK_SCRATCH_FREE(scratch);
8164bfc4495SKAMEZAWA Hiroyuki 	return ret;
8171da177e4SLinus Torvalds }
8181da177e4SLinus Torvalds 
819bea904d5SLee Schermerhorn /*
820bea904d5SLee Schermerhorn  * Return nodemask for policy for get_mempolicy() query
82158568d2aSMiao Xie  *
82258568d2aSMiao Xie  * Called with task's alloc_lock held
823bea904d5SLee Schermerhorn  */
824bea904d5SLee Schermerhorn static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
8251da177e4SLinus Torvalds {
826dfcd3c0dSAndi Kleen 	nodes_clear(*nodes);
827bea904d5SLee Schermerhorn 	if (p == &default_policy)
828bea904d5SLee Schermerhorn 		return;
829bea904d5SLee Schermerhorn 
83045c4745aSLee Schermerhorn 	switch (p->mode) {
83119770b32SMel Gorman 	case MPOL_BIND:
83219770b32SMel Gorman 		/* Fall through */
8331da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
834dfcd3c0dSAndi Kleen 		*nodes = p->v.nodes;
8351da177e4SLinus Torvalds 		break;
8361da177e4SLinus Torvalds 	case MPOL_PREFERRED:
837fc36b8d3SLee Schermerhorn 		if (!(p->flags & MPOL_F_LOCAL))
838dfcd3c0dSAndi Kleen 			node_set(p->v.preferred_node, *nodes);
83953f2556bSLee Schermerhorn 		/* else return empty node mask for local allocation */
8401da177e4SLinus Torvalds 		break;
8411da177e4SLinus Torvalds 	default:
8421da177e4SLinus Torvalds 		BUG();
8431da177e4SLinus Torvalds 	}
8441da177e4SLinus Torvalds }
8451da177e4SLinus Torvalds 
846d4edcf0dSDave Hansen static int lookup_node(unsigned long addr)
8471da177e4SLinus Torvalds {
8481da177e4SLinus Torvalds 	struct page *p;
8491da177e4SLinus Torvalds 	int err;
8501da177e4SLinus Torvalds 
851768ae309SLorenzo Stoakes 	err = get_user_pages(addr & PAGE_MASK, 1, 0, &p, NULL);
8521da177e4SLinus Torvalds 	if (err >= 0) {
8531da177e4SLinus Torvalds 		err = page_to_nid(p);
8541da177e4SLinus Torvalds 		put_page(p);
8551da177e4SLinus Torvalds 	}
8561da177e4SLinus Torvalds 	return err;
8571da177e4SLinus Torvalds }
8581da177e4SLinus Torvalds 
8591da177e4SLinus Torvalds /* Retrieve NUMA policy */
860dbcb0f19SAdrian Bunk static long do_get_mempolicy(int *policy, nodemask_t *nmask,
8611da177e4SLinus Torvalds 			     unsigned long addr, unsigned long flags)
8621da177e4SLinus Torvalds {
8638bccd85fSChristoph Lameter 	int err;
8641da177e4SLinus Torvalds 	struct mm_struct *mm = current->mm;
8651da177e4SLinus Torvalds 	struct vm_area_struct *vma = NULL;
8661da177e4SLinus Torvalds 	struct mempolicy *pol = current->mempolicy;
8671da177e4SLinus Torvalds 
868754af6f5SLee Schermerhorn 	if (flags &
869754af6f5SLee Schermerhorn 		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
8701da177e4SLinus Torvalds 		return -EINVAL;
871754af6f5SLee Schermerhorn 
872754af6f5SLee Schermerhorn 	if (flags & MPOL_F_MEMS_ALLOWED) {
873754af6f5SLee Schermerhorn 		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
874754af6f5SLee Schermerhorn 			return -EINVAL;
875754af6f5SLee Schermerhorn 		*policy = 0;	/* just so it's initialized */
87658568d2aSMiao Xie 		task_lock(current);
877754af6f5SLee Schermerhorn 		*nmask  = cpuset_current_mems_allowed;
87858568d2aSMiao Xie 		task_unlock(current);
879754af6f5SLee Schermerhorn 		return 0;
880754af6f5SLee Schermerhorn 	}
881754af6f5SLee Schermerhorn 
8821da177e4SLinus Torvalds 	if (flags & MPOL_F_ADDR) {
883bea904d5SLee Schermerhorn 		/*
884bea904d5SLee Schermerhorn 		 * Do NOT fall back to task policy if the
885bea904d5SLee Schermerhorn 		 * vma/shared policy at addr is NULL.  We
886bea904d5SLee Schermerhorn 		 * want to return MPOL_DEFAULT in this case.
887bea904d5SLee Schermerhorn 		 */
8881da177e4SLinus Torvalds 		down_read(&mm->mmap_sem);
8891da177e4SLinus Torvalds 		vma = find_vma_intersection(mm, addr, addr+1);
8901da177e4SLinus Torvalds 		if (!vma) {
8911da177e4SLinus Torvalds 			up_read(&mm->mmap_sem);
8921da177e4SLinus Torvalds 			return -EFAULT;
8931da177e4SLinus Torvalds 		}
8941da177e4SLinus Torvalds 		if (vma->vm_ops && vma->vm_ops->get_policy)
8951da177e4SLinus Torvalds 			pol = vma->vm_ops->get_policy(vma, addr);
8961da177e4SLinus Torvalds 		else
8971da177e4SLinus Torvalds 			pol = vma->vm_policy;
8981da177e4SLinus Torvalds 	} else if (addr)
8991da177e4SLinus Torvalds 		return -EINVAL;
9001da177e4SLinus Torvalds 
9011da177e4SLinus Torvalds 	if (!pol)
902bea904d5SLee Schermerhorn 		pol = &default_policy;	/* indicates default behavior */
9031da177e4SLinus Torvalds 
9041da177e4SLinus Torvalds 	if (flags & MPOL_F_NODE) {
9051da177e4SLinus Torvalds 		if (flags & MPOL_F_ADDR) {
906d4edcf0dSDave Hansen 			err = lookup_node(addr);
9071da177e4SLinus Torvalds 			if (err < 0)
9081da177e4SLinus Torvalds 				goto out;
9098bccd85fSChristoph Lameter 			*policy = err;
9101da177e4SLinus Torvalds 		} else if (pol == current->mempolicy &&
91145c4745aSLee Schermerhorn 				pol->mode == MPOL_INTERLEAVE) {
91245816682SVlastimil Babka 			*policy = next_node_in(current->il_prev, pol->v.nodes);
9131da177e4SLinus Torvalds 		} else {
9141da177e4SLinus Torvalds 			err = -EINVAL;
9151da177e4SLinus Torvalds 			goto out;
9161da177e4SLinus Torvalds 		}
917bea904d5SLee Schermerhorn 	} else {
918bea904d5SLee Schermerhorn 		*policy = pol == &default_policy ? MPOL_DEFAULT :
919bea904d5SLee Schermerhorn 						pol->mode;
920d79df630SDavid Rientjes 		/*
921d79df630SDavid Rientjes 		 * Internal mempolicy flags must be masked off before exposing
922d79df630SDavid Rientjes 		 * the policy to userspace.
923d79df630SDavid Rientjes 		 */
924d79df630SDavid Rientjes 		*policy |= (pol->flags & MPOL_MODE_FLAGS);
925bea904d5SLee Schermerhorn 	}
9261da177e4SLinus Torvalds 
9271da177e4SLinus Torvalds 	if (vma) {
9281da177e4SLinus Torvalds 		up_read(&current->mm->mmap_sem);
9291da177e4SLinus Torvalds 		vma = NULL;
9301da177e4SLinus Torvalds 	}
9311da177e4SLinus Torvalds 
9321da177e4SLinus Torvalds 	err = 0;
93358568d2aSMiao Xie 	if (nmask) {
934c6b6ef8bSLee Schermerhorn 		if (mpol_store_user_nodemask(pol)) {
935c6b6ef8bSLee Schermerhorn 			*nmask = pol->w.user_nodemask;
936c6b6ef8bSLee Schermerhorn 		} else {
93758568d2aSMiao Xie 			task_lock(current);
938bea904d5SLee Schermerhorn 			get_policy_nodemask(pol, nmask);
93958568d2aSMiao Xie 			task_unlock(current);
94058568d2aSMiao Xie 		}
941c6b6ef8bSLee Schermerhorn 	}
9421da177e4SLinus Torvalds 
9431da177e4SLinus Torvalds  out:
94452cd3b07SLee Schermerhorn 	mpol_cond_put(pol);
9451da177e4SLinus Torvalds 	if (vma)
9461da177e4SLinus Torvalds 		up_read(&current->mm->mmap_sem);
9471da177e4SLinus Torvalds 	return err;
9481da177e4SLinus Torvalds }
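/*
 * Illustrative userspace sketch (assumes libnuma's <numaif.h> prototypes;
 * 'addr' is a placeholder for an address in a mapping of interest):
 *
 *	int mode;
 *	unsigned long nodes = 0;
 *
 *	// Which node currently backs the page at 'addr'?
 *	get_mempolicy(&mode, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR);
 *
 *	// Policy mode and nodemask of the VMA containing 'addr'.
 *	get_mempolicy(&mode, &nodes, sizeof(nodes) * 8, addr, MPOL_F_ADDR);
 */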
9491da177e4SLinus Torvalds 
950b20a3503SChristoph Lameter #ifdef CONFIG_MIGRATION
9518bccd85fSChristoph Lameter /*
9526ce3c4c0SChristoph Lameter  * page migration
9536ce3c4c0SChristoph Lameter  */
954fc301289SChristoph Lameter static void migrate_page_add(struct page *page, struct list_head *pagelist,
955fc301289SChristoph Lameter 				unsigned long flags)
9566ce3c4c0SChristoph Lameter {
9576ce3c4c0SChristoph Lameter 	/*
958fc301289SChristoph Lameter 	 * Avoid migrating a page that is shared with others.
9596ce3c4c0SChristoph Lameter 	 */
96062695a84SNick Piggin 	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
96162695a84SNick Piggin 		if (!isolate_lru_page(page)) {
96262695a84SNick Piggin 			list_add_tail(&page->lru, pagelist);
963599d0c95SMel Gorman 			inc_node_page_state(page, NR_ISOLATED_ANON +
9646d9c285aSKOSAKI Motohiro 					    page_is_file_cache(page));
96562695a84SNick Piggin 		}
96662695a84SNick Piggin 	}
9676ce3c4c0SChristoph Lameter }
9686ce3c4c0SChristoph Lameter 
969742755a1SChristoph Lameter static struct page *new_node_page(struct page *page, unsigned long node, int **x)
97095a402c3SChristoph Lameter {
971e2d8cf40SNaoya Horiguchi 	if (PageHuge(page))
972e2d8cf40SNaoya Horiguchi 		return alloc_huge_page_node(page_hstate(compound_head(page)),
973e2d8cf40SNaoya Horiguchi 					node);
974e2d8cf40SNaoya Horiguchi 	else
97596db800fSVlastimil Babka 		return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
976b360edb4SDavid Rientjes 						    __GFP_THISNODE, 0);
97795a402c3SChristoph Lameter }
97895a402c3SChristoph Lameter 
9796ce3c4c0SChristoph Lameter /*
9807e2ab150SChristoph Lameter  * Migrate pages from one node to a target node.
9817e2ab150SChristoph Lameter  * Returns error or the number of pages not migrated.
9827e2ab150SChristoph Lameter  */
983dbcb0f19SAdrian Bunk static int migrate_to_node(struct mm_struct *mm, int source, int dest,
984dbcb0f19SAdrian Bunk 			   int flags)
9857e2ab150SChristoph Lameter {
9867e2ab150SChristoph Lameter 	nodemask_t nmask;
9877e2ab150SChristoph Lameter 	LIST_HEAD(pagelist);
9887e2ab150SChristoph Lameter 	int err = 0;
9897e2ab150SChristoph Lameter 
9907e2ab150SChristoph Lameter 	nodes_clear(nmask);
9917e2ab150SChristoph Lameter 	node_set(source, nmask);
9927e2ab150SChristoph Lameter 
99308270807SMinchan Kim 	/*
99408270807SMinchan Kim 	 * This does not "check" the range but isolates all pages that
99508270807SMinchan Kim 	 * need migration.  Between passing in the full user address
99608270807SMinchan Kim 	 * space range and MPOL_MF_DISCONTIG_OK, this call cannot fail.
99708270807SMinchan Kim 	 */
99808270807SMinchan Kim 	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
99998094945SNaoya Horiguchi 	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
10007e2ab150SChristoph Lameter 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
10017e2ab150SChristoph Lameter 
1002cf608ac1SMinchan Kim 	if (!list_empty(&pagelist)) {
100368711a74SDavid Rientjes 		err = migrate_pages(&pagelist, new_node_page, NULL, dest,
10049c620e2bSHugh Dickins 					MIGRATE_SYNC, MR_SYSCALL);
1005cf608ac1SMinchan Kim 		if (err)
1006e2d8cf40SNaoya Horiguchi 			putback_movable_pages(&pagelist);
1007cf608ac1SMinchan Kim 	}
100895a402c3SChristoph Lameter 
10097e2ab150SChristoph Lameter 	return err;
10107e2ab150SChristoph Lameter }
10117e2ab150SChristoph Lameter 
10127e2ab150SChristoph Lameter /*
10137e2ab150SChristoph Lameter  * Move pages between the two nodesets so as to preserve the physical
10147e2ab150SChristoph Lameter  * layout as much as possible.
101539743889SChristoph Lameter  *
101639743889SChristoph Lameter  * Returns the number of pages that could not be moved.
101739743889SChristoph Lameter  */
10180ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
10190ce72d4fSAndrew Morton 		     const nodemask_t *to, int flags)
102039743889SChristoph Lameter {
10217e2ab150SChristoph Lameter 	int busy = 0;
10220aedadf9SChristoph Lameter 	int err;
10237e2ab150SChristoph Lameter 	nodemask_t tmp;
102439743889SChristoph Lameter 
10250aedadf9SChristoph Lameter 	err = migrate_prep();
10260aedadf9SChristoph Lameter 	if (err)
10270aedadf9SChristoph Lameter 		return err;
10280aedadf9SChristoph Lameter 
102939743889SChristoph Lameter 	down_read(&mm->mmap_sem);
1030d4984711SChristoph Lameter 
10317e2ab150SChristoph Lameter 	/*
10327e2ab150SChristoph Lameter 	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
10337e2ab150SChristoph Lameter 	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
10347e2ab150SChristoph Lameter 	 * bit in 'tmp', and return that <source, dest> pair for migration.
10357e2ab150SChristoph Lameter 	 * The pair of nodemasks 'to' and 'from' define the map.
10367e2ab150SChristoph Lameter 	 *
10377e2ab150SChristoph Lameter 	 * If no pair of bits is found that way, fall back to picking some
10387e2ab150SChristoph Lameter 	 * pair of 'source' and 'dest' bits that are not the same.  If the
10397e2ab150SChristoph Lameter 	 * 'source' and 'dest' bits are the same, this represents a node
10407e2ab150SChristoph Lameter 	 * that will be migrating to itself, so no pages need move.
10417e2ab150SChristoph Lameter 	 *
10427e2ab150SChristoph Lameter 	 * If no bits are left in 'tmp', or if all remaining bits left
10437e2ab150SChristoph Lameter 	 * in 'tmp' correspond to the same bit in 'to', return false
10447e2ab150SChristoph Lameter 	 * (nothing left to migrate).
10457e2ab150SChristoph Lameter 	 *
10467e2ab150SChristoph Lameter 	 * This lets us pick a pair of nodes to migrate between, such that
10477e2ab150SChristoph Lameter 	 * if possible the dest node is not already occupied by some other
10487e2ab150SChristoph Lameter 	 * source node, minimizing the risk of overloading the memory on a
10497e2ab150SChristoph Lameter 	 * node that would happen if we migrated incoming memory to a node
10507e2ab150SChristoph Lameter 	 * before migrating outgoing memory source that same node.
10517e2ab150SChristoph Lameter 	 *
10527e2ab150SChristoph Lameter 	 * A single scan of tmp is sufficient.  As we go, we remember the
10537e2ab150SChristoph Lameter 	 * most recent <s, d> pair that moved (s != d).  If we find a pair
10547e2ab150SChristoph Lameter 	 * that not only moved, but what's better, moved to an empty slot
10557e2ab150SChristoph Lameter 	 * (d is not set in tmp), then we break out with that pair.
1056ae0e47f0SJustin P. Mattock 	 * Otherwise when we finish scanning tmp, we at least have the
10577e2ab150SChristoph Lameter 	 * most recent <s, d> pair that moved.  If we get all the way through
10587e2ab150SChristoph Lameter 	 * the scan of tmp without finding any node that moved, much less
10597e2ab150SChristoph Lameter 	 * moved to an empty node, then there is nothing left worth migrating.
10607e2ab150SChristoph Lameter 	 */
10617e2ab150SChristoph Lameter 
10620ce72d4fSAndrew Morton 	tmp = *from;
10637e2ab150SChristoph Lameter 	while (!nodes_empty(tmp)) {
10647e2ab150SChristoph Lameter 		int s, d;
1065b76ac7e7SJianguo Wu 		int source = NUMA_NO_NODE;
10667e2ab150SChristoph Lameter 		int dest = 0;
10677e2ab150SChristoph Lameter 
10687e2ab150SChristoph Lameter 		for_each_node_mask(s, tmp) {
10694a5b18ccSLarry Woodman 
10704a5b18ccSLarry Woodman 			/*
10714a5b18ccSLarry Woodman 			 * do_migrate_pages() tries to maintain the relative
10724a5b18ccSLarry Woodman 			 * node relationship of the pages established between
10734a5b18ccSLarry Woodman 			 * threads and memory areas.
10744a5b18ccSLarry Woodman 			 *
10754a5b18ccSLarry Woodman 			 * However if the number of source nodes is not equal to
10764a5b18ccSLarry Woodman 			 * the number of destination nodes we can not preserve
10774a5b18ccSLarry Woodman 			 * this node relative relationship.  In that case, skip
10784a5b18ccSLarry Woodman 			 * copying memory from a node that is in the destination
10794a5b18ccSLarry Woodman 			 * mask.
10804a5b18ccSLarry Woodman 			 *
10814a5b18ccSLarry Woodman 			 * Example: [2,3,4] -> [3,4,5] moves everything.
10824a5b18ccSLarry Woodman 			 *          [0-7] -> [3,4,5] moves only 0,1,2,6,7.
10834a5b18ccSLarry Woodman 			 */
10844a5b18ccSLarry Woodman 
10850ce72d4fSAndrew Morton 			if ((nodes_weight(*from) != nodes_weight(*to)) &&
10860ce72d4fSAndrew Morton 						(node_isset(s, *to)))
10874a5b18ccSLarry Woodman 				continue;
10884a5b18ccSLarry Woodman 
10890ce72d4fSAndrew Morton 			d = node_remap(s, *from, *to);
10907e2ab150SChristoph Lameter 			if (s == d)
10917e2ab150SChristoph Lameter 				continue;
10927e2ab150SChristoph Lameter 
10937e2ab150SChristoph Lameter 			source = s;	/* Node moved. Memorize */
10947e2ab150SChristoph Lameter 			dest = d;
10957e2ab150SChristoph Lameter 
10967e2ab150SChristoph Lameter 			/* dest not in remaining from nodes? */
10977e2ab150SChristoph Lameter 			if (!node_isset(dest, tmp))
10987e2ab150SChristoph Lameter 				break;
10997e2ab150SChristoph Lameter 		}
1100b76ac7e7SJianguo Wu 		if (source == NUMA_NO_NODE)
11017e2ab150SChristoph Lameter 			break;
11027e2ab150SChristoph Lameter 
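		/*
		 * Drop 'source' from the working set and move everything it
		 * holds to 'dest'.  migrate_to_node() returns the number of
		 * pages it could not move (accumulated in 'busy') or a
		 * negative errno, which aborts the whole operation.
		 */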
11037e2ab150SChristoph Lameter 		node_clear(source, tmp);
11047e2ab150SChristoph Lameter 		err = migrate_to_node(mm, source, dest, flags);
11057e2ab150SChristoph Lameter 		if (err > 0)
11067e2ab150SChristoph Lameter 			busy += err;
11077e2ab150SChristoph Lameter 		if (err < 0)
11087e2ab150SChristoph Lameter 			break;
110939743889SChristoph Lameter 	}
111039743889SChristoph Lameter 	up_read(&mm->mmap_sem);
11117e2ab150SChristoph Lameter 	if (err < 0)
11127e2ab150SChristoph Lameter 		return err;
11137e2ab150SChristoph Lameter 	return busy;
111539743889SChristoph Lameter }
111639743889SChristoph Lameter 
11173ad33b24SLee Schermerhorn /*
11183ad33b24SLee Schermerhorn  * Allocate a new page for page migration based on vma policy.
1119d05f0cdcSHugh Dickins  * Start by assuming the page is mapped by the same vma as contains @start.
11203ad33b24SLee Schermerhorn  * Search forward from there, if not.  N.B., this assumes that the
11213ad33b24SLee Schermerhorn  * list of pages handed to migrate_pages()--which is how we get here--
11223ad33b24SLee Schermerhorn  * is in virtual address order.
11233ad33b24SLee Schermerhorn  */
1124d05f0cdcSHugh Dickins static struct page *new_page(struct page *page, unsigned long start, int **x)
112595a402c3SChristoph Lameter {
1126d05f0cdcSHugh Dickins 	struct vm_area_struct *vma;
11273ad33b24SLee Schermerhorn 	unsigned long uninitialized_var(address);
112895a402c3SChristoph Lameter 
1129d05f0cdcSHugh Dickins 	vma = find_vma(current->mm, start);
11303ad33b24SLee Schermerhorn 	while (vma) {
11313ad33b24SLee Schermerhorn 		address = page_address_in_vma(page, vma);
11323ad33b24SLee Schermerhorn 		if (address != -EFAULT)
11333ad33b24SLee Schermerhorn 			break;
11343ad33b24SLee Schermerhorn 		vma = vma->vm_next;
11353ad33b24SLee Schermerhorn 	}
11363ad33b24SLee Schermerhorn 
113711c731e8SWanpeng Li 	if (PageHuge(page)) {
1138cc81717eSMichal Hocko 		BUG_ON(!vma);
113974060e4dSNaoya Horiguchi 		return alloc_huge_page_noerr(vma, address, 1);
114011c731e8SWanpeng Li 	}
114111c731e8SWanpeng Li 	/*
114211c731e8SWanpeng Li 	 * if !vma, alloc_page_vma() will use task or system default policy
114311c731e8SWanpeng Li 	 */
11443ad33b24SLee Schermerhorn 	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
114595a402c3SChristoph Lameter }
1146b20a3503SChristoph Lameter #else
1147b20a3503SChristoph Lameter 
1148b20a3503SChristoph Lameter static void migrate_page_add(struct page *page, struct list_head *pagelist,
1149b20a3503SChristoph Lameter 				unsigned long flags)
1150b20a3503SChristoph Lameter {
1151b20a3503SChristoph Lameter }
1152b20a3503SChristoph Lameter 
11530ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
11540ce72d4fSAndrew Morton 		     const nodemask_t *to, int flags)
1155b20a3503SChristoph Lameter {
1156b20a3503SChristoph Lameter 	return -ENOSYS;
1157b20a3503SChristoph Lameter }
115895a402c3SChristoph Lameter 
1159d05f0cdcSHugh Dickins static struct page *new_page(struct page *page, unsigned long start, int **x)
116095a402c3SChristoph Lameter {
116195a402c3SChristoph Lameter 	return NULL;
116295a402c3SChristoph Lameter }
1163b20a3503SChristoph Lameter #endif
1164b20a3503SChristoph Lameter 
1165dbcb0f19SAdrian Bunk static long do_mbind(unsigned long start, unsigned long len,
1166028fec41SDavid Rientjes 		     unsigned short mode, unsigned short mode_flags,
1167028fec41SDavid Rientjes 		     nodemask_t *nmask, unsigned long flags)
11686ce3c4c0SChristoph Lameter {
11696ce3c4c0SChristoph Lameter 	struct mm_struct *mm = current->mm;
11706ce3c4c0SChristoph Lameter 	struct mempolicy *new;
11716ce3c4c0SChristoph Lameter 	unsigned long end;
11726ce3c4c0SChristoph Lameter 	int err;
11736ce3c4c0SChristoph Lameter 	LIST_HEAD(pagelist);
11746ce3c4c0SChristoph Lameter 
1175b24f53a0SLee Schermerhorn 	if (flags & ~(unsigned long)MPOL_MF_VALID)
11766ce3c4c0SChristoph Lameter 		return -EINVAL;
117774c00241SChristoph Lameter 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
11786ce3c4c0SChristoph Lameter 		return -EPERM;
11796ce3c4c0SChristoph Lameter 
11806ce3c4c0SChristoph Lameter 	if (start & ~PAGE_MASK)
11816ce3c4c0SChristoph Lameter 		return -EINVAL;
11826ce3c4c0SChristoph Lameter 
11836ce3c4c0SChristoph Lameter 	if (mode == MPOL_DEFAULT)
11846ce3c4c0SChristoph Lameter 		flags &= ~MPOL_MF_STRICT;
11856ce3c4c0SChristoph Lameter 
11866ce3c4c0SChristoph Lameter 	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
11876ce3c4c0SChristoph Lameter 	end = start + len;
11886ce3c4c0SChristoph Lameter 
11896ce3c4c0SChristoph Lameter 	if (end < start)
11906ce3c4c0SChristoph Lameter 		return -EINVAL;
11916ce3c4c0SChristoph Lameter 	if (end == start)
11926ce3c4c0SChristoph Lameter 		return 0;
11936ce3c4c0SChristoph Lameter 
1194028fec41SDavid Rientjes 	new = mpol_new(mode, mode_flags, nmask);
11956ce3c4c0SChristoph Lameter 	if (IS_ERR(new))
11966ce3c4c0SChristoph Lameter 		return PTR_ERR(new);
11976ce3c4c0SChristoph Lameter 
1198b24f53a0SLee Schermerhorn 	if (flags & MPOL_MF_LAZY)
1199b24f53a0SLee Schermerhorn 		new->flags |= MPOL_F_MOF;
1200b24f53a0SLee Schermerhorn 
12016ce3c4c0SChristoph Lameter 	/*
12026ce3c4c0SChristoph Lameter 	 * If we are using the default policy then operation
12036ce3c4c0SChristoph Lameter 	 * on discontinuous address spaces is okay after all
12046ce3c4c0SChristoph Lameter 	 */
12056ce3c4c0SChristoph Lameter 	if (!new)
12066ce3c4c0SChristoph Lameter 		flags |= MPOL_MF_DISCONTIG_OK;
12076ce3c4c0SChristoph Lameter 
1208028fec41SDavid Rientjes 	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1209028fec41SDavid Rientjes 		 start, start + len, mode, mode_flags,
121000ef2d2fSDavid Rientjes 		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
12116ce3c4c0SChristoph Lameter 
12120aedadf9SChristoph Lameter 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
12130aedadf9SChristoph Lameter 
12140aedadf9SChristoph Lameter 		err = migrate_prep();
12150aedadf9SChristoph Lameter 		if (err)
1216b05ca738SKOSAKI Motohiro 			goto mpol_out;
12170aedadf9SChristoph Lameter 	}
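	/*
	 * Contextualize the policy's nodemask against the current cpuset
	 * under task_lock(); the scratch nodemask keeps a large nodemask_t
	 * off the stack.  mmap_sem is taken for write here and held across
	 * queue_pages_range()/mbind_range() below.
	 */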
12184bfc4495SKAMEZAWA Hiroyuki 	{
12194bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
12204bfc4495SKAMEZAWA Hiroyuki 		if (scratch) {
12216ce3c4c0SChristoph Lameter 			down_write(&mm->mmap_sem);
122258568d2aSMiao Xie 			task_lock(current);
12234bfc4495SKAMEZAWA Hiroyuki 			err = mpol_set_nodemask(new, nmask, scratch);
122458568d2aSMiao Xie 			task_unlock(current);
12254bfc4495SKAMEZAWA Hiroyuki 			if (err)
122658568d2aSMiao Xie 				up_write(&mm->mmap_sem);
12274bfc4495SKAMEZAWA Hiroyuki 		} else
12284bfc4495SKAMEZAWA Hiroyuki 			err = -ENOMEM;
12294bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
12304bfc4495SKAMEZAWA Hiroyuki 	}
1231b05ca738SKOSAKI Motohiro 	if (err)
1232b05ca738SKOSAKI Motohiro 		goto mpol_out;
1233b05ca738SKOSAKI Motohiro 
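	/*
	 * queue_pages_range() checks the range and, for MPOL_MF_MOVE*,
	 * collects misplaced pages on 'pagelist'; only if it succeeds is the
	 * new policy actually installed by mbind_range().
	 */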
1234d05f0cdcSHugh Dickins 	err = queue_pages_range(mm, start, end, nmask,
12356ce3c4c0SChristoph Lameter 			  flags | MPOL_MF_INVERT, &pagelist);
1236d05f0cdcSHugh Dickins 	if (!err)
12379d8cebd4SKOSAKI Motohiro 		err = mbind_range(mm, start, end, new);
12387e2ab150SChristoph Lameter 
1239b24f53a0SLee Schermerhorn 	if (!err) {
1240b24f53a0SLee Schermerhorn 		int nr_failed = 0;
1241b24f53a0SLee Schermerhorn 
1242cf608ac1SMinchan Kim 		if (!list_empty(&pagelist)) {
1243b24f53a0SLee Schermerhorn 			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1244d05f0cdcSHugh Dickins 			nr_failed = migrate_pages(&pagelist, new_page, NULL,
1245d05f0cdcSHugh Dickins 				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
1246cf608ac1SMinchan Kim 			if (nr_failed)
124774060e4dSNaoya Horiguchi 				putback_movable_pages(&pagelist);
1248cf608ac1SMinchan Kim 		}
12496ce3c4c0SChristoph Lameter 
1250b24f53a0SLee Schermerhorn 		if (nr_failed && (flags & MPOL_MF_STRICT))
12516ce3c4c0SChristoph Lameter 			err = -EIO;
1252ab8a3e14SKOSAKI Motohiro 	} else
1253b0e5fd73SJoonsoo Kim 		putback_movable_pages(&pagelist);
1254b20a3503SChristoph Lameter 
12556ce3c4c0SChristoph Lameter 	up_write(&mm->mmap_sem);
1256b05ca738SKOSAKI Motohiro  mpol_out:
1257f0be3d32SLee Schermerhorn 	mpol_put(new);
12586ce3c4c0SChristoph Lameter 	return err;
12596ce3c4c0SChristoph Lameter }
12606ce3c4c0SChristoph Lameter 
126139743889SChristoph Lameter /*
12628bccd85fSChristoph Lameter  * User space interface with variable sized bitmaps for nodelists.
12638bccd85fSChristoph Lameter  */
12648bccd85fSChristoph Lameter 
12658bccd85fSChristoph Lameter /* Copy a node mask from user space. */
126639743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
12678bccd85fSChristoph Lameter 		     unsigned long maxnode)
12688bccd85fSChristoph Lameter {
12698bccd85fSChristoph Lameter 	unsigned long k;
12708bccd85fSChristoph Lameter 	unsigned long nlongs;
12718bccd85fSChristoph Lameter 	unsigned long endmask;
12728bccd85fSChristoph Lameter 
12738bccd85fSChristoph Lameter 	--maxnode;
12748bccd85fSChristoph Lameter 	nodes_clear(*nodes);
12758bccd85fSChristoph Lameter 	if (maxnode == 0 || !nmask)
12768bccd85fSChristoph Lameter 		return 0;
1277a9c930baSAndi Kleen 	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1278636f13c1SChris Wright 		return -EINVAL;
12798bccd85fSChristoph Lameter 
12808bccd85fSChristoph Lameter 	nlongs = BITS_TO_LONGS(maxnode);
12818bccd85fSChristoph Lameter 	if ((maxnode % BITS_PER_LONG) == 0)
12828bccd85fSChristoph Lameter 		endmask = ~0UL;
12838bccd85fSChristoph Lameter 	else
12848bccd85fSChristoph Lameter 		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
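	/*
	 * Example (assuming 64-bit longs): a user-supplied maxnode of 65
	 * arrives here as 64 after the decrement above, giving nlongs = 1
	 * and endmask = ~0UL; a maxnode of 67 gives nlongs = 2 and an
	 * endmask of 0x3 for the final word.
	 */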
12858bccd85fSChristoph Lameter 
12868bccd85fSChristoph Lameter 	/* If the user specified more nodes than supported, just check
12878bccd85fSChristoph Lameter 	   that the unsupported part is all zero. */
12888bccd85fSChristoph Lameter 	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
12898bccd85fSChristoph Lameter 		if (nlongs > PAGE_SIZE/sizeof(long))
12908bccd85fSChristoph Lameter 			return -EINVAL;
12918bccd85fSChristoph Lameter 		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
12928bccd85fSChristoph Lameter 			unsigned long t;
12938bccd85fSChristoph Lameter 			if (get_user(t, nmask + k))
12948bccd85fSChristoph Lameter 				return -EFAULT;
12958bccd85fSChristoph Lameter 			if (k == nlongs - 1) {
12968bccd85fSChristoph Lameter 				if (t & endmask)
12978bccd85fSChristoph Lameter 					return -EINVAL;
12988bccd85fSChristoph Lameter 			} else if (t)
12998bccd85fSChristoph Lameter 				return -EINVAL;
13008bccd85fSChristoph Lameter 		}
13018bccd85fSChristoph Lameter 		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
13028bccd85fSChristoph Lameter 		endmask = ~0UL;
13038bccd85fSChristoph Lameter 	}
13048bccd85fSChristoph Lameter 
13058bccd85fSChristoph Lameter 	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
13068bccd85fSChristoph Lameter 		return -EFAULT;
13078bccd85fSChristoph Lameter 	nodes_addr(*nodes)[nlongs-1] &= endmask;
13088bccd85fSChristoph Lameter 	return 0;
13098bccd85fSChristoph Lameter }
13108bccd85fSChristoph Lameter 
13118bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */
13128bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
13138bccd85fSChristoph Lameter 			      nodemask_t *nodes)
13148bccd85fSChristoph Lameter {
13158bccd85fSChristoph Lameter 	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
13168bccd85fSChristoph Lameter 	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
13178bccd85fSChristoph Lameter 
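	/*
	 * If the user's buffer is wider than the kernel nodemask, zero the
	 * tail so no stale bits are reported, then copy only the
	 * kernel-sized part.
	 */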
13188bccd85fSChristoph Lameter 	if (copy > nbytes) {
13198bccd85fSChristoph Lameter 		if (copy > PAGE_SIZE)
13208bccd85fSChristoph Lameter 			return -EINVAL;
13218bccd85fSChristoph Lameter 		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
13228bccd85fSChristoph Lameter 			return -EFAULT;
13238bccd85fSChristoph Lameter 		copy = nbytes;
13248bccd85fSChristoph Lameter 	}
13258bccd85fSChristoph Lameter 	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
13268bccd85fSChristoph Lameter }
13278bccd85fSChristoph Lameter 
1328938bb9f5SHeiko Carstens SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1329f7f28ca9SRasmus Villemoes 		unsigned long, mode, const unsigned long __user *, nmask,
1330938bb9f5SHeiko Carstens 		unsigned long, maxnode, unsigned, flags)
13318bccd85fSChristoph Lameter {
13328bccd85fSChristoph Lameter 	nodemask_t nodes;
13338bccd85fSChristoph Lameter 	int err;
1334028fec41SDavid Rientjes 	unsigned short mode_flags;
13358bccd85fSChristoph Lameter 
1336028fec41SDavid Rientjes 	mode_flags = mode & MPOL_MODE_FLAGS;
1337028fec41SDavid Rientjes 	mode &= ~MPOL_MODE_FLAGS;
1338a3b51e01SDavid Rientjes 	if (mode >= MPOL_MAX)
1339a3b51e01SDavid Rientjes 		return -EINVAL;
13404c50bc01SDavid Rientjes 	if ((mode_flags & MPOL_F_STATIC_NODES) &&
13414c50bc01SDavid Rientjes 	    (mode_flags & MPOL_F_RELATIVE_NODES))
13424c50bc01SDavid Rientjes 		return -EINVAL;
13438bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
13448bccd85fSChristoph Lameter 	if (err)
13458bccd85fSChristoph Lameter 		return err;
1346028fec41SDavid Rientjes 	return do_mbind(start, len, mode, mode_flags, &nodes, flags);
13478bccd85fSChristoph Lameter }
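/*
 * Illustrative user-space call (hypothetical, not part of this file),
 * binding an mmap'd region to nodes 0 and 1.  The optional mode flags
 * travel in the high bits of 'mode' and are split off above:
 *
 *	unsigned long nodes = 0x3;	(nodes 0 and 1)
 *	mbind(addr, len, MPOL_BIND | MPOL_F_STATIC_NODES, &nodes, 3,
 *	      MPOL_MF_MOVE);
 *
 * maxnode is 3 rather than 2 because get_nodes() above uses one bit
 * fewer than the value passed in.
 */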
13488bccd85fSChristoph Lameter 
13498bccd85fSChristoph Lameter /* Set the process memory policy */
135023c8902dSRasmus Villemoes SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1351938bb9f5SHeiko Carstens 		unsigned long, maxnode)
13528bccd85fSChristoph Lameter {
13538bccd85fSChristoph Lameter 	int err;
13548bccd85fSChristoph Lameter 	nodemask_t nodes;
1355028fec41SDavid Rientjes 	unsigned short flags;
13568bccd85fSChristoph Lameter 
1357028fec41SDavid Rientjes 	flags = mode & MPOL_MODE_FLAGS;
1358028fec41SDavid Rientjes 	mode &= ~MPOL_MODE_FLAGS;
1359028fec41SDavid Rientjes 	if ((unsigned int)mode >= MPOL_MAX)
13608bccd85fSChristoph Lameter 		return -EINVAL;
13614c50bc01SDavid Rientjes 	if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
13624c50bc01SDavid Rientjes 		return -EINVAL;
13638bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
13648bccd85fSChristoph Lameter 	if (err)
13658bccd85fSChristoph Lameter 		return err;
1366028fec41SDavid Rientjes 	return do_set_mempolicy(mode, flags, &nodes);
13678bccd85fSChristoph Lameter }
13688bccd85fSChristoph Lameter 
1369938bb9f5SHeiko Carstens SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1370938bb9f5SHeiko Carstens 		const unsigned long __user *, old_nodes,
1371938bb9f5SHeiko Carstens 		const unsigned long __user *, new_nodes)
137239743889SChristoph Lameter {
1373c69e8d9cSDavid Howells 	const struct cred *cred = current_cred(), *tcred;
1374596d7cfaSKOSAKI Motohiro 	struct mm_struct *mm = NULL;
137539743889SChristoph Lameter 	struct task_struct *task;
137639743889SChristoph Lameter 	nodemask_t task_nodes;
137739743889SChristoph Lameter 	int err;
1378596d7cfaSKOSAKI Motohiro 	nodemask_t *old;
1379596d7cfaSKOSAKI Motohiro 	nodemask_t *new;
1380596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH(scratch);
138139743889SChristoph Lameter 
1382596d7cfaSKOSAKI Motohiro 	if (!scratch)
1383596d7cfaSKOSAKI Motohiro 		return -ENOMEM;
138439743889SChristoph Lameter 
1385596d7cfaSKOSAKI Motohiro 	old = &scratch->mask1;
1386596d7cfaSKOSAKI Motohiro 	new = &scratch->mask2;
1387596d7cfaSKOSAKI Motohiro 
1388596d7cfaSKOSAKI Motohiro 	err = get_nodes(old, old_nodes, maxnode);
138939743889SChristoph Lameter 	if (err)
1390596d7cfaSKOSAKI Motohiro 		goto out;
1391596d7cfaSKOSAKI Motohiro 
1392596d7cfaSKOSAKI Motohiro 	err = get_nodes(new, new_nodes, maxnode);
1393596d7cfaSKOSAKI Motohiro 	if (err)
1394596d7cfaSKOSAKI Motohiro 		goto out;
139539743889SChristoph Lameter 
139639743889SChristoph Lameter 	/* Find the mm_struct */
139755cfaa3cSZeng Zhaoming 	rcu_read_lock();
1398228ebcbeSPavel Emelyanov 	task = pid ? find_task_by_vpid(pid) : current;
139939743889SChristoph Lameter 	if (!task) {
140055cfaa3cSZeng Zhaoming 		rcu_read_unlock();
1401596d7cfaSKOSAKI Motohiro 		err = -ESRCH;
1402596d7cfaSKOSAKI Motohiro 		goto out;
140339743889SChristoph Lameter 	}
14043268c63eSChristoph Lameter 	get_task_struct(task);
140539743889SChristoph Lameter 
1406596d7cfaSKOSAKI Motohiro 	err = -EINVAL;
140739743889SChristoph Lameter 
140839743889SChristoph Lameter 	/*
140939743889SChristoph Lameter 	 * Check if this process has the right to modify the specified
141039743889SChristoph Lameter 	 * process. The right exists if the process has administrative
14117f927fccSAlexey Dobriyan 	 * capabilities, superuser privileges or the same
141239743889SChristoph Lameter 	 * userid as the target process.
141339743889SChristoph Lameter 	 */
1414c69e8d9cSDavid Howells 	tcred = __task_cred(task);
1415b38a86ebSEric W. Biederman 	if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
1416b38a86ebSEric W. Biederman 	    !uid_eq(cred->uid,  tcred->suid) && !uid_eq(cred->uid,  tcred->uid) &&
141774c00241SChristoph Lameter 	    !capable(CAP_SYS_NICE)) {
1418c69e8d9cSDavid Howells 		rcu_read_unlock();
141939743889SChristoph Lameter 		err = -EPERM;
14203268c63eSChristoph Lameter 		goto out_put;
142139743889SChristoph Lameter 	}
1422c69e8d9cSDavid Howells 	rcu_read_unlock();
142339743889SChristoph Lameter 
142439743889SChristoph Lameter 	task_nodes = cpuset_mems_allowed(task);
142539743889SChristoph Lameter 	/* Is the user allowed to access the target nodes? */
1426596d7cfaSKOSAKI Motohiro 	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
142739743889SChristoph Lameter 		err = -EPERM;
14283268c63eSChristoph Lameter 		goto out_put;
142939743889SChristoph Lameter 	}
143039743889SChristoph Lameter 
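	/* Every requested destination node must actually have memory. */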
143101f13bd6SLai Jiangshan 	if (!nodes_subset(*new, node_states[N_MEMORY])) {
14323b42d28bSChristoph Lameter 		err = -EINVAL;
14333268c63eSChristoph Lameter 		goto out_put;
14343b42d28bSChristoph Lameter 	}
14353b42d28bSChristoph Lameter 
143686c3a764SDavid Quigley 	err = security_task_movememory(task);
143786c3a764SDavid Quigley 	if (err)
14383268c63eSChristoph Lameter 		goto out_put;
143986c3a764SDavid Quigley 
14403268c63eSChristoph Lameter 	mm = get_task_mm(task);
14413268c63eSChristoph Lameter 	put_task_struct(task);
1442f2a9ef88SSasha Levin 
1443f2a9ef88SSasha Levin 	if (!mm) {
1444f2a9ef88SSasha Levin 		err = -EINVAL;
1445f2a9ef88SSasha Levin 		goto out;
1446f2a9ef88SSasha Levin 	}
1447f2a9ef88SSasha Levin 
1448596d7cfaSKOSAKI Motohiro 	err = do_migrate_pages(mm, old, new,
144974c00241SChristoph Lameter 		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
14503268c63eSChristoph Lameter 
145139743889SChristoph Lameter 	mmput(mm);
14523268c63eSChristoph Lameter out:
1453596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH_FREE(scratch);
1454596d7cfaSKOSAKI Motohiro 
145539743889SChristoph Lameter 	return err;
14563268c63eSChristoph Lameter 
14573268c63eSChristoph Lameter out_put:
14583268c63eSChristoph Lameter 	put_task_struct(task);
14593268c63eSChristoph Lameter 	goto out;
14603268c63eSChristoph Lameter 
146139743889SChristoph Lameter }
146239743889SChristoph Lameter 
146339743889SChristoph Lameter 
14648bccd85fSChristoph Lameter /* Retrieve NUMA policy */
1465938bb9f5SHeiko Carstens SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1466938bb9f5SHeiko Carstens 		unsigned long __user *, nmask, unsigned long, maxnode,
1467938bb9f5SHeiko Carstens 		unsigned long, addr, unsigned long, flags)
14688bccd85fSChristoph Lameter {
1469dbcb0f19SAdrian Bunk 	int err;
1470dbcb0f19SAdrian Bunk 	int uninitialized_var(pval);
14718bccd85fSChristoph Lameter 	nodemask_t nodes;
14728bccd85fSChristoph Lameter 
14738bccd85fSChristoph Lameter 	if (nmask != NULL && maxnode < MAX_NUMNODES)
14748bccd85fSChristoph Lameter 		return -EINVAL;
14758bccd85fSChristoph Lameter 
14768bccd85fSChristoph Lameter 	err = do_get_mempolicy(&pval, &nodes, addr, flags);
14778bccd85fSChristoph Lameter 
14788bccd85fSChristoph Lameter 	if (err)
14798bccd85fSChristoph Lameter 		return err;
14808bccd85fSChristoph Lameter 
14818bccd85fSChristoph Lameter 	if (policy && put_user(pval, policy))
14828bccd85fSChristoph Lameter 		return -EFAULT;
14838bccd85fSChristoph Lameter 
14848bccd85fSChristoph Lameter 	if (nmask)
14858bccd85fSChristoph Lameter 		err = copy_nodes_to_user(nmask, maxnode, &nodes);
14868bccd85fSChristoph Lameter 
14878bccd85fSChristoph Lameter 	return err;
14888bccd85fSChristoph Lameter }
14898bccd85fSChristoph Lameter 
14901da177e4SLinus Torvalds #ifdef CONFIG_COMPAT
14911da177e4SLinus Torvalds 
1492c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1493c93e0f6cSHeiko Carstens 		       compat_ulong_t __user *, nmask,
1494c93e0f6cSHeiko Carstens 		       compat_ulong_t, maxnode,
1495c93e0f6cSHeiko Carstens 		       compat_ulong_t, addr, compat_ulong_t, flags)
14961da177e4SLinus Torvalds {
14971da177e4SLinus Torvalds 	long err;
14981da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
14991da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
15001da177e4SLinus Torvalds 	DECLARE_BITMAP(bm, MAX_NUMNODES);
15011da177e4SLinus Torvalds 
15021da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
15031da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
15041da177e4SLinus Torvalds 
15051da177e4SLinus Torvalds 	if (nmask)
15061da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
15071da177e4SLinus Torvalds 
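	/*
	 * The native syscall wants a native-sized user buffer, so one was
	 * carved out above with compat_alloc_user_space(); the result is
	 * converted back into the compat bitmap below.
	 */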
15081da177e4SLinus Torvalds 	err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
15091da177e4SLinus Torvalds 
15101da177e4SLinus Torvalds 	if (!err && nmask) {
15112bbff6c7SKAMEZAWA Hiroyuki 		unsigned long copy_size;
15122bbff6c7SKAMEZAWA Hiroyuki 		copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
15132bbff6c7SKAMEZAWA Hiroyuki 		err = copy_from_user(bm, nm, copy_size);
15141da177e4SLinus Torvalds 		/* ensure entire bitmap is zeroed */
15151da177e4SLinus Torvalds 		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
15161da177e4SLinus Torvalds 		err |= compat_put_bitmap(nmask, bm, nr_bits);
15171da177e4SLinus Torvalds 	}
15181da177e4SLinus Torvalds 
15191da177e4SLinus Torvalds 	return err;
15201da177e4SLinus Torvalds }
15211da177e4SLinus Torvalds 
1522c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1523c93e0f6cSHeiko Carstens 		       compat_ulong_t, maxnode)
15241da177e4SLinus Torvalds {
15251da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
15261da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
15271da177e4SLinus Torvalds 	DECLARE_BITMAP(bm, MAX_NUMNODES);
15281da177e4SLinus Torvalds 
15291da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
15301da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
15311da177e4SLinus Torvalds 
15321da177e4SLinus Torvalds 	if (nmask) {
1533cf01fb99SChris Salls 		if (compat_get_bitmap(bm, nmask, nr_bits))
15341da177e4SLinus Torvalds 			return -EFAULT;
1535cf01fb99SChris Salls 		nm = compat_alloc_user_space(alloc_size);
1536cf01fb99SChris Salls 		if (copy_to_user(nm, bm, alloc_size))
1537cf01fb99SChris Salls 			return -EFAULT;
1538cf01fb99SChris Salls 	}
15391da177e4SLinus Torvalds 
15401da177e4SLinus Torvalds 	return sys_set_mempolicy(mode, nm, nr_bits+1);
15411da177e4SLinus Torvalds }
15421da177e4SLinus Torvalds 
1543c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1544c93e0f6cSHeiko Carstens 		       compat_ulong_t, mode, compat_ulong_t __user *, nmask,
1545c93e0f6cSHeiko Carstens 		       compat_ulong_t, maxnode, compat_ulong_t, flags)
15461da177e4SLinus Torvalds {
15471da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
15481da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
1549dfcd3c0dSAndi Kleen 	nodemask_t bm;
15501da177e4SLinus Torvalds 
15511da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
15521da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
15531da177e4SLinus Torvalds 
15541da177e4SLinus Torvalds 	if (nmask) {
1555cf01fb99SChris Salls 		if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
15561da177e4SLinus Torvalds 			return -EFAULT;
1557cf01fb99SChris Salls 		nm = compat_alloc_user_space(alloc_size);
1558cf01fb99SChris Salls 		if (copy_to_user(nm, nodes_addr(bm), alloc_size))
1559cf01fb99SChris Salls 			return -EFAULT;
1560cf01fb99SChris Salls 	}
15611da177e4SLinus Torvalds 
15621da177e4SLinus Torvalds 	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
15631da177e4SLinus Torvalds }
15641da177e4SLinus Torvalds 
15651da177e4SLinus Torvalds #endif
15661da177e4SLinus Torvalds 
156774d2c3a0SOleg Nesterov struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
156874d2c3a0SOleg Nesterov 						unsigned long addr)
15691da177e4SLinus Torvalds {
15708d90274bSOleg Nesterov 	struct mempolicy *pol = NULL;
15711da177e4SLinus Torvalds 
15721da177e4SLinus Torvalds 	if (vma) {
1573480eccf9SLee Schermerhorn 		if (vma->vm_ops && vma->vm_ops->get_policy) {
15748d90274bSOleg Nesterov 			pol = vma->vm_ops->get_policy(vma, addr);
157500442ad0SMel Gorman 		} else if (vma->vm_policy) {
15761da177e4SLinus Torvalds 			pol = vma->vm_policy;
157700442ad0SMel Gorman 
157800442ad0SMel Gorman 			/*
157900442ad0SMel Gorman 			 * shmem_alloc_page() passes MPOL_F_SHARED policy with
158000442ad0SMel Gorman 			 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
158100442ad0SMel Gorman 			 * count on these policies which will be dropped by
158200442ad0SMel Gorman 			 * mpol_cond_put() later
158300442ad0SMel Gorman 			 */
158400442ad0SMel Gorman 			if (mpol_needs_cond_ref(pol))
158500442ad0SMel Gorman 				mpol_get(pol);
158600442ad0SMel Gorman 		}
15871da177e4SLinus Torvalds 	}
1588f15ca78eSOleg Nesterov 
158974d2c3a0SOleg Nesterov 	return pol;
159074d2c3a0SOleg Nesterov }
159174d2c3a0SOleg Nesterov 
159274d2c3a0SOleg Nesterov /*
1593dd6eecb9SOleg Nesterov  * get_vma_policy(@vma, @addr)
159474d2c3a0SOleg Nesterov  * @vma: virtual memory area whose policy is sought
159574d2c3a0SOleg Nesterov  * @addr: address in @vma for shared policy lookup
159674d2c3a0SOleg Nesterov  *
159774d2c3a0SOleg Nesterov  * Returns effective policy for a VMA at specified address.
1598dd6eecb9SOleg Nesterov  * Falls back to current->mempolicy or system default policy, as necessary.
159974d2c3a0SOleg Nesterov  * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
160074d2c3a0SOleg Nesterov  * count--added by the get_policy() vm_op, as appropriate--to protect against
160174d2c3a0SOleg Nesterov  * freeing by another task.  It is the caller's responsibility to free the
160274d2c3a0SOleg Nesterov  * extra reference for shared policies.
160374d2c3a0SOleg Nesterov  */
1604dd6eecb9SOleg Nesterov static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1605dd6eecb9SOleg Nesterov 						unsigned long addr)
160674d2c3a0SOleg Nesterov {
160774d2c3a0SOleg Nesterov 	struct mempolicy *pol = __get_vma_policy(vma, addr);
160874d2c3a0SOleg Nesterov 
16098d90274bSOleg Nesterov 	if (!pol)
1610dd6eecb9SOleg Nesterov 		pol = get_task_policy(current);
16118d90274bSOleg Nesterov 
16121da177e4SLinus Torvalds 	return pol;
16131da177e4SLinus Torvalds }
16141da177e4SLinus Torvalds 
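/*
 * Return true if the effective policy for @vma (vma, task or system
 * default) has MPOL_F_MOF set, i.e. pages in this range may be migrated
 * on fault for NUMA balancing.
 */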
16156b6482bbSOleg Nesterov bool vma_policy_mof(struct vm_area_struct *vma)
1616fc314724SMel Gorman {
16176b6482bbSOleg Nesterov 	struct mempolicy *pol;
1618f15ca78eSOleg Nesterov 
1619fc314724SMel Gorman 	if (vma->vm_ops && vma->vm_ops->get_policy) {
1620fc314724SMel Gorman 		bool ret = false;
1621fc314724SMel Gorman 
1622fc314724SMel Gorman 		pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1623fc314724SMel Gorman 		if (pol && (pol->flags & MPOL_F_MOF))
1624fc314724SMel Gorman 			ret = true;
1625fc314724SMel Gorman 		mpol_cond_put(pol);
1626fc314724SMel Gorman 
1627fc314724SMel Gorman 		return ret;
16288d90274bSOleg Nesterov 	}
16298d90274bSOleg Nesterov 
1630fc314724SMel Gorman 	pol = vma->vm_policy;
16318d90274bSOleg Nesterov 	if (!pol)
16326b6482bbSOleg Nesterov 		pol = get_task_policy(current);
1633fc314724SMel Gorman 
1634fc314724SMel Gorman 	return pol->flags & MPOL_F_MOF;
1635fc314724SMel Gorman }
1636fc314724SMel Gorman 
1637d3eb1570SLai Jiangshan static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1638d3eb1570SLai Jiangshan {
1639d3eb1570SLai Jiangshan 	enum zone_type dynamic_policy_zone = policy_zone;
1640d3eb1570SLai Jiangshan 
1641d3eb1570SLai Jiangshan 	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1642d3eb1570SLai Jiangshan 
1643d3eb1570SLai Jiangshan 	/*
1644d3eb1570SLai Jiangshan 	 * if policy->v.nodes has movable memory only,
1645d3eb1570SLai Jiangshan 	 * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
1646d3eb1570SLai Jiangshan 	 *
1647d3eb1570SLai Jiangshan 	 * policy->v.nodes is intersected with node_states[N_MEMORY],
1648d3eb1570SLai Jiangshan 	 * so if the following test fails, it implies
1649d3eb1570SLai Jiangshan 	 * policy->v.nodes has movable memory only.
1650d3eb1570SLai Jiangshan 	 */
1651d3eb1570SLai Jiangshan 	if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1652d3eb1570SLai Jiangshan 		dynamic_policy_zone = ZONE_MOVABLE;
1653d3eb1570SLai Jiangshan 
1654d3eb1570SLai Jiangshan 	return zone >= dynamic_policy_zone;
1655d3eb1570SLai Jiangshan }
1656d3eb1570SLai Jiangshan 
165752cd3b07SLee Schermerhorn /*
165852cd3b07SLee Schermerhorn  * Return a nodemask representing a mempolicy for filtering nodes for
165952cd3b07SLee Schermerhorn  * page allocation
166052cd3b07SLee Schermerhorn  */
166152cd3b07SLee Schermerhorn static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
166219770b32SMel Gorman {
166319770b32SMel Gorman 	/* Lower zones don't get a nodemask applied for MPOL_BIND */
166445c4745aSLee Schermerhorn 	if (unlikely(policy->mode == MPOL_BIND) &&
1665d3eb1570SLai Jiangshan 			apply_policy_zone(policy, gfp_zone(gfp)) &&
166619770b32SMel Gorman 			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
166719770b32SMel Gorman 		return &policy->v.nodes;
166819770b32SMel Gorman 
166919770b32SMel Gorman 	return NULL;
167019770b32SMel Gorman }
167119770b32SMel Gorman 
1672*04ec6264SVlastimil Babka /* Return the node id preferred by the given mempolicy, or the given id */
1673*04ec6264SVlastimil Babka static int policy_node(gfp_t gfp, struct mempolicy *policy,
16742f5f9486SAndi Kleen 								int nd)
16751da177e4SLinus Torvalds {
16766d840958SMichal Hocko 	if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL))
16771da177e4SLinus Torvalds 		nd = policy->v.preferred_node;
16786d840958SMichal Hocko 	else {
167919770b32SMel Gorman 		/*
16806d840958SMichal Hocko 		 * __GFP_THISNODE shouldn't even be used with the bind policy
16816d840958SMichal Hocko 		 * because we might easily break the expectation to stay on the
16826d840958SMichal Hocko 		 * requested node and not break the policy.
168319770b32SMel Gorman 		 */
16846d840958SMichal Hocko 		WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
16851da177e4SLinus Torvalds 	}
16866d840958SMichal Hocko 
1687*04ec6264SVlastimil Babka 	return nd;
16881da177e4SLinus Torvalds }
16891da177e4SLinus Torvalds 
16901da177e4SLinus Torvalds /* Do dynamic interleaving for a process */
16911da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy)
16921da177e4SLinus Torvalds {
169345816682SVlastimil Babka 	unsigned next;
16941da177e4SLinus Torvalds 	struct task_struct *me = current;
16951da177e4SLinus Torvalds 
169645816682SVlastimil Babka 	next = next_node_in(me->il_prev, policy->v.nodes);
1697f5b087b5SDavid Rientjes 	if (next < MAX_NUMNODES)
169845816682SVlastimil Babka 		me->il_prev = next;
169945816682SVlastimil Babka 	return next;
17001da177e4SLinus Torvalds }
17011da177e4SLinus Torvalds 
1702dc85da15SChristoph Lameter /*
1703dc85da15SChristoph Lameter  * Depending on the memory policy provide a node from which to allocate the
1704dc85da15SChristoph Lameter  * next slab entry.
1705dc85da15SChristoph Lameter  */
17062a389610SDavid Rientjes unsigned int mempolicy_slab_node(void)
1707dc85da15SChristoph Lameter {
1708e7b691b0SAndi Kleen 	struct mempolicy *policy;
17092a389610SDavid Rientjes 	int node = numa_mem_id();
1710e7b691b0SAndi Kleen 
1711e7b691b0SAndi Kleen 	if (in_interrupt())
17122a389610SDavid Rientjes 		return node;
1713e7b691b0SAndi Kleen 
1714e7b691b0SAndi Kleen 	policy = current->mempolicy;
1715fc36b8d3SLee Schermerhorn 	if (!policy || policy->flags & MPOL_F_LOCAL)
17162a389610SDavid Rientjes 		return node;
1717765c4507SChristoph Lameter 
1718bea904d5SLee Schermerhorn 	switch (policy->mode) {
1719bea904d5SLee Schermerhorn 	case MPOL_PREFERRED:
1720fc36b8d3SLee Schermerhorn 		/*
1721fc36b8d3SLee Schermerhorn 		 * handled MPOL_F_LOCAL above
1722fc36b8d3SLee Schermerhorn 		 */
1723bea904d5SLee Schermerhorn 		return policy->v.preferred_node;
1724bea904d5SLee Schermerhorn 
1725dc85da15SChristoph Lameter 	case MPOL_INTERLEAVE:
1726dc85da15SChristoph Lameter 		return interleave_nodes(policy);
1727dc85da15SChristoph Lameter 
1728dd1a239fSMel Gorman 	case MPOL_BIND: {
1729c33d6c06SMel Gorman 		struct zoneref *z;
1730c33d6c06SMel Gorman 
1731dc85da15SChristoph Lameter 		/*
1732dc85da15SChristoph Lameter 		 * Follow bind policy behavior and start allocation at the
1733dc85da15SChristoph Lameter 		 * first node.
1734dc85da15SChristoph Lameter 		 */
173519770b32SMel Gorman 		struct zonelist *zonelist;
173619770b32SMel Gorman 		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1737c9634cf0SAneesh Kumar K.V 		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
1738c33d6c06SMel Gorman 		z = first_zones_zonelist(zonelist, highest_zoneidx,
1739c33d6c06SMel Gorman 							&policy->v.nodes);
1740c33d6c06SMel Gorman 		return z->zone ? z->zone->node : node;
1741dd1a239fSMel Gorman 	}
1742dc85da15SChristoph Lameter 
1743dc85da15SChristoph Lameter 	default:
1744bea904d5SLee Schermerhorn 		BUG();
1745dc85da15SChristoph Lameter 	}
1746dc85da15SChristoph Lameter }
1747dc85da15SChristoph Lameter 
1748fee83b3aSAndrew Morton /*
1749fee83b3aSAndrew Morton  * Do static interleaving for a VMA with known offset @n.  Returns the n'th
1750fee83b3aSAndrew Morton  * node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the
1751fee83b3aSAndrew Morton  * number of present nodes.
1752fee83b3aSAndrew Morton  */
17531da177e4SLinus Torvalds static unsigned offset_il_node(struct mempolicy *pol,
1754fee83b3aSAndrew Morton 			       struct vm_area_struct *vma, unsigned long n)
17551da177e4SLinus Torvalds {
1756dfcd3c0dSAndi Kleen 	unsigned nnodes = nodes_weight(pol->v.nodes);
1757f5b087b5SDavid Rientjes 	unsigned target;
1758fee83b3aSAndrew Morton 	int i;
1759fee83b3aSAndrew Morton 	int nid;
17601da177e4SLinus Torvalds 
1761f5b087b5SDavid Rientjes 	if (!nnodes)
1762f5b087b5SDavid Rientjes 		return numa_node_id();
1763fee83b3aSAndrew Morton 	target = (unsigned int)n % nnodes;
1764fee83b3aSAndrew Morton 	nid = first_node(pol->v.nodes);
1765fee83b3aSAndrew Morton 	for (i = 0; i < target; i++)
1766dfcd3c0dSAndi Kleen 		nid = next_node(nid, pol->v.nodes);
17671da177e4SLinus Torvalds 	return nid;
17681da177e4SLinus Torvalds }
17691da177e4SLinus Torvalds 
17705da7ca86SChristoph Lameter /* Determine a node number for interleave */
17715da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol,
17725da7ca86SChristoph Lameter 		 struct vm_area_struct *vma, unsigned long addr, int shift)
17735da7ca86SChristoph Lameter {
17745da7ca86SChristoph Lameter 	if (vma) {
17755da7ca86SChristoph Lameter 		unsigned long off;
17765da7ca86SChristoph Lameter 
17773b98b087SNishanth Aravamudan 		/*
17783b98b087SNishanth Aravamudan 		 * for small pages, there is no difference between
17793b98b087SNishanth Aravamudan 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
17803b98b087SNishanth Aravamudan 		 * for huge pages, since vm_pgoff is in units of small
17813b98b087SNishanth Aravamudan 		 * pages, we need to shift off the always 0 bits to get
17823b98b087SNishanth Aravamudan 		 * a useful offset.
17833b98b087SNishanth Aravamudan 		 */
17843b98b087SNishanth Aravamudan 		BUG_ON(shift < PAGE_SHIFT);
17853b98b087SNishanth Aravamudan 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
17865da7ca86SChristoph Lameter 		off += (addr - vma->vm_start) >> shift;
17875da7ca86SChristoph Lameter 		return offset_il_node(pol, vma, off);
17885da7ca86SChristoph Lameter 	} else
17895da7ca86SChristoph Lameter 		return interleave_nodes(pol);
17905da7ca86SChristoph Lameter }
17915da7ca86SChristoph Lameter 
179200ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS
1793480eccf9SLee Schermerhorn /*
1794*04ec6264SVlastimil Babka  * huge_node(@vma, @addr, @gfp_flags, @mpol, @nodemask)
1795b46e14acSFabian Frederick  * @vma: virtual memory area whose policy is sought
1796b46e14acSFabian Frederick  * @addr: address in @vma for shared policy lookup and interleave policy
1797b46e14acSFabian Frederick  * @gfp_flags: for requested zone
1798b46e14acSFabian Frederick  * @mpol: pointer to mempolicy pointer for reference counted mempolicy
1799b46e14acSFabian Frederick  * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
1800480eccf9SLee Schermerhorn  *
1801*04ec6264SVlastimil Babka  * Returns a nid suitable for a huge page allocation and a pointer
180252cd3b07SLee Schermerhorn  * to the struct mempolicy for conditional unref after allocation.
180352cd3b07SLee Schermerhorn  * If the effective policy is 'BIND', returns a pointer to the mempolicy's
180452cd3b07SLee Schermerhorn  * @nodemask for filtering the zonelist.
1805c0ff7453SMiao Xie  *
1806d26914d1SMel Gorman  * Must be protected by read_mems_allowed_begin()
1807480eccf9SLee Schermerhorn  */
1808*04ec6264SVlastimil Babka int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
1809*04ec6264SVlastimil Babka 				struct mempolicy **mpol, nodemask_t **nodemask)
18105da7ca86SChristoph Lameter {
1811*04ec6264SVlastimil Babka 	int nid;
18125da7ca86SChristoph Lameter 
1813dd6eecb9SOleg Nesterov 	*mpol = get_vma_policy(vma, addr);
181419770b32SMel Gorman 	*nodemask = NULL;	/* assume !MPOL_BIND */
18155da7ca86SChristoph Lameter 
181652cd3b07SLee Schermerhorn 	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
1817*04ec6264SVlastimil Babka 		nid = interleave_nid(*mpol, vma, addr,
1818*04ec6264SVlastimil Babka 					huge_page_shift(hstate_vma(vma)));
181952cd3b07SLee Schermerhorn 	} else {
1820*04ec6264SVlastimil Babka 		nid = policy_node(gfp_flags, *mpol, numa_node_id());
182152cd3b07SLee Schermerhorn 		if ((*mpol)->mode == MPOL_BIND)
182252cd3b07SLee Schermerhorn 			*nodemask = &(*mpol)->v.nodes;
1823480eccf9SLee Schermerhorn 	}
1824*04ec6264SVlastimil Babka 	return nid;
18255da7ca86SChristoph Lameter }
182606808b08SLee Schermerhorn 
182706808b08SLee Schermerhorn /*
182806808b08SLee Schermerhorn  * init_nodemask_of_mempolicy
182906808b08SLee Schermerhorn  *
183006808b08SLee Schermerhorn  * If the current task's mempolicy is "default" [NULL], return 'false'
183106808b08SLee Schermerhorn  * to indicate default policy.  Otherwise, extract the policy nodemask
183206808b08SLee Schermerhorn  * for 'bind' or 'interleave' policy into the argument nodemask, or
183306808b08SLee Schermerhorn  * initialize the argument nodemask to contain the single node for
183406808b08SLee Schermerhorn  * 'preferred' or 'local' policy and return 'true' to indicate presence
183506808b08SLee Schermerhorn  * of non-default mempolicy.
183606808b08SLee Schermerhorn  *
183706808b08SLee Schermerhorn  * We don't bother with reference counting the mempolicy [mpol_get/put]
183806808b08SLee Schermerhorn  * because the current task is examining its own mempolicy and a task's
183906808b08SLee Schermerhorn  * mempolicy is only ever changed by the task itself.
184006808b08SLee Schermerhorn  *
184106808b08SLee Schermerhorn  * N.B., it is the caller's responsibility to free a returned nodemask.
184206808b08SLee Schermerhorn  */
184306808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask)
184406808b08SLee Schermerhorn {
184506808b08SLee Schermerhorn 	struct mempolicy *mempolicy;
184606808b08SLee Schermerhorn 	int nid;
184706808b08SLee Schermerhorn 
184806808b08SLee Schermerhorn 	if (!(mask && current->mempolicy))
184906808b08SLee Schermerhorn 		return false;
185006808b08SLee Schermerhorn 
1851c0ff7453SMiao Xie 	task_lock(current);
185206808b08SLee Schermerhorn 	mempolicy = current->mempolicy;
185306808b08SLee Schermerhorn 	switch (mempolicy->mode) {
185406808b08SLee Schermerhorn 	case MPOL_PREFERRED:
185506808b08SLee Schermerhorn 		if (mempolicy->flags & MPOL_F_LOCAL)
185606808b08SLee Schermerhorn 			nid = numa_node_id();
185706808b08SLee Schermerhorn 		else
185806808b08SLee Schermerhorn 			nid = mempolicy->v.preferred_node;
185906808b08SLee Schermerhorn 		init_nodemask_of_node(mask, nid);
186006808b08SLee Schermerhorn 		break;
186106808b08SLee Schermerhorn 
186206808b08SLee Schermerhorn 	case MPOL_BIND:
186306808b08SLee Schermerhorn 		/* Fall through */
186406808b08SLee Schermerhorn 	case MPOL_INTERLEAVE:
186506808b08SLee Schermerhorn 		*mask =  mempolicy->v.nodes;
186606808b08SLee Schermerhorn 		break;
186706808b08SLee Schermerhorn 
186806808b08SLee Schermerhorn 	default:
186906808b08SLee Schermerhorn 		BUG();
187006808b08SLee Schermerhorn 	}
1871c0ff7453SMiao Xie 	task_unlock(current);
187206808b08SLee Schermerhorn 
187306808b08SLee Schermerhorn 	return true;
187406808b08SLee Schermerhorn }
187500ac59adSChen, Kenneth W #endif
18765da7ca86SChristoph Lameter 
18776f48d0ebSDavid Rientjes /*
18786f48d0ebSDavid Rientjes  * mempolicy_nodemask_intersects
18796f48d0ebSDavid Rientjes  *
18806f48d0ebSDavid Rientjes  * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
18816f48d0ebSDavid Rientjes  * policy.  Otherwise, check for intersection between mask and the policy
18826f48d0ebSDavid Rientjes  * nodemask for 'bind' or 'interleave' policy.  For 'preferred' or 'local'
18836f48d0ebSDavid Rientjes  * policy, always return true since it may allocate elsewhere on fallback.
18846f48d0ebSDavid Rientjes  *
18856f48d0ebSDavid Rientjes  * Takes task_lock(tsk) to prevent freeing of its mempolicy.
18866f48d0ebSDavid Rientjes  */
18876f48d0ebSDavid Rientjes bool mempolicy_nodemask_intersects(struct task_struct *tsk,
18886f48d0ebSDavid Rientjes 					const nodemask_t *mask)
18896f48d0ebSDavid Rientjes {
18906f48d0ebSDavid Rientjes 	struct mempolicy *mempolicy;
18916f48d0ebSDavid Rientjes 	bool ret = true;
18926f48d0ebSDavid Rientjes 
18936f48d0ebSDavid Rientjes 	if (!mask)
18946f48d0ebSDavid Rientjes 		return ret;
18956f48d0ebSDavid Rientjes 	task_lock(tsk);
18966f48d0ebSDavid Rientjes 	mempolicy = tsk->mempolicy;
18976f48d0ebSDavid Rientjes 	if (!mempolicy)
18986f48d0ebSDavid Rientjes 		goto out;
18996f48d0ebSDavid Rientjes 
19006f48d0ebSDavid Rientjes 	switch (mempolicy->mode) {
19016f48d0ebSDavid Rientjes 	case MPOL_PREFERRED:
19026f48d0ebSDavid Rientjes 		/*
19036f48d0ebSDavid Rientjes 		 * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to
19046f48d0ebSDavid Rientjes 		 * allocate from, they may fallback to other nodes when oom.
19056f48d0ebSDavid Rientjes 		 * Thus, it's possible for tsk to have allocated memory from
19066f48d0ebSDavid Rientjes 		 * nodes in mask.
19076f48d0ebSDavid Rientjes 		 */
19086f48d0ebSDavid Rientjes 		break;
19096f48d0ebSDavid Rientjes 	case MPOL_BIND:
19106f48d0ebSDavid Rientjes 	case MPOL_INTERLEAVE:
19116f48d0ebSDavid Rientjes 		ret = nodes_intersects(mempolicy->v.nodes, *mask);
19126f48d0ebSDavid Rientjes 		break;
19136f48d0ebSDavid Rientjes 	default:
19146f48d0ebSDavid Rientjes 		BUG();
19156f48d0ebSDavid Rientjes 	}
19166f48d0ebSDavid Rientjes out:
19176f48d0ebSDavid Rientjes 	task_unlock(tsk);
19186f48d0ebSDavid Rientjes 	return ret;
19196f48d0ebSDavid Rientjes }
19206f48d0ebSDavid Rientjes 
19211da177e4SLinus Torvalds /* Allocate a page under the interleave policy.
19221da177e4SLinus Torvalds    Own path because it needs to do special accounting. */
1923662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1924662f3a0bSAndi Kleen 					unsigned nid)
19251da177e4SLinus Torvalds {
19261da177e4SLinus Torvalds 	struct page *page;
19271da177e4SLinus Torvalds 
1928*04ec6264SVlastimil Babka 	page = __alloc_pages(gfp, order, nid);
1929*04ec6264SVlastimil Babka 	if (page && page_to_nid(page) == nid)
1930ca889e6cSChristoph Lameter 		inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
19311da177e4SLinus Torvalds 	return page;
19321da177e4SLinus Torvalds }
19331da177e4SLinus Torvalds 
19341da177e4SLinus Torvalds /**
19350bbbc0b3SAndrea Arcangeli  * 	alloc_pages_vma	- Allocate a page for a VMA.
19361da177e4SLinus Torvalds  *
19371da177e4SLinus Torvalds  * 	@gfp:
19381da177e4SLinus Torvalds  *      %GFP_USER    user allocation.
19391da177e4SLinus Torvalds  *      %GFP_KERNEL  kernel allocations,
19401da177e4SLinus Torvalds  *      %GFP_HIGHMEM highmem/user allocations,
19411da177e4SLinus Torvalds  *      %GFP_FS      allocation should not call back into a file system.
19421da177e4SLinus Torvalds  *      %GFP_ATOMIC  don't sleep.
19431da177e4SLinus Torvalds  *
19440bbbc0b3SAndrea Arcangeli  *	@order: Order of the GFP allocation.
19451da177e4SLinus Torvalds  * 	@vma:  Pointer to VMA or NULL if not available.
19461da177e4SLinus Torvalds  *	@addr: Virtual Address of the allocation. Must be inside the VMA.
1947be97a41bSVlastimil Babka  *	@node: Which node to prefer for allocation (modulo policy).
1948be97a41bSVlastimil Babka  *	@hugepage: for hugepages try only the preferred node if possible
19491da177e4SLinus Torvalds  *
19501da177e4SLinus Torvalds  * 	This function allocates a page from the kernel page pool and applies
19511da177e4SLinus Torvalds  *	a NUMA policy associated with the VMA or the current process.
19521da177e4SLinus Torvalds  *	When VMA is not NULL caller must hold down_read on the mmap_sem of the
19531da177e4SLinus Torvalds  *	mm_struct of the VMA to prevent it from going away. Should be used for
1954be97a41bSVlastimil Babka  *	all allocations for pages that will be mapped into user space. Returns
1955be97a41bSVlastimil Babka  *	NULL when no page can be allocated.
19561da177e4SLinus Torvalds  */
19571da177e4SLinus Torvalds struct page *
19580bbbc0b3SAndrea Arcangeli alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
1959be97a41bSVlastimil Babka 		unsigned long addr, int node, bool hugepage)
19601da177e4SLinus Torvalds {
1961cc9a6c87SMel Gorman 	struct mempolicy *pol;
1962c0ff7453SMiao Xie 	struct page *page;
1963*04ec6264SVlastimil Babka 	int preferred_nid;
1964cc9a6c87SMel Gorman 	unsigned int cpuset_mems_cookie;
1965be97a41bSVlastimil Babka 	nodemask_t *nmask;
19661da177e4SLinus Torvalds 
1967cc9a6c87SMel Gorman retry_cpuset:
1968dd6eecb9SOleg Nesterov 	pol = get_vma_policy(vma, addr);
1969d26914d1SMel Gorman 	cpuset_mems_cookie = read_mems_allowed_begin();
1970cc9a6c87SMel Gorman 
1971be97a41bSVlastimil Babka 	if (pol->mode == MPOL_INTERLEAVE) {
19721da177e4SLinus Torvalds 		unsigned nid;
19735da7ca86SChristoph Lameter 
19748eac563cSAndi Kleen 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
197552cd3b07SLee Schermerhorn 		mpol_cond_put(pol);
19760bbbc0b3SAndrea Arcangeli 		page = alloc_page_interleave(gfp, order, nid);
1977be97a41bSVlastimil Babka 		goto out;
19781da177e4SLinus Torvalds 	}
19791da177e4SLinus Torvalds 
19800867a57cSVlastimil Babka 	if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
19810867a57cSVlastimil Babka 		int hpage_node = node;
19820867a57cSVlastimil Babka 
19830867a57cSVlastimil Babka 		/*
19840867a57cSVlastimil Babka 		 * For hugepage allocation and non-interleave policy which
19850867a57cSVlastimil Babka 		 * allows the current node (or other explicitly preferred
19860867a57cSVlastimil Babka 		 * node) we only try to allocate from the current/preferred
19870867a57cSVlastimil Babka 		 * node and don't fall back to other nodes, as the cost of
19880867a57cSVlastimil Babka 		 * remote accesses would likely offset THP benefits.
19890867a57cSVlastimil Babka 		 *
19900867a57cSVlastimil Babka 		 * If the policy is interleave, or does not allow the current
19910867a57cSVlastimil Babka 		 * node in its nodemask, we allocate the standard way.
19920867a57cSVlastimil Babka 		 */
19930867a57cSVlastimil Babka 		if (pol->mode == MPOL_PREFERRED &&
19940867a57cSVlastimil Babka 						!(pol->flags & MPOL_F_LOCAL))
19950867a57cSVlastimil Babka 			hpage_node = pol->v.preferred_node;
19960867a57cSVlastimil Babka 
19970867a57cSVlastimil Babka 		nmask = policy_nodemask(gfp, pol);
19980867a57cSVlastimil Babka 		if (!nmask || node_isset(hpage_node, *nmask)) {
19990867a57cSVlastimil Babka 			mpol_cond_put(pol);
200096db800fSVlastimil Babka 			page = __alloc_pages_node(hpage_node,
20010867a57cSVlastimil Babka 						gfp | __GFP_THISNODE, order);
20020867a57cSVlastimil Babka 			goto out;
20030867a57cSVlastimil Babka 		}
20040867a57cSVlastimil Babka 	}
20050867a57cSVlastimil Babka 
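	/*
	 * Common path: pick the preferred node from the policy (falling
	 * back to the caller's node) and let the page allocator apply the
	 * MPOL_BIND nodemask, if any.
	 */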
2006077fcf11SAneesh Kumar K.V 	nmask = policy_nodemask(gfp, pol);
2007*04ec6264SVlastimil Babka 	preferred_nid = policy_node(gfp, pol, node);
2008*04ec6264SVlastimil Babka 	page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
2009d51e9894SVlastimil Babka 	mpol_cond_put(pol);
2010be97a41bSVlastimil Babka out:
2011be97a41bSVlastimil Babka 	if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
2012077fcf11SAneesh Kumar K.V 		goto retry_cpuset;
2013077fcf11SAneesh Kumar K.V 	return page;
2014077fcf11SAneesh Kumar K.V }
2015077fcf11SAneesh Kumar K.V 
20161da177e4SLinus Torvalds /**
20171da177e4SLinus Torvalds  * 	alloc_pages_current - Allocate pages.
20181da177e4SLinus Torvalds  *
20191da177e4SLinus Torvalds  *	@gfp:
20201da177e4SLinus Torvalds  *		%GFP_USER   user allocation,
20211da177e4SLinus Torvalds  *      	%GFP_KERNEL kernel allocation,
20221da177e4SLinus Torvalds  *      	%GFP_HIGHMEM highmem allocation,
20231da177e4SLinus Torvalds  *      	%GFP_FS     don't call back into a file system.
20241da177e4SLinus Torvalds  *      	%GFP_ATOMIC don't sleep.
20251da177e4SLinus Torvalds  *	@order: Power of two of allocation size in pages. 0 is a single page.
20261da177e4SLinus Torvalds  *
20271da177e4SLinus Torvalds  *	Allocate a page from the kernel page pool.  When not in
20281da177e4SLinus Torvalds  *	interrupt context, apply the current process' NUMA policy.
20291da177e4SLinus Torvalds  *	Returns NULL when no page can be allocated.
20301da177e4SLinus Torvalds  *
2031cf2a473cSPaul Jackson  *	Don't call cpuset_update_task_memory_state() unless
20321da177e4SLinus Torvalds  *	1) it's ok to take cpuset_sem (can WAIT), and
20331da177e4SLinus Torvalds  *	2) allocating for current task (not interrupt).
20341da177e4SLinus Torvalds  */
2035dd0fc66fSAl Viro struct page *alloc_pages_current(gfp_t gfp, unsigned order)
20361da177e4SLinus Torvalds {
20378d90274bSOleg Nesterov 	struct mempolicy *pol = &default_policy;
2038c0ff7453SMiao Xie 	struct page *page;
2039cc9a6c87SMel Gorman 	unsigned int cpuset_mems_cookie;
20401da177e4SLinus Torvalds 
20418d90274bSOleg Nesterov 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
20428d90274bSOleg Nesterov 		pol = get_task_policy(current);
204352cd3b07SLee Schermerhorn 
2044cc9a6c87SMel Gorman retry_cpuset:
2045d26914d1SMel Gorman 	cpuset_mems_cookie = read_mems_allowed_begin();
2046cc9a6c87SMel Gorman 
204752cd3b07SLee Schermerhorn 	/*
204852cd3b07SLee Schermerhorn 	 * No reference counting needed for current->mempolicy
204952cd3b07SLee Schermerhorn 	 * nor system default_policy
205052cd3b07SLee Schermerhorn 	 */
205145c4745aSLee Schermerhorn 	if (pol->mode == MPOL_INTERLEAVE)
2052c0ff7453SMiao Xie 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2053c0ff7453SMiao Xie 	else
2054c0ff7453SMiao Xie 		page = __alloc_pages_nodemask(gfp, order,
2055*04ec6264SVlastimil Babka 				policy_node(gfp, pol, numa_node_id()),
20565c4b4be3SAndi Kleen 				policy_nodemask(gfp, pol));
2057cc9a6c87SMel Gorman 
2058d26914d1SMel Gorman 	if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
2059cc9a6c87SMel Gorman 		goto retry_cpuset;
2060cc9a6c87SMel Gorman 
2061c0ff7453SMiao Xie 	return page;
20621da177e4SLinus Torvalds }
20631da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_pages_current);
20641da177e4SLinus Torvalds 
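/*
 * Give @dst its own reference-counted copy of @src's VMA policy via
 * mpol_dup().  Returns 0 on success or the -ENOMEM from the copy.
 */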
2065ef0855d3SOleg Nesterov int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2066ef0855d3SOleg Nesterov {
2067ef0855d3SOleg Nesterov 	struct mempolicy *pol = mpol_dup(vma_policy(src));
2068ef0855d3SOleg Nesterov 
2069ef0855d3SOleg Nesterov 	if (IS_ERR(pol))
2070ef0855d3SOleg Nesterov 		return PTR_ERR(pol);
2071ef0855d3SOleg Nesterov 	dst->vm_policy = pol;
2072ef0855d3SOleg Nesterov 	return 0;
2073ef0855d3SOleg Nesterov }
2074ef0855d3SOleg Nesterov 
20754225399aSPaul Jackson /*
2076846a16bfSLee Schermerhorn  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
20774225399aSPaul Jackson  * rebinds the mempolicy its copying by calling mpol_rebind_policy()
20784225399aSPaul Jackson  * with the mems_allowed returned by cpuset_mems_allowed().  This
20794225399aSPaul Jackson  * keeps mempolicies cpuset relative after its cpuset moves.  See
20804225399aSPaul Jackson  * further kernel/cpuset.c update_nodemask().
2081708c1bbcSMiao Xie  *
2082708c1bbcSMiao Xie  * current's mempolicy may be rebinded by the other task(the task that changes
2083708c1bbcSMiao Xie  * cpuset's mems), so we needn't do rebind work for current task.
20844225399aSPaul Jackson  */
20854225399aSPaul Jackson 
2086846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */
2087846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old)
20881da177e4SLinus Torvalds {
20891da177e4SLinus Torvalds 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
20901da177e4SLinus Torvalds 
20911da177e4SLinus Torvalds 	if (!new)
20921da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
2093708c1bbcSMiao Xie 
2094708c1bbcSMiao Xie 	/* task's mempolicy is protected by alloc_lock */
2095708c1bbcSMiao Xie 	if (old == current->mempolicy) {
2096708c1bbcSMiao Xie 		task_lock(current);
2097708c1bbcSMiao Xie 		*new = *old;
2098708c1bbcSMiao Xie 		task_unlock(current);
2099708c1bbcSMiao Xie 	} else
2100708c1bbcSMiao Xie 		*new = *old;
2101708c1bbcSMiao Xie 
21024225399aSPaul Jackson 	if (current_cpuset_is_being_rebound()) {
21034225399aSPaul Jackson 		nodemask_t mems = cpuset_mems_allowed(current);
2104708c1bbcSMiao Xie 		if (new->flags & MPOL_F_REBINDING)
2105708c1bbcSMiao Xie 			mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
2106708c1bbcSMiao Xie 		else
2107708c1bbcSMiao Xie 			mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
21084225399aSPaul Jackson 	}
21091da177e4SLinus Torvalds 	atomic_set(&new->refcnt, 1);
21101da177e4SLinus Torvalds 	return new;
21111da177e4SLinus Torvalds }
21121da177e4SLinus Torvalds 
21131da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */
2114fcfb4dccSKOSAKI Motohiro bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
21151da177e4SLinus Torvalds {
21161da177e4SLinus Torvalds 	if (!a || !b)
2117fcfb4dccSKOSAKI Motohiro 		return false;
211845c4745aSLee Schermerhorn 	if (a->mode != b->mode)
2119fcfb4dccSKOSAKI Motohiro 		return false;
212019800502SBob Liu 	if (a->flags != b->flags)
2121fcfb4dccSKOSAKI Motohiro 		return false;
212219800502SBob Liu 	if (mpol_store_user_nodemask(a))
212319800502SBob Liu 		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2124fcfb4dccSKOSAKI Motohiro 			return false;
212519800502SBob Liu 
212645c4745aSLee Schermerhorn 	switch (a->mode) {
212719770b32SMel Gorman 	case MPOL_BIND:
212819770b32SMel Gorman 		/* Fall through */
21291da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
2130fcfb4dccSKOSAKI Motohiro 		return !!nodes_equal(a->v.nodes, b->v.nodes);
21311da177e4SLinus Torvalds 	case MPOL_PREFERRED:
213275719661SNamhyung Kim 		return a->v.preferred_node == b->v.preferred_node;
21331da177e4SLinus Torvalds 	default:
21341da177e4SLinus Torvalds 		BUG();
2135fcfb4dccSKOSAKI Motohiro 		return false;
21361da177e4SLinus Torvalds 	}
21371da177e4SLinus Torvalds }
21381da177e4SLinus Torvalds 
21391da177e4SLinus Torvalds /*
21401da177e4SLinus Torvalds  * Shared memory backing store policy support.
21411da177e4SLinus Torvalds  *
21421da177e4SLinus Torvalds  * Remember policies even when nobody has shared memory mapped.
21431da177e4SLinus Torvalds  * The policies are kept in a Red-Black tree linked from the inode.
21444a8c7bb5SNathan Zimmer  * They are protected by the sp->lock rwlock, which should be held
21451da177e4SLinus Torvalds  * for any accesses to the tree.
21461da177e4SLinus Torvalds  */
21471da177e4SLinus Torvalds 
21484a8c7bb5SNathan Zimmer /*
21494a8c7bb5SNathan Zimmer  * Look up the first element intersecting [start, end).  Caller holds sp->lock
21504a8c7bb5SNathan Zimmer  * for reading or for writing.
21514a8c7bb5SNathan Zimmer  */
21521da177e4SLinus Torvalds static struct sp_node *
21531da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
21541da177e4SLinus Torvalds {
21551da177e4SLinus Torvalds 	struct rb_node *n = sp->root.rb_node;
21561da177e4SLinus Torvalds 
21571da177e4SLinus Torvalds 	while (n) {
21581da177e4SLinus Torvalds 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
21591da177e4SLinus Torvalds 
21601da177e4SLinus Torvalds 		if (start >= p->end)
21611da177e4SLinus Torvalds 			n = n->rb_right;
21621da177e4SLinus Torvalds 		else if (end <= p->start)
21631da177e4SLinus Torvalds 			n = n->rb_left;
21641da177e4SLinus Torvalds 		else
21651da177e4SLinus Torvalds 			break;
21661da177e4SLinus Torvalds 	}
21671da177e4SLinus Torvalds 	if (!n)
21681da177e4SLinus Torvalds 		return NULL;
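	/*
	 * Walk back towards the start of the tree to return the first
	 * (lowest-addressed) node that still intersects [start, end).
	 */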
21691da177e4SLinus Torvalds 	for (;;) {
21701da177e4SLinus Torvalds 		struct sp_node *w = NULL;
21711da177e4SLinus Torvalds 		struct rb_node *prev = rb_prev(n);
21721da177e4SLinus Torvalds 		if (!prev)
21731da177e4SLinus Torvalds 			break;
21741da177e4SLinus Torvalds 		w = rb_entry(prev, struct sp_node, nd);
21751da177e4SLinus Torvalds 		if (w->end <= start)
21761da177e4SLinus Torvalds 			break;
21771da177e4SLinus Torvalds 		n = prev;
21781da177e4SLinus Torvalds 	}
21791da177e4SLinus Torvalds 	return rb_entry(n, struct sp_node, nd);
21801da177e4SLinus Torvalds }
21811da177e4SLinus Torvalds 
21824a8c7bb5SNathan Zimmer /*
21834a8c7bb5SNathan Zimmer  * Insert a new shared policy into the tree.  Caller holds sp->lock for
21844a8c7bb5SNathan Zimmer  * writing.
21854a8c7bb5SNathan Zimmer  */
21861da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new)
21871da177e4SLinus Torvalds {
21881da177e4SLinus Torvalds 	struct rb_node **p = &sp->root.rb_node;
21891da177e4SLinus Torvalds 	struct rb_node *parent = NULL;
21901da177e4SLinus Torvalds 	struct sp_node *nd;
21911da177e4SLinus Torvalds 
21921da177e4SLinus Torvalds 	while (*p) {
21931da177e4SLinus Torvalds 		parent = *p;
21941da177e4SLinus Torvalds 		nd = rb_entry(parent, struct sp_node, nd);
21951da177e4SLinus Torvalds 		if (new->start < nd->start)
21961da177e4SLinus Torvalds 			p = &(*p)->rb_left;
21971da177e4SLinus Torvalds 		else if (new->end > nd->end)
21981da177e4SLinus Torvalds 			p = &(*p)->rb_right;
21991da177e4SLinus Torvalds 		else
22001da177e4SLinus Torvalds 			BUG();
22011da177e4SLinus Torvalds 	}
22021da177e4SLinus Torvalds 	rb_link_node(&new->nd, parent, p);
22031da177e4SLinus Torvalds 	rb_insert_color(&new->nd, &sp->root);
2204140d5a49SPaul Mundt 	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
220545c4745aSLee Schermerhorn 		 new->policy ? new->policy->mode : 0);
22061da177e4SLinus Torvalds }
22071da177e4SLinus Torvalds 
22081da177e4SLinus Torvalds /* Find shared policy intersecting idx */
22091da177e4SLinus Torvalds struct mempolicy *
22101da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
22111da177e4SLinus Torvalds {
22121da177e4SLinus Torvalds 	struct mempolicy *pol = NULL;
22131da177e4SLinus Torvalds 	struct sp_node *sn;
22141da177e4SLinus Torvalds 
22151da177e4SLinus Torvalds 	if (!sp->root.rb_node)
22161da177e4SLinus Torvalds 		return NULL;
22174a8c7bb5SNathan Zimmer 	read_lock(&sp->lock);
22181da177e4SLinus Torvalds 	sn = sp_lookup(sp, idx, idx+1);
22191da177e4SLinus Torvalds 	if (sn) {
22201da177e4SLinus Torvalds 		mpol_get(sn->policy);
22211da177e4SLinus Torvalds 		pol = sn->policy;
22221da177e4SLinus Torvalds 	}
22234a8c7bb5SNathan Zimmer 	read_unlock(&sp->lock);
22241da177e4SLinus Torvalds 	return pol;
22251da177e4SLinus Torvalds }
22261da177e4SLinus Torvalds 
222763f74ca2SKOSAKI Motohiro static void sp_free(struct sp_node *n)
222863f74ca2SKOSAKI Motohiro {
222963f74ca2SKOSAKI Motohiro 	mpol_put(n->policy);
223063f74ca2SKOSAKI Motohiro 	kmem_cache_free(sn_cache, n);
223163f74ca2SKOSAKI Motohiro }
223263f74ca2SKOSAKI Motohiro 
2233771fb4d8SLee Schermerhorn /**
2234771fb4d8SLee Schermerhorn  * mpol_misplaced - check whether current page node is valid in policy
2235771fb4d8SLee Schermerhorn  *
2236b46e14acSFabian Frederick  * @page: page to be checked
2237b46e14acSFabian Frederick  * @vma: vm area where page mapped
2238b46e14acSFabian Frederick  * @addr: virtual address where page mapped
2239771fb4d8SLee Schermerhorn  *
2240771fb4d8SLee Schermerhorn  * Look up the current policy node id for vma,addr and compare it to the page's
2241771fb4d8SLee Schermerhorn  * node id.
2242771fb4d8SLee Schermerhorn  *
2243771fb4d8SLee Schermerhorn  * Returns:
2244771fb4d8SLee Schermerhorn  *	-1	- not misplaced, page is in the right node
2245771fb4d8SLee Schermerhorn  *	node	- node id where the page should be
2246771fb4d8SLee Schermerhorn  *
2247771fb4d8SLee Schermerhorn  * Policy determination "mimics" alloc_page_vma().
2248771fb4d8SLee Schermerhorn  * Called from fault path where we know the vma and faulting address.
2249771fb4d8SLee Schermerhorn  */
2250771fb4d8SLee Schermerhorn int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2251771fb4d8SLee Schermerhorn {
2252771fb4d8SLee Schermerhorn 	struct mempolicy *pol;
2253c33d6c06SMel Gorman 	struct zoneref *z;
2254771fb4d8SLee Schermerhorn 	int curnid = page_to_nid(page);
2255771fb4d8SLee Schermerhorn 	unsigned long pgoff;
225690572890SPeter Zijlstra 	int thiscpu = raw_smp_processor_id();
225790572890SPeter Zijlstra 	int thisnid = cpu_to_node(thiscpu);
2258771fb4d8SLee Schermerhorn 	int polnid = -1;
2259771fb4d8SLee Schermerhorn 	int ret = -1;
2260771fb4d8SLee Schermerhorn 
2261771fb4d8SLee Schermerhorn 	BUG_ON(!vma);
2262771fb4d8SLee Schermerhorn 
2263dd6eecb9SOleg Nesterov 	pol = get_vma_policy(vma, addr);
2264771fb4d8SLee Schermerhorn 	if (!(pol->flags & MPOL_F_MOF))
2265771fb4d8SLee Schermerhorn 		goto out;
2266771fb4d8SLee Schermerhorn 
2267771fb4d8SLee Schermerhorn 	switch (pol->mode) {
2268771fb4d8SLee Schermerhorn 	case MPOL_INTERLEAVE:
2269771fb4d8SLee Schermerhorn 		BUG_ON(addr >= vma->vm_end);
2270771fb4d8SLee Schermerhorn 		BUG_ON(addr < vma->vm_start);
2271771fb4d8SLee Schermerhorn 
2272771fb4d8SLee Schermerhorn 		pgoff = vma->vm_pgoff;
2273771fb4d8SLee Schermerhorn 		pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2274771fb4d8SLee Schermerhorn 		polnid = offset_il_node(pol, vma, pgoff);
2275771fb4d8SLee Schermerhorn 		break;
2276771fb4d8SLee Schermerhorn 
2277771fb4d8SLee Schermerhorn 	case MPOL_PREFERRED:
2278771fb4d8SLee Schermerhorn 		if (pol->flags & MPOL_F_LOCAL)
2279771fb4d8SLee Schermerhorn 			polnid = numa_node_id();
2280771fb4d8SLee Schermerhorn 		else
2281771fb4d8SLee Schermerhorn 			polnid = pol->v.preferred_node;
2282771fb4d8SLee Schermerhorn 		break;
2283771fb4d8SLee Schermerhorn 
2284771fb4d8SLee Schermerhorn 	case MPOL_BIND:
2285c33d6c06SMel Gorman 
2286771fb4d8SLee Schermerhorn 		/*
2287771fb4d8SLee Schermerhorn 		 * MPOL_BIND allows binding to multiple nodes.
2288771fb4d8SLee Schermerhorn 		 * Use the current page's node if it is in the policy nodemask,
2289771fb4d8SLee Schermerhorn 		 * else select the nearest allowed node, if any.
2290771fb4d8SLee Schermerhorn 		 * If there are no allowed nodes, use the current node [!misplaced].
2291771fb4d8SLee Schermerhorn 		 */
2292771fb4d8SLee Schermerhorn 		if (node_isset(curnid, pol->v.nodes))
2293771fb4d8SLee Schermerhorn 			goto out;
2294c33d6c06SMel Gorman 		z = first_zones_zonelist(
2295771fb4d8SLee Schermerhorn 				node_zonelist(numa_node_id(), GFP_HIGHUSER),
2296771fb4d8SLee Schermerhorn 				gfp_zone(GFP_HIGHUSER),
2297c33d6c06SMel Gorman 				&pol->v.nodes);
2298c33d6c06SMel Gorman 		polnid = z->zone->node;
2299771fb4d8SLee Schermerhorn 		break;
2300771fb4d8SLee Schermerhorn 
2301771fb4d8SLee Schermerhorn 	default:
2302771fb4d8SLee Schermerhorn 		BUG();
2303771fb4d8SLee Schermerhorn 	}
23045606e387SMel Gorman 
23055606e387SMel Gorman 	/* Migrate the page towards the node whose CPU is referencing it */
2306e42c8ff2SMel Gorman 	if (pol->flags & MPOL_F_MORON) {
230790572890SPeter Zijlstra 		polnid = thisnid;
23085606e387SMel Gorman 
230910f39042SRik van Riel 		if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2310de1c9ce6SRik van Riel 			goto out;
2311de1c9ce6SRik van Riel 	}
2312e42c8ff2SMel Gorman 
2313771fb4d8SLee Schermerhorn 	if (curnid != polnid)
2314771fb4d8SLee Schermerhorn 		ret = polnid;
2315771fb4d8SLee Schermerhorn out:
2316771fb4d8SLee Schermerhorn 	mpol_cond_put(pol);
2317771fb4d8SLee Schermerhorn 
2318771fb4d8SLee Schermerhorn 	return ret;
2319771fb4d8SLee Schermerhorn }
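/*
 * Illustrative note: mpol_misplaced() is called from the NUMA hinting fault
 * path (see the kernel-doc above); a return value >= 0 names the node the
 * page should be migrated to, while -1 means the page is not misplaced.
 */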
2320771fb4d8SLee Schermerhorn 
2321c11600e4SDavid Rientjes /*
2322c11600e4SDavid Rientjes  * Drop the (possibly final) reference to task->mempolicy.  It needs to be
2323c11600e4SDavid Rientjes  * dropped after task->mempolicy is set to NULL so that any allocation done as
2324c11600e4SDavid Rientjes  * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2325c11600e4SDavid Rientjes  * policy.
2326c11600e4SDavid Rientjes  */
2327c11600e4SDavid Rientjes void mpol_put_task_policy(struct task_struct *task)
2328c11600e4SDavid Rientjes {
2329c11600e4SDavid Rientjes 	struct mempolicy *pol;
2330c11600e4SDavid Rientjes 
2331c11600e4SDavid Rientjes 	task_lock(task);
2332c11600e4SDavid Rientjes 	pol = task->mempolicy;
2333c11600e4SDavid Rientjes 	task->mempolicy = NULL;
2334c11600e4SDavid Rientjes 	task_unlock(task);
2335c11600e4SDavid Rientjes 	mpol_put(pol);
2336c11600e4SDavid Rientjes }
2337c11600e4SDavid Rientjes 
23381da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n)
23391da177e4SLinus Torvalds {
2340140d5a49SPaul Mundt 	pr_debug("deleting %lx-%lx\n", n->start, n->end);
23411da177e4SLinus Torvalds 	rb_erase(&n->nd, &sp->root);
234263f74ca2SKOSAKI Motohiro 	sp_free(n);
23431da177e4SLinus Torvalds }
23441da177e4SLinus Torvalds 
234542288fe3SMel Gorman static void sp_node_init(struct sp_node *node, unsigned long start,
234642288fe3SMel Gorman 			unsigned long end, struct mempolicy *pol)
234742288fe3SMel Gorman {
234842288fe3SMel Gorman 	node->start = start;
234942288fe3SMel Gorman 	node->end = end;
235042288fe3SMel Gorman 	node->policy = pol;
235142288fe3SMel Gorman }
235242288fe3SMel Gorman 
2353dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2354dbcb0f19SAdrian Bunk 				struct mempolicy *pol)
23551da177e4SLinus Torvalds {
2356869833f2SKOSAKI Motohiro 	struct sp_node *n;
2357869833f2SKOSAKI Motohiro 	struct mempolicy *newpol;
23581da177e4SLinus Torvalds 
2359869833f2SKOSAKI Motohiro 	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
23601da177e4SLinus Torvalds 	if (!n)
23611da177e4SLinus Torvalds 		return NULL;
2362869833f2SKOSAKI Motohiro 
2363869833f2SKOSAKI Motohiro 	newpol = mpol_dup(pol);
2364869833f2SKOSAKI Motohiro 	if (IS_ERR(newpol)) {
2365869833f2SKOSAKI Motohiro 		kmem_cache_free(sn_cache, n);
2366869833f2SKOSAKI Motohiro 		return NULL;
2367869833f2SKOSAKI Motohiro 	}
2368869833f2SKOSAKI Motohiro 	newpol->flags |= MPOL_F_SHARED;
236942288fe3SMel Gorman 	sp_node_init(n, start, end, newpol);
2370869833f2SKOSAKI Motohiro 
23711da177e4SLinus Torvalds 	return n;
23721da177e4SLinus Torvalds }
23731da177e4SLinus Torvalds 
23741da177e4SLinus Torvalds /* Replace a policy range. */
23751da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
23761da177e4SLinus Torvalds 				 unsigned long end, struct sp_node *new)
23771da177e4SLinus Torvalds {
2378b22d127aSMel Gorman 	struct sp_node *n;
237942288fe3SMel Gorman 	struct sp_node *n_new = NULL;
238042288fe3SMel Gorman 	struct mempolicy *mpol_new = NULL;
2381b22d127aSMel Gorman 	int ret = 0;
23821da177e4SLinus Torvalds 
238342288fe3SMel Gorman restart:
23844a8c7bb5SNathan Zimmer 	write_lock(&sp->lock);
23851da177e4SLinus Torvalds 	n = sp_lookup(sp, start, end);
23861da177e4SLinus Torvalds 	/* Take care of old policies in the same range. */
23871da177e4SLinus Torvalds 	while (n && n->start < end) {
23881da177e4SLinus Torvalds 		struct rb_node *next = rb_next(&n->nd);
23891da177e4SLinus Torvalds 		if (n->start >= start) {
23901da177e4SLinus Torvalds 			if (n->end <= end)
23911da177e4SLinus Torvalds 				sp_delete(sp, n);
23921da177e4SLinus Torvalds 			else
23931da177e4SLinus Torvalds 				n->start = end;
23941da177e4SLinus Torvalds 		} else {
23951da177e4SLinus Torvalds 			/* Old policy spanning whole new range. */
23961da177e4SLinus Torvalds 			if (n->end > end) {
239742288fe3SMel Gorman 				if (!n_new)
239842288fe3SMel Gorman 					goto alloc_new;
239942288fe3SMel Gorman 
240042288fe3SMel Gorman 				*mpol_new = *n->policy;
240142288fe3SMel Gorman 				atomic_set(&mpol_new->refcnt, 1);
24027880639cSKOSAKI Motohiro 				sp_node_init(n_new, end, n->end, mpol_new);
24031da177e4SLinus Torvalds 				n->end = start;
24045ca39575SHillf Danton 				sp_insert(sp, n_new);
240542288fe3SMel Gorman 				n_new = NULL;
240642288fe3SMel Gorman 				mpol_new = NULL;
24071da177e4SLinus Torvalds 				break;
24081da177e4SLinus Torvalds 			} else
24091da177e4SLinus Torvalds 				n->end = start;
24101da177e4SLinus Torvalds 		}
24111da177e4SLinus Torvalds 		if (!next)
24121da177e4SLinus Torvalds 			break;
24131da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
24141da177e4SLinus Torvalds 	}
24151da177e4SLinus Torvalds 	if (new)
24161da177e4SLinus Torvalds 		sp_insert(sp, new);
24174a8c7bb5SNathan Zimmer 	write_unlock(&sp->lock);
241842288fe3SMel Gorman 	ret = 0;
241942288fe3SMel Gorman 
242042288fe3SMel Gorman err_out:
242142288fe3SMel Gorman 	if (mpol_new)
242242288fe3SMel Gorman 		mpol_put(mpol_new);
242342288fe3SMel Gorman 	if (n_new)
242442288fe3SMel Gorman 		kmem_cache_free(sn_cache, n_new);
242542288fe3SMel Gorman 
2426b22d127aSMel Gorman 	return ret;
242742288fe3SMel Gorman 
242842288fe3SMel Gorman alloc_new:
24294a8c7bb5SNathan Zimmer 	write_unlock(&sp->lock);
243042288fe3SMel Gorman 	ret = -ENOMEM;
243142288fe3SMel Gorman 	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
243242288fe3SMel Gorman 	if (!n_new)
243342288fe3SMel Gorman 		goto err_out;
243442288fe3SMel Gorman 	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
243542288fe3SMel Gorman 	if (!mpol_new)
243642288fe3SMel Gorman 		goto err_out;
243742288fe3SMel Gorman 	goto restart;
24381da177e4SLinus Torvalds }
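/*
 * Worked example (illustrative): replacing [4, 6) when an existing node
 * covers [2, 10) trims the old node to [2, 4), inserts a pre-allocated copy
 * of its policy for [6, 10), and then inserts the new node for [4, 6).
 */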
24391da177e4SLinus Torvalds 
244071fe804bSLee Schermerhorn /**
244171fe804bSLee Schermerhorn  * mpol_shared_policy_init - initialize shared policy for inode
244271fe804bSLee Schermerhorn  * @sp: pointer to inode shared policy
244371fe804bSLee Schermerhorn  * @mpol:  struct mempolicy to install
244471fe804bSLee Schermerhorn  *
244571fe804bSLee Schermerhorn  * Install non-NULL @mpol in inode's shared policy rb-tree.
244671fe804bSLee Schermerhorn  * On entry, the current task has a reference on a non-NULL @mpol.
244771fe804bSLee Schermerhorn  * This must be released on exit.
24484bfc4495SKAMEZAWA Hiroyuki  * This is called from get_inode(), so GFP_KERNEL allocations can be used.
244971fe804bSLee Schermerhorn  */
245071fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
24517339ff83SRobin Holt {
245258568d2aSMiao Xie 	int ret;
245358568d2aSMiao Xie 
245471fe804bSLee Schermerhorn 	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
24554a8c7bb5SNathan Zimmer 	rwlock_init(&sp->lock);
24567339ff83SRobin Holt 
245771fe804bSLee Schermerhorn 	if (mpol) {
24587339ff83SRobin Holt 		struct vm_area_struct pvma;
245971fe804bSLee Schermerhorn 		struct mempolicy *new;
24604bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
24617339ff83SRobin Holt 
24624bfc4495SKAMEZAWA Hiroyuki 		if (!scratch)
24635c0c1654SLee Schermerhorn 			goto put_mpol;
246471fe804bSLee Schermerhorn 		/* contextualize the tmpfs mount point mempolicy */
246571fe804bSLee Schermerhorn 		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
246615d77835SLee Schermerhorn 		if (IS_ERR(new))
24670cae3457SDan Carpenter 			goto free_scratch; /* no valid nodemask intersection */
246858568d2aSMiao Xie 
246958568d2aSMiao Xie 		task_lock(current);
24704bfc4495SKAMEZAWA Hiroyuki 		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
247158568d2aSMiao Xie 		task_unlock(current);
247215d77835SLee Schermerhorn 		if (ret)
24735c0c1654SLee Schermerhorn 			goto put_new;
247471fe804bSLee Schermerhorn 
247571fe804bSLee Schermerhorn 		/* Create pseudo-vma that contains just the policy */
24767339ff83SRobin Holt 		memset(&pvma, 0, sizeof(struct vm_area_struct));
247771fe804bSLee Schermerhorn 		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
247871fe804bSLee Schermerhorn 		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
247915d77835SLee Schermerhorn 
24805c0c1654SLee Schermerhorn put_new:
248171fe804bSLee Schermerhorn 		mpol_put(new);			/* drop initial ref */
24820cae3457SDan Carpenter free_scratch:
24834bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
24845c0c1654SLee Schermerhorn put_mpol:
24855c0c1654SLee Schermerhorn 		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
24867339ff83SRobin Holt 	}
24877339ff83SRobin Holt }
24887339ff83SRobin Holt 
24891da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info,
24901da177e4SLinus Torvalds 			struct vm_area_struct *vma, struct mempolicy *npol)
24911da177e4SLinus Torvalds {
24921da177e4SLinus Torvalds 	int err;
24931da177e4SLinus Torvalds 	struct sp_node *new = NULL;
24941da177e4SLinus Torvalds 	unsigned long sz = vma_pages(vma);
24951da177e4SLinus Torvalds 
2496028fec41SDavid Rientjes 	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
24971da177e4SLinus Torvalds 		 vma->vm_pgoff,
249845c4745aSLee Schermerhorn 		 sz, npol ? npol->mode : -1,
2499028fec41SDavid Rientjes 		 npol ? npol->flags : -1,
250000ef2d2fSDavid Rientjes 		 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
25011da177e4SLinus Torvalds 
25021da177e4SLinus Torvalds 	if (npol) {
25031da177e4SLinus Torvalds 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
25041da177e4SLinus Torvalds 		if (!new)
25051da177e4SLinus Torvalds 			return -ENOMEM;
25061da177e4SLinus Torvalds 	}
25071da177e4SLinus Torvalds 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
25081da177e4SLinus Torvalds 	if (err && new)
250963f74ca2SKOSAKI Motohiro 		sp_free(new);
25101da177e4SLinus Torvalds 	return err;
25111da177e4SLinus Torvalds }
25121da177e4SLinus Torvalds 
25131da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */
25141da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p)
25151da177e4SLinus Torvalds {
25161da177e4SLinus Torvalds 	struct sp_node *n;
25171da177e4SLinus Torvalds 	struct rb_node *next;
25181da177e4SLinus Torvalds 
25191da177e4SLinus Torvalds 	if (!p->root.rb_node)
25201da177e4SLinus Torvalds 		return;
25214a8c7bb5SNathan Zimmer 	write_lock(&p->lock);
25221da177e4SLinus Torvalds 	next = rb_first(&p->root);
25231da177e4SLinus Torvalds 	while (next) {
25241da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
25251da177e4SLinus Torvalds 		next = rb_next(&n->nd);
252663f74ca2SKOSAKI Motohiro 		sp_delete(p, n);
25271da177e4SLinus Torvalds 	}
25284a8c7bb5SNathan Zimmer 	write_unlock(&p->lock);
25291da177e4SLinus Torvalds }
25301da177e4SLinus Torvalds 
25311a687c2eSMel Gorman #ifdef CONFIG_NUMA_BALANCING
2532c297663cSMel Gorman static int __initdata numabalancing_override;
25331a687c2eSMel Gorman 
25341a687c2eSMel Gorman static void __init check_numabalancing_enable(void)
25351a687c2eSMel Gorman {
25361a687c2eSMel Gorman 	bool numabalancing_default = false;
25371a687c2eSMel Gorman 
25381a687c2eSMel Gorman 	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
25391a687c2eSMel Gorman 		numabalancing_default = true;
25401a687c2eSMel Gorman 
2541c297663cSMel Gorman 	/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2542c297663cSMel Gorman 	if (numabalancing_override)
2543c297663cSMel Gorman 		set_numabalancing_state(numabalancing_override == 1);
2544c297663cSMel Gorman 
2545b0dc2b9bSMel Gorman 	if (num_online_nodes() > 1 && !numabalancing_override) {
2546756a025fSJoe Perches 		pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
2547c297663cSMel Gorman 			numabalancing_default ? "Enabling" : "Disabling");
25481a687c2eSMel Gorman 		set_numabalancing_state(numabalancing_default);
25491a687c2eSMel Gorman 	}
25501a687c2eSMel Gorman }
25511a687c2eSMel Gorman 
25521a687c2eSMel Gorman static int __init setup_numabalancing(char *str)
25531a687c2eSMel Gorman {
25541a687c2eSMel Gorman 	int ret = 0;
25551a687c2eSMel Gorman 	if (!str)
25561a687c2eSMel Gorman 		goto out;
25571a687c2eSMel Gorman 
25581a687c2eSMel Gorman 	if (!strcmp(str, "enable")) {
2559c297663cSMel Gorman 		numabalancing_override = 1;
25601a687c2eSMel Gorman 		ret = 1;
25611a687c2eSMel Gorman 	} else if (!strcmp(str, "disable")) {
2562c297663cSMel Gorman 		numabalancing_override = -1;
25631a687c2eSMel Gorman 		ret = 1;
25641a687c2eSMel Gorman 	}
25651a687c2eSMel Gorman out:
25661a687c2eSMel Gorman 	if (!ret)
25674a404beaSAndrew Morton 		pr_warn("Unable to parse numa_balancing=\n");
25681a687c2eSMel Gorman 
25691a687c2eSMel Gorman 	return ret;
25701a687c2eSMel Gorman }
25711a687c2eSMel Gorman __setup("numa_balancing=", setup_numabalancing);
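/*
 * Example (illustrative): booting with "numa_balancing=enable" or
 * "numa_balancing=disable" on the kernel command line overrides the
 * CONFIG_NUMA_BALANCING_DEFAULT_ENABLED default applied in
 * check_numabalancing_enable().
 */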
25721a687c2eSMel Gorman #else
25731a687c2eSMel Gorman static inline void __init check_numabalancing_enable(void)
25741a687c2eSMel Gorman {
25751a687c2eSMel Gorman }
25761a687c2eSMel Gorman #endif /* CONFIG_NUMA_BALANCING */
25771a687c2eSMel Gorman 
25781da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */
25791da177e4SLinus Torvalds void __init numa_policy_init(void)
25801da177e4SLinus Torvalds {
2581b71636e2SPaul Mundt 	nodemask_t interleave_nodes;
2582b71636e2SPaul Mundt 	unsigned long largest = 0;
2583b71636e2SPaul Mundt 	int nid, prefer = 0;
2584b71636e2SPaul Mundt 
25851da177e4SLinus Torvalds 	policy_cache = kmem_cache_create("numa_policy",
25861da177e4SLinus Torvalds 					 sizeof(struct mempolicy),
258720c2df83SPaul Mundt 					 0, SLAB_PANIC, NULL);
25881da177e4SLinus Torvalds 
25891da177e4SLinus Torvalds 	sn_cache = kmem_cache_create("shared_policy_node",
25901da177e4SLinus Torvalds 				     sizeof(struct sp_node),
259120c2df83SPaul Mundt 				     0, SLAB_PANIC, NULL);
25921da177e4SLinus Torvalds 
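	/*
	 * One MPOL_PREFERRED policy per node, flagged MPOL_F_MOF | MPOL_F_MORON;
	 * the MORON flag is what makes mpol_misplaced() above migrate a page
	 * towards the node whose CPU is referencing it.
	 */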
25935606e387SMel Gorman 	for_each_node(nid) {
25945606e387SMel Gorman 		preferred_node_policy[nid] = (struct mempolicy) {
25955606e387SMel Gorman 			.refcnt = ATOMIC_INIT(1),
25965606e387SMel Gorman 			.mode = MPOL_PREFERRED,
25975606e387SMel Gorman 			.flags = MPOL_F_MOF | MPOL_F_MORON,
25985606e387SMel Gorman 			.v = { .preferred_node = nid, },
25995606e387SMel Gorman 		};
26005606e387SMel Gorman 	}
26015606e387SMel Gorman 
2602b71636e2SPaul Mundt 	/*
2603b71636e2SPaul Mundt 	 * Set interleaving policy for system init. Interleaving is only
2604b71636e2SPaul Mundt 	 * enabled across suitably sized nodes (default is >= 16MB); if they
2605b71636e2SPaul Mundt 	 * are all smaller, fall back to the largest node.
2606b71636e2SPaul Mundt 	 */
2607b71636e2SPaul Mundt 	nodes_clear(interleave_nodes);
260801f13bd6SLai Jiangshan 	for_each_node_state(nid, N_MEMORY) {
2609b71636e2SPaul Mundt 		unsigned long total_pages = node_present_pages(nid);
26101da177e4SLinus Torvalds 
2611b71636e2SPaul Mundt 		/* Preserve the largest node */
2612b71636e2SPaul Mundt 		if (largest < total_pages) {
2613b71636e2SPaul Mundt 			largest = total_pages;
2614b71636e2SPaul Mundt 			prefer = nid;
2615b71636e2SPaul Mundt 		}
2616b71636e2SPaul Mundt 
2617b71636e2SPaul Mundt 		/* Interleave this node? */
2618b71636e2SPaul Mundt 		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2619b71636e2SPaul Mundt 			node_set(nid, interleave_nodes);
2620b71636e2SPaul Mundt 	}
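	/* With 4KB pages the 16MB threshold above corresponds to 4096 present pages. */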
2621b71636e2SPaul Mundt 
2622b71636e2SPaul Mundt 	/* All too small, use the largest */
2623b71636e2SPaul Mundt 	if (unlikely(nodes_empty(interleave_nodes)))
2624b71636e2SPaul Mundt 		node_set(prefer, interleave_nodes);
2625b71636e2SPaul Mundt 
2626028fec41SDavid Rientjes 	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2627b1de0d13SMitchel Humpherys 		pr_err("%s: interleaving failed\n", __func__);
26281a687c2eSMel Gorman 
26291a687c2eSMel Gorman 	check_numabalancing_enable();
26301da177e4SLinus Torvalds }
26311da177e4SLinus Torvalds 
26328bccd85fSChristoph Lameter /* Reset policy of current process to default */
26331da177e4SLinus Torvalds void numa_default_policy(void)
26341da177e4SLinus Torvalds {
2635028fec41SDavid Rientjes 	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
26361da177e4SLinus Torvalds }
263768860ec1SPaul Jackson 
26384225399aSPaul Jackson /*
2639095f1fc4SLee Schermerhorn  * Parse and format mempolicy from/to strings
2640095f1fc4SLee Schermerhorn  */
2641095f1fc4SLee Schermerhorn 
2642095f1fc4SLee Schermerhorn /*
2643f2a07f40SHugh Dickins  * "local" is implemented internally by MPOL_PREFERRED with the MPOL_F_LOCAL flag.
26441a75a6c8SChristoph Lameter  */
2645345ace9cSLee Schermerhorn static const char * const policy_modes[] =
2646345ace9cSLee Schermerhorn {
2647345ace9cSLee Schermerhorn 	[MPOL_DEFAULT]    = "default",
2648345ace9cSLee Schermerhorn 	[MPOL_PREFERRED]  = "prefer",
2649345ace9cSLee Schermerhorn 	[MPOL_BIND]       = "bind",
2650345ace9cSLee Schermerhorn 	[MPOL_INTERLEAVE] = "interleave",
2651d3a71033SLee Schermerhorn 	[MPOL_LOCAL]      = "local",
2652345ace9cSLee Schermerhorn };
26531a75a6c8SChristoph Lameter 
2654095f1fc4SLee Schermerhorn 
2655095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS
2656095f1fc4SLee Schermerhorn /**
2657f2a07f40SHugh Dickins  * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2658095f1fc4SLee Schermerhorn  * @str:  string containing mempolicy to parse
265971fe804bSLee Schermerhorn  * @mpol:  pointer to struct mempolicy pointer, returned on success.
2660095f1fc4SLee Schermerhorn  *
2661095f1fc4SLee Schermerhorn  * Format of input:
2662095f1fc4SLee Schermerhorn  *	<mode>[=<flags>][:<nodelist>]
2663095f1fc4SLee Schermerhorn  *
266471fe804bSLee Schermerhorn  * On success, returns 0, else 1
2665095f1fc4SLee Schermerhorn  */
2666a7a88b23SHugh Dickins int mpol_parse_str(char *str, struct mempolicy **mpol)
2667095f1fc4SLee Schermerhorn {
266871fe804bSLee Schermerhorn 	struct mempolicy *new = NULL;
2669b4652e84SLee Schermerhorn 	unsigned short mode;
2670f2a07f40SHugh Dickins 	unsigned short mode_flags;
267171fe804bSLee Schermerhorn 	nodemask_t nodes;
2672095f1fc4SLee Schermerhorn 	char *nodelist = strchr(str, ':');
2673095f1fc4SLee Schermerhorn 	char *flags = strchr(str, '=');
2674095f1fc4SLee Schermerhorn 	int err = 1;
2675095f1fc4SLee Schermerhorn 
2676095f1fc4SLee Schermerhorn 	if (nodelist) {
2677095f1fc4SLee Schermerhorn 		/* NUL-terminate mode or flags string */
2678095f1fc4SLee Schermerhorn 		*nodelist++ = '\0';
267971fe804bSLee Schermerhorn 		if (nodelist_parse(nodelist, nodes))
2680095f1fc4SLee Schermerhorn 			goto out;
268101f13bd6SLai Jiangshan 		if (!nodes_subset(nodes, node_states[N_MEMORY]))
2682095f1fc4SLee Schermerhorn 			goto out;
268371fe804bSLee Schermerhorn 	} else
268471fe804bSLee Schermerhorn 		nodes_clear(nodes);
268571fe804bSLee Schermerhorn 
2686095f1fc4SLee Schermerhorn 	if (flags)
2687095f1fc4SLee Schermerhorn 		*flags++ = '\0';	/* terminate mode string */
2688095f1fc4SLee Schermerhorn 
2689479e2802SPeter Zijlstra 	for (mode = 0; mode < MPOL_MAX; mode++) {
2690345ace9cSLee Schermerhorn 		if (!strcmp(str, policy_modes[mode])) {
2691095f1fc4SLee Schermerhorn 			break;
2692095f1fc4SLee Schermerhorn 		}
2693095f1fc4SLee Schermerhorn 	}
2694a720094dSMel Gorman 	if (mode >= MPOL_MAX)
2695095f1fc4SLee Schermerhorn 		goto out;
2696095f1fc4SLee Schermerhorn 
269771fe804bSLee Schermerhorn 	switch (mode) {
2698095f1fc4SLee Schermerhorn 	case MPOL_PREFERRED:
269971fe804bSLee Schermerhorn 		/*
270071fe804bSLee Schermerhorn 		 * Insist on a nodelist of one node only
270171fe804bSLee Schermerhorn 		 */
2702095f1fc4SLee Schermerhorn 		if (nodelist) {
2703095f1fc4SLee Schermerhorn 			char *rest = nodelist;
2704095f1fc4SLee Schermerhorn 			while (isdigit(*rest))
2705095f1fc4SLee Schermerhorn 				rest++;
2706926f2ae0SKOSAKI Motohiro 			if (*rest)
2707926f2ae0SKOSAKI Motohiro 				goto out;
2708095f1fc4SLee Schermerhorn 		}
2709095f1fc4SLee Schermerhorn 		break;
2710095f1fc4SLee Schermerhorn 	case MPOL_INTERLEAVE:
2711095f1fc4SLee Schermerhorn 		/*
2712095f1fc4SLee Schermerhorn 		 * Default to online nodes with memory if no nodelist
2713095f1fc4SLee Schermerhorn 		 */
2714095f1fc4SLee Schermerhorn 		if (!nodelist)
271501f13bd6SLai Jiangshan 			nodes = node_states[N_MEMORY];
27163f226aa1SLee Schermerhorn 		break;
271771fe804bSLee Schermerhorn 	case MPOL_LOCAL:
27183f226aa1SLee Schermerhorn 		/*
271971fe804bSLee Schermerhorn 		 * Don't allow a nodelist;  mpol_new() checks flags
27203f226aa1SLee Schermerhorn 		 */
272171fe804bSLee Schermerhorn 		if (nodelist)
27223f226aa1SLee Schermerhorn 			goto out;
272371fe804bSLee Schermerhorn 		mode = MPOL_PREFERRED;
27243f226aa1SLee Schermerhorn 		break;
2725413b43deSRavikiran G Thirumalai 	case MPOL_DEFAULT:
2726413b43deSRavikiran G Thirumalai 		/*
2727413b43deSRavikiran G Thirumalai 		 * Insist on an empty nodelist
2728413b43deSRavikiran G Thirumalai 		 */
2729413b43deSRavikiran G Thirumalai 		if (!nodelist)
2730413b43deSRavikiran G Thirumalai 			err = 0;
2731413b43deSRavikiran G Thirumalai 		goto out;
2732d69b2e63SKOSAKI Motohiro 	case MPOL_BIND:
273371fe804bSLee Schermerhorn 		/*
2734d69b2e63SKOSAKI Motohiro 		 * Insist on a nodelist
273571fe804bSLee Schermerhorn 		 */
2736d69b2e63SKOSAKI Motohiro 		if (!nodelist)
2737d69b2e63SKOSAKI Motohiro 			goto out;
2738095f1fc4SLee Schermerhorn 	}
2739095f1fc4SLee Schermerhorn 
274071fe804bSLee Schermerhorn 	mode_flags = 0;
2741095f1fc4SLee Schermerhorn 	if (flags) {
2742095f1fc4SLee Schermerhorn 		/*
2743095f1fc4SLee Schermerhorn 		 * Currently, we only support two mutually exclusive
2744095f1fc4SLee Schermerhorn 		 * mode flags.
2745095f1fc4SLee Schermerhorn 		 */
2746095f1fc4SLee Schermerhorn 		if (!strcmp(flags, "static"))
274771fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_STATIC_NODES;
2748095f1fc4SLee Schermerhorn 		else if (!strcmp(flags, "relative"))
274971fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_RELATIVE_NODES;
2750095f1fc4SLee Schermerhorn 		else
2751926f2ae0SKOSAKI Motohiro 			goto out;
2752095f1fc4SLee Schermerhorn 	}
275371fe804bSLee Schermerhorn 
275471fe804bSLee Schermerhorn 	new = mpol_new(mode, mode_flags, &nodes);
275571fe804bSLee Schermerhorn 	if (IS_ERR(new))
2756926f2ae0SKOSAKI Motohiro 		goto out;
2757926f2ae0SKOSAKI Motohiro 
2758f2a07f40SHugh Dickins 	/*
2759f2a07f40SHugh Dickins 	 * Save nodes for mpol_to_str() to show the tmpfs mount options
2760f2a07f40SHugh Dickins 	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2761f2a07f40SHugh Dickins 	 */
2762f2a07f40SHugh Dickins 	if (mode != MPOL_PREFERRED)
2763f2a07f40SHugh Dickins 		new->v.nodes = nodes;
2764f2a07f40SHugh Dickins 	else if (nodelist)
2765f2a07f40SHugh Dickins 		new->v.preferred_node = first_node(nodes);
2766f2a07f40SHugh Dickins 	else
2767f2a07f40SHugh Dickins 		new->flags |= MPOL_F_LOCAL;
2768f2a07f40SHugh Dickins 
2769f2a07f40SHugh Dickins 	/*
2770f2a07f40SHugh Dickins 	 * Save nodes for contextualization: this will be used to "clone"
2771f2a07f40SHugh Dickins 	 * the mempolicy in a specific context [cpuset] at a later time.
2772f2a07f40SHugh Dickins 	 */
2773e17f74afSLee Schermerhorn 	new->w.user_nodemask = nodes;
2774f2a07f40SHugh Dickins 
2775926f2ae0SKOSAKI Motohiro 	err = 0;
277671fe804bSLee Schermerhorn 
2777095f1fc4SLee Schermerhorn out:
2778095f1fc4SLee Schermerhorn 	/* Restore string for error message */
2779095f1fc4SLee Schermerhorn 	if (nodelist)
2780095f1fc4SLee Schermerhorn 		*--nodelist = ':';
2781095f1fc4SLee Schermerhorn 	if (flags)
2782095f1fc4SLee Schermerhorn 		*--flags = '=';
278371fe804bSLee Schermerhorn 	if (!err)
278471fe804bSLee Schermerhorn 		*mpol = new;
2785095f1fc4SLee Schermerhorn 	return err;
2786095f1fc4SLee Schermerhorn }
2787095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */
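/*
 * Example strings accepted by mpol_parse_str() above (illustrative):
 *
 *	"interleave:0-3"	MPOL_INTERLEAVE across nodes 0-3
 *	"bind=static:0,2"	MPOL_BIND with MPOL_F_STATIC_NODES on nodes 0 and 2
 *	"prefer:1"		MPOL_PREFERRED with preferred node 1
 *	"local"			MPOL_PREFERRED with MPOL_F_LOCAL
 *	"default"		MPOL_DEFAULT; a nodelist is not allowed
 */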
2788095f1fc4SLee Schermerhorn 
278971fe804bSLee Schermerhorn /**
279071fe804bSLee Schermerhorn  * mpol_to_str - format a mempolicy structure for printing
279171fe804bSLee Schermerhorn  * @buffer:  to contain formatted mempolicy string
279271fe804bSLee Schermerhorn  * @maxlen:  length of @buffer
279371fe804bSLee Schermerhorn  * @pol:  pointer to mempolicy to be formatted
279471fe804bSLee Schermerhorn  *
2795948927eeSDavid Rientjes  * Convert @pol into a string.  If @buffer is too short, truncate the string.
2796948927eeSDavid Rientjes  * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
2797948927eeSDavid Rientjes  * longest flag, "relative", and to display at least a few node ids.
27981a75a6c8SChristoph Lameter  */
2799948927eeSDavid Rientjes void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
28001a75a6c8SChristoph Lameter {
28011a75a6c8SChristoph Lameter 	char *p = buffer;
2802948927eeSDavid Rientjes 	nodemask_t nodes = NODE_MASK_NONE;
2803948927eeSDavid Rientjes 	unsigned short mode = MPOL_DEFAULT;
2804948927eeSDavid Rientjes 	unsigned short flags = 0;
28051a75a6c8SChristoph Lameter 
28068790c71aSDavid Rientjes 	if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
2807bea904d5SLee Schermerhorn 		mode = pol->mode;
2808948927eeSDavid Rientjes 		flags = pol->flags;
2809948927eeSDavid Rientjes 	}
2810bea904d5SLee Schermerhorn 
28111a75a6c8SChristoph Lameter 	switch (mode) {
28121a75a6c8SChristoph Lameter 	case MPOL_DEFAULT:
28131a75a6c8SChristoph Lameter 		break;
28141a75a6c8SChristoph Lameter 	case MPOL_PREFERRED:
2815fc36b8d3SLee Schermerhorn 		if (flags & MPOL_F_LOCAL)
2816f2a07f40SHugh Dickins 			mode = MPOL_LOCAL;
281753f2556bSLee Schermerhorn 		else
2818fc36b8d3SLee Schermerhorn 			node_set(pol->v.preferred_node, nodes);
28191a75a6c8SChristoph Lameter 		break;
28201a75a6c8SChristoph Lameter 	case MPOL_BIND:
28211a75a6c8SChristoph Lameter 	case MPOL_INTERLEAVE:
28221a75a6c8SChristoph Lameter 		nodes = pol->v.nodes;
28231a75a6c8SChristoph Lameter 		break;
28241a75a6c8SChristoph Lameter 	default:
2825948927eeSDavid Rientjes 		WARN_ON_ONCE(1);
2826948927eeSDavid Rientjes 		snprintf(p, maxlen, "unknown");
2827948927eeSDavid Rientjes 		return;
28281a75a6c8SChristoph Lameter 	}
28291a75a6c8SChristoph Lameter 
2830b7a9f420SDavid Rientjes 	p += snprintf(p, maxlen, "%s", policy_modes[mode]);
28311a75a6c8SChristoph Lameter 
2832fc36b8d3SLee Schermerhorn 	if (flags & MPOL_MODE_FLAGS) {
2833948927eeSDavid Rientjes 		p += snprintf(p, buffer + maxlen - p, "=");
2834f5b087b5SDavid Rientjes 
28352291990aSLee Schermerhorn 		/*
28362291990aSLee Schermerhorn 		 * Currently, the only defined flags are mutually exclusive
28372291990aSLee Schermerhorn 		 */
2838f5b087b5SDavid Rientjes 		if (flags & MPOL_F_STATIC_NODES)
28392291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "static");
28402291990aSLee Schermerhorn 		else if (flags & MPOL_F_RELATIVE_NODES)
28412291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "relative");
2842f5b087b5SDavid Rientjes 	}
2843f5b087b5SDavid Rientjes 
28449e763e0fSTejun Heo 	if (!nodes_empty(nodes))
28459e763e0fSTejun Heo 		p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
28469e763e0fSTejun Heo 			       nodemask_pr_args(&nodes));
28471a75a6c8SChristoph Lameter }
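/*
 * Example outputs (illustrative): "default", "local", "prefer:1",
 * "interleave:0-3", "bind=static:0,2".
 */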
2848