xref: /openbmc/linux/mm/mempolicy.c (revision 2c0346a36cc8ac6cb85ab585964590974c84bdf0)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  * Simple NUMA memory policy for the Linux kernel.
31da177e4SLinus Torvalds  *
41da177e4SLinus Torvalds  * Copyright 2003,2004 Andi Kleen, SuSE Labs.
58bccd85fSChristoph Lameter  * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
61da177e4SLinus Torvalds  * Subject to the GNU Public License, version 2.
71da177e4SLinus Torvalds  *
81da177e4SLinus Torvalds  * NUMA policy allows the user to give hints in which node(s) memory should
91da177e4SLinus Torvalds  * be allocated.
101da177e4SLinus Torvalds  *
111da177e4SLinus Torvalds  * Support four policies per VMA and per process:
121da177e4SLinus Torvalds  *
131da177e4SLinus Torvalds  * The VMA policy has priority over the process policy for a page fault.
141da177e4SLinus Torvalds  *
151da177e4SLinus Torvalds  * interleave     Allocate memory interleaved over a set of nodes,
161da177e4SLinus Torvalds  *                with normal fallback if it fails.
171da177e4SLinus Torvalds  *                For VMA based allocations this interleaves based on the
181da177e4SLinus Torvalds  *                offset into the backing object or offset into the mapping
191da177e4SLinus Torvalds  *                for anonymous memory. For process policy a process counter
201da177e4SLinus Torvalds  *                is used.
218bccd85fSChristoph Lameter  *
221da177e4SLinus Torvalds  * bind           Only allocate memory on a specific set of nodes,
231da177e4SLinus Torvalds  *                no fallback.
248bccd85fSChristoph Lameter  *                FIXME: memory is allocated starting with the first node
258bccd85fSChristoph Lameter  *                and proceeding to the last. It would be better if bind truly
268bccd85fSChristoph Lameter  *                restricted the allocation to the specified memory nodes.
278bccd85fSChristoph Lameter  *
281da177e4SLinus Torvalds  * preferred       Try a specific node first before normal fallback.
2900ef2d2fSDavid Rientjes  *                As a special case NUMA_NO_NODE here means do the allocation
301da177e4SLinus Torvalds  *                on the local CPU. This is normally identical to default,
311da177e4SLinus Torvalds  *                but useful to set in a VMA when you have a non-default
321da177e4SLinus Torvalds  *                process policy.
338bccd85fSChristoph Lameter  *
341da177e4SLinus Torvalds  * default        Allocate on the local node first, or when on a VMA
351da177e4SLinus Torvalds  *                use the process policy. This is what Linux always did
361da177e4SLinus Torvalds  *		  in a NUMA aware kernel and still does by, ahem, default.
371da177e4SLinus Torvalds  *
381da177e4SLinus Torvalds  * The process policy is applied for most non-interrupt memory allocations
391da177e4SLinus Torvalds  * in that process' context. Interrupts ignore the policies and always
401da177e4SLinus Torvalds  * try to allocate on the local CPU. The VMA policy is only applied for memory
411da177e4SLinus Torvalds  * allocations for a VMA in the VM.
421da177e4SLinus Torvalds  *
431da177e4SLinus Torvalds  * Currently there are a few corner cases in swapping where the policy
441da177e4SLinus Torvalds  * is not applied, but the majority should be handled. When process policy
451da177e4SLinus Torvalds  * is used it is not remembered over swap outs/swap ins.
461da177e4SLinus Torvalds  *
471da177e4SLinus Torvalds  * Only the highest zone in the zone hierarchy gets policied. Allocations
481da177e4SLinus Torvalds  * requesting a lower zone just use default policy. This implies that
491da177e4SLinus Torvalds  * on systems with highmem, kernel lowmem allocations don't get policied.
501da177e4SLinus Torvalds  * Same with GFP_DMA allocations.
511da177e4SLinus Torvalds  *
521da177e4SLinus Torvalds  * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
531da177e4SLinus Torvalds  * all users and remembered even when nobody has memory mapped.
541da177e4SLinus Torvalds  */
551da177e4SLinus Torvalds 
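/*
 * For illustration, a minimal user-space sketch of how the policies above
 * are typically installed via the mbind(2) and set_mempolicy(2) system
 * calls.  Node numbers are hypothetical, error handling is omitted, and
 * the declarations come from numactl's libnuma (build with -lnuma):
 *
 *	#include <numaif.h>
 *	#include <sys/mman.h>
 *
 *	int main(void)
 *	{
 *		unsigned long nodes = (1UL << 0) | (1UL << 1);
 *		size_t len = 1 << 20;
 *		void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *		// VMA policy: interleave this mapping across nodes 0 and 1
 *		mbind(buf, len, MPOL_INTERLEAVE, &nodes, 8 * sizeof(nodes), 0);
 *
 *		// Process policy: prefer node 0 for other allocations.  The
 *		// VMA policy set above still has priority for faults in buf.
 *		nodes = 1UL << 0;
 *		set_mempolicy(MPOL_PREFERRED, &nodes, 8 * sizeof(nodes));
 *		return 0;
 *	}
 */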
561da177e4SLinus Torvalds /* Notebook:
571da177e4SLinus Torvalds    fix mmap readahead to honour policy and enable policy for any page cache
581da177e4SLinus Torvalds    object
591da177e4SLinus Torvalds    statistics for bigpages
601da177e4SLinus Torvalds    global policy for page cache? currently it uses process policy. Requires
611da177e4SLinus Torvalds    first item above.
621da177e4SLinus Torvalds    handle mremap for shared memory (currently ignored for the policy)
631da177e4SLinus Torvalds    grows down?
641da177e4SLinus Torvalds    make bind policy root only? It can trigger oom much faster and the
651da177e4SLinus Torvalds    kernel does not always handle that gracefully.
661da177e4SLinus Torvalds */
671da177e4SLinus Torvalds 
68b1de0d13SMitchel Humpherys #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
69b1de0d13SMitchel Humpherys 
701da177e4SLinus Torvalds #include <linux/mempolicy.h>
711da177e4SLinus Torvalds #include <linux/mm.h>
721da177e4SLinus Torvalds #include <linux/highmem.h>
731da177e4SLinus Torvalds #include <linux/hugetlb.h>
741da177e4SLinus Torvalds #include <linux/kernel.h>
751da177e4SLinus Torvalds #include <linux/sched.h>
761da177e4SLinus Torvalds #include <linux/nodemask.h>
771da177e4SLinus Torvalds #include <linux/cpuset.h>
781da177e4SLinus Torvalds #include <linux/slab.h>
791da177e4SLinus Torvalds #include <linux/string.h>
80b95f1b31SPaul Gortmaker #include <linux/export.h>
81b488893aSPavel Emelyanov #include <linux/nsproxy.h>
821da177e4SLinus Torvalds #include <linux/interrupt.h>
831da177e4SLinus Torvalds #include <linux/init.h>
841da177e4SLinus Torvalds #include <linux/compat.h>
85dc9aa5b9SChristoph Lameter #include <linux/swap.h>
861a75a6c8SChristoph Lameter #include <linux/seq_file.h>
871a75a6c8SChristoph Lameter #include <linux/proc_fs.h>
88b20a3503SChristoph Lameter #include <linux/migrate.h>
8962b61f61SHugh Dickins #include <linux/ksm.h>
9095a402c3SChristoph Lameter #include <linux/rmap.h>
9186c3a764SDavid Quigley #include <linux/security.h>
92dbcb0f19SAdrian Bunk #include <linux/syscalls.h>
93095f1fc4SLee Schermerhorn #include <linux/ctype.h>
946d9c285aSKOSAKI Motohiro #include <linux/mm_inline.h>
95b24f53a0SLee Schermerhorn #include <linux/mmu_notifier.h>
96b1de0d13SMitchel Humpherys #include <linux/printk.h>
97dc9aa5b9SChristoph Lameter 
981da177e4SLinus Torvalds #include <asm/tlbflush.h>
991da177e4SLinus Torvalds #include <asm/uaccess.h>
100778d3b0fSMichal Hocko #include <linux/random.h>
1011da177e4SLinus Torvalds 
10262695a84SNick Piggin #include "internal.h"
10362695a84SNick Piggin 
10438e35860SChristoph Lameter /* Internal flags */
105dc9aa5b9SChristoph Lameter #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for contiguous VMAs */
10638e35860SChristoph Lameter #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
107dc9aa5b9SChristoph Lameter 
108fcc234f8SPekka Enberg static struct kmem_cache *policy_cache;
109fcc234f8SPekka Enberg static struct kmem_cache *sn_cache;
1101da177e4SLinus Torvalds 
1111da177e4SLinus Torvalds /* Highest zone. A specific allocation for a zone below that is not
1121da177e4SLinus Torvalds    policied. */
1136267276fSChristoph Lameter enum zone_type policy_zone = 0;
1141da177e4SLinus Torvalds 
115bea904d5SLee Schermerhorn /*
116bea904d5SLee Schermerhorn  * run-time system-wide default policy => local allocation
117bea904d5SLee Schermerhorn  */
118e754d79dSH Hartley Sweeten static struct mempolicy default_policy = {
1191da177e4SLinus Torvalds 	.refcnt = ATOMIC_INIT(1), /* never free it */
120bea904d5SLee Schermerhorn 	.mode = MPOL_PREFERRED,
121fc36b8d3SLee Schermerhorn 	.flags = MPOL_F_LOCAL,
1221da177e4SLinus Torvalds };
1231da177e4SLinus Torvalds 
1245606e387SMel Gorman static struct mempolicy preferred_node_policy[MAX_NUMNODES];
1255606e387SMel Gorman 
12674d2c3a0SOleg Nesterov struct mempolicy *get_task_policy(struct task_struct *p)
1275606e387SMel Gorman {
1285606e387SMel Gorman 	struct mempolicy *pol = p->mempolicy;
129f15ca78eSOleg Nesterov 	int node;
1305606e387SMel Gorman 
131f15ca78eSOleg Nesterov 	if (pol)
132f15ca78eSOleg Nesterov 		return pol;
1335606e387SMel Gorman 
134f15ca78eSOleg Nesterov 	node = numa_node_id();
1351da6f0e1SJianguo Wu 	if (node != NUMA_NO_NODE) {
1361da6f0e1SJianguo Wu 		pol = &preferred_node_policy[node];
137f15ca78eSOleg Nesterov 		/* preferred_node_policy is not initialised early in boot */
138f15ca78eSOleg Nesterov 		if (pol->mode)
139f15ca78eSOleg Nesterov 			return pol;
1401da6f0e1SJianguo Wu 	}
1415606e387SMel Gorman 
142f15ca78eSOleg Nesterov 	return &default_policy;
1435606e387SMel Gorman }
1445606e387SMel Gorman 
14537012946SDavid Rientjes static const struct mempolicy_operations {
14637012946SDavid Rientjes 	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
147708c1bbcSMiao Xie 	/*
148708c1bbcSMiao Xie 	 * If the read-side task has no lock to protect task->mempolicy, the
149708c1bbcSMiao Xie 	 * write-side task will rebind task->mempolicy in two steps. The first
150708c1bbcSMiao Xie 	 * step sets all the newly allowed nodes, and the second step clears
151708c1bbcSMiao Xie 	 * all the disallowed nodes. This avoids a window in which no node is
152708c1bbcSMiao Xie 	 * allowed for allocation.
153708c1bbcSMiao Xie 	 * If we have a lock to protect task->mempolicy on the read side, we
154708c1bbcSMiao Xie 	 * rebind directly.
155708c1bbcSMiao Xie 	 *
156708c1bbcSMiao Xie 	 * step:
157708c1bbcSMiao Xie 	 * 	MPOL_REBIND_ONCE  - do the rebind work at once
158708c1bbcSMiao Xie 	 * 	MPOL_REBIND_STEP1 - set all the newly allowed nodes
159708c1bbcSMiao Xie 	 * 	MPOL_REBIND_STEP2 - clear all the disallowed nodes
160708c1bbcSMiao Xie 	 */
161708c1bbcSMiao Xie 	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
162708c1bbcSMiao Xie 			enum mpol_rebind_step step);
16337012946SDavid Rientjes } mpol_ops[MPOL_MAX];
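/*
 * A worked example of the two-step rebind for the common case handled by
 * mpol_rebind_nodemask() below (neither MPOL_F_STATIC_NODES nor
 * MPOL_F_RELATIVE_NODES set), using hypothetical node numbers.  Suppose a
 * policy currently uses nodes {0,1} and the cpuset's mems_allowed changes
 * from {0,1} to {2,3}:
 *
 *	MPOL_REBIND_ONCE:  {0,1} is remapped directly to {2,3}.
 *	MPOL_REBIND_STEP1: the remapped nodes are OR-ed into the policy,
 *			   giving {0,1,2,3}, so an unlocked reader always
 *			   sees at least one allowed node.
 *	MPOL_REBIND_STEP2: the now-disallowed nodes are cleared, leaving
 *			   {2,3}.
 */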
16437012946SDavid Rientjes 
16519770b32SMel Gorman /* Check that the nodemask contains at least one node with memory */
16637012946SDavid Rientjes static int is_valid_nodemask(const nodemask_t *nodemask)
1671da177e4SLinus Torvalds {
168d3eb1570SLai Jiangshan 	return nodes_intersects(*nodemask, node_states[N_MEMORY]);
1691da177e4SLinus Torvalds }
1701da177e4SLinus Torvalds 
171f5b087b5SDavid Rientjes static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
172f5b087b5SDavid Rientjes {
1736d556294SBob Liu 	return pol->flags & MPOL_MODE_FLAGS;
1744c50bc01SDavid Rientjes }
1754c50bc01SDavid Rientjes 
1764c50bc01SDavid Rientjes static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
1774c50bc01SDavid Rientjes 				   const nodemask_t *rel)
1784c50bc01SDavid Rientjes {
1794c50bc01SDavid Rientjes 	nodemask_t tmp;
1804c50bc01SDavid Rientjes 	nodes_fold(tmp, *orig, nodes_weight(*rel));
1814c50bc01SDavid Rientjes 	nodes_onto(*ret, tmp, *rel);
182f5b087b5SDavid Rientjes }
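/*
 * Worked example with hypothetical masks: for a relative nodemask of {0,2}
 * and an allowed set of {4,5,6}, nodes_fold() first wraps the relative mask
 * into the 3-bit space of the allowed set (still {0,2} here), and
 * nodes_onto() then maps bit i onto the i-th set bit of the allowed set,
 * producing {4,6}; that is, "the 0th and 2nd of whatever nodes are
 * allowed", which is the MPOL_F_RELATIVE_NODES semantic.
 */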
183f5b087b5SDavid Rientjes 
18437012946SDavid Rientjes static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
18537012946SDavid Rientjes {
18637012946SDavid Rientjes 	if (nodes_empty(*nodes))
18737012946SDavid Rientjes 		return -EINVAL;
18837012946SDavid Rientjes 	pol->v.nodes = *nodes;
18937012946SDavid Rientjes 	return 0;
19037012946SDavid Rientjes }
19137012946SDavid Rientjes 
19237012946SDavid Rientjes static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
19337012946SDavid Rientjes {
19437012946SDavid Rientjes 	if (!nodes)
195fc36b8d3SLee Schermerhorn 		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
19637012946SDavid Rientjes 	else if (nodes_empty(*nodes))
19737012946SDavid Rientjes 		return -EINVAL;			/*  no allowed nodes */
19837012946SDavid Rientjes 	else
19937012946SDavid Rientjes 		pol->v.preferred_node = first_node(*nodes);
20037012946SDavid Rientjes 	return 0;
20137012946SDavid Rientjes }
20237012946SDavid Rientjes 
20337012946SDavid Rientjes static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
20437012946SDavid Rientjes {
20537012946SDavid Rientjes 	if (!is_valid_nodemask(nodes))
20637012946SDavid Rientjes 		return -EINVAL;
20737012946SDavid Rientjes 	pol->v.nodes = *nodes;
20837012946SDavid Rientjes 	return 0;
20937012946SDavid Rientjes }
21037012946SDavid Rientjes 
21158568d2aSMiao Xie /*
21258568d2aSMiao Xie  * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
21358568d2aSMiao Xie  * any, for the new policy.  mpol_new() has already validated the nodes
21458568d2aSMiao Xie  * parameter with respect to the policy mode and flags.  But, we need to
21558568d2aSMiao Xie  * handle an empty nodemask with MPOL_PREFERRED here.
21658568d2aSMiao Xie  *
21758568d2aSMiao Xie  * Must be called holding task's alloc_lock to protect task's mems_allowed
21858568d2aSMiao Xie  * and mempolicy.  May also be called holding the mmap_sem for write.
21958568d2aSMiao Xie  */
2204bfc4495SKAMEZAWA Hiroyuki static int mpol_set_nodemask(struct mempolicy *pol,
2214bfc4495SKAMEZAWA Hiroyuki 		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
22258568d2aSMiao Xie {
22358568d2aSMiao Xie 	int ret;
22458568d2aSMiao Xie 
22558568d2aSMiao Xie 	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
22658568d2aSMiao Xie 	if (pol == NULL)
22758568d2aSMiao Xie 		return 0;
22801f13bd6SLai Jiangshan 	/* Check N_MEMORY */
2294bfc4495SKAMEZAWA Hiroyuki 	nodes_and(nsc->mask1,
23001f13bd6SLai Jiangshan 		  cpuset_current_mems_allowed, node_states[N_MEMORY]);
23158568d2aSMiao Xie 
23258568d2aSMiao Xie 	VM_BUG_ON(!nodes);
23358568d2aSMiao Xie 	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
23458568d2aSMiao Xie 		nodes = NULL;	/* explicit local allocation */
23558568d2aSMiao Xie 	else {
23658568d2aSMiao Xie 		if (pol->flags & MPOL_F_RELATIVE_NODES)
2374bfc4495SKAMEZAWA Hiroyuki 			mpol_relative_nodemask(&nsc->mask2, nodes,&nsc->mask1);
23858568d2aSMiao Xie 		else
2394bfc4495SKAMEZAWA Hiroyuki 			nodes_and(nsc->mask2, *nodes, nsc->mask1);
2404bfc4495SKAMEZAWA Hiroyuki 
24158568d2aSMiao Xie 		if (mpol_store_user_nodemask(pol))
24258568d2aSMiao Xie 			pol->w.user_nodemask = *nodes;
24358568d2aSMiao Xie 		else
24458568d2aSMiao Xie 			pol->w.cpuset_mems_allowed =
24558568d2aSMiao Xie 						cpuset_current_mems_allowed;
24658568d2aSMiao Xie 	}
24758568d2aSMiao Xie 
2484bfc4495SKAMEZAWA Hiroyuki 	if (nodes)
2494bfc4495SKAMEZAWA Hiroyuki 		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
2504bfc4495SKAMEZAWA Hiroyuki 	else
2514bfc4495SKAMEZAWA Hiroyuki 		ret = mpol_ops[pol->mode].create(pol, NULL);
25258568d2aSMiao Xie 	return ret;
25358568d2aSMiao Xie }
25458568d2aSMiao Xie 
25558568d2aSMiao Xie /*
25658568d2aSMiao Xie  * This function just creates a new policy, does some checks and simple
25758568d2aSMiao Xie  * initialization. The caller must invoke mpol_set_nodemask() to set the nodes.
25858568d2aSMiao Xie  */
259028fec41SDavid Rientjes static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
260028fec41SDavid Rientjes 				  nodemask_t *nodes)
2611da177e4SLinus Torvalds {
2621da177e4SLinus Torvalds 	struct mempolicy *policy;
2631da177e4SLinus Torvalds 
264028fec41SDavid Rientjes 	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
26500ef2d2fSDavid Rientjes 		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
266140d5a49SPaul Mundt 
2673e1f0645SDavid Rientjes 	if (mode == MPOL_DEFAULT) {
2683e1f0645SDavid Rientjes 		if (nodes && !nodes_empty(*nodes))
26937012946SDavid Rientjes 			return ERR_PTR(-EINVAL);
270d3a71033SLee Schermerhorn 		return NULL;
27137012946SDavid Rientjes 	}
2723e1f0645SDavid Rientjes 	VM_BUG_ON(!nodes);
2733e1f0645SDavid Rientjes 
2743e1f0645SDavid Rientjes 	/*
2753e1f0645SDavid Rientjes 	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
2763e1f0645SDavid Rientjes 	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
2773e1f0645SDavid Rientjes 	 * All other modes require a valid pointer to a non-empty nodemask.
2783e1f0645SDavid Rientjes 	 */
2793e1f0645SDavid Rientjes 	if (mode == MPOL_PREFERRED) {
2803e1f0645SDavid Rientjes 		if (nodes_empty(*nodes)) {
2813e1f0645SDavid Rientjes 			if (((flags & MPOL_F_STATIC_NODES) ||
2823e1f0645SDavid Rientjes 			     (flags & MPOL_F_RELATIVE_NODES)))
2833e1f0645SDavid Rientjes 				return ERR_PTR(-EINVAL);
2843e1f0645SDavid Rientjes 		}
285479e2802SPeter Zijlstra 	} else if (mode == MPOL_LOCAL) {
286479e2802SPeter Zijlstra 		if (!nodes_empty(*nodes))
287479e2802SPeter Zijlstra 			return ERR_PTR(-EINVAL);
288479e2802SPeter Zijlstra 		mode = MPOL_PREFERRED;
2893e1f0645SDavid Rientjes 	} else if (nodes_empty(*nodes))
2903e1f0645SDavid Rientjes 		return ERR_PTR(-EINVAL);
2911da177e4SLinus Torvalds 	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2921da177e4SLinus Torvalds 	if (!policy)
2931da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
2941da177e4SLinus Torvalds 	atomic_set(&policy->refcnt, 1);
29545c4745aSLee Schermerhorn 	policy->mode = mode;
29637012946SDavid Rientjes 	policy->flags = flags;
2973e1f0645SDavid Rientjes 
29837012946SDavid Rientjes 	return policy;
29937012946SDavid Rientjes }
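/*
 * Typical construction sequence, abridged from do_set_mempolicy() further
 * below; error handling is omitted relative to that caller:
 *
 *	NODEMASK_SCRATCH(scratch);
 *	struct mempolicy *new = mpol_new(mode, flags, nodes);
 *
 *	if (!IS_ERR(new)) {
 *		task_lock(current);
 *		ret = mpol_set_nodemask(new, nodes, scratch);
 *		task_unlock(current);
 *	}
 *	NODEMASK_SCRATCH_FREE(scratch);
 */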
30037012946SDavid Rientjes 
30152cd3b07SLee Schermerhorn /* Slow path of an mpol destructor. */
30252cd3b07SLee Schermerhorn void __mpol_put(struct mempolicy *p)
30352cd3b07SLee Schermerhorn {
30452cd3b07SLee Schermerhorn 	if (!atomic_dec_and_test(&p->refcnt))
30552cd3b07SLee Schermerhorn 		return;
30652cd3b07SLee Schermerhorn 	kmem_cache_free(policy_cache, p);
30752cd3b07SLee Schermerhorn }
30852cd3b07SLee Schermerhorn 
309708c1bbcSMiao Xie static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
310708c1bbcSMiao Xie 				enum mpol_rebind_step step)
31137012946SDavid Rientjes {
31237012946SDavid Rientjes }
31337012946SDavid Rientjes 
314708c1bbcSMiao Xie /*
315708c1bbcSMiao Xie  * step:
316708c1bbcSMiao Xie  * 	MPOL_REBIND_ONCE  - do the rebind work at once
317708c1bbcSMiao Xie  * 	MPOL_REBIND_STEP1 - set all the newly allowed nodes
318708c1bbcSMiao Xie  * 	MPOL_REBIND_STEP2 - clear all the disallowed nodes
319708c1bbcSMiao Xie  */
320708c1bbcSMiao Xie static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
321708c1bbcSMiao Xie 				 enum mpol_rebind_step step)
3221d0d2680SDavid Rientjes {
3231d0d2680SDavid Rientjes 	nodemask_t tmp;
3241d0d2680SDavid Rientjes 
32537012946SDavid Rientjes 	if (pol->flags & MPOL_F_STATIC_NODES)
32637012946SDavid Rientjes 		nodes_and(tmp, pol->w.user_nodemask, *nodes);
32737012946SDavid Rientjes 	else if (pol->flags & MPOL_F_RELATIVE_NODES)
32837012946SDavid Rientjes 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
3291d0d2680SDavid Rientjes 	else {
330708c1bbcSMiao Xie 		/*
331708c1bbcSMiao Xie 		 * if step == 1, we use ->w.cpuset_mems_allowed to cache the
332708c1bbcSMiao Xie 		 * result
333708c1bbcSMiao Xie 		 */
334708c1bbcSMiao Xie 		if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
335708c1bbcSMiao Xie 			nodes_remap(tmp, pol->v.nodes,
336708c1bbcSMiao Xie 					pol->w.cpuset_mems_allowed, *nodes);
337708c1bbcSMiao Xie 			pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
338708c1bbcSMiao Xie 		} else if (step == MPOL_REBIND_STEP2) {
339708c1bbcSMiao Xie 			tmp = pol->w.cpuset_mems_allowed;
34037012946SDavid Rientjes 			pol->w.cpuset_mems_allowed = *nodes;
341708c1bbcSMiao Xie 		} else
342708c1bbcSMiao Xie 			BUG();
3431d0d2680SDavid Rientjes 	}
34437012946SDavid Rientjes 
345708c1bbcSMiao Xie 	if (nodes_empty(tmp))
346708c1bbcSMiao Xie 		tmp = *nodes;
347708c1bbcSMiao Xie 
348708c1bbcSMiao Xie 	if (step == MPOL_REBIND_STEP1)
349708c1bbcSMiao Xie 		nodes_or(pol->v.nodes, pol->v.nodes, tmp);
350708c1bbcSMiao Xie 	else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
3511d0d2680SDavid Rientjes 		pol->v.nodes = tmp;
352708c1bbcSMiao Xie 	else
353708c1bbcSMiao Xie 		BUG();
354708c1bbcSMiao Xie 
3551d0d2680SDavid Rientjes 	if (!node_isset(current->il_next, tmp)) {
3561d0d2680SDavid Rientjes 		current->il_next = next_node(current->il_next, tmp);
3571d0d2680SDavid Rientjes 		if (current->il_next >= MAX_NUMNODES)
3581d0d2680SDavid Rientjes 			current->il_next = first_node(tmp);
3591d0d2680SDavid Rientjes 		if (current->il_next >= MAX_NUMNODES)
3601d0d2680SDavid Rientjes 			current->il_next = numa_node_id();
3611d0d2680SDavid Rientjes 	}
36237012946SDavid Rientjes }
36337012946SDavid Rientjes 
36437012946SDavid Rientjes static void mpol_rebind_preferred(struct mempolicy *pol,
365708c1bbcSMiao Xie 				  const nodemask_t *nodes,
366708c1bbcSMiao Xie 				  enum mpol_rebind_step step)
36737012946SDavid Rientjes {
36837012946SDavid Rientjes 	nodemask_t tmp;
36937012946SDavid Rientjes 
37037012946SDavid Rientjes 	if (pol->flags & MPOL_F_STATIC_NODES) {
3711d0d2680SDavid Rientjes 		int node = first_node(pol->w.user_nodemask);
3721d0d2680SDavid Rientjes 
373fc36b8d3SLee Schermerhorn 		if (node_isset(node, *nodes)) {
3741d0d2680SDavid Rientjes 			pol->v.preferred_node = node;
375fc36b8d3SLee Schermerhorn 			pol->flags &= ~MPOL_F_LOCAL;
376fc36b8d3SLee Schermerhorn 		} else
377fc36b8d3SLee Schermerhorn 			pol->flags |= MPOL_F_LOCAL;
37837012946SDavid Rientjes 	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
37937012946SDavid Rientjes 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
3801d0d2680SDavid Rientjes 		pol->v.preferred_node = first_node(tmp);
381fc36b8d3SLee Schermerhorn 	} else if (!(pol->flags & MPOL_F_LOCAL)) {
3821d0d2680SDavid Rientjes 		pol->v.preferred_node = node_remap(pol->v.preferred_node,
38337012946SDavid Rientjes 						   pol->w.cpuset_mems_allowed,
38437012946SDavid Rientjes 						   *nodes);
38537012946SDavid Rientjes 		pol->w.cpuset_mems_allowed = *nodes;
3861d0d2680SDavid Rientjes 	}
3871d0d2680SDavid Rientjes }
38837012946SDavid Rientjes 
389708c1bbcSMiao Xie /*
390708c1bbcSMiao Xie  * mpol_rebind_policy - Migrate a policy to a different set of nodes
391708c1bbcSMiao Xie  *
392708c1bbcSMiao Xie  * If the read-side task has no lock to protect task->mempolicy, the
393708c1bbcSMiao Xie  * write-side task will rebind task->mempolicy in two steps. The first
394708c1bbcSMiao Xie  * step sets all the newly allowed nodes, and the second step clears
395708c1bbcSMiao Xie  * all the disallowed nodes. This avoids a window in which no node is
396708c1bbcSMiao Xie  * allowed for allocation.
397708c1bbcSMiao Xie  * If we have a lock to protect task->mempolicy on the read side, we
398708c1bbcSMiao Xie  * rebind directly.
399708c1bbcSMiao Xie  *
400708c1bbcSMiao Xie  * step:
401708c1bbcSMiao Xie  * 	MPOL_REBIND_ONCE  - do the rebind work at once
402708c1bbcSMiao Xie  * 	MPOL_REBIND_STEP1 - set all the newly allowed nodes
403708c1bbcSMiao Xie  * 	MPOL_REBIND_STEP2 - clear all the disallowed nodes
404708c1bbcSMiao Xie  */
405708c1bbcSMiao Xie static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
406708c1bbcSMiao Xie 				enum mpol_rebind_step step)
40737012946SDavid Rientjes {
40837012946SDavid Rientjes 	if (!pol)
40937012946SDavid Rientjes 		return;
41089c522c7SWang Sheng-Hui 	if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE &&
41137012946SDavid Rientjes 	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
41237012946SDavid Rientjes 		return;
413708c1bbcSMiao Xie 
414708c1bbcSMiao Xie 	if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
415708c1bbcSMiao Xie 		return;
416708c1bbcSMiao Xie 
417708c1bbcSMiao Xie 	if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
418708c1bbcSMiao Xie 		BUG();
419708c1bbcSMiao Xie 
420708c1bbcSMiao Xie 	if (step == MPOL_REBIND_STEP1)
421708c1bbcSMiao Xie 		pol->flags |= MPOL_F_REBINDING;
422708c1bbcSMiao Xie 	else if (step == MPOL_REBIND_STEP2)
423708c1bbcSMiao Xie 		pol->flags &= ~MPOL_F_REBINDING;
424708c1bbcSMiao Xie 	else if (step >= MPOL_REBIND_NSTEP)
425708c1bbcSMiao Xie 		BUG();
426708c1bbcSMiao Xie 
427708c1bbcSMiao Xie 	mpol_ops[pol->mode].rebind(pol, newmask, step);
4281d0d2680SDavid Rientjes }
4291d0d2680SDavid Rientjes 
4301d0d2680SDavid Rientjes /*
4311d0d2680SDavid Rientjes  * Wrapper for mpol_rebind_policy() that just requires task
4321d0d2680SDavid Rientjes  * pointer, and updates task mempolicy.
43358568d2aSMiao Xie  *
43458568d2aSMiao Xie  * Called with task's alloc_lock held.
4351d0d2680SDavid Rientjes  */
4361d0d2680SDavid Rientjes 
437708c1bbcSMiao Xie void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
438708c1bbcSMiao Xie 			enum mpol_rebind_step step)
4391d0d2680SDavid Rientjes {
440708c1bbcSMiao Xie 	mpol_rebind_policy(tsk->mempolicy, new, step);
4411d0d2680SDavid Rientjes }
4421d0d2680SDavid Rientjes 
4431d0d2680SDavid Rientjes /*
4441d0d2680SDavid Rientjes  * Rebind each vma in mm to new nodemask.
4451d0d2680SDavid Rientjes  *
4461d0d2680SDavid Rientjes  * Call holding a reference to mm.  Takes mm->mmap_sem during call.
4471d0d2680SDavid Rientjes  */
4481d0d2680SDavid Rientjes 
4491d0d2680SDavid Rientjes void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
4501d0d2680SDavid Rientjes {
4511d0d2680SDavid Rientjes 	struct vm_area_struct *vma;
4521d0d2680SDavid Rientjes 
4531d0d2680SDavid Rientjes 	down_write(&mm->mmap_sem);
4541d0d2680SDavid Rientjes 	for (vma = mm->mmap; vma; vma = vma->vm_next)
455708c1bbcSMiao Xie 		mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
4561d0d2680SDavid Rientjes 	up_write(&mm->mmap_sem);
4571d0d2680SDavid Rientjes }
4581d0d2680SDavid Rientjes 
45937012946SDavid Rientjes static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
46037012946SDavid Rientjes 	[MPOL_DEFAULT] = {
46137012946SDavid Rientjes 		.rebind = mpol_rebind_default,
46237012946SDavid Rientjes 	},
46337012946SDavid Rientjes 	[MPOL_INTERLEAVE] = {
46437012946SDavid Rientjes 		.create = mpol_new_interleave,
46537012946SDavid Rientjes 		.rebind = mpol_rebind_nodemask,
46637012946SDavid Rientjes 	},
46737012946SDavid Rientjes 	[MPOL_PREFERRED] = {
46837012946SDavid Rientjes 		.create = mpol_new_preferred,
46937012946SDavid Rientjes 		.rebind = mpol_rebind_preferred,
47037012946SDavid Rientjes 	},
47137012946SDavid Rientjes 	[MPOL_BIND] = {
47237012946SDavid Rientjes 		.create = mpol_new_bind,
47337012946SDavid Rientjes 		.rebind = mpol_rebind_nodemask,
47437012946SDavid Rientjes 	},
47537012946SDavid Rientjes };
47637012946SDavid Rientjes 
477fc301289SChristoph Lameter static void migrate_page_add(struct page *page, struct list_head *pagelist,
478fc301289SChristoph Lameter 				unsigned long flags);
4791a75a6c8SChristoph Lameter 
48098094945SNaoya Horiguchi /*
48198094945SNaoya Horiguchi  * Scan through the pages, checking whether they satisfy the given
48298094945SNaoya Horiguchi  * conditions, and move them to the pagelist if they do.
48398094945SNaoya Horiguchi  */
48498094945SNaoya Horiguchi static int queue_pages_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
485dc9aa5b9SChristoph Lameter 		unsigned long addr, unsigned long end,
486dc9aa5b9SChristoph Lameter 		const nodemask_t *nodes, unsigned long flags,
48738e35860SChristoph Lameter 		void *private)
4881da177e4SLinus Torvalds {
48991612e0dSHugh Dickins 	pte_t *orig_pte;
49091612e0dSHugh Dickins 	pte_t *pte;
491705e87c0SHugh Dickins 	spinlock_t *ptl;
492941150a3SHugh Dickins 
493705e87c0SHugh Dickins 	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
49491612e0dSHugh Dickins 	do {
4956aab341eSLinus Torvalds 		struct page *page;
49625ba77c1SAndy Whitcroft 		int nid;
49791612e0dSHugh Dickins 
49891612e0dSHugh Dickins 		if (!pte_present(*pte))
49991612e0dSHugh Dickins 			continue;
5006aab341eSLinus Torvalds 		page = vm_normal_page(vma, addr, *pte);
5016aab341eSLinus Torvalds 		if (!page)
50291612e0dSHugh Dickins 			continue;
503053837fcSNick Piggin 		/*
50462b61f61SHugh Dickins 		 * vm_normal_page() filters out zero pages, but there might
50562b61f61SHugh Dickins 		 * still be PageReserved pages to skip, perhaps in a VDSO.
506053837fcSNick Piggin 		 */
507b79bc0a0SHugh Dickins 		if (PageReserved(page))
508f4598c8bSChristoph Lameter 			continue;
5096aab341eSLinus Torvalds 		nid = page_to_nid(page);
51038e35860SChristoph Lameter 		if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
51138e35860SChristoph Lameter 			continue;
51238e35860SChristoph Lameter 
513b1f72d18SStephen Wilson 		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
514fc301289SChristoph Lameter 			migrate_page_add(page, private, flags);
515dc9aa5b9SChristoph Lameter 		else
5161da177e4SLinus Torvalds 			break;
51791612e0dSHugh Dickins 	} while (pte++, addr += PAGE_SIZE, addr != end);
518705e87c0SHugh Dickins 	pte_unmap_unlock(orig_pte, ptl);
51991612e0dSHugh Dickins 	return addr != end;
52091612e0dSHugh Dickins }
52191612e0dSHugh Dickins 
52298094945SNaoya Horiguchi static void queue_pages_hugetlb_pmd_range(struct vm_area_struct *vma,
52398094945SNaoya Horiguchi 		pmd_t *pmd, const nodemask_t *nodes, unsigned long flags,
524e2d8cf40SNaoya Horiguchi 				    void *private)
525e2d8cf40SNaoya Horiguchi {
526e2d8cf40SNaoya Horiguchi #ifdef CONFIG_HUGETLB_PAGE
527e2d8cf40SNaoya Horiguchi 	int nid;
528e2d8cf40SNaoya Horiguchi 	struct page *page;
529cb900f41SKirill A. Shutemov 	spinlock_t *ptl;
530d4c54919SNaoya Horiguchi 	pte_t entry;
531e2d8cf40SNaoya Horiguchi 
532cb900f41SKirill A. Shutemov 	ptl = huge_pte_lock(hstate_vma(vma), vma->vm_mm, (pte_t *)pmd);
533d4c54919SNaoya Horiguchi 	entry = huge_ptep_get((pte_t *)pmd);
534d4c54919SNaoya Horiguchi 	if (!pte_present(entry))
535d4c54919SNaoya Horiguchi 		goto unlock;
536d4c54919SNaoya Horiguchi 	page = pte_page(entry);
537e2d8cf40SNaoya Horiguchi 	nid = page_to_nid(page);
538e2d8cf40SNaoya Horiguchi 	if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
539e2d8cf40SNaoya Horiguchi 		goto unlock;
540e2d8cf40SNaoya Horiguchi 	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
541e2d8cf40SNaoya Horiguchi 	if (flags & (MPOL_MF_MOVE_ALL) ||
542e2d8cf40SNaoya Horiguchi 	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
543e2d8cf40SNaoya Horiguchi 		isolate_huge_page(page, private);
544e2d8cf40SNaoya Horiguchi unlock:
545cb900f41SKirill A. Shutemov 	spin_unlock(ptl);
546e2d8cf40SNaoya Horiguchi #else
547e2d8cf40SNaoya Horiguchi 	BUG();
548e2d8cf40SNaoya Horiguchi #endif
549e2d8cf40SNaoya Horiguchi }
550e2d8cf40SNaoya Horiguchi 
55198094945SNaoya Horiguchi static inline int queue_pages_pmd_range(struct vm_area_struct *vma, pud_t *pud,
552dc9aa5b9SChristoph Lameter 		unsigned long addr, unsigned long end,
553dc9aa5b9SChristoph Lameter 		const nodemask_t *nodes, unsigned long flags,
55438e35860SChristoph Lameter 		void *private)
55591612e0dSHugh Dickins {
55691612e0dSHugh Dickins 	pmd_t *pmd;
55791612e0dSHugh Dickins 	unsigned long next;
55891612e0dSHugh Dickins 
55991612e0dSHugh Dickins 	pmd = pmd_offset(pud, addr);
56091612e0dSHugh Dickins 	do {
56191612e0dSHugh Dickins 		next = pmd_addr_end(addr, end);
562e2d8cf40SNaoya Horiguchi 		if (!pmd_present(*pmd))
563e2d8cf40SNaoya Horiguchi 			continue;
564e2d8cf40SNaoya Horiguchi 		if (pmd_huge(*pmd) && is_vm_hugetlb_page(vma)) {
56598094945SNaoya Horiguchi 			queue_pages_hugetlb_pmd_range(vma, pmd, nodes,
566e2d8cf40SNaoya Horiguchi 						flags, private);
567e2d8cf40SNaoya Horiguchi 			continue;
568e2d8cf40SNaoya Horiguchi 		}
569e180377fSKirill A. Shutemov 		split_huge_page_pmd(vma, addr, pmd);
5701a5a9906SAndrea Arcangeli 		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
57191612e0dSHugh Dickins 			continue;
57298094945SNaoya Horiguchi 		if (queue_pages_pte_range(vma, pmd, addr, next, nodes,
57338e35860SChristoph Lameter 				    flags, private))
57491612e0dSHugh Dickins 			return -EIO;
57591612e0dSHugh Dickins 	} while (pmd++, addr = next, addr != end);
57691612e0dSHugh Dickins 	return 0;
57791612e0dSHugh Dickins }
57891612e0dSHugh Dickins 
57998094945SNaoya Horiguchi static inline int queue_pages_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
580dc9aa5b9SChristoph Lameter 		unsigned long addr, unsigned long end,
581dc9aa5b9SChristoph Lameter 		const nodemask_t *nodes, unsigned long flags,
58238e35860SChristoph Lameter 		void *private)
58391612e0dSHugh Dickins {
58491612e0dSHugh Dickins 	pud_t *pud;
58591612e0dSHugh Dickins 	unsigned long next;
58691612e0dSHugh Dickins 
58791612e0dSHugh Dickins 	pud = pud_offset(pgd, addr);
58891612e0dSHugh Dickins 	do {
58991612e0dSHugh Dickins 		next = pud_addr_end(addr, end);
590e2d8cf40SNaoya Horiguchi 		if (pud_huge(*pud) && is_vm_hugetlb_page(vma))
591e2d8cf40SNaoya Horiguchi 			continue;
59291612e0dSHugh Dickins 		if (pud_none_or_clear_bad(pud))
59391612e0dSHugh Dickins 			continue;
59498094945SNaoya Horiguchi 		if (queue_pages_pmd_range(vma, pud, addr, next, nodes,
59538e35860SChristoph Lameter 				    flags, private))
59691612e0dSHugh Dickins 			return -EIO;
59791612e0dSHugh Dickins 	} while (pud++, addr = next, addr != end);
59891612e0dSHugh Dickins 	return 0;
59991612e0dSHugh Dickins }
60091612e0dSHugh Dickins 
60198094945SNaoya Horiguchi static inline int queue_pages_pgd_range(struct vm_area_struct *vma,
602dc9aa5b9SChristoph Lameter 		unsigned long addr, unsigned long end,
603dc9aa5b9SChristoph Lameter 		const nodemask_t *nodes, unsigned long flags,
60438e35860SChristoph Lameter 		void *private)
60591612e0dSHugh Dickins {
60691612e0dSHugh Dickins 	pgd_t *pgd;
60791612e0dSHugh Dickins 	unsigned long next;
60891612e0dSHugh Dickins 
609b5810039SNick Piggin 	pgd = pgd_offset(vma->vm_mm, addr);
61091612e0dSHugh Dickins 	do {
61191612e0dSHugh Dickins 		next = pgd_addr_end(addr, end);
61291612e0dSHugh Dickins 		if (pgd_none_or_clear_bad(pgd))
61391612e0dSHugh Dickins 			continue;
61498094945SNaoya Horiguchi 		if (queue_pages_pud_range(vma, pgd, addr, next, nodes,
61538e35860SChristoph Lameter 				    flags, private))
61691612e0dSHugh Dickins 			return -EIO;
61791612e0dSHugh Dickins 	} while (pgd++, addr = next, addr != end);
61891612e0dSHugh Dickins 	return 0;
6191da177e4SLinus Torvalds }
6201da177e4SLinus Torvalds 
6215877231fSAneesh Kumar K.V #ifdef CONFIG_NUMA_BALANCING
622b24f53a0SLee Schermerhorn /*
6234b10e7d5SMel Gorman  * This is used to mark a range of virtual addresses to be inaccessible.
6244b10e7d5SMel Gorman  * The protections are later cleared by NUMA hinting faults. Depending on these
6254b10e7d5SMel Gorman  * faults, pages may be migrated for better NUMA placement.
6264b10e7d5SMel Gorman  *
6274b10e7d5SMel Gorman  * This is assuming that NUMA faults are handled using PROT_NONE. If
6284b10e7d5SMel Gorman  * an architecture makes a different choice, it will need further
6294b10e7d5SMel Gorman  * changes to the core.
630b24f53a0SLee Schermerhorn  */
6314b10e7d5SMel Gorman unsigned long change_prot_numa(struct vm_area_struct *vma,
6324b10e7d5SMel Gorman 			unsigned long addr, unsigned long end)
633b24f53a0SLee Schermerhorn {
6344b10e7d5SMel Gorman 	int nr_updated;
635b24f53a0SLee Schermerhorn 
6364b10e7d5SMel Gorman 	nr_updated = change_protection(vma, addr, end, vma->vm_page_prot, 0, 1);
63703c5a6e1SMel Gorman 	if (nr_updated)
63803c5a6e1SMel Gorman 		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
639b24f53a0SLee Schermerhorn 
6404b10e7d5SMel Gorman 	return nr_updated;
641b24f53a0SLee Schermerhorn }
642b24f53a0SLee Schermerhorn #else
643b24f53a0SLee Schermerhorn static unsigned long change_prot_numa(struct vm_area_struct *vma,
644b24f53a0SLee Schermerhorn 			unsigned long addr, unsigned long end)
645b24f53a0SLee Schermerhorn {
646b24f53a0SLee Schermerhorn 	return 0;
647b24f53a0SLee Schermerhorn }
6485877231fSAneesh Kumar K.V #endif /* CONFIG_NUMA_BALANCING */
649b24f53a0SLee Schermerhorn 
650dc9aa5b9SChristoph Lameter /*
65198094945SNaoya Horiguchi  * Walk through page tables and collect pages to be migrated.
65298094945SNaoya Horiguchi  *
65398094945SNaoya Horiguchi  * If pages found in a given range are on a set of nodes (determined by
65498094945SNaoya Horiguchi  * @nodes and @flags), they are isolated and queued to the pagelist, which
65598094945SNaoya Horiguchi  * is passed via @private.
656dc9aa5b9SChristoph Lameter  */
657d05f0cdcSHugh Dickins static int
65898094945SNaoya Horiguchi queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
65938e35860SChristoph Lameter 		const nodemask_t *nodes, unsigned long flags, void *private)
6601da177e4SLinus Torvalds {
661d05f0cdcSHugh Dickins 	int err = 0;
662d05f0cdcSHugh Dickins 	struct vm_area_struct *vma, *prev;
6631da177e4SLinus Torvalds 
664d05f0cdcSHugh Dickins 	vma = find_vma(mm, start);
665d05f0cdcSHugh Dickins 	if (!vma)
666d05f0cdcSHugh Dickins 		return -EFAULT;
6671da177e4SLinus Torvalds 	prev = NULL;
668d05f0cdcSHugh Dickins 	for (; vma && vma->vm_start < end; vma = vma->vm_next) {
6695b952b3cSAndi Kleen 		unsigned long endvma = vma->vm_end;
670dc9aa5b9SChristoph Lameter 
6715b952b3cSAndi Kleen 		if (endvma > end)
6725b952b3cSAndi Kleen 			endvma = end;
6735b952b3cSAndi Kleen 		if (vma->vm_start > start)
6745b952b3cSAndi Kleen 			start = vma->vm_start;
675b24f53a0SLee Schermerhorn 
676b24f53a0SLee Schermerhorn 		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
677b24f53a0SLee Schermerhorn 			if (!vma->vm_next && vma->vm_end < end)
678d05f0cdcSHugh Dickins 				return -EFAULT;
679b24f53a0SLee Schermerhorn 			if (prev && prev->vm_end < vma->vm_start)
680d05f0cdcSHugh Dickins 				return -EFAULT;
681b24f53a0SLee Schermerhorn 		}
682b24f53a0SLee Schermerhorn 
683b24f53a0SLee Schermerhorn 		if (flags & MPOL_MF_LAZY) {
684*2c0346a3SMel Gorman 			/* Similar to task_numa_work, skip inaccessible VMAs */
685*2c0346a3SMel Gorman 			if (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))
686b24f53a0SLee Schermerhorn 				change_prot_numa(vma, start, endvma);
687b24f53a0SLee Schermerhorn 			goto next;
688b24f53a0SLee Schermerhorn 		}
689b24f53a0SLee Schermerhorn 
690b24f53a0SLee Schermerhorn 		if ((flags & MPOL_MF_STRICT) ||
691b24f53a0SLee Schermerhorn 		     ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
692b24f53a0SLee Schermerhorn 		      vma_migratable(vma))) {
693b24f53a0SLee Schermerhorn 
69498094945SNaoya Horiguchi 			err = queue_pages_pgd_range(vma, start, endvma, nodes,
69538e35860SChristoph Lameter 						flags, private);
696d05f0cdcSHugh Dickins 			if (err)
6971da177e4SLinus Torvalds 				break;
6981da177e4SLinus Torvalds 		}
699b24f53a0SLee Schermerhorn next:
7001da177e4SLinus Torvalds 		prev = vma;
7011da177e4SLinus Torvalds 	}
702d05f0cdcSHugh Dickins 	return err;
7031da177e4SLinus Torvalds }
7041da177e4SLinus Torvalds 
705869833f2SKOSAKI Motohiro /*
706869833f2SKOSAKI Motohiro  * Apply policy to a single VMA
707869833f2SKOSAKI Motohiro  * This must be called with the mmap_sem held for writing.
708869833f2SKOSAKI Motohiro  */
709869833f2SKOSAKI Motohiro static int vma_replace_policy(struct vm_area_struct *vma,
710869833f2SKOSAKI Motohiro 						struct mempolicy *pol)
7118d34694cSKOSAKI Motohiro {
712869833f2SKOSAKI Motohiro 	int err;
713869833f2SKOSAKI Motohiro 	struct mempolicy *old;
714869833f2SKOSAKI Motohiro 	struct mempolicy *new;
7158d34694cSKOSAKI Motohiro 
7168d34694cSKOSAKI Motohiro 	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
7178d34694cSKOSAKI Motohiro 		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
7188d34694cSKOSAKI Motohiro 		 vma->vm_ops, vma->vm_file,
7198d34694cSKOSAKI Motohiro 		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
7208d34694cSKOSAKI Motohiro 
721869833f2SKOSAKI Motohiro 	new = mpol_dup(pol);
722869833f2SKOSAKI Motohiro 	if (IS_ERR(new))
723869833f2SKOSAKI Motohiro 		return PTR_ERR(new);
724869833f2SKOSAKI Motohiro 
725869833f2SKOSAKI Motohiro 	if (vma->vm_ops && vma->vm_ops->set_policy) {
7268d34694cSKOSAKI Motohiro 		err = vma->vm_ops->set_policy(vma, new);
727869833f2SKOSAKI Motohiro 		if (err)
728869833f2SKOSAKI Motohiro 			goto err_out;
7298d34694cSKOSAKI Motohiro 	}
730869833f2SKOSAKI Motohiro 
731869833f2SKOSAKI Motohiro 	old = vma->vm_policy;
732869833f2SKOSAKI Motohiro 	vma->vm_policy = new; /* protected by mmap_sem */
733869833f2SKOSAKI Motohiro 	mpol_put(old);
734869833f2SKOSAKI Motohiro 
735869833f2SKOSAKI Motohiro 	return 0;
736869833f2SKOSAKI Motohiro  err_out:
737869833f2SKOSAKI Motohiro 	mpol_put(new);
7388d34694cSKOSAKI Motohiro 	return err;
7398d34694cSKOSAKI Motohiro }
7408d34694cSKOSAKI Motohiro 
7411da177e4SLinus Torvalds /* Step 2: apply policy to a range and do splits. */
7429d8cebd4SKOSAKI Motohiro static int mbind_range(struct mm_struct *mm, unsigned long start,
7439d8cebd4SKOSAKI Motohiro 		       unsigned long end, struct mempolicy *new_pol)
7441da177e4SLinus Torvalds {
7451da177e4SLinus Torvalds 	struct vm_area_struct *next;
7469d8cebd4SKOSAKI Motohiro 	struct vm_area_struct *prev;
7479d8cebd4SKOSAKI Motohiro 	struct vm_area_struct *vma;
7489d8cebd4SKOSAKI Motohiro 	int err = 0;
749e26a5114SKOSAKI Motohiro 	pgoff_t pgoff;
7509d8cebd4SKOSAKI Motohiro 	unsigned long vmstart;
7519d8cebd4SKOSAKI Motohiro 	unsigned long vmend;
7521da177e4SLinus Torvalds 
753097d5910SLinus Torvalds 	vma = find_vma(mm, start);
7549d8cebd4SKOSAKI Motohiro 	if (!vma || vma->vm_start > start)
7559d8cebd4SKOSAKI Motohiro 		return -EFAULT;
7569d8cebd4SKOSAKI Motohiro 
757097d5910SLinus Torvalds 	prev = vma->vm_prev;
758e26a5114SKOSAKI Motohiro 	if (start > vma->vm_start)
759e26a5114SKOSAKI Motohiro 		prev = vma;
760e26a5114SKOSAKI Motohiro 
7619d8cebd4SKOSAKI Motohiro 	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
7621da177e4SLinus Torvalds 		next = vma->vm_next;
7639d8cebd4SKOSAKI Motohiro 		vmstart = max(start, vma->vm_start);
7649d8cebd4SKOSAKI Motohiro 		vmend   = min(end, vma->vm_end);
7659d8cebd4SKOSAKI Motohiro 
766e26a5114SKOSAKI Motohiro 		if (mpol_equal(vma_policy(vma), new_pol))
767e26a5114SKOSAKI Motohiro 			continue;
768e26a5114SKOSAKI Motohiro 
769e26a5114SKOSAKI Motohiro 		pgoff = vma->vm_pgoff +
770e26a5114SKOSAKI Motohiro 			((vmstart - vma->vm_start) >> PAGE_SHIFT);
7719d8cebd4SKOSAKI Motohiro 		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
772e26a5114SKOSAKI Motohiro 				  vma->anon_vma, vma->vm_file, pgoff,
7738aacc9f5SCaspar Zhang 				  new_pol);
7749d8cebd4SKOSAKI Motohiro 		if (prev) {
7759d8cebd4SKOSAKI Motohiro 			vma = prev;
7769d8cebd4SKOSAKI Motohiro 			next = vma->vm_next;
7773964acd0SOleg Nesterov 			if (mpol_equal(vma_policy(vma), new_pol))
7789d8cebd4SKOSAKI Motohiro 				continue;
7793964acd0SOleg Nesterov 			/* vma_merge() joined vma && vma->next, case 8 */
7803964acd0SOleg Nesterov 			goto replace;
7811da177e4SLinus Torvalds 		}
7829d8cebd4SKOSAKI Motohiro 		if (vma->vm_start != vmstart) {
7839d8cebd4SKOSAKI Motohiro 			err = split_vma(vma->vm_mm, vma, vmstart, 1);
7849d8cebd4SKOSAKI Motohiro 			if (err)
7859d8cebd4SKOSAKI Motohiro 				goto out;
7869d8cebd4SKOSAKI Motohiro 		}
7879d8cebd4SKOSAKI Motohiro 		if (vma->vm_end != vmend) {
7889d8cebd4SKOSAKI Motohiro 			err = split_vma(vma->vm_mm, vma, vmend, 0);
7899d8cebd4SKOSAKI Motohiro 			if (err)
7909d8cebd4SKOSAKI Motohiro 				goto out;
7919d8cebd4SKOSAKI Motohiro 		}
7923964acd0SOleg Nesterov  replace:
793869833f2SKOSAKI Motohiro 		err = vma_replace_policy(vma, new_pol);
7949d8cebd4SKOSAKI Motohiro 		if (err)
7959d8cebd4SKOSAKI Motohiro 			goto out;
7969d8cebd4SKOSAKI Motohiro 	}
7979d8cebd4SKOSAKI Motohiro 
7989d8cebd4SKOSAKI Motohiro  out:
7991da177e4SLinus Torvalds 	return err;
8001da177e4SLinus Torvalds }
8011da177e4SLinus Torvalds 
8021da177e4SLinus Torvalds /* Set the process memory policy */
803028fec41SDavid Rientjes static long do_set_mempolicy(unsigned short mode, unsigned short flags,
804028fec41SDavid Rientjes 			     nodemask_t *nodes)
8051da177e4SLinus Torvalds {
80658568d2aSMiao Xie 	struct mempolicy *new, *old;
8074bfc4495SKAMEZAWA Hiroyuki 	NODEMASK_SCRATCH(scratch);
80858568d2aSMiao Xie 	int ret;
8091da177e4SLinus Torvalds 
8104bfc4495SKAMEZAWA Hiroyuki 	if (!scratch)
8114bfc4495SKAMEZAWA Hiroyuki 		return -ENOMEM;
812f4e53d91SLee Schermerhorn 
8134bfc4495SKAMEZAWA Hiroyuki 	new = mpol_new(mode, flags, nodes);
8144bfc4495SKAMEZAWA Hiroyuki 	if (IS_ERR(new)) {
8154bfc4495SKAMEZAWA Hiroyuki 		ret = PTR_ERR(new);
8164bfc4495SKAMEZAWA Hiroyuki 		goto out;
8174bfc4495SKAMEZAWA Hiroyuki 	}
8182c7c3a7dSOleg Nesterov 
81958568d2aSMiao Xie 	task_lock(current);
8204bfc4495SKAMEZAWA Hiroyuki 	ret = mpol_set_nodemask(new, nodes, scratch);
82158568d2aSMiao Xie 	if (ret) {
82258568d2aSMiao Xie 		task_unlock(current);
82358568d2aSMiao Xie 		mpol_put(new);
8244bfc4495SKAMEZAWA Hiroyuki 		goto out;
82558568d2aSMiao Xie 	}
82658568d2aSMiao Xie 	old = current->mempolicy;
8271da177e4SLinus Torvalds 	current->mempolicy = new;
82845c4745aSLee Schermerhorn 	if (new && new->mode == MPOL_INTERLEAVE &&
829f5b087b5SDavid Rientjes 	    nodes_weight(new->v.nodes))
830dfcd3c0dSAndi Kleen 		current->il_next = first_node(new->v.nodes);
83158568d2aSMiao Xie 	task_unlock(current);
83258568d2aSMiao Xie 	mpol_put(old);
8334bfc4495SKAMEZAWA Hiroyuki 	ret = 0;
8344bfc4495SKAMEZAWA Hiroyuki out:
8354bfc4495SKAMEZAWA Hiroyuki 	NODEMASK_SCRATCH_FREE(scratch);
8364bfc4495SKAMEZAWA Hiroyuki 	return ret;
8371da177e4SLinus Torvalds }
8381da177e4SLinus Torvalds 
839bea904d5SLee Schermerhorn /*
840bea904d5SLee Schermerhorn  * Return nodemask for policy for get_mempolicy() query
84158568d2aSMiao Xie  *
84258568d2aSMiao Xie  * Called with task's alloc_lock held
843bea904d5SLee Schermerhorn  */
844bea904d5SLee Schermerhorn static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
8451da177e4SLinus Torvalds {
846dfcd3c0dSAndi Kleen 	nodes_clear(*nodes);
847bea904d5SLee Schermerhorn 	if (p == &default_policy)
848bea904d5SLee Schermerhorn 		return;
849bea904d5SLee Schermerhorn 
85045c4745aSLee Schermerhorn 	switch (p->mode) {
85119770b32SMel Gorman 	case MPOL_BIND:
85219770b32SMel Gorman 		/* Fall through */
8531da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
854dfcd3c0dSAndi Kleen 		*nodes = p->v.nodes;
8551da177e4SLinus Torvalds 		break;
8561da177e4SLinus Torvalds 	case MPOL_PREFERRED:
857fc36b8d3SLee Schermerhorn 		if (!(p->flags & MPOL_F_LOCAL))
858dfcd3c0dSAndi Kleen 			node_set(p->v.preferred_node, *nodes);
85953f2556bSLee Schermerhorn 		/* else return empty node mask for local allocation */
8601da177e4SLinus Torvalds 		break;
8611da177e4SLinus Torvalds 	default:
8621da177e4SLinus Torvalds 		BUG();
8631da177e4SLinus Torvalds 	}
8641da177e4SLinus Torvalds }
8651da177e4SLinus Torvalds 
8661da177e4SLinus Torvalds static int lookup_node(struct mm_struct *mm, unsigned long addr)
8671da177e4SLinus Torvalds {
8681da177e4SLinus Torvalds 	struct page *p;
8691da177e4SLinus Torvalds 	int err;
8701da177e4SLinus Torvalds 
8711da177e4SLinus Torvalds 	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
8721da177e4SLinus Torvalds 	if (err >= 0) {
8731da177e4SLinus Torvalds 		err = page_to_nid(p);
8741da177e4SLinus Torvalds 		put_page(p);
8751da177e4SLinus Torvalds 	}
8761da177e4SLinus Torvalds 	return err;
8771da177e4SLinus Torvalds }
8781da177e4SLinus Torvalds 
8791da177e4SLinus Torvalds /* Retrieve NUMA policy */
880dbcb0f19SAdrian Bunk static long do_get_mempolicy(int *policy, nodemask_t *nmask,
8811da177e4SLinus Torvalds 			     unsigned long addr, unsigned long flags)
8821da177e4SLinus Torvalds {
8838bccd85fSChristoph Lameter 	int err;
8841da177e4SLinus Torvalds 	struct mm_struct *mm = current->mm;
8851da177e4SLinus Torvalds 	struct vm_area_struct *vma = NULL;
8861da177e4SLinus Torvalds 	struct mempolicy *pol = current->mempolicy;
8871da177e4SLinus Torvalds 
888754af6f5SLee Schermerhorn 	if (flags &
889754af6f5SLee Schermerhorn 		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
8901da177e4SLinus Torvalds 		return -EINVAL;
891754af6f5SLee Schermerhorn 
892754af6f5SLee Schermerhorn 	if (flags & MPOL_F_MEMS_ALLOWED) {
893754af6f5SLee Schermerhorn 		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
894754af6f5SLee Schermerhorn 			return -EINVAL;
895754af6f5SLee Schermerhorn 		*policy = 0;	/* just so it's initialized */
89658568d2aSMiao Xie 		task_lock(current);
897754af6f5SLee Schermerhorn 		*nmask  = cpuset_current_mems_allowed;
89858568d2aSMiao Xie 		task_unlock(current);
899754af6f5SLee Schermerhorn 		return 0;
900754af6f5SLee Schermerhorn 	}
901754af6f5SLee Schermerhorn 
9021da177e4SLinus Torvalds 	if (flags & MPOL_F_ADDR) {
903bea904d5SLee Schermerhorn 		/*
904bea904d5SLee Schermerhorn 		 * Do NOT fall back to task policy if the
905bea904d5SLee Schermerhorn 		 * vma/shared policy at addr is NULL.  We
906bea904d5SLee Schermerhorn 		 * want to return MPOL_DEFAULT in this case.
907bea904d5SLee Schermerhorn 		 */
9081da177e4SLinus Torvalds 		down_read(&mm->mmap_sem);
9091da177e4SLinus Torvalds 		vma = find_vma_intersection(mm, addr, addr+1);
9101da177e4SLinus Torvalds 		if (!vma) {
9111da177e4SLinus Torvalds 			up_read(&mm->mmap_sem);
9121da177e4SLinus Torvalds 			return -EFAULT;
9131da177e4SLinus Torvalds 		}
9141da177e4SLinus Torvalds 		if (vma->vm_ops && vma->vm_ops->get_policy)
9151da177e4SLinus Torvalds 			pol = vma->vm_ops->get_policy(vma, addr);
9161da177e4SLinus Torvalds 		else
9171da177e4SLinus Torvalds 			pol = vma->vm_policy;
9181da177e4SLinus Torvalds 	} else if (addr)
9191da177e4SLinus Torvalds 		return -EINVAL;
9201da177e4SLinus Torvalds 
9211da177e4SLinus Torvalds 	if (!pol)
922bea904d5SLee Schermerhorn 		pol = &default_policy;	/* indicates default behavior */
9231da177e4SLinus Torvalds 
9241da177e4SLinus Torvalds 	if (flags & MPOL_F_NODE) {
9251da177e4SLinus Torvalds 		if (flags & MPOL_F_ADDR) {
9261da177e4SLinus Torvalds 			err = lookup_node(mm, addr);
9271da177e4SLinus Torvalds 			if (err < 0)
9281da177e4SLinus Torvalds 				goto out;
9298bccd85fSChristoph Lameter 			*policy = err;
9301da177e4SLinus Torvalds 		} else if (pol == current->mempolicy &&
93145c4745aSLee Schermerhorn 				pol->mode == MPOL_INTERLEAVE) {
9328bccd85fSChristoph Lameter 			*policy = current->il_next;
9331da177e4SLinus Torvalds 		} else {
9341da177e4SLinus Torvalds 			err = -EINVAL;
9351da177e4SLinus Torvalds 			goto out;
9361da177e4SLinus Torvalds 		}
937bea904d5SLee Schermerhorn 	} else {
938bea904d5SLee Schermerhorn 		*policy = pol == &default_policy ? MPOL_DEFAULT :
939bea904d5SLee Schermerhorn 						pol->mode;
940d79df630SDavid Rientjes 		/*
941d79df630SDavid Rientjes 		 * Internal mempolicy flags must be masked off before exposing
942d79df630SDavid Rientjes 		 * the policy to userspace.
943d79df630SDavid Rientjes 		 */
944d79df630SDavid Rientjes 		*policy |= (pol->flags & MPOL_MODE_FLAGS);
945bea904d5SLee Schermerhorn 	}
9461da177e4SLinus Torvalds 
9471da177e4SLinus Torvalds 	if (vma) {
9481da177e4SLinus Torvalds 		up_read(&current->mm->mmap_sem);
9491da177e4SLinus Torvalds 		vma = NULL;
9501da177e4SLinus Torvalds 	}
9511da177e4SLinus Torvalds 
9521da177e4SLinus Torvalds 	err = 0;
95358568d2aSMiao Xie 	if (nmask) {
954c6b6ef8bSLee Schermerhorn 		if (mpol_store_user_nodemask(pol)) {
955c6b6ef8bSLee Schermerhorn 			*nmask = pol->w.user_nodemask;
956c6b6ef8bSLee Schermerhorn 		} else {
95758568d2aSMiao Xie 			task_lock(current);
958bea904d5SLee Schermerhorn 			get_policy_nodemask(pol, nmask);
95958568d2aSMiao Xie 			task_unlock(current);
96058568d2aSMiao Xie 		}
961c6b6ef8bSLee Schermerhorn 	}
9621da177e4SLinus Torvalds 
9631da177e4SLinus Torvalds  out:
96452cd3b07SLee Schermerhorn 	mpol_cond_put(pol);
9651da177e4SLinus Torvalds 	if (vma)
9661da177e4SLinus Torvalds 		up_read(&current->mm->mmap_sem);
9671da177e4SLinus Torvalds 	return err;
9681da177e4SLinus Torvalds }
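/*
 * For illustration, the user-space view of this query path via
 * get_mempolicy(2) (declarations from numactl's libnuma; `addr' is a
 * hypothetical pointer into a mapped region; error handling omitted):
 *
 *	#include <numaif.h>
 *
 *	int mode, node;
 *	unsigned long mask[16] = { 0 };	// room for up to 1024 node bits
 *
 *	// Which policy and nodemask govern the calling thread?
 *	get_mempolicy(&mode, mask, 8 * sizeof(mask), NULL, 0);
 *
 *	// On which node is the page backing `addr' currently allocated?
 *	get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR);
 */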
9691da177e4SLinus Torvalds 
970b20a3503SChristoph Lameter #ifdef CONFIG_MIGRATION
9718bccd85fSChristoph Lameter /*
9726ce3c4c0SChristoph Lameter  * page migration
9736ce3c4c0SChristoph Lameter  */
974fc301289SChristoph Lameter static void migrate_page_add(struct page *page, struct list_head *pagelist,
975fc301289SChristoph Lameter 				unsigned long flags)
9766ce3c4c0SChristoph Lameter {
9776ce3c4c0SChristoph Lameter 	/*
978fc301289SChristoph Lameter 	 * Avoid migrating a page that is shared with others.
9796ce3c4c0SChristoph Lameter 	 */
98062695a84SNick Piggin 	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
98162695a84SNick Piggin 		if (!isolate_lru_page(page)) {
98262695a84SNick Piggin 			list_add_tail(&page->lru, pagelist);
9836d9c285aSKOSAKI Motohiro 			inc_zone_page_state(page, NR_ISOLATED_ANON +
9846d9c285aSKOSAKI Motohiro 					    page_is_file_cache(page));
98562695a84SNick Piggin 		}
98662695a84SNick Piggin 	}
9876ce3c4c0SChristoph Lameter }
9886ce3c4c0SChristoph Lameter 
989742755a1SChristoph Lameter static struct page *new_node_page(struct page *page, unsigned long node, int **x)
99095a402c3SChristoph Lameter {
991e2d8cf40SNaoya Horiguchi 	if (PageHuge(page))
992e2d8cf40SNaoya Horiguchi 		return alloc_huge_page_node(page_hstate(compound_head(page)),
993e2d8cf40SNaoya Horiguchi 					node);
994e2d8cf40SNaoya Horiguchi 	else
9956484eb3eSMel Gorman 		return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
99695a402c3SChristoph Lameter }
99795a402c3SChristoph Lameter 
9986ce3c4c0SChristoph Lameter /*
9997e2ab150SChristoph Lameter  * Migrate pages from one node to a target node.
10007e2ab150SChristoph Lameter  * Returns error or the number of pages not migrated.
10017e2ab150SChristoph Lameter  */
1002dbcb0f19SAdrian Bunk static int migrate_to_node(struct mm_struct *mm, int source, int dest,
1003dbcb0f19SAdrian Bunk 			   int flags)
10047e2ab150SChristoph Lameter {
10057e2ab150SChristoph Lameter 	nodemask_t nmask;
10067e2ab150SChristoph Lameter 	LIST_HEAD(pagelist);
10077e2ab150SChristoph Lameter 	int err = 0;
10087e2ab150SChristoph Lameter 
10097e2ab150SChristoph Lameter 	nodes_clear(nmask);
10107e2ab150SChristoph Lameter 	node_set(source, nmask);
10117e2ab150SChristoph Lameter 
101208270807SMinchan Kim 	/*
101308270807SMinchan Kim 	 * This does not "check" the range but isolates all pages that
101408270807SMinchan Kim 	 * need migration.  Between passing in the full user address
101508270807SMinchan Kim 	 * space range and MPOL_MF_DISCONTIG_OK, this call cannot fail.
101608270807SMinchan Kim 	 */
101708270807SMinchan Kim 	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
101898094945SNaoya Horiguchi 	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
10197e2ab150SChristoph Lameter 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
10207e2ab150SChristoph Lameter 
1021cf608ac1SMinchan Kim 	if (!list_empty(&pagelist)) {
102268711a74SDavid Rientjes 		err = migrate_pages(&pagelist, new_node_page, NULL, dest,
10239c620e2bSHugh Dickins 					MIGRATE_SYNC, MR_SYSCALL);
1024cf608ac1SMinchan Kim 		if (err)
1025e2d8cf40SNaoya Horiguchi 			putback_movable_pages(&pagelist);
1026cf608ac1SMinchan Kim 	}
102795a402c3SChristoph Lameter 
10287e2ab150SChristoph Lameter 	return err;
10297e2ab150SChristoph Lameter }
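/*
 * The user-space entry point that ultimately drives do_migrate_pages() and
 * migrate_to_node() is migrate_pages(2).  A sketch with hypothetical node
 * numbers (declarations from numactl's libnuma; error handling omitted):
 *
 *	#include <numaif.h>
 *
 *	unsigned long from_mask = 1UL << 0;	// move pages off node 0 ...
 *	unsigned long to_mask   = 1UL << 2;	// ... and onto node 2
 *
 *	// pid 0 means the calling process; the return value is the number
 *	// of pages that could not be moved, or -1 on error.
 *	long left = migrate_pages(0, 8 * sizeof(unsigned long),
 *				  &from_mask, &to_mask);
 */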
10307e2ab150SChristoph Lameter 
10317e2ab150SChristoph Lameter /*
10327e2ab150SChristoph Lameter  * Move pages between the two nodesets so as to preserve the physical
10337e2ab150SChristoph Lameter  * layout as much as possible.
103439743889SChristoph Lameter  *
103539743889SChristoph Lameter  * Returns the number of pages that could not be moved.
103639743889SChristoph Lameter  */
10370ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
10380ce72d4fSAndrew Morton 		     const nodemask_t *to, int flags)
103939743889SChristoph Lameter {
10407e2ab150SChristoph Lameter 	int busy = 0;
10410aedadf9SChristoph Lameter 	int err;
10427e2ab150SChristoph Lameter 	nodemask_t tmp;
104339743889SChristoph Lameter 
10440aedadf9SChristoph Lameter 	err = migrate_prep();
10450aedadf9SChristoph Lameter 	if (err)
10460aedadf9SChristoph Lameter 		return err;
10470aedadf9SChristoph Lameter 
104839743889SChristoph Lameter 	down_read(&mm->mmap_sem);
1049d4984711SChristoph Lameter 
10500ce72d4fSAndrew Morton 	err = migrate_vmas(mm, from, to, flags);
10517b2259b3SChristoph Lameter 	if (err)
10527b2259b3SChristoph Lameter 		goto out;
10537b2259b3SChristoph Lameter 
10547e2ab150SChristoph Lameter 	/*
10557e2ab150SChristoph Lameter 	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
10567e2ab150SChristoph Lameter 	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
10577e2ab150SChristoph Lameter 	 * bit in 'tmp', and return that <source, dest> pair for migration.
10587e2ab150SChristoph Lameter 	 * The pair of nodemasks 'to' and 'from' define the map.
10597e2ab150SChristoph Lameter 	 *
10607e2ab150SChristoph Lameter 	 * If no pair of bits is found that way, fallback to picking some
10617e2ab150SChristoph Lameter 	 * pair of 'source' and 'dest' bits that are not the same.  If the
10627e2ab150SChristoph Lameter 	 * 'source' and 'dest' bits are the same, this represents a node
10637e2ab150SChristoph Lameter 	 * that will be migrating to itself, so no pages need move.
10647e2ab150SChristoph Lameter 	 *
10657e2ab150SChristoph Lameter 	 * If no bits are left in 'tmp', or if all remaining bits left
10667e2ab150SChristoph Lameter 	 * in 'tmp' correspond to the same bit in 'to', return false
10677e2ab150SChristoph Lameter 	 * (nothing left to migrate).
10687e2ab150SChristoph Lameter 	 *
10697e2ab150SChristoph Lameter 	 * This lets us pick a pair of nodes to migrate between, such that
10707e2ab150SChristoph Lameter 	 * if possible the dest node is not already occupied by some other
10717e2ab150SChristoph Lameter 	 * source node, minimizing the risk of overloading the memory on a
10727e2ab150SChristoph Lameter 	 * node that would happen if we migrated incoming memory to a node
10737e2ab150SChristoph Lameter 	 * before migrating outgoing memory from that same node.
10747e2ab150SChristoph Lameter 	 *
10757e2ab150SChristoph Lameter 	 * A single scan of tmp is sufficient.  As we go, we remember the
10767e2ab150SChristoph Lameter 	 * most recent <s, d> pair that moved (s != d).  If we find a pair
10777e2ab150SChristoph Lameter 	 * that not only moved, but what's better, moved to an empty slot
10787e2ab150SChristoph Lameter 	 * (d is not set in tmp), then we break out then, with that pair.
1079ae0e47f0SJustin P. Mattock 	 * Otherwise, when we finish scanning tmp, we at least have the
10807e2ab150SChristoph Lameter 	 * most recent <s, d> pair that moved.  If we get all the way through
10817e2ab150SChristoph Lameter 	 * the scan of tmp without finding any node that moved, much less
10827e2ab150SChristoph Lameter 	 * moved to an empty node, then there is nothing left worth migrating.
10837e2ab150SChristoph Lameter 	 */
10847e2ab150SChristoph Lameter 
10850ce72d4fSAndrew Morton 	tmp = *from;
10867e2ab150SChristoph Lameter 	while (!nodes_empty(tmp)) {
10877e2ab150SChristoph Lameter 		int s, d;
1088b76ac7e7SJianguo Wu 		int source = NUMA_NO_NODE;
10897e2ab150SChristoph Lameter 		int dest = 0;
10907e2ab150SChristoph Lameter 
10917e2ab150SChristoph Lameter 		for_each_node_mask(s, tmp) {
10924a5b18ccSLarry Woodman 
10934a5b18ccSLarry Woodman 			/*
10944a5b18ccSLarry Woodman 			 * do_migrate_pages() tries to maintain the relative
10954a5b18ccSLarry Woodman 			 * node relationship of the pages established between
10964a5b18ccSLarry Woodman 			 * threads and memory areas.
10974a5b18ccSLarry Woodman 			 *
10984a5b18ccSLarry Woodman 			 * However if the number of source nodes is not equal to
10994a5b18ccSLarry Woodman 			 * the number of destination nodes we can not preserve
11004a5b18ccSLarry Woodman 			 * this node relative relationship.  In that case, skip
11014a5b18ccSLarry Woodman 			 * copying memory from a node that is in the destination
11024a5b18ccSLarry Woodman 			 * mask.
11034a5b18ccSLarry Woodman 			 *
11044a5b18ccSLarry Woodman 			 * Example: [2,3,4] -> [3,4,5] moves everything.
11054a5b18ccSLarry Woodman 			 *          [0-7] -> [3,4,5] moves only 0,1,2,6,7.
11064a5b18ccSLarry Woodman 			 */
11074a5b18ccSLarry Woodman 
11080ce72d4fSAndrew Morton 			if ((nodes_weight(*from) != nodes_weight(*to)) &&
11090ce72d4fSAndrew Morton 						(node_isset(s, *to)))
11104a5b18ccSLarry Woodman 				continue;
11114a5b18ccSLarry Woodman 
11120ce72d4fSAndrew Morton 			d = node_remap(s, *from, *to);
11137e2ab150SChristoph Lameter 			if (s == d)
11147e2ab150SChristoph Lameter 				continue;
11157e2ab150SChristoph Lameter 
11167e2ab150SChristoph Lameter 			source = s;	/* Node moved. Memorize */
11177e2ab150SChristoph Lameter 			dest = d;
11187e2ab150SChristoph Lameter 
11197e2ab150SChristoph Lameter 			/* dest not in remaining from nodes? */
11207e2ab150SChristoph Lameter 			if (!node_isset(dest, tmp))
11217e2ab150SChristoph Lameter 				break;
11227e2ab150SChristoph Lameter 		}
1123b76ac7e7SJianguo Wu 		if (source == NUMA_NO_NODE)
11247e2ab150SChristoph Lameter 			break;
11257e2ab150SChristoph Lameter 
11267e2ab150SChristoph Lameter 		node_clear(source, tmp);
11277e2ab150SChristoph Lameter 		err = migrate_to_node(mm, source, dest, flags);
11287e2ab150SChristoph Lameter 		if (err > 0)
11297e2ab150SChristoph Lameter 			busy += err;
11307e2ab150SChristoph Lameter 		if (err < 0)
11317e2ab150SChristoph Lameter 			break;
113239743889SChristoph Lameter 	}
11337b2259b3SChristoph Lameter out:
113439743889SChristoph Lameter 	up_read(&mm->mmap_sem);
11357e2ab150SChristoph Lameter 	if (err < 0)
11367e2ab150SChristoph Lameter 		return err;
11377e2ab150SChristoph Lameter 	return busy;
1138b20a3503SChristoph Lameter 
113939743889SChristoph Lameter }
114039743889SChristoph Lameter 
11413ad33b24SLee Schermerhorn /*
11423ad33b24SLee Schermerhorn  * Allocate a new page for page migration based on vma policy.
1143d05f0cdcSHugh Dickins  * Start by assuming the page is mapped by the same vma that contains @start.
11443ad33b24SLee Schermerhorn  * Search forward from there, if not.  N.B., this assumes that the
11453ad33b24SLee Schermerhorn  * list of pages handed to migrate_pages()--which is how we get here--
11463ad33b24SLee Schermerhorn  * is in virtual address order.
11473ad33b24SLee Schermerhorn  */
1148d05f0cdcSHugh Dickins static struct page *new_page(struct page *page, unsigned long start, int **x)
114995a402c3SChristoph Lameter {
1150d05f0cdcSHugh Dickins 	struct vm_area_struct *vma;
11513ad33b24SLee Schermerhorn 	unsigned long uninitialized_var(address);
115295a402c3SChristoph Lameter 
1153d05f0cdcSHugh Dickins 	vma = find_vma(current->mm, start);
11543ad33b24SLee Schermerhorn 	while (vma) {
11553ad33b24SLee Schermerhorn 		address = page_address_in_vma(page, vma);
11563ad33b24SLee Schermerhorn 		if (address != -EFAULT)
11573ad33b24SLee Schermerhorn 			break;
11583ad33b24SLee Schermerhorn 		vma = vma->vm_next;
11593ad33b24SLee Schermerhorn 	}
11603ad33b24SLee Schermerhorn 
116111c731e8SWanpeng Li 	if (PageHuge(page)) {
1162cc81717eSMichal Hocko 		BUG_ON(!vma);
116374060e4dSNaoya Horiguchi 		return alloc_huge_page_noerr(vma, address, 1);
116411c731e8SWanpeng Li 	}
116511c731e8SWanpeng Li 	/*
116611c731e8SWanpeng Li 	 * if !vma, alloc_page_vma() will use task or system default policy
116711c731e8SWanpeng Li 	 */
11683ad33b24SLee Schermerhorn 	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
116995a402c3SChristoph Lameter }
1170b20a3503SChristoph Lameter #else
1171b20a3503SChristoph Lameter 
1172b20a3503SChristoph Lameter static void migrate_page_add(struct page *page, struct list_head *pagelist,
1173b20a3503SChristoph Lameter 				unsigned long flags)
1174b20a3503SChristoph Lameter {
1175b20a3503SChristoph Lameter }
1176b20a3503SChristoph Lameter 
11770ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
11780ce72d4fSAndrew Morton 		     const nodemask_t *to, int flags)
1179b20a3503SChristoph Lameter {
1180b20a3503SChristoph Lameter 	return -ENOSYS;
1181b20a3503SChristoph Lameter }
118295a402c3SChristoph Lameter 
1183d05f0cdcSHugh Dickins static struct page *new_page(struct page *page, unsigned long start, int **x)
118495a402c3SChristoph Lameter {
118595a402c3SChristoph Lameter 	return NULL;
118695a402c3SChristoph Lameter }
1187b20a3503SChristoph Lameter #endif
1188b20a3503SChristoph Lameter 
1189dbcb0f19SAdrian Bunk static long do_mbind(unsigned long start, unsigned long len,
1190028fec41SDavid Rientjes 		     unsigned short mode, unsigned short mode_flags,
1191028fec41SDavid Rientjes 		     nodemask_t *nmask, unsigned long flags)
11926ce3c4c0SChristoph Lameter {
11936ce3c4c0SChristoph Lameter 	struct mm_struct *mm = current->mm;
11946ce3c4c0SChristoph Lameter 	struct mempolicy *new;
11956ce3c4c0SChristoph Lameter 	unsigned long end;
11966ce3c4c0SChristoph Lameter 	int err;
11976ce3c4c0SChristoph Lameter 	LIST_HEAD(pagelist);
11986ce3c4c0SChristoph Lameter 
1199b24f53a0SLee Schermerhorn 	if (flags & ~(unsigned long)MPOL_MF_VALID)
12006ce3c4c0SChristoph Lameter 		return -EINVAL;
120174c00241SChristoph Lameter 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
12026ce3c4c0SChristoph Lameter 		return -EPERM;
12036ce3c4c0SChristoph Lameter 
12046ce3c4c0SChristoph Lameter 	if (start & ~PAGE_MASK)
12056ce3c4c0SChristoph Lameter 		return -EINVAL;
12066ce3c4c0SChristoph Lameter 
12076ce3c4c0SChristoph Lameter 	if (mode == MPOL_DEFAULT)
12086ce3c4c0SChristoph Lameter 		flags &= ~MPOL_MF_STRICT;
12096ce3c4c0SChristoph Lameter 
12106ce3c4c0SChristoph Lameter 	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
12116ce3c4c0SChristoph Lameter 	end = start + len;
12126ce3c4c0SChristoph Lameter 
12136ce3c4c0SChristoph Lameter 	if (end < start)
12146ce3c4c0SChristoph Lameter 		return -EINVAL;
12156ce3c4c0SChristoph Lameter 	if (end == start)
12166ce3c4c0SChristoph Lameter 		return 0;
12176ce3c4c0SChristoph Lameter 
1218028fec41SDavid Rientjes 	new = mpol_new(mode, mode_flags, nmask);
12196ce3c4c0SChristoph Lameter 	if (IS_ERR(new))
12206ce3c4c0SChristoph Lameter 		return PTR_ERR(new);
12216ce3c4c0SChristoph Lameter 
1222b24f53a0SLee Schermerhorn 	if (flags & MPOL_MF_LAZY)
1223b24f53a0SLee Schermerhorn 		new->flags |= MPOL_F_MOF;
1224b24f53a0SLee Schermerhorn 
12256ce3c4c0SChristoph Lameter 	/*
12266ce3c4c0SChristoph Lameter 	 * If we are using the default policy then operating
12276ce3c4c0SChristoph Lameter 	 * on discontinuous address spaces is okay after all.
12286ce3c4c0SChristoph Lameter 	 */
12296ce3c4c0SChristoph Lameter 	if (!new)
12306ce3c4c0SChristoph Lameter 		flags |= MPOL_MF_DISCONTIG_OK;
12316ce3c4c0SChristoph Lameter 
1232028fec41SDavid Rientjes 	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1233028fec41SDavid Rientjes 		 start, start + len, mode, mode_flags,
123400ef2d2fSDavid Rientjes 		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
12356ce3c4c0SChristoph Lameter 
12360aedadf9SChristoph Lameter 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
12370aedadf9SChristoph Lameter 
12380aedadf9SChristoph Lameter 		err = migrate_prep();
12390aedadf9SChristoph Lameter 		if (err)
1240b05ca738SKOSAKI Motohiro 			goto mpol_out;
12410aedadf9SChristoph Lameter 	}
12424bfc4495SKAMEZAWA Hiroyuki 	{
12434bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
12444bfc4495SKAMEZAWA Hiroyuki 		if (scratch) {
12456ce3c4c0SChristoph Lameter 			down_write(&mm->mmap_sem);
124658568d2aSMiao Xie 			task_lock(current);
12474bfc4495SKAMEZAWA Hiroyuki 			err = mpol_set_nodemask(new, nmask, scratch);
124858568d2aSMiao Xie 			task_unlock(current);
12494bfc4495SKAMEZAWA Hiroyuki 			if (err)
125058568d2aSMiao Xie 				up_write(&mm->mmap_sem);
12514bfc4495SKAMEZAWA Hiroyuki 		} else
12524bfc4495SKAMEZAWA Hiroyuki 			err = -ENOMEM;
12534bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
12544bfc4495SKAMEZAWA Hiroyuki 	}
1255b05ca738SKOSAKI Motohiro 	if (err)
1256b05ca738SKOSAKI Motohiro 		goto mpol_out;
1257b05ca738SKOSAKI Motohiro 
1258d05f0cdcSHugh Dickins 	err = queue_pages_range(mm, start, end, nmask,
12596ce3c4c0SChristoph Lameter 			  flags | MPOL_MF_INVERT, &pagelist);
1260d05f0cdcSHugh Dickins 	if (!err)
12619d8cebd4SKOSAKI Motohiro 		err = mbind_range(mm, start, end, new);
12627e2ab150SChristoph Lameter 
1263b24f53a0SLee Schermerhorn 	if (!err) {
1264b24f53a0SLee Schermerhorn 		int nr_failed = 0;
1265b24f53a0SLee Schermerhorn 
1266cf608ac1SMinchan Kim 		if (!list_empty(&pagelist)) {
1267b24f53a0SLee Schermerhorn 			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1268d05f0cdcSHugh Dickins 			nr_failed = migrate_pages(&pagelist, new_page, NULL,
1269d05f0cdcSHugh Dickins 				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
1270cf608ac1SMinchan Kim 			if (nr_failed)
127174060e4dSNaoya Horiguchi 				putback_movable_pages(&pagelist);
1272cf608ac1SMinchan Kim 		}
12736ce3c4c0SChristoph Lameter 
1274b24f53a0SLee Schermerhorn 		if (nr_failed && (flags & MPOL_MF_STRICT))
12756ce3c4c0SChristoph Lameter 			err = -EIO;
1276ab8a3e14SKOSAKI Motohiro 	} else
1277b0e5fd73SJoonsoo Kim 		putback_movable_pages(&pagelist);
1278b20a3503SChristoph Lameter 
12796ce3c4c0SChristoph Lameter 	up_write(&mm->mmap_sem);
1280b05ca738SKOSAKI Motohiro  mpol_out:
1281f0be3d32SLee Schermerhorn 	mpol_put(new);
12826ce3c4c0SChristoph Lameter 	return err;
12836ce3c4c0SChristoph Lameter }
12846ce3c4c0SChristoph Lameter 
128539743889SChristoph Lameter /*
12868bccd85fSChristoph Lameter  * User space interface with variable sized bitmaps for nodelists.
12878bccd85fSChristoph Lameter  */
12888bccd85fSChristoph Lameter 
12898bccd85fSChristoph Lameter /* Copy a node mask from user space. */
129039743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
12918bccd85fSChristoph Lameter 		     unsigned long maxnode)
12928bccd85fSChristoph Lameter {
12938bccd85fSChristoph Lameter 	unsigned long k;
12948bccd85fSChristoph Lameter 	unsigned long nlongs;
12958bccd85fSChristoph Lameter 	unsigned long endmask;
12968bccd85fSChristoph Lameter 
12978bccd85fSChristoph Lameter 	--maxnode;
12988bccd85fSChristoph Lameter 	nodes_clear(*nodes);
12998bccd85fSChristoph Lameter 	if (maxnode == 0 || !nmask)
13008bccd85fSChristoph Lameter 		return 0;
1301a9c930baSAndi Kleen 	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1302636f13c1SChris Wright 		return -EINVAL;
13038bccd85fSChristoph Lameter 
13048bccd85fSChristoph Lameter 	nlongs = BITS_TO_LONGS(maxnode);
13058bccd85fSChristoph Lameter 	if ((maxnode % BITS_PER_LONG) == 0)
13068bccd85fSChristoph Lameter 		endmask = ~0UL;
13078bccd85fSChristoph Lameter 	else
13088bccd85fSChristoph Lameter 		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
13098bccd85fSChristoph Lameter 
13108bccd85fSChristoph Lameter 	/* When the user specified more nodes than supported, just check
13118bccd85fSChristoph Lameter 	   if the unsupported part is all zero. */
13128bccd85fSChristoph Lameter 	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
13138bccd85fSChristoph Lameter 		if (nlongs > PAGE_SIZE/sizeof(long))
13148bccd85fSChristoph Lameter 			return -EINVAL;
13158bccd85fSChristoph Lameter 		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
13168bccd85fSChristoph Lameter 			unsigned long t;
13178bccd85fSChristoph Lameter 			if (get_user(t, nmask + k))
13188bccd85fSChristoph Lameter 				return -EFAULT;
13198bccd85fSChristoph Lameter 			if (k == nlongs - 1) {
13208bccd85fSChristoph Lameter 				if (t & endmask)
13218bccd85fSChristoph Lameter 					return -EINVAL;
13228bccd85fSChristoph Lameter 			} else if (t)
13238bccd85fSChristoph Lameter 				return -EINVAL;
13248bccd85fSChristoph Lameter 		}
13258bccd85fSChristoph Lameter 		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
13268bccd85fSChristoph Lameter 		endmask = ~0UL;
13278bccd85fSChristoph Lameter 	}
13288bccd85fSChristoph Lameter 
13298bccd85fSChristoph Lameter 	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
13308bccd85fSChristoph Lameter 		return -EFAULT;
13318bccd85fSChristoph Lameter 	nodes_addr(*nodes)[nlongs-1] &= endmask;
13328bccd85fSChristoph Lameter 	return 0;
13338bccd85fSChristoph Lameter }
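/*
 * Worked example (illustrative only, values assumed): a call with
 * maxnode == 5 considers 4 bits after the --maxnode above, giving
 *
 *	nlongs  = BITS_TO_LONGS(4);			(== 1)
 *	endmask = (1UL << (4 % BITS_PER_LONG)) - 1;	(== 0xf)
 *
 * so only nodes 0-3 of the user-supplied bitmap survive the final
 * "nodes_addr(*nodes)[nlongs-1] &= endmask" masking.
 */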
13348bccd85fSChristoph Lameter 
13358bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */
13368bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
13378bccd85fSChristoph Lameter 			      nodemask_t *nodes)
13388bccd85fSChristoph Lameter {
13398bccd85fSChristoph Lameter 	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
13408bccd85fSChristoph Lameter 	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
13418bccd85fSChristoph Lameter 
13428bccd85fSChristoph Lameter 	if (copy > nbytes) {
13438bccd85fSChristoph Lameter 		if (copy > PAGE_SIZE)
13448bccd85fSChristoph Lameter 			return -EINVAL;
13458bccd85fSChristoph Lameter 		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
13468bccd85fSChristoph Lameter 			return -EFAULT;
13478bccd85fSChristoph Lameter 		copy = nbytes;
13488bccd85fSChristoph Lameter 	}
13498bccd85fSChristoph Lameter 	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
13508bccd85fSChristoph Lameter }
13518bccd85fSChristoph Lameter 
1352938bb9f5SHeiko Carstens SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1353f7f28ca9SRasmus Villemoes 		unsigned long, mode, const unsigned long __user *, nmask,
1354938bb9f5SHeiko Carstens 		unsigned long, maxnode, unsigned, flags)
13558bccd85fSChristoph Lameter {
13568bccd85fSChristoph Lameter 	nodemask_t nodes;
13578bccd85fSChristoph Lameter 	int err;
1358028fec41SDavid Rientjes 	unsigned short mode_flags;
13598bccd85fSChristoph Lameter 
1360028fec41SDavid Rientjes 	mode_flags = mode & MPOL_MODE_FLAGS;
1361028fec41SDavid Rientjes 	mode &= ~MPOL_MODE_FLAGS;
1362a3b51e01SDavid Rientjes 	if (mode >= MPOL_MAX)
1363a3b51e01SDavid Rientjes 		return -EINVAL;
13644c50bc01SDavid Rientjes 	if ((mode_flags & MPOL_F_STATIC_NODES) &&
13654c50bc01SDavid Rientjes 	    (mode_flags & MPOL_F_RELATIVE_NODES))
13664c50bc01SDavid Rientjes 		return -EINVAL;
13678bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
13688bccd85fSChristoph Lameter 	if (err)
13698bccd85fSChristoph Lameter 		return err;
1370028fec41SDavid Rientjes 	return do_mbind(start, len, mode, mode_flags, &nodes, flags);
13718bccd85fSChristoph Lameter }
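/*
 * Illustrative user-space sketch (assumes libnuma's <numaif.h> wrapper,
 * an addr/len obtained from an earlier mmap(), and that nodes 0 and 1
 * exist; not part of this file): bind an existing anonymous mapping to
 * nodes 0-1 and move already-faulted pages there.
 *
 *	#include <numaif.h>			(link with -lnuma)
 *
 *	unsigned long nodes = (1UL << 0) | (1UL << 1);
 *
 *	if (mbind(addr, len, MPOL_BIND, &nodes, sizeof(nodes) * 8,
 *		  MPOL_MF_MOVE | MPOL_MF_STRICT))
 *		perror("mbind");
 *
 * "addr" must be page aligned ("len" is rounded up), matching the
 * -EINVAL checks in do_mbind() above.
 */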
13728bccd85fSChristoph Lameter 
13738bccd85fSChristoph Lameter /* Set the process memory policy */
137423c8902dSRasmus Villemoes SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1375938bb9f5SHeiko Carstens 		unsigned long, maxnode)
13768bccd85fSChristoph Lameter {
13778bccd85fSChristoph Lameter 	int err;
13788bccd85fSChristoph Lameter 	nodemask_t nodes;
1379028fec41SDavid Rientjes 	unsigned short flags;
13808bccd85fSChristoph Lameter 
1381028fec41SDavid Rientjes 	flags = mode & MPOL_MODE_FLAGS;
1382028fec41SDavid Rientjes 	mode &= ~MPOL_MODE_FLAGS;
1383028fec41SDavid Rientjes 	if ((unsigned int)mode >= MPOL_MAX)
13848bccd85fSChristoph Lameter 		return -EINVAL;
13854c50bc01SDavid Rientjes 	if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
13864c50bc01SDavid Rientjes 		return -EINVAL;
13878bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
13888bccd85fSChristoph Lameter 	if (err)
13898bccd85fSChristoph Lameter 		return err;
1390028fec41SDavid Rientjes 	return do_set_mempolicy(mode, flags, &nodes);
13918bccd85fSChristoph Lameter }
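/*
 * Illustrative user-space sketch (assumes libnuma's <numaif.h> and that
 * nodes 0-3 exist; not part of this file): interleave all future
 * allocations of the calling task across nodes 0-3.
 *
 *	unsigned long nodes = 0xfUL;
 *
 *	if (set_mempolicy(MPOL_INTERLEAVE, &nodes, sizeof(nodes) * 8))
 *		perror("set_mempolicy");
 *
 * Passing MPOL_DEFAULT with a NULL nodemask reverts to the system
 * default policy.
 */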
13928bccd85fSChristoph Lameter 
1393938bb9f5SHeiko Carstens SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1394938bb9f5SHeiko Carstens 		const unsigned long __user *, old_nodes,
1395938bb9f5SHeiko Carstens 		const unsigned long __user *, new_nodes)
139639743889SChristoph Lameter {
1397c69e8d9cSDavid Howells 	const struct cred *cred = current_cred(), *tcred;
1398596d7cfaSKOSAKI Motohiro 	struct mm_struct *mm = NULL;
139939743889SChristoph Lameter 	struct task_struct *task;
140039743889SChristoph Lameter 	nodemask_t task_nodes;
140139743889SChristoph Lameter 	int err;
1402596d7cfaSKOSAKI Motohiro 	nodemask_t *old;
1403596d7cfaSKOSAKI Motohiro 	nodemask_t *new;
1404596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH(scratch);
140539743889SChristoph Lameter 
1406596d7cfaSKOSAKI Motohiro 	if (!scratch)
1407596d7cfaSKOSAKI Motohiro 		return -ENOMEM;
140839743889SChristoph Lameter 
1409596d7cfaSKOSAKI Motohiro 	old = &scratch->mask1;
1410596d7cfaSKOSAKI Motohiro 	new = &scratch->mask2;
1411596d7cfaSKOSAKI Motohiro 
1412596d7cfaSKOSAKI Motohiro 	err = get_nodes(old, old_nodes, maxnode);
141339743889SChristoph Lameter 	if (err)
1414596d7cfaSKOSAKI Motohiro 		goto out;
1415596d7cfaSKOSAKI Motohiro 
1416596d7cfaSKOSAKI Motohiro 	err = get_nodes(new, new_nodes, maxnode);
1417596d7cfaSKOSAKI Motohiro 	if (err)
1418596d7cfaSKOSAKI Motohiro 		goto out;
141939743889SChristoph Lameter 
142039743889SChristoph Lameter 	/* Find the mm_struct */
142155cfaa3cSZeng Zhaoming 	rcu_read_lock();
1422228ebcbeSPavel Emelyanov 	task = pid ? find_task_by_vpid(pid) : current;
142339743889SChristoph Lameter 	if (!task) {
142455cfaa3cSZeng Zhaoming 		rcu_read_unlock();
1425596d7cfaSKOSAKI Motohiro 		err = -ESRCH;
1426596d7cfaSKOSAKI Motohiro 		goto out;
142739743889SChristoph Lameter 	}
14283268c63eSChristoph Lameter 	get_task_struct(task);
142939743889SChristoph Lameter 
1430596d7cfaSKOSAKI Motohiro 	err = -EINVAL;
143139743889SChristoph Lameter 
143239743889SChristoph Lameter 	/*
143339743889SChristoph Lameter 	 * Check if this process has the right to modify the specified
143439743889SChristoph Lameter 	 * process. The right exists if the process has administrative
14357f927fccSAlexey Dobriyan 	 * capabilities, superuser privileges or the same
143639743889SChristoph Lameter 	 * userid as the target process.
143739743889SChristoph Lameter 	 */
1438c69e8d9cSDavid Howells 	tcred = __task_cred(task);
1439b38a86ebSEric W. Biederman 	if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
1440b38a86ebSEric W. Biederman 	    !uid_eq(cred->uid,  tcred->suid) && !uid_eq(cred->uid,  tcred->uid) &&
144174c00241SChristoph Lameter 	    !capable(CAP_SYS_NICE)) {
1442c69e8d9cSDavid Howells 		rcu_read_unlock();
144339743889SChristoph Lameter 		err = -EPERM;
14443268c63eSChristoph Lameter 		goto out_put;
144539743889SChristoph Lameter 	}
1446c69e8d9cSDavid Howells 	rcu_read_unlock();
144739743889SChristoph Lameter 
144839743889SChristoph Lameter 	task_nodes = cpuset_mems_allowed(task);
144939743889SChristoph Lameter 	/* Is the user allowed to access the target nodes? */
1450596d7cfaSKOSAKI Motohiro 	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
145139743889SChristoph Lameter 		err = -EPERM;
14523268c63eSChristoph Lameter 		goto out_put;
145339743889SChristoph Lameter 	}
145439743889SChristoph Lameter 
145501f13bd6SLai Jiangshan 	if (!nodes_subset(*new, node_states[N_MEMORY])) {
14563b42d28bSChristoph Lameter 		err = -EINVAL;
14573268c63eSChristoph Lameter 		goto out_put;
14583b42d28bSChristoph Lameter 	}
14593b42d28bSChristoph Lameter 
146086c3a764SDavid Quigley 	err = security_task_movememory(task);
146186c3a764SDavid Quigley 	if (err)
14623268c63eSChristoph Lameter 		goto out_put;
146386c3a764SDavid Quigley 
14643268c63eSChristoph Lameter 	mm = get_task_mm(task);
14653268c63eSChristoph Lameter 	put_task_struct(task);
1466f2a9ef88SSasha Levin 
1467f2a9ef88SSasha Levin 	if (!mm) {
1468f2a9ef88SSasha Levin 		err = -EINVAL;
1469f2a9ef88SSasha Levin 		goto out;
1470f2a9ef88SSasha Levin 	}
1471f2a9ef88SSasha Levin 
1472596d7cfaSKOSAKI Motohiro 	err = do_migrate_pages(mm, old, new,
147374c00241SChristoph Lameter 		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
14743268c63eSChristoph Lameter 
147539743889SChristoph Lameter 	mmput(mm);
14763268c63eSChristoph Lameter out:
1477596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH_FREE(scratch);
1478596d7cfaSKOSAKI Motohiro 
147939743889SChristoph Lameter 	return err;
14803268c63eSChristoph Lameter 
14813268c63eSChristoph Lameter out_put:
14823268c63eSChristoph Lameter 	put_task_struct(task);
14833268c63eSChristoph Lameter 	goto out;
14843268c63eSChristoph Lameter 
148539743889SChristoph Lameter }
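/*
 * Illustrative user-space sketch (assumes libnuma's <numaif.h> and that
 * nodes 0 and 2 exist; not part of this file): move all of task "pid"'s
 * pages that currently sit on node 0 over to node 2.
 *
 *	unsigned long from_mask = 1UL << 0, to_mask = 1UL << 2;
 *	long ret;
 *
 *	ret = migrate_pages(pid, sizeof(from_mask) * 8, &from_mask, &to_mask);
 *	if (ret < 0)
 *		perror("migrate_pages");
 *	else if (ret > 0)
 *		fprintf(stderr, "%ld pages were not moved\n", ret);
 *
 * A positive return value is the number of pages that could not be
 * moved, mirroring do_migrate_pages() above.
 */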
148639743889SChristoph Lameter 
148739743889SChristoph Lameter 
14888bccd85fSChristoph Lameter /* Retrieve NUMA policy */
1489938bb9f5SHeiko Carstens SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1490938bb9f5SHeiko Carstens 		unsigned long __user *, nmask, unsigned long, maxnode,
1491938bb9f5SHeiko Carstens 		unsigned long, addr, unsigned long, flags)
14928bccd85fSChristoph Lameter {
1493dbcb0f19SAdrian Bunk 	int err;
1494dbcb0f19SAdrian Bunk 	int uninitialized_var(pval);
14958bccd85fSChristoph Lameter 	nodemask_t nodes;
14968bccd85fSChristoph Lameter 
14978bccd85fSChristoph Lameter 	if (nmask != NULL && maxnode < MAX_NUMNODES)
14988bccd85fSChristoph Lameter 		return -EINVAL;
14998bccd85fSChristoph Lameter 
15008bccd85fSChristoph Lameter 	err = do_get_mempolicy(&pval, &nodes, addr, flags);
15018bccd85fSChristoph Lameter 
15028bccd85fSChristoph Lameter 	if (err)
15038bccd85fSChristoph Lameter 		return err;
15048bccd85fSChristoph Lameter 
15058bccd85fSChristoph Lameter 	if (policy && put_user(pval, policy))
15068bccd85fSChristoph Lameter 		return -EFAULT;
15078bccd85fSChristoph Lameter 
15088bccd85fSChristoph Lameter 	if (nmask)
15098bccd85fSChristoph Lameter 		err = copy_nodes_to_user(nmask, maxnode, &nodes);
15108bccd85fSChristoph Lameter 
15118bccd85fSChristoph Lameter 	return err;
15128bccd85fSChristoph Lameter }
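/*
 * Illustrative user-space sketch (assumes libnuma's <numaif.h>; not part
 * of this file): query which node backs the page at "addr".
 *
 *	int node;
 *
 *	if (get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR))
 *		perror("get_mempolicy");
 *	else
 *		printf("page at %p is on node %d\n", addr, node);
 *
 * Without MPOL_F_ADDR the call reports the calling task's own policy
 * instead; see do_get_mempolicy() for the flag handling.
 */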
15138bccd85fSChristoph Lameter 
15141da177e4SLinus Torvalds #ifdef CONFIG_COMPAT
15151da177e4SLinus Torvalds 
1516c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1517c93e0f6cSHeiko Carstens 		       compat_ulong_t __user *, nmask,
1518c93e0f6cSHeiko Carstens 		       compat_ulong_t, maxnode,
1519c93e0f6cSHeiko Carstens 		       compat_ulong_t, addr, compat_ulong_t, flags)
15201da177e4SLinus Torvalds {
15211da177e4SLinus Torvalds 	long err;
15221da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
15231da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
15241da177e4SLinus Torvalds 	DECLARE_BITMAP(bm, MAX_NUMNODES);
15251da177e4SLinus Torvalds 
15261da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
15271da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
15281da177e4SLinus Torvalds 
15291da177e4SLinus Torvalds 	if (nmask)
15301da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
15311da177e4SLinus Torvalds 
15321da177e4SLinus Torvalds 	err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
15331da177e4SLinus Torvalds 
15341da177e4SLinus Torvalds 	if (!err && nmask) {
15352bbff6c7SKAMEZAWA Hiroyuki 		unsigned long copy_size;
15362bbff6c7SKAMEZAWA Hiroyuki 		copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
15372bbff6c7SKAMEZAWA Hiroyuki 		err = copy_from_user(bm, nm, copy_size);
15381da177e4SLinus Torvalds 		/* ensure entire bitmap is zeroed */
15391da177e4SLinus Torvalds 		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
15401da177e4SLinus Torvalds 		err |= compat_put_bitmap(nmask, bm, nr_bits);
15411da177e4SLinus Torvalds 	}
15421da177e4SLinus Torvalds 
15431da177e4SLinus Torvalds 	return err;
15441da177e4SLinus Torvalds }
15451da177e4SLinus Torvalds 
1546c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1547c93e0f6cSHeiko Carstens 		       compat_ulong_t, maxnode)
15481da177e4SLinus Torvalds {
15491da177e4SLinus Torvalds 	long err = 0;
15501da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
15511da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
15521da177e4SLinus Torvalds 	DECLARE_BITMAP(bm, MAX_NUMNODES);
15531da177e4SLinus Torvalds 
15541da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
15551da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
15561da177e4SLinus Torvalds 
15571da177e4SLinus Torvalds 	if (nmask) {
15581da177e4SLinus Torvalds 		err = compat_get_bitmap(bm, nmask, nr_bits);
15591da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
15601da177e4SLinus Torvalds 		err |= copy_to_user(nm, bm, alloc_size);
15611da177e4SLinus Torvalds 	}
15621da177e4SLinus Torvalds 
15631da177e4SLinus Torvalds 	if (err)
15641da177e4SLinus Torvalds 		return -EFAULT;
15651da177e4SLinus Torvalds 
15661da177e4SLinus Torvalds 	return sys_set_mempolicy(mode, nm, nr_bits+1);
15671da177e4SLinus Torvalds }
15681da177e4SLinus Torvalds 
1569c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1570c93e0f6cSHeiko Carstens 		       compat_ulong_t, mode, compat_ulong_t __user *, nmask,
1571c93e0f6cSHeiko Carstens 		       compat_ulong_t, maxnode, compat_ulong_t, flags)
15721da177e4SLinus Torvalds {
15731da177e4SLinus Torvalds 	long err = 0;
15741da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
15751da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
1576dfcd3c0dSAndi Kleen 	nodemask_t bm;
15771da177e4SLinus Torvalds 
15781da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
15791da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
15801da177e4SLinus Torvalds 
15811da177e4SLinus Torvalds 	if (nmask) {
1582dfcd3c0dSAndi Kleen 		err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
15831da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
1584dfcd3c0dSAndi Kleen 		err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
15851da177e4SLinus Torvalds 	}
15861da177e4SLinus Torvalds 
15871da177e4SLinus Torvalds 	if (err)
15881da177e4SLinus Torvalds 		return -EFAULT;
15891da177e4SLinus Torvalds 
15901da177e4SLinus Torvalds 	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
15911da177e4SLinus Torvalds }
15921da177e4SLinus Torvalds 
15931da177e4SLinus Torvalds #endif
15941da177e4SLinus Torvalds 
159574d2c3a0SOleg Nesterov struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
159674d2c3a0SOleg Nesterov 						unsigned long addr)
15971da177e4SLinus Torvalds {
15988d90274bSOleg Nesterov 	struct mempolicy *pol = NULL;
15991da177e4SLinus Torvalds 
16001da177e4SLinus Torvalds 	if (vma) {
1601480eccf9SLee Schermerhorn 		if (vma->vm_ops && vma->vm_ops->get_policy) {
16028d90274bSOleg Nesterov 			pol = vma->vm_ops->get_policy(vma, addr);
160300442ad0SMel Gorman 		} else if (vma->vm_policy) {
16041da177e4SLinus Torvalds 			pol = vma->vm_policy;
160500442ad0SMel Gorman 
160600442ad0SMel Gorman 			/*
160700442ad0SMel Gorman 			 * shmem_alloc_page() passes MPOL_F_SHARED policy with
160800442ad0SMel Gorman 			 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
160900442ad0SMel Gorman 			 * count on these policies which will be dropped by
161000442ad0SMel Gorman 			 * mpol_cond_put() later
161100442ad0SMel Gorman 			 */
161200442ad0SMel Gorman 			if (mpol_needs_cond_ref(pol))
161300442ad0SMel Gorman 				mpol_get(pol);
161400442ad0SMel Gorman 		}
16151da177e4SLinus Torvalds 	}
1616f15ca78eSOleg Nesterov 
161774d2c3a0SOleg Nesterov 	return pol;
161874d2c3a0SOleg Nesterov }
161974d2c3a0SOleg Nesterov 
162074d2c3a0SOleg Nesterov /*
1621dd6eecb9SOleg Nesterov  * get_vma_policy(@vma, @addr)
162274d2c3a0SOleg Nesterov  * @vma: virtual memory area whose policy is sought
162374d2c3a0SOleg Nesterov  * @addr: address in @vma for shared policy lookup
162474d2c3a0SOleg Nesterov  *
162574d2c3a0SOleg Nesterov  * Returns effective policy for a VMA at specified address.
1626dd6eecb9SOleg Nesterov  * Falls back to current->mempolicy or system default policy, as necessary.
162774d2c3a0SOleg Nesterov  * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
162874d2c3a0SOleg Nesterov  * count--added by the get_policy() vm_op, as appropriate--to protect against
162974d2c3a0SOleg Nesterov  * freeing by another task.  It is the caller's responsibility to free the
163074d2c3a0SOleg Nesterov  * extra reference for shared policies.
163174d2c3a0SOleg Nesterov  */
1632dd6eecb9SOleg Nesterov static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1633dd6eecb9SOleg Nesterov 						unsigned long addr)
163474d2c3a0SOleg Nesterov {
163574d2c3a0SOleg Nesterov 	struct mempolicy *pol = __get_vma_policy(vma, addr);
163674d2c3a0SOleg Nesterov 
16378d90274bSOleg Nesterov 	if (!pol)
1638dd6eecb9SOleg Nesterov 		pol = get_task_policy(current);
16398d90274bSOleg Nesterov 
16401da177e4SLinus Torvalds 	return pol;
16411da177e4SLinus Torvalds }
16421da177e4SLinus Torvalds 
16436b6482bbSOleg Nesterov bool vma_policy_mof(struct vm_area_struct *vma)
1644fc314724SMel Gorman {
16456b6482bbSOleg Nesterov 	struct mempolicy *pol;
1646f15ca78eSOleg Nesterov 
1647fc314724SMel Gorman 	if (vma->vm_ops && vma->vm_ops->get_policy) {
1648fc314724SMel Gorman 		bool ret = false;
1649fc314724SMel Gorman 
1650fc314724SMel Gorman 		pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1651fc314724SMel Gorman 		if (pol && (pol->flags & MPOL_F_MOF))
1652fc314724SMel Gorman 			ret = true;
1653fc314724SMel Gorman 		mpol_cond_put(pol);
1654fc314724SMel Gorman 
1655fc314724SMel Gorman 		return ret;
16568d90274bSOleg Nesterov 	}
16578d90274bSOleg Nesterov 
1658fc314724SMel Gorman 	pol = vma->vm_policy;
16598d90274bSOleg Nesterov 	if (!pol)
16606b6482bbSOleg Nesterov 		pol = get_task_policy(current);
1661fc314724SMel Gorman 
1662fc314724SMel Gorman 	return pol->flags & MPOL_F_MOF;
1663fc314724SMel Gorman }
1664fc314724SMel Gorman 
1665d3eb1570SLai Jiangshan static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1666d3eb1570SLai Jiangshan {
1667d3eb1570SLai Jiangshan 	enum zone_type dynamic_policy_zone = policy_zone;
1668d3eb1570SLai Jiangshan 
1669d3eb1570SLai Jiangshan 	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1670d3eb1570SLai Jiangshan 
1671d3eb1570SLai Jiangshan 	/*
1672d3eb1570SLai Jiangshan 	 * If policy->v.nodes has only movable memory, we apply the
1673d3eb1570SLai Jiangshan 	 * policy only when gfp_zone(gfp) == ZONE_MOVABLE.
1674d3eb1570SLai Jiangshan 	 *
1675d3eb1570SLai Jiangshan 	 * policy->v.nodes is intersected with node_states[N_MEMORY],
1676d3eb1570SLai Jiangshan 	 * so if the following test fails, it implies that
1677d3eb1570SLai Jiangshan 	 * policy->v.nodes has movable memory only.
1678d3eb1570SLai Jiangshan 	 */
1679d3eb1570SLai Jiangshan 	if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1680d3eb1570SLai Jiangshan 		dynamic_policy_zone = ZONE_MOVABLE;
1681d3eb1570SLai Jiangshan 
1682d3eb1570SLai Jiangshan 	return zone >= dynamic_policy_zone;
1683d3eb1570SLai Jiangshan }
1684d3eb1570SLai Jiangshan 
168552cd3b07SLee Schermerhorn /*
168652cd3b07SLee Schermerhorn  * Return a nodemask representing a mempolicy for filtering nodes for
168752cd3b07SLee Schermerhorn  * page allocation
168852cd3b07SLee Schermerhorn  */
168952cd3b07SLee Schermerhorn static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
169019770b32SMel Gorman {
169119770b32SMel Gorman 	/* Lower zones don't get a nodemask applied for MPOL_BIND */
169245c4745aSLee Schermerhorn 	if (unlikely(policy->mode == MPOL_BIND) &&
1693d3eb1570SLai Jiangshan 			apply_policy_zone(policy, gfp_zone(gfp)) &&
169419770b32SMel Gorman 			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
169519770b32SMel Gorman 		return &policy->v.nodes;
169619770b32SMel Gorman 
169719770b32SMel Gorman 	return NULL;
169819770b32SMel Gorman }
169919770b32SMel Gorman 
170052cd3b07SLee Schermerhorn /* Return a zonelist indicated by gfp for node representing a mempolicy */
17012f5f9486SAndi Kleen static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
17022f5f9486SAndi Kleen 	int nd)
17031da177e4SLinus Torvalds {
170445c4745aSLee Schermerhorn 	switch (policy->mode) {
17051da177e4SLinus Torvalds 	case MPOL_PREFERRED:
1706fc36b8d3SLee Schermerhorn 		if (!(policy->flags & MPOL_F_LOCAL))
17071da177e4SLinus Torvalds 			nd = policy->v.preferred_node;
17081da177e4SLinus Torvalds 		break;
17091da177e4SLinus Torvalds 	case MPOL_BIND:
171019770b32SMel Gorman 		/*
171152cd3b07SLee Schermerhorn 		 * Normally, MPOL_BIND allocations are node-local within the
171252cd3b07SLee Schermerhorn 		 * allowed nodemask.  However, if __GFP_THISNODE is set and the
17136eb27e1fSBob Liu 		 * current node isn't part of the mask, we use the zonelist for
171452cd3b07SLee Schermerhorn 		 * the first node in the mask instead.
171519770b32SMel Gorman 		 */
171619770b32SMel Gorman 		if (unlikely(gfp & __GFP_THISNODE) &&
171719770b32SMel Gorman 				unlikely(!node_isset(nd, policy->v.nodes)))
171819770b32SMel Gorman 			nd = first_node(policy->v.nodes);
171919770b32SMel Gorman 		break;
17201da177e4SLinus Torvalds 	default:
17211da177e4SLinus Torvalds 		BUG();
17221da177e4SLinus Torvalds 	}
17230e88460dSMel Gorman 	return node_zonelist(nd, gfp);
17241da177e4SLinus Torvalds }
17251da177e4SLinus Torvalds 
17261da177e4SLinus Torvalds /* Do dynamic interleaving for a process */
17271da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy)
17281da177e4SLinus Torvalds {
17291da177e4SLinus Torvalds 	unsigned nid, next;
17301da177e4SLinus Torvalds 	struct task_struct *me = current;
17311da177e4SLinus Torvalds 
17321da177e4SLinus Torvalds 	nid = me->il_next;
1733dfcd3c0dSAndi Kleen 	next = next_node(nid, policy->v.nodes);
17341da177e4SLinus Torvalds 	if (next >= MAX_NUMNODES)
1735dfcd3c0dSAndi Kleen 		next = first_node(policy->v.nodes);
1736f5b087b5SDavid Rientjes 	if (next < MAX_NUMNODES)
17371da177e4SLinus Torvalds 		me->il_next = next;
17381da177e4SLinus Torvalds 	return nid;
17391da177e4SLinus Torvalds }
17401da177e4SLinus Torvalds 
1741dc85da15SChristoph Lameter /*
1742dc85da15SChristoph Lameter  * Depending on the memory policy provide a node from which to allocate the
1743dc85da15SChristoph Lameter  * next slab entry.
1744dc85da15SChristoph Lameter  */
17452a389610SDavid Rientjes unsigned int mempolicy_slab_node(void)
1746dc85da15SChristoph Lameter {
1747e7b691b0SAndi Kleen 	struct mempolicy *policy;
17482a389610SDavid Rientjes 	int node = numa_mem_id();
1749e7b691b0SAndi Kleen 
1750e7b691b0SAndi Kleen 	if (in_interrupt())
17512a389610SDavid Rientjes 		return node;
1752e7b691b0SAndi Kleen 
1753e7b691b0SAndi Kleen 	policy = current->mempolicy;
1754fc36b8d3SLee Schermerhorn 	if (!policy || policy->flags & MPOL_F_LOCAL)
17552a389610SDavid Rientjes 		return node;
1756765c4507SChristoph Lameter 
1757bea904d5SLee Schermerhorn 	switch (policy->mode) {
1758bea904d5SLee Schermerhorn 	case MPOL_PREFERRED:
1759fc36b8d3SLee Schermerhorn 		/*
1760fc36b8d3SLee Schermerhorn 		 * handled MPOL_F_LOCAL above
1761fc36b8d3SLee Schermerhorn 		 */
1762bea904d5SLee Schermerhorn 		return policy->v.preferred_node;
1763bea904d5SLee Schermerhorn 
1764dc85da15SChristoph Lameter 	case MPOL_INTERLEAVE:
1765dc85da15SChristoph Lameter 		return interleave_nodes(policy);
1766dc85da15SChristoph Lameter 
1767dd1a239fSMel Gorman 	case MPOL_BIND: {
1768dc85da15SChristoph Lameter 		/*
1769dc85da15SChristoph Lameter 		 * Follow bind policy behavior and start allocation at the
1770dc85da15SChristoph Lameter 		 * first node.
1771dc85da15SChristoph Lameter 		 */
177219770b32SMel Gorman 		struct zonelist *zonelist;
177319770b32SMel Gorman 		struct zone *zone;
177419770b32SMel Gorman 		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
17752a389610SDavid Rientjes 		zonelist = &NODE_DATA(node)->node_zonelists[0];
177619770b32SMel Gorman 		(void)first_zones_zonelist(zonelist, highest_zoneidx,
177719770b32SMel Gorman 							&policy->v.nodes,
177819770b32SMel Gorman 							&zone);
17792a389610SDavid Rientjes 		return zone ? zone->node : node;
1780dd1a239fSMel Gorman 	}
1781dc85da15SChristoph Lameter 
1782dc85da15SChristoph Lameter 	default:
1783bea904d5SLee Schermerhorn 		BUG();
1784dc85da15SChristoph Lameter 	}
1785dc85da15SChristoph Lameter }
1786dc85da15SChristoph Lameter 
17871da177e4SLinus Torvalds /* Do static interleaving for a VMA with known offset. */
17881da177e4SLinus Torvalds static unsigned offset_il_node(struct mempolicy *pol,
17891da177e4SLinus Torvalds 		struct vm_area_struct *vma, unsigned long off)
17901da177e4SLinus Torvalds {
1791dfcd3c0dSAndi Kleen 	unsigned nnodes = nodes_weight(pol->v.nodes);
1792f5b087b5SDavid Rientjes 	unsigned target;
17931da177e4SLinus Torvalds 	int c;
1794b76ac7e7SJianguo Wu 	int nid = NUMA_NO_NODE;
17951da177e4SLinus Torvalds 
1796f5b087b5SDavid Rientjes 	if (!nnodes)
1797f5b087b5SDavid Rientjes 		return numa_node_id();
1798f5b087b5SDavid Rientjes 	target = (unsigned int)off % nnodes;
17991da177e4SLinus Torvalds 	c = 0;
18001da177e4SLinus Torvalds 	do {
1801dfcd3c0dSAndi Kleen 		nid = next_node(nid, pol->v.nodes);
18021da177e4SLinus Torvalds 		c++;
18031da177e4SLinus Torvalds 	} while (c <= target);
18041da177e4SLinus Torvalds 	return nid;
18051da177e4SLinus Torvalds }
18061da177e4SLinus Torvalds 
18075da7ca86SChristoph Lameter /* Determine a node number for interleave */
18085da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol,
18095da7ca86SChristoph Lameter 		 struct vm_area_struct *vma, unsigned long addr, int shift)
18105da7ca86SChristoph Lameter {
18115da7ca86SChristoph Lameter 	if (vma) {
18125da7ca86SChristoph Lameter 		unsigned long off;
18135da7ca86SChristoph Lameter 
18143b98b087SNishanth Aravamudan 		/*
18153b98b087SNishanth Aravamudan 		 * for small pages, there is no difference between
18163b98b087SNishanth Aravamudan 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
18173b98b087SNishanth Aravamudan 		 * for huge pages, since vm_pgoff is in units of small
18183b98b087SNishanth Aravamudan 		 * pages, we need to shift off the always 0 bits to get
18193b98b087SNishanth Aravamudan 		 * a useful offset.
18203b98b087SNishanth Aravamudan 		 */
18213b98b087SNishanth Aravamudan 		BUG_ON(shift < PAGE_SHIFT);
18223b98b087SNishanth Aravamudan 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
18235da7ca86SChristoph Lameter 		off += (addr - vma->vm_start) >> shift;
18245da7ca86SChristoph Lameter 		return offset_il_node(pol, vma, off);
18255da7ca86SChristoph Lameter 	} else
18265da7ca86SChristoph Lameter 		return interleave_nodes(pol);
18275da7ca86SChristoph Lameter }
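/*
 * Worked example (illustrative, values assumed): for a small-page VMA
 * interleaved over nodes {0,2,4} (nnodes == 3), the page at index "i"
 * within the mapping yields
 *
 *	off    = vma->vm_pgoff + i;
 *	target = off % 3;
 *
 * and offset_il_node() advances through the nodemask target + 1 times,
 * so consecutive pages land on nodes 0, 2, 4, 0, 2, ... regardless of
 * which task faults them in.
 */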
18285da7ca86SChristoph Lameter 
1829778d3b0fSMichal Hocko /*
1830778d3b0fSMichal Hocko  * Return the bit number of a random bit set in the nodemask.
1831b76ac7e7SJianguo Wu  * (returns NUMA_NO_NODE if nodemask is empty)
1832778d3b0fSMichal Hocko  */
1833778d3b0fSMichal Hocko int node_random(const nodemask_t *maskp)
1834778d3b0fSMichal Hocko {
1835b76ac7e7SJianguo Wu 	int w, bit = NUMA_NO_NODE;
1836778d3b0fSMichal Hocko 
1837778d3b0fSMichal Hocko 	w = nodes_weight(*maskp);
1838778d3b0fSMichal Hocko 	if (w)
1839778d3b0fSMichal Hocko 		bit = bitmap_ord_to_pos(maskp->bits,
1840778d3b0fSMichal Hocko 			get_random_int() % w, MAX_NUMNODES);
1841778d3b0fSMichal Hocko 	return bit;
1842778d3b0fSMichal Hocko }
1843778d3b0fSMichal Hocko 
184400ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS
1845480eccf9SLee Schermerhorn /*
1846480eccf9SLee Schermerhorn  * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
1847b46e14acSFabian Frederick  * @vma: virtual memory area whose policy is sought
1848b46e14acSFabian Frederick  * @addr: address in @vma for shared policy lookup and interleave policy
1849b46e14acSFabian Frederick  * @gfp_flags: for requested zone
1850b46e14acSFabian Frederick  * @mpol: pointer to mempolicy pointer for reference counted mempolicy
1851b46e14acSFabian Frederick  * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
1852480eccf9SLee Schermerhorn  *
185352cd3b07SLee Schermerhorn  * Returns a zonelist suitable for a huge page allocation and a pointer
185452cd3b07SLee Schermerhorn  * to the struct mempolicy for conditional unref after allocation.
185552cd3b07SLee Schermerhorn  * If the effective policy is 'BIND', returns a pointer to the mempolicy's
185652cd3b07SLee Schermerhorn  * @nodemask for filtering the zonelist.
1857c0ff7453SMiao Xie  *
1858d26914d1SMel Gorman  * Must be protected by read_mems_allowed_begin()
1859480eccf9SLee Schermerhorn  */
1860396faf03SMel Gorman struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
186119770b32SMel Gorman 				gfp_t gfp_flags, struct mempolicy **mpol,
186219770b32SMel Gorman 				nodemask_t **nodemask)
18635da7ca86SChristoph Lameter {
1864480eccf9SLee Schermerhorn 	struct zonelist *zl;
18655da7ca86SChristoph Lameter 
1866dd6eecb9SOleg Nesterov 	*mpol = get_vma_policy(vma, addr);
186719770b32SMel Gorman 	*nodemask = NULL;	/* assume !MPOL_BIND */
18685da7ca86SChristoph Lameter 
186952cd3b07SLee Schermerhorn 	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
187052cd3b07SLee Schermerhorn 		zl = node_zonelist(interleave_nid(*mpol, vma, addr,
1871a5516438SAndi Kleen 				huge_page_shift(hstate_vma(vma))), gfp_flags);
187252cd3b07SLee Schermerhorn 	} else {
18732f5f9486SAndi Kleen 		zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
187452cd3b07SLee Schermerhorn 		if ((*mpol)->mode == MPOL_BIND)
187552cd3b07SLee Schermerhorn 			*nodemask = &(*mpol)->v.nodes;
1876480eccf9SLee Schermerhorn 	}
1877480eccf9SLee Schermerhorn 	return zl;
18785da7ca86SChristoph Lameter }
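/*
 * Illustrative caller sketch (condensed and simplified from the hugetlb
 * fault path; details assumed): the reference returned in *mpol must be
 * dropped with mpol_cond_put() once the zonelist has been used.
 *
 *	retry:
 *		cookie = read_mems_allowed_begin();
 *		zl = huge_zonelist(vma, addr, gfp, &mpol, &nodemask);
 *		page = dequeue_a_huge_page_from(zl, nodemask);
 *		mpol_cond_put(mpol);
 *		if (!page && read_mems_allowed_retry(cookie))
 *			goto retry;
 *
 * dequeue_a_huge_page_from() is a placeholder for the real zonelist walk.
 */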
187906808b08SLee Schermerhorn 
188006808b08SLee Schermerhorn /*
188106808b08SLee Schermerhorn  * init_nodemask_of_mempolicy
188206808b08SLee Schermerhorn  *
188306808b08SLee Schermerhorn  * If the current task's mempolicy is "default" [NULL], return 'false'
188406808b08SLee Schermerhorn  * to indicate default policy.  Otherwise, extract the policy nodemask
188506808b08SLee Schermerhorn  * for 'bind' or 'interleave' policy into the argument nodemask, or
188606808b08SLee Schermerhorn  * initialize the argument nodemask to contain the single node for
188706808b08SLee Schermerhorn  * 'preferred' or 'local' policy and return 'true' to indicate presence
188806808b08SLee Schermerhorn  * of non-default mempolicy.
188906808b08SLee Schermerhorn  *
189006808b08SLee Schermerhorn  * We don't bother with reference counting the mempolicy [mpol_get/put]
189106808b08SLee Schermerhorn  * because the current task is examining its own mempolicy and a task's
189206808b08SLee Schermerhorn  * mempolicy is only ever changed by the task itself.
189306808b08SLee Schermerhorn  *
189406808b08SLee Schermerhorn  * N.B., it is the caller's responsibility to free a returned nodemask.
189506808b08SLee Schermerhorn  */
189606808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask)
189706808b08SLee Schermerhorn {
189806808b08SLee Schermerhorn 	struct mempolicy *mempolicy;
189906808b08SLee Schermerhorn 	int nid;
190006808b08SLee Schermerhorn 
190106808b08SLee Schermerhorn 	if (!(mask && current->mempolicy))
190206808b08SLee Schermerhorn 		return false;
190306808b08SLee Schermerhorn 
1904c0ff7453SMiao Xie 	task_lock(current);
190506808b08SLee Schermerhorn 	mempolicy = current->mempolicy;
190606808b08SLee Schermerhorn 	switch (mempolicy->mode) {
190706808b08SLee Schermerhorn 	case MPOL_PREFERRED:
190806808b08SLee Schermerhorn 		if (mempolicy->flags & MPOL_F_LOCAL)
190906808b08SLee Schermerhorn 			nid = numa_node_id();
191006808b08SLee Schermerhorn 		else
191106808b08SLee Schermerhorn 			nid = mempolicy->v.preferred_node;
191206808b08SLee Schermerhorn 		init_nodemask_of_node(mask, nid);
191306808b08SLee Schermerhorn 		break;
191406808b08SLee Schermerhorn 
191506808b08SLee Schermerhorn 	case MPOL_BIND:
191606808b08SLee Schermerhorn 		/* Fall through */
191706808b08SLee Schermerhorn 	case MPOL_INTERLEAVE:
191806808b08SLee Schermerhorn 		*mask =  mempolicy->v.nodes;
191906808b08SLee Schermerhorn 		break;
192006808b08SLee Schermerhorn 
192106808b08SLee Schermerhorn 	default:
192206808b08SLee Schermerhorn 		BUG();
192306808b08SLee Schermerhorn 	}
1924c0ff7453SMiao Xie 	task_unlock(current);
192506808b08SLee Schermerhorn 
192606808b08SLee Schermerhorn 	return true;
192706808b08SLee Schermerhorn }
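/*
 * Illustrative caller sketch (condensed from the hugetlb sysfs/sysctl
 * handlers; details assumed): allocate a nodemask, fall back to all
 * memory nodes when the task policy is default, and free it afterwards.
 *
 *	NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL);
 *	if (!nodes_allowed || !init_nodemask_of_mempolicy(nodes_allowed)) {
 *		NODEMASK_FREE(nodes_allowed);
 *		nodes_allowed = &node_states[N_MEMORY];
 *	}
 *	... use *nodes_allowed ...
 *	if (nodes_allowed != &node_states[N_MEMORY])
 *		NODEMASK_FREE(nodes_allowed);
 */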
192800ac59adSChen, Kenneth W #endif
19295da7ca86SChristoph Lameter 
19306f48d0ebSDavid Rientjes /*
19316f48d0ebSDavid Rientjes  * mempolicy_nodemask_intersects
19326f48d0ebSDavid Rientjes  *
19336f48d0ebSDavid Rientjes  * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
19346f48d0ebSDavid Rientjes  * policy.  Otherwise, check for intersection between mask and the policy
19356f48d0ebSDavid Rientjes  * nodemask for 'bind' or 'interleave' policy.  For 'preferred' or 'local'
19366f48d0ebSDavid Rientjes  * policy, always return true since it may allocate elsewhere on fallback.
19376f48d0ebSDavid Rientjes  *
19386f48d0ebSDavid Rientjes  * Takes task_lock(tsk) to prevent freeing of its mempolicy.
19396f48d0ebSDavid Rientjes  */
19406f48d0ebSDavid Rientjes bool mempolicy_nodemask_intersects(struct task_struct *tsk,
19416f48d0ebSDavid Rientjes 					const nodemask_t *mask)
19426f48d0ebSDavid Rientjes {
19436f48d0ebSDavid Rientjes 	struct mempolicy *mempolicy;
19446f48d0ebSDavid Rientjes 	bool ret = true;
19456f48d0ebSDavid Rientjes 
19466f48d0ebSDavid Rientjes 	if (!mask)
19476f48d0ebSDavid Rientjes 		return ret;
19486f48d0ebSDavid Rientjes 	task_lock(tsk);
19496f48d0ebSDavid Rientjes 	mempolicy = tsk->mempolicy;
19506f48d0ebSDavid Rientjes 	if (!mempolicy)
19516f48d0ebSDavid Rientjes 		goto out;
19526f48d0ebSDavid Rientjes 
19536f48d0ebSDavid Rientjes 	switch (mempolicy->mode) {
19546f48d0ebSDavid Rientjes 	case MPOL_PREFERRED:
19556f48d0ebSDavid Rientjes 		/*
19566f48d0ebSDavid Rientjes 		 * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to
19576f48d0ebSDavid Rientjes 		 * allocate from, they may fallback to other nodes when oom.
19586f48d0ebSDavid Rientjes 		 * Thus, it's possible for tsk to have allocated memory from
19596f48d0ebSDavid Rientjes 		 * nodes in mask.
19606f48d0ebSDavid Rientjes 		 */
19616f48d0ebSDavid Rientjes 		break;
19626f48d0ebSDavid Rientjes 	case MPOL_BIND:
19636f48d0ebSDavid Rientjes 	case MPOL_INTERLEAVE:
19646f48d0ebSDavid Rientjes 		ret = nodes_intersects(mempolicy->v.nodes, *mask);
19656f48d0ebSDavid Rientjes 		break;
19666f48d0ebSDavid Rientjes 	default:
19676f48d0ebSDavid Rientjes 		BUG();
19686f48d0ebSDavid Rientjes 	}
19696f48d0ebSDavid Rientjes out:
19706f48d0ebSDavid Rientjes 	task_unlock(tsk);
19716f48d0ebSDavid Rientjes 	return ret;
19726f48d0ebSDavid Rientjes }
19736f48d0ebSDavid Rientjes 
19741da177e4SLinus Torvalds /* Allocate a page in interleaved policy.
19751da177e4SLinus Torvalds    Own path because it needs to do special accounting. */
1976662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1977662f3a0bSAndi Kleen 					unsigned nid)
19781da177e4SLinus Torvalds {
19791da177e4SLinus Torvalds 	struct zonelist *zl;
19801da177e4SLinus Torvalds 	struct page *page;
19811da177e4SLinus Torvalds 
19820e88460dSMel Gorman 	zl = node_zonelist(nid, gfp);
19831da177e4SLinus Torvalds 	page = __alloc_pages(gfp, order, zl);
1984dd1a239fSMel Gorman 	if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
1985ca889e6cSChristoph Lameter 		inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
19861da177e4SLinus Torvalds 	return page;
19871da177e4SLinus Torvalds }
19881da177e4SLinus Torvalds 
19891da177e4SLinus Torvalds /**
19900bbbc0b3SAndrea Arcangeli  * 	alloc_pages_vma	- Allocate a page for a VMA.
19911da177e4SLinus Torvalds  *
19921da177e4SLinus Torvalds  * 	@gfp:
19931da177e4SLinus Torvalds  *      %GFP_USER    user allocation.
19941da177e4SLinus Torvalds  *      %GFP_KERNEL  kernel allocations,
19951da177e4SLinus Torvalds  *      %GFP_HIGHMEM highmem/user allocations,
19961da177e4SLinus Torvalds  *      %GFP_FS      allocation should not call back into a file system.
19971da177e4SLinus Torvalds  *      %GFP_ATOMIC  don't sleep.
19981da177e4SLinus Torvalds  *
19990bbbc0b3SAndrea Arcangeli  *	@order: Order of the GFP allocation.
20001da177e4SLinus Torvalds  * 	@vma:  Pointer to VMA or NULL if not available.
20011da177e4SLinus Torvalds  *	@addr: Virtual Address of the allocation. Must be inside the VMA.
20021da177e4SLinus Torvalds  *
20031da177e4SLinus Torvalds  * 	This function allocates a page from the kernel page pool and applies
20041da177e4SLinus Torvalds  *	a NUMA policy associated with the VMA or the current process.
20051da177e4SLinus Torvalds  *	When VMA is not NULL caller must hold down_read on the mmap_sem of the
20061da177e4SLinus Torvalds  *	mm_struct of the VMA to prevent it from going away. Should be used for
20071da177e4SLinus Torvalds  *	all allocations for pages that will be mapped into
20081da177e4SLinus Torvalds  * 	user space. Returns NULL when no page can be allocated.
20091da177e4SLinus Torvalds  *
20101da177e4SLinus Torvalds  *	Should be called with the mmap_sem of the vma held.
20111da177e4SLinus Torvalds  */
20121da177e4SLinus Torvalds struct page *
20130bbbc0b3SAndrea Arcangeli alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
20142f5f9486SAndi Kleen 		unsigned long addr, int node)
20151da177e4SLinus Torvalds {
2016cc9a6c87SMel Gorman 	struct mempolicy *pol;
2017c0ff7453SMiao Xie 	struct page *page;
2018cc9a6c87SMel Gorman 	unsigned int cpuset_mems_cookie;
20191da177e4SLinus Torvalds 
2020cc9a6c87SMel Gorman retry_cpuset:
2021dd6eecb9SOleg Nesterov 	pol = get_vma_policy(vma, addr);
2022d26914d1SMel Gorman 	cpuset_mems_cookie = read_mems_allowed_begin();
2023cc9a6c87SMel Gorman 
202445c4745aSLee Schermerhorn 	if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
20251da177e4SLinus Torvalds 		unsigned nid;
20265da7ca86SChristoph Lameter 
20278eac563cSAndi Kleen 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
202852cd3b07SLee Schermerhorn 		mpol_cond_put(pol);
20290bbbc0b3SAndrea Arcangeli 		page = alloc_page_interleave(gfp, order, nid);
2030d26914d1SMel Gorman 		if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
2031cc9a6c87SMel Gorman 			goto retry_cpuset;
2032cc9a6c87SMel Gorman 
2033c0ff7453SMiao Xie 		return page;
20341da177e4SLinus Torvalds 	}
2035212a0a6fSDavid Rientjes 	page = __alloc_pages_nodemask(gfp, order,
2036212a0a6fSDavid Rientjes 				      policy_zonelist(gfp, pol, node),
20370bbbc0b3SAndrea Arcangeli 				      policy_nodemask(gfp, pol));
20382386740dSOleg Nesterov 	mpol_cond_put(pol);
2039d26914d1SMel Gorman 	if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
2040cc9a6c87SMel Gorman 		goto retry_cpuset;
2041c0ff7453SMiao Xie 	return page;
20421da177e4SLinus Torvalds }
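/*
 * Illustrative caller sketch (assumed, not lifted from a real call
 * site): allocate an order-0 page for an anonymous fault with the
 * VMA's policy applied; mmap_sem must already be held for read.
 *
 *	page = alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, addr,
 *			       numa_node_id());
 *	if (!page)
 *		return VM_FAULT_OOM;
 *
 * The alloc_page_vma() helper in gfp.h wraps this call with order 0 and
 * the local node in much the same way.
 */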
20431da177e4SLinus Torvalds 
20441da177e4SLinus Torvalds /**
20451da177e4SLinus Torvalds  * 	alloc_pages_current - Allocate pages.
20461da177e4SLinus Torvalds  *
20471da177e4SLinus Torvalds  *	@gfp:
20481da177e4SLinus Torvalds  *		%GFP_USER   user allocation,
20491da177e4SLinus Torvalds  *      	%GFP_KERNEL kernel allocation,
20501da177e4SLinus Torvalds  *      	%GFP_HIGHMEM highmem allocation,
20511da177e4SLinus Torvalds  *      	%GFP_FS     don't call back into a file system.
20521da177e4SLinus Torvalds  *      	%GFP_ATOMIC don't sleep.
20531da177e4SLinus Torvalds  *	@order: Power of two of allocation size in pages. 0 is a single page.
20541da177e4SLinus Torvalds  *
20551da177e4SLinus Torvalds  *	Allocate a page from the kernel page pool.  When not in
20561da177e4SLinus Torvalds  *	interrupt context, apply the current process' NUMA policy.
20571da177e4SLinus Torvalds  *	Returns NULL when no page can be allocated.
20581da177e4SLinus Torvalds  *
2059cf2a473cSPaul Jackson  *	Don't call cpuset_update_task_memory_state() unless
20601da177e4SLinus Torvalds  *	1) it's ok to take cpuset_sem (can WAIT), and
20611da177e4SLinus Torvalds  *	2) allocating for current task (not interrupt).
20621da177e4SLinus Torvalds  */
2063dd0fc66fSAl Viro struct page *alloc_pages_current(gfp_t gfp, unsigned order)
20641da177e4SLinus Torvalds {
20658d90274bSOleg Nesterov 	struct mempolicy *pol = &default_policy;
2066c0ff7453SMiao Xie 	struct page *page;
2067cc9a6c87SMel Gorman 	unsigned int cpuset_mems_cookie;
20681da177e4SLinus Torvalds 
20698d90274bSOleg Nesterov 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
20708d90274bSOleg Nesterov 		pol = get_task_policy(current);
207152cd3b07SLee Schermerhorn 
2072cc9a6c87SMel Gorman retry_cpuset:
2073d26914d1SMel Gorman 	cpuset_mems_cookie = read_mems_allowed_begin();
2074cc9a6c87SMel Gorman 
207552cd3b07SLee Schermerhorn 	/*
207652cd3b07SLee Schermerhorn 	 * No reference counting needed for current->mempolicy
207752cd3b07SLee Schermerhorn 	 * nor system default_policy
207852cd3b07SLee Schermerhorn 	 */
207945c4745aSLee Schermerhorn 	if (pol->mode == MPOL_INTERLEAVE)
2080c0ff7453SMiao Xie 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2081c0ff7453SMiao Xie 	else
2082c0ff7453SMiao Xie 		page = __alloc_pages_nodemask(gfp, order,
20835c4b4be3SAndi Kleen 				policy_zonelist(gfp, pol, numa_node_id()),
20845c4b4be3SAndi Kleen 				policy_nodemask(gfp, pol));
2085cc9a6c87SMel Gorman 
2086d26914d1SMel Gorman 	if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
2087cc9a6c87SMel Gorman 		goto retry_cpuset;
2088cc9a6c87SMel Gorman 
2089c0ff7453SMiao Xie 	return page;
20901da177e4SLinus Torvalds }
20911da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_pages_current);
20921da177e4SLinus Torvalds 
2093ef0855d3SOleg Nesterov int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2094ef0855d3SOleg Nesterov {
2095ef0855d3SOleg Nesterov 	struct mempolicy *pol = mpol_dup(vma_policy(src));
2096ef0855d3SOleg Nesterov 
2097ef0855d3SOleg Nesterov 	if (IS_ERR(pol))
2098ef0855d3SOleg Nesterov 		return PTR_ERR(pol);
2099ef0855d3SOleg Nesterov 	dst->vm_policy = pol;
2100ef0855d3SOleg Nesterov 	return 0;
2101ef0855d3SOleg Nesterov }
2102ef0855d3SOleg Nesterov 
21034225399aSPaul Jackson /*
2104846a16bfSLee Schermerhorn  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
21054225399aSPaul Jackson  * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
21064225399aSPaul Jackson  * with the mems_allowed returned by cpuset_mems_allowed().  This
21074225399aSPaul Jackson  * keeps mempolicies cpuset relative after its cpuset moves.  See
21084225399aSPaul Jackson  * further kernel/cpuset.c update_nodemask().
2109708c1bbcSMiao Xie  *
2110708c1bbcSMiao Xie  * current's mempolicy may be rebound by another task (the task that changes
2111708c1bbcSMiao Xie  * the cpuset's mems), so we needn't do rebind work for the current task.
21124225399aSPaul Jackson  */
21134225399aSPaul Jackson 
2114846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */
2115846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old)
21161da177e4SLinus Torvalds {
21171da177e4SLinus Torvalds 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
21181da177e4SLinus Torvalds 
21191da177e4SLinus Torvalds 	if (!new)
21201da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
2121708c1bbcSMiao Xie 
2122708c1bbcSMiao Xie 	/* task's mempolicy is protected by alloc_lock */
2123708c1bbcSMiao Xie 	if (old == current->mempolicy) {
2124708c1bbcSMiao Xie 		task_lock(current);
2125708c1bbcSMiao Xie 		*new = *old;
2126708c1bbcSMiao Xie 		task_unlock(current);
2127708c1bbcSMiao Xie 	} else
2128708c1bbcSMiao Xie 		*new = *old;
2129708c1bbcSMiao Xie 
21304225399aSPaul Jackson 	if (current_cpuset_is_being_rebound()) {
21314225399aSPaul Jackson 		nodemask_t mems = cpuset_mems_allowed(current);
2132708c1bbcSMiao Xie 		if (new->flags & MPOL_F_REBINDING)
2133708c1bbcSMiao Xie 			mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
2134708c1bbcSMiao Xie 		else
2135708c1bbcSMiao Xie 			mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
21364225399aSPaul Jackson 	}
21371da177e4SLinus Torvalds 	atomic_set(&new->refcnt, 1);
21381da177e4SLinus Torvalds 	return new;
21391da177e4SLinus Torvalds }
21401da177e4SLinus Torvalds 
21411da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */
2142fcfb4dccSKOSAKI Motohiro bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
21431da177e4SLinus Torvalds {
21441da177e4SLinus Torvalds 	if (!a || !b)
2145fcfb4dccSKOSAKI Motohiro 		return false;
214645c4745aSLee Schermerhorn 	if (a->mode != b->mode)
2147fcfb4dccSKOSAKI Motohiro 		return false;
214819800502SBob Liu 	if (a->flags != b->flags)
2149fcfb4dccSKOSAKI Motohiro 		return false;
215019800502SBob Liu 	if (mpol_store_user_nodemask(a))
215119800502SBob Liu 		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2152fcfb4dccSKOSAKI Motohiro 			return false;
215319800502SBob Liu 
215445c4745aSLee Schermerhorn 	switch (a->mode) {
215519770b32SMel Gorman 	case MPOL_BIND:
215619770b32SMel Gorman 		/* Fall through */
21571da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
2158fcfb4dccSKOSAKI Motohiro 		return !!nodes_equal(a->v.nodes, b->v.nodes);
21591da177e4SLinus Torvalds 	case MPOL_PREFERRED:
216075719661SNamhyung Kim 		return a->v.preferred_node == b->v.preferred_node;
21611da177e4SLinus Torvalds 	default:
21621da177e4SLinus Torvalds 		BUG();
2163fcfb4dccSKOSAKI Motohiro 		return false;
21641da177e4SLinus Torvalds 	}
21651da177e4SLinus Torvalds }
21661da177e4SLinus Torvalds 
21671da177e4SLinus Torvalds /*
21681da177e4SLinus Torvalds  * Shared memory backing store policy support.
21691da177e4SLinus Torvalds  *
21701da177e4SLinus Torvalds  * Remember policies even when nobody has shared memory mapped.
21711da177e4SLinus Torvalds  * The policies are kept in Red-Black tree linked from the inode.
21721da177e4SLinus Torvalds  * They are protected by the sp->lock spinlock, which should be held
21731da177e4SLinus Torvalds  * for any accesses to the tree.
21741da177e4SLinus Torvalds  */
21751da177e4SLinus Torvalds 
21761da177e4SLinus Torvalds /* lookup first element intersecting start-end */
217742288fe3SMel Gorman /* Caller holds sp->lock */
21781da177e4SLinus Torvalds static struct sp_node *
21791da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
21801da177e4SLinus Torvalds {
21811da177e4SLinus Torvalds 	struct rb_node *n = sp->root.rb_node;
21821da177e4SLinus Torvalds 
21831da177e4SLinus Torvalds 	while (n) {
21841da177e4SLinus Torvalds 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
21851da177e4SLinus Torvalds 
21861da177e4SLinus Torvalds 		if (start >= p->end)
21871da177e4SLinus Torvalds 			n = n->rb_right;
21881da177e4SLinus Torvalds 		else if (end <= p->start)
21891da177e4SLinus Torvalds 			n = n->rb_left;
21901da177e4SLinus Torvalds 		else
21911da177e4SLinus Torvalds 			break;
21921da177e4SLinus Torvalds 	}
21931da177e4SLinus Torvalds 	if (!n)
21941da177e4SLinus Torvalds 		return NULL;
21951da177e4SLinus Torvalds 	for (;;) {
21961da177e4SLinus Torvalds 		struct sp_node *w = NULL;
21971da177e4SLinus Torvalds 		struct rb_node *prev = rb_prev(n);
21981da177e4SLinus Torvalds 		if (!prev)
21991da177e4SLinus Torvalds 			break;
22001da177e4SLinus Torvalds 		w = rb_entry(prev, struct sp_node, nd);
22011da177e4SLinus Torvalds 		if (w->end <= start)
22021da177e4SLinus Torvalds 			break;
22031da177e4SLinus Torvalds 		n = prev;
22041da177e4SLinus Torvalds 	}
22051da177e4SLinus Torvalds 	return rb_entry(n, struct sp_node, nd);
22061da177e4SLinus Torvalds }
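/*
 * Worked example (hypothetical tree contents): with ranges [0,4) and
 * [6,10) in the tree, sp_lookup(sp, 3, 7) first descends to some
 * intersecting node, say [6,10); the rb_prev() walk then moves to [0,4)
 * because it also intersects [3,7), and stops there.  The leftmost
 * intersecting node is returned so callers can scan the whole overlap
 * forward with rb_next(), as shared_policy_replace() does.
 */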
22071da177e4SLinus Torvalds 
22081da177e4SLinus Torvalds /* Insert a new shared policy into the list. */
22091da177e4SLinus Torvalds /* Caller holds sp->lock */
22101da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new)
22111da177e4SLinus Torvalds {
22121da177e4SLinus Torvalds 	struct rb_node **p = &sp->root.rb_node;
22131da177e4SLinus Torvalds 	struct rb_node *parent = NULL;
22141da177e4SLinus Torvalds 	struct sp_node *nd;
22151da177e4SLinus Torvalds 
22161da177e4SLinus Torvalds 	while (*p) {
22171da177e4SLinus Torvalds 		parent = *p;
22181da177e4SLinus Torvalds 		nd = rb_entry(parent, struct sp_node, nd);
22191da177e4SLinus Torvalds 		if (new->start < nd->start)
22201da177e4SLinus Torvalds 			p = &(*p)->rb_left;
22211da177e4SLinus Torvalds 		else if (new->end > nd->end)
22221da177e4SLinus Torvalds 			p = &(*p)->rb_right;
22231da177e4SLinus Torvalds 		else
22241da177e4SLinus Torvalds 			BUG();
22251da177e4SLinus Torvalds 	}
22261da177e4SLinus Torvalds 	rb_link_node(&new->nd, parent, p);
22271da177e4SLinus Torvalds 	rb_insert_color(&new->nd, &sp->root);
2228140d5a49SPaul Mundt 	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
222945c4745aSLee Schermerhorn 		 new->policy ? new->policy->mode : 0);
22301da177e4SLinus Torvalds }
22311da177e4SLinus Torvalds 
22321da177e4SLinus Torvalds /* Find shared policy intersecting idx */
22331da177e4SLinus Torvalds struct mempolicy *
22341da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
22351da177e4SLinus Torvalds {
22361da177e4SLinus Torvalds 	struct mempolicy *pol = NULL;
22371da177e4SLinus Torvalds 	struct sp_node *sn;
22381da177e4SLinus Torvalds 
22391da177e4SLinus Torvalds 	if (!sp->root.rb_node)
22401da177e4SLinus Torvalds 		return NULL;
224142288fe3SMel Gorman 	spin_lock(&sp->lock);
22421da177e4SLinus Torvalds 	sn = sp_lookup(sp, idx, idx+1);
22431da177e4SLinus Torvalds 	if (sn) {
22441da177e4SLinus Torvalds 		mpol_get(sn->policy);
22451da177e4SLinus Torvalds 		pol = sn->policy;
22461da177e4SLinus Torvalds 	}
224742288fe3SMel Gorman 	spin_unlock(&sp->lock);
22481da177e4SLinus Torvalds 	return pol;
22491da177e4SLinus Torvalds }
22501da177e4SLinus Torvalds 
225163f74ca2SKOSAKI Motohiro static void sp_free(struct sp_node *n)
225263f74ca2SKOSAKI Motohiro {
225363f74ca2SKOSAKI Motohiro 	mpol_put(n->policy);
225463f74ca2SKOSAKI Motohiro 	kmem_cache_free(sn_cache, n);
225563f74ca2SKOSAKI Motohiro }
225663f74ca2SKOSAKI Motohiro 
2257771fb4d8SLee Schermerhorn /**
2258771fb4d8SLee Schermerhorn  * mpol_misplaced - check whether current page node is valid in policy
2259771fb4d8SLee Schermerhorn  *
2260b46e14acSFabian Frederick  * @page: page to be checked
2261b46e14acSFabian Frederick  * @vma: vm area where page mapped
2262b46e14acSFabian Frederick  * @addr: virtual address where page mapped
2263771fb4d8SLee Schermerhorn  *
2264771fb4d8SLee Schermerhorn  * Look up the current policy node id for vma,addr and compare it to the
2265771fb4d8SLee Schermerhorn  * page's node id.
2266771fb4d8SLee Schermerhorn  *
2267771fb4d8SLee Schermerhorn  * Returns:
2268771fb4d8SLee Schermerhorn  *	-1	- not misplaced, page is in the right node
2269771fb4d8SLee Schermerhorn  *	node	- node id where the page should be
2270771fb4d8SLee Schermerhorn  *
2271771fb4d8SLee Schermerhorn  * Policy determination "mimics" alloc_page_vma().
2272771fb4d8SLee Schermerhorn  * Called from fault path where we know the vma and faulting address.
2273771fb4d8SLee Schermerhorn  */
2274771fb4d8SLee Schermerhorn int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2275771fb4d8SLee Schermerhorn {
2276771fb4d8SLee Schermerhorn 	struct mempolicy *pol;
2277771fb4d8SLee Schermerhorn 	struct zone *zone;
2278771fb4d8SLee Schermerhorn 	int curnid = page_to_nid(page);
2279771fb4d8SLee Schermerhorn 	unsigned long pgoff;
228090572890SPeter Zijlstra 	int thiscpu = raw_smp_processor_id();
228190572890SPeter Zijlstra 	int thisnid = cpu_to_node(thiscpu);
2282771fb4d8SLee Schermerhorn 	int polnid = -1;
2283771fb4d8SLee Schermerhorn 	int ret = -1;
2284771fb4d8SLee Schermerhorn 
2285771fb4d8SLee Schermerhorn 	BUG_ON(!vma);
2286771fb4d8SLee Schermerhorn 
2287dd6eecb9SOleg Nesterov 	pol = get_vma_policy(vma, addr);
2288771fb4d8SLee Schermerhorn 	if (!(pol->flags & MPOL_F_MOF))
2289771fb4d8SLee Schermerhorn 		goto out;
2290771fb4d8SLee Schermerhorn 
2291771fb4d8SLee Schermerhorn 	switch (pol->mode) {
2292771fb4d8SLee Schermerhorn 	case MPOL_INTERLEAVE:
2293771fb4d8SLee Schermerhorn 		BUG_ON(addr >= vma->vm_end);
2294771fb4d8SLee Schermerhorn 		BUG_ON(addr < vma->vm_start);
2295771fb4d8SLee Schermerhorn 
2296771fb4d8SLee Schermerhorn 		pgoff = vma->vm_pgoff;
2297771fb4d8SLee Schermerhorn 		pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2298771fb4d8SLee Schermerhorn 		polnid = offset_il_node(pol, vma, pgoff);
2299771fb4d8SLee Schermerhorn 		break;
2300771fb4d8SLee Schermerhorn 
2301771fb4d8SLee Schermerhorn 	case MPOL_PREFERRED:
2302771fb4d8SLee Schermerhorn 		if (pol->flags & MPOL_F_LOCAL)
2303771fb4d8SLee Schermerhorn 			polnid = numa_node_id();
2304771fb4d8SLee Schermerhorn 		else
2305771fb4d8SLee Schermerhorn 			polnid = pol->v.preferred_node;
2306771fb4d8SLee Schermerhorn 		break;
2307771fb4d8SLee Schermerhorn 
2308771fb4d8SLee Schermerhorn 	case MPOL_BIND:
2309771fb4d8SLee Schermerhorn 		/*
2310771fb4d8SLee Schermerhorn 		 * MPOL_BIND allows binding to multiple nodes.
2311771fb4d8SLee Schermerhorn 		 * Use the current page's node if it is in the policy nodemask,
2312771fb4d8SLee Schermerhorn 		 * else select the nearest allowed node, if any.
2313771fb4d8SLee Schermerhorn 		 * If there are no allowed nodes, use the current node [!misplaced].
2314771fb4d8SLee Schermerhorn 		 */
2315771fb4d8SLee Schermerhorn 		if (node_isset(curnid, pol->v.nodes))
2316771fb4d8SLee Schermerhorn 			goto out;
2317771fb4d8SLee Schermerhorn 		(void)first_zones_zonelist(
2318771fb4d8SLee Schermerhorn 				node_zonelist(numa_node_id(), GFP_HIGHUSER),
2319771fb4d8SLee Schermerhorn 				gfp_zone(GFP_HIGHUSER),
2320771fb4d8SLee Schermerhorn 				&pol->v.nodes, &zone);
2321771fb4d8SLee Schermerhorn 		polnid = zone->node;
2322771fb4d8SLee Schermerhorn 		break;
2323771fb4d8SLee Schermerhorn 
2324771fb4d8SLee Schermerhorn 	default:
2325771fb4d8SLee Schermerhorn 		BUG();
2326771fb4d8SLee Schermerhorn 	}
23275606e387SMel Gorman 
23285606e387SMel Gorman 	/* Migrate the page towards the node whose CPU is referencing it */
2329e42c8ff2SMel Gorman 	if (pol->flags & MPOL_F_MORON) {
233090572890SPeter Zijlstra 		polnid = thisnid;
23315606e387SMel Gorman 
233210f39042SRik van Riel 		if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2333de1c9ce6SRik van Riel 			goto out;
2334de1c9ce6SRik van Riel 	}
2335e42c8ff2SMel Gorman 
2336771fb4d8SLee Schermerhorn 	if (curnid != polnid)
2337771fb4d8SLee Schermerhorn 		ret = polnid;
2338771fb4d8SLee Schermerhorn out:
2339771fb4d8SLee Schermerhorn 	mpol_cond_put(pol);
2340771fb4d8SLee Schermerhorn 
2341771fb4d8SLee Schermerhorn 	return ret;
2342771fb4d8SLee Schermerhorn }
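/*
 * Simplified sketch of the intended use from a NUMA-hinting fault
 * handler (the real callers live in the do_numa_page() paths):
 *
 *	int target_nid = mpol_misplaced(page, vma, addr);
 *
 *	if (target_nid != -1)
 *		migrate_misplaced_page(page, vma, target_nid);
 *
 * i.e. -1 means "leave the page where it is", any other value names the
 * node the page should be migrated towards.
 */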
2343771fb4d8SLee Schermerhorn 
23441da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n)
23451da177e4SLinus Torvalds {
2346140d5a49SPaul Mundt 	pr_debug("deleting %lx-%lx\n", n->start, n->end);
23471da177e4SLinus Torvalds 	rb_erase(&n->nd, &sp->root);
234863f74ca2SKOSAKI Motohiro 	sp_free(n);
23491da177e4SLinus Torvalds }
23501da177e4SLinus Torvalds 
235142288fe3SMel Gorman static void sp_node_init(struct sp_node *node, unsigned long start,
235242288fe3SMel Gorman 			unsigned long end, struct mempolicy *pol)
235342288fe3SMel Gorman {
235442288fe3SMel Gorman 	node->start = start;
235542288fe3SMel Gorman 	node->end = end;
235642288fe3SMel Gorman 	node->policy = pol;
235742288fe3SMel Gorman }
235842288fe3SMel Gorman 
2359dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2360dbcb0f19SAdrian Bunk 				struct mempolicy *pol)
23611da177e4SLinus Torvalds {
2362869833f2SKOSAKI Motohiro 	struct sp_node *n;
2363869833f2SKOSAKI Motohiro 	struct mempolicy *newpol;
23641da177e4SLinus Torvalds 
2365869833f2SKOSAKI Motohiro 	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
23661da177e4SLinus Torvalds 	if (!n)
23671da177e4SLinus Torvalds 		return NULL;
2368869833f2SKOSAKI Motohiro 
2369869833f2SKOSAKI Motohiro 	newpol = mpol_dup(pol);
2370869833f2SKOSAKI Motohiro 	if (IS_ERR(newpol)) {
2371869833f2SKOSAKI Motohiro 		kmem_cache_free(sn_cache, n);
2372869833f2SKOSAKI Motohiro 		return NULL;
2373869833f2SKOSAKI Motohiro 	}
2374869833f2SKOSAKI Motohiro 	newpol->flags |= MPOL_F_SHARED;
237542288fe3SMel Gorman 	sp_node_init(n, start, end, newpol);
2376869833f2SKOSAKI Motohiro 
23771da177e4SLinus Torvalds 	return n;
23781da177e4SLinus Torvalds }
23791da177e4SLinus Torvalds 
23801da177e4SLinus Torvalds /* Replace a policy range. */
23811da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
23821da177e4SLinus Torvalds 				 unsigned long end, struct sp_node *new)
23831da177e4SLinus Torvalds {
2384b22d127aSMel Gorman 	struct sp_node *n;
238542288fe3SMel Gorman 	struct sp_node *n_new = NULL;
238642288fe3SMel Gorman 	struct mempolicy *mpol_new = NULL;
2387b22d127aSMel Gorman 	int ret = 0;
23881da177e4SLinus Torvalds 
238942288fe3SMel Gorman restart:
239042288fe3SMel Gorman 	spin_lock(&sp->lock);
23911da177e4SLinus Torvalds 	n = sp_lookup(sp, start, end);
23921da177e4SLinus Torvalds 	/* Take care of old policies in the same range. */
23931da177e4SLinus Torvalds 	while (n && n->start < end) {
23941da177e4SLinus Torvalds 		struct rb_node *next = rb_next(&n->nd);
23951da177e4SLinus Torvalds 		if (n->start >= start) {
23961da177e4SLinus Torvalds 			if (n->end <= end)
23971da177e4SLinus Torvalds 				sp_delete(sp, n);
23981da177e4SLinus Torvalds 			else
23991da177e4SLinus Torvalds 				n->start = end;
24001da177e4SLinus Torvalds 		} else {
24011da177e4SLinus Torvalds 			/* Old policy spanning whole new range. */
24021da177e4SLinus Torvalds 			if (n->end > end) {
240342288fe3SMel Gorman 				if (!n_new)
240442288fe3SMel Gorman 					goto alloc_new;
240542288fe3SMel Gorman 
240642288fe3SMel Gorman 				*mpol_new = *n->policy;
240742288fe3SMel Gorman 				atomic_set(&mpol_new->refcnt, 1);
24087880639cSKOSAKI Motohiro 				sp_node_init(n_new, end, n->end, mpol_new);
24091da177e4SLinus Torvalds 				n->end = start;
24105ca39575SHillf Danton 				sp_insert(sp, n_new);
241142288fe3SMel Gorman 				n_new = NULL;
241242288fe3SMel Gorman 				mpol_new = NULL;
24131da177e4SLinus Torvalds 				break;
24141da177e4SLinus Torvalds 			} else
24151da177e4SLinus Torvalds 				n->end = start;
24161da177e4SLinus Torvalds 		}
24171da177e4SLinus Torvalds 		if (!next)
24181da177e4SLinus Torvalds 			break;
24191da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
24201da177e4SLinus Torvalds 	}
24211da177e4SLinus Torvalds 	if (new)
24221da177e4SLinus Torvalds 		sp_insert(sp, new);
242342288fe3SMel Gorman 	spin_unlock(&sp->lock);
242442288fe3SMel Gorman 	ret = 0;
242542288fe3SMel Gorman 
242642288fe3SMel Gorman err_out:
242742288fe3SMel Gorman 	if (mpol_new)
242842288fe3SMel Gorman 		mpol_put(mpol_new);
242942288fe3SMel Gorman 	if (n_new)
243042288fe3SMel Gorman 		kmem_cache_free(sn_cache, n_new);
243142288fe3SMel Gorman 
2432b22d127aSMel Gorman 	return ret;
243342288fe3SMel Gorman 
243442288fe3SMel Gorman alloc_new:
243542288fe3SMel Gorman 	spin_unlock(&sp->lock);
243642288fe3SMel Gorman 	ret = -ENOMEM;
243742288fe3SMel Gorman 	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
243842288fe3SMel Gorman 	if (!n_new)
243942288fe3SMel Gorman 		goto err_out;
244042288fe3SMel Gorman 	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
244142288fe3SMel Gorman 	if (!mpol_new)
244242288fe3SMel Gorman 		goto err_out;
244342288fe3SMel Gorman 	goto restart;
24441da177e4SLinus Torvalds }
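/*
 * Worked example: if the tree holds one range [0,10) with policy A and
 * the caller replaces [3,6) with policy B, the loop above trims the old
 * node to [0,3), the pre-allocated n_new/mpol_new pair becomes a copy of
 * A covering [6,10), and the new node is inserted last, leaving
 * [0,3)=A, [3,6)=B, [6,10)=A.
 */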
24451da177e4SLinus Torvalds 
244671fe804bSLee Schermerhorn /**
244771fe804bSLee Schermerhorn  * mpol_shared_policy_init - initialize shared policy for inode
244871fe804bSLee Schermerhorn  * @sp: pointer to inode shared policy
244971fe804bSLee Schermerhorn  * @mpol:  struct mempolicy to install
245071fe804bSLee Schermerhorn  *
245171fe804bSLee Schermerhorn  * Install non-NULL @mpol in inode's shared policy rb-tree.
245271fe804bSLee Schermerhorn  * On entry, the current task has a reference on a non-NULL @mpol.
245371fe804bSLee Schermerhorn  * This must be released on exit.
24544bfc4495SKAMEZAWA Hiroyuki  * This is called from get_inode(), so GFP_KERNEL allocations can be used.
245571fe804bSLee Schermerhorn  */
245671fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
24577339ff83SRobin Holt {
245858568d2aSMiao Xie 	int ret;
245958568d2aSMiao Xie 
246071fe804bSLee Schermerhorn 	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
246142288fe3SMel Gorman 	spin_lock_init(&sp->lock);
24627339ff83SRobin Holt 
246371fe804bSLee Schermerhorn 	if (mpol) {
24647339ff83SRobin Holt 		struct vm_area_struct pvma;
246571fe804bSLee Schermerhorn 		struct mempolicy *new;
24664bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
24677339ff83SRobin Holt 
24684bfc4495SKAMEZAWA Hiroyuki 		if (!scratch)
24695c0c1654SLee Schermerhorn 			goto put_mpol;
247071fe804bSLee Schermerhorn 		/* contextualize the tmpfs mount point mempolicy */
247171fe804bSLee Schermerhorn 		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
247215d77835SLee Schermerhorn 		if (IS_ERR(new))
24730cae3457SDan Carpenter 			goto free_scratch; /* no valid nodemask intersection */
247458568d2aSMiao Xie 
247558568d2aSMiao Xie 		task_lock(current);
24764bfc4495SKAMEZAWA Hiroyuki 		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
247758568d2aSMiao Xie 		task_unlock(current);
247815d77835SLee Schermerhorn 		if (ret)
24795c0c1654SLee Schermerhorn 			goto put_new;
248071fe804bSLee Schermerhorn 
248171fe804bSLee Schermerhorn 		/* Create pseudo-vma that contains just the policy */
24827339ff83SRobin Holt 		memset(&pvma, 0, sizeof(struct vm_area_struct));
248371fe804bSLee Schermerhorn 		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
248471fe804bSLee Schermerhorn 		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
248515d77835SLee Schermerhorn 
24865c0c1654SLee Schermerhorn put_new:
248771fe804bSLee Schermerhorn 		mpol_put(new);			/* drop initial ref */
24880cae3457SDan Carpenter free_scratch:
24894bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
24905c0c1654SLee Schermerhorn put_mpol:
24915c0c1654SLee Schermerhorn 		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
24927339ff83SRobin Holt 	}
24937339ff83SRobin Holt }
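/*
 * Usage sketch: tmpfs calls this at inode creation time so each file's
 * shared policy tree starts out reflecting the filesystem-wide mpol=
 * mount option, roughly as in shmem_get_inode():
 *
 *	mpol_shared_policy_init(&info->policy, shmem_get_sbmpol(sbinfo));
 */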
24947339ff83SRobin Holt 
24951da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info,
24961da177e4SLinus Torvalds 			struct vm_area_struct *vma, struct mempolicy *npol)
24971da177e4SLinus Torvalds {
24981da177e4SLinus Torvalds 	int err;
24991da177e4SLinus Torvalds 	struct sp_node *new = NULL;
25001da177e4SLinus Torvalds 	unsigned long sz = vma_pages(vma);
25011da177e4SLinus Torvalds 
2502028fec41SDavid Rientjes 	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
25031da177e4SLinus Torvalds 		 vma->vm_pgoff,
250445c4745aSLee Schermerhorn 		 sz, npol ? npol->mode : -1,
2505028fec41SDavid Rientjes 		 npol ? npol->flags : -1,
250600ef2d2fSDavid Rientjes 		 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
25071da177e4SLinus Torvalds 
25081da177e4SLinus Torvalds 	if (npol) {
25091da177e4SLinus Torvalds 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
25101da177e4SLinus Torvalds 		if (!new)
25111da177e4SLinus Torvalds 			return -ENOMEM;
25121da177e4SLinus Torvalds 	}
25131da177e4SLinus Torvalds 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
25141da177e4SLinus Torvalds 	if (err && new)
251563f74ca2SKOSAKI Motohiro 		sp_free(new);
25161da177e4SLinus Torvalds 	return err;
25171da177e4SLinus Torvalds }
25181da177e4SLinus Torvalds 
25191da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */
25201da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p)
25211da177e4SLinus Torvalds {
25221da177e4SLinus Torvalds 	struct sp_node *n;
25231da177e4SLinus Torvalds 	struct rb_node *next;
25241da177e4SLinus Torvalds 
25251da177e4SLinus Torvalds 	if (!p->root.rb_node)
25261da177e4SLinus Torvalds 		return;
252742288fe3SMel Gorman 	spin_lock(&p->lock);
25281da177e4SLinus Torvalds 	next = rb_first(&p->root);
25291da177e4SLinus Torvalds 	while (next) {
25301da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
25311da177e4SLinus Torvalds 		next = rb_next(&n->nd);
253263f74ca2SKOSAKI Motohiro 		sp_delete(p, n);
25331da177e4SLinus Torvalds 	}
253442288fe3SMel Gorman 	spin_unlock(&p->lock);
25351da177e4SLinus Torvalds }
25361da177e4SLinus Torvalds 
25371a687c2eSMel Gorman #ifdef CONFIG_NUMA_BALANCING
2538c297663cSMel Gorman static int __initdata numabalancing_override;
25391a687c2eSMel Gorman 
25401a687c2eSMel Gorman static void __init check_numabalancing_enable(void)
25411a687c2eSMel Gorman {
25421a687c2eSMel Gorman 	bool numabalancing_default = false;
25431a687c2eSMel Gorman 
25441a687c2eSMel Gorman 	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
25451a687c2eSMel Gorman 		numabalancing_default = true;
25461a687c2eSMel Gorman 
2547c297663cSMel Gorman 	/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2548c297663cSMel Gorman 	if (numabalancing_override)
2549c297663cSMel Gorman 		set_numabalancing_state(numabalancing_override == 1);
2550c297663cSMel Gorman 
25511a687c2eSMel Gorman 	if (nr_node_ids > 1 && !numabalancing_override) {
25524a404beaSAndrew Morton 		pr_info("%s automatic NUMA balancing. "
2553c297663cSMel Gorman 			"Configure with numa_balancing= or the "
2554c297663cSMel Gorman 			"kernel.numa_balancing sysctl\n",
2555c297663cSMel Gorman 			numabalancing_default ? "Enabling" : "Disabling");
25561a687c2eSMel Gorman 		set_numabalancing_state(numabalancing_default);
25571a687c2eSMel Gorman 	}
25581a687c2eSMel Gorman }
25591a687c2eSMel Gorman 
25601a687c2eSMel Gorman static int __init setup_numabalancing(char *str)
25611a687c2eSMel Gorman {
25621a687c2eSMel Gorman 	int ret = 0;
25631a687c2eSMel Gorman 	if (!str)
25641a687c2eSMel Gorman 		goto out;
25651a687c2eSMel Gorman 
25661a687c2eSMel Gorman 	if (!strcmp(str, "enable")) {
2567c297663cSMel Gorman 		numabalancing_override = 1;
25681a687c2eSMel Gorman 		ret = 1;
25691a687c2eSMel Gorman 	} else if (!strcmp(str, "disable")) {
2570c297663cSMel Gorman 		numabalancing_override = -1;
25711a687c2eSMel Gorman 		ret = 1;
25721a687c2eSMel Gorman 	}
25731a687c2eSMel Gorman out:
25741a687c2eSMel Gorman 	if (!ret)
25754a404beaSAndrew Morton 		pr_warn("Unable to parse numa_balancing=\n");
25761a687c2eSMel Gorman 
25771a687c2eSMel Gorman 	return ret;
25781a687c2eSMel Gorman }
25791a687c2eSMel Gorman __setup("numa_balancing=", setup_numabalancing);
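/*
 * Usage: automatic NUMA balancing can be forced on or off at boot and
 * toggled at runtime, e.g.
 *
 *	numa_balancing=disable		(boot parameter parsed above)
 *	sysctl kernel.numa_balancing=1	(runtime toggle)
 */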
25801a687c2eSMel Gorman #else
25811a687c2eSMel Gorman static inline void __init check_numabalancing_enable(void)
25821a687c2eSMel Gorman {
25831a687c2eSMel Gorman }
25841a687c2eSMel Gorman #endif /* CONFIG_NUMA_BALANCING */
25851a687c2eSMel Gorman 
25861da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */
25871da177e4SLinus Torvalds void __init numa_policy_init(void)
25881da177e4SLinus Torvalds {
2589b71636e2SPaul Mundt 	nodemask_t interleave_nodes;
2590b71636e2SPaul Mundt 	unsigned long largest = 0;
2591b71636e2SPaul Mundt 	int nid, prefer = 0;
2592b71636e2SPaul Mundt 
25931da177e4SLinus Torvalds 	policy_cache = kmem_cache_create("numa_policy",
25941da177e4SLinus Torvalds 					 sizeof(struct mempolicy),
259520c2df83SPaul Mundt 					 0, SLAB_PANIC, NULL);
25961da177e4SLinus Torvalds 
25971da177e4SLinus Torvalds 	sn_cache = kmem_cache_create("shared_policy_node",
25981da177e4SLinus Torvalds 				     sizeof(struct sp_node),
259920c2df83SPaul Mundt 				     0, SLAB_PANIC, NULL);
26001da177e4SLinus Torvalds 
26015606e387SMel Gorman 	for_each_node(nid) {
26025606e387SMel Gorman 		preferred_node_policy[nid] = (struct mempolicy) {
26035606e387SMel Gorman 			.refcnt = ATOMIC_INIT(1),
26045606e387SMel Gorman 			.mode = MPOL_PREFERRED,
26055606e387SMel Gorman 			.flags = MPOL_F_MOF | MPOL_F_MORON,
26065606e387SMel Gorman 			.v = { .preferred_node = nid, },
26075606e387SMel Gorman 		};
26085606e387SMel Gorman 	}
26095606e387SMel Gorman 
2610b71636e2SPaul Mundt 	/*
2611b71636e2SPaul Mundt 	 * Set interleaving policy for system init. Interleaving is only
2612b71636e2SPaul Mundt 	 * enabled across suitably sized nodes (default is >= 16MB), or
2613b71636e2SPaul Mundt 	 * fall back to the largest node if they're all smaller.
2614b71636e2SPaul Mundt 	 */
2615b71636e2SPaul Mundt 	nodes_clear(interleave_nodes);
261601f13bd6SLai Jiangshan 	for_each_node_state(nid, N_MEMORY) {
2617b71636e2SPaul Mundt 		unsigned long total_pages = node_present_pages(nid);
26181da177e4SLinus Torvalds 
2619b71636e2SPaul Mundt 		/* Preserve the largest node */
2620b71636e2SPaul Mundt 		if (largest < total_pages) {
2621b71636e2SPaul Mundt 			largest = total_pages;
2622b71636e2SPaul Mundt 			prefer = nid;
2623b71636e2SPaul Mundt 		}
2624b71636e2SPaul Mundt 
2625b71636e2SPaul Mundt 		/* Interleave this node? */
2626b71636e2SPaul Mundt 		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2627b71636e2SPaul Mundt 			node_set(nid, interleave_nodes);
2628b71636e2SPaul Mundt 	}
2629b71636e2SPaul Mundt 
2630b71636e2SPaul Mundt 	/* All too small, use the largest */
2631b71636e2SPaul Mundt 	if (unlikely(nodes_empty(interleave_nodes)))
2632b71636e2SPaul Mundt 		node_set(prefer, interleave_nodes);
2633b71636e2SPaul Mundt 
2634028fec41SDavid Rientjes 	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2635b1de0d13SMitchel Humpherys 		pr_err("%s: interleaving failed\n", __func__);
26361a687c2eSMel Gorman 
26371a687c2eSMel Gorman 	check_numabalancing_enable();
26381da177e4SLinus Torvalds }
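/*
 * Example of the size threshold used above: with the usual 4 KiB page
 * size a node joins the boot-time interleave set once it has at least
 * 16 MB / 4 KiB = 4096 present pages; if every node is smaller than
 * that, only the largest node is used.
 */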
26391da177e4SLinus Torvalds 
26408bccd85fSChristoph Lameter /* Reset policy of current process to default */
26411da177e4SLinus Torvalds void numa_default_policy(void)
26421da177e4SLinus Torvalds {
2643028fec41SDavid Rientjes 	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
26441da177e4SLinus Torvalds }
264568860ec1SPaul Jackson 
26464225399aSPaul Jackson /*
2647095f1fc4SLee Schermerhorn  * Parse and format mempolicy from/to strings
2648095f1fc4SLee Schermerhorn  */
2649095f1fc4SLee Schermerhorn 
2650095f1fc4SLee Schermerhorn /*
2651f2a07f40SHugh Dickins  * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag.
26521a75a6c8SChristoph Lameter  */
2653345ace9cSLee Schermerhorn static const char * const policy_modes[] =
2654345ace9cSLee Schermerhorn {
2655345ace9cSLee Schermerhorn 	[MPOL_DEFAULT]    = "default",
2656345ace9cSLee Schermerhorn 	[MPOL_PREFERRED]  = "prefer",
2657345ace9cSLee Schermerhorn 	[MPOL_BIND]       = "bind",
2658345ace9cSLee Schermerhorn 	[MPOL_INTERLEAVE] = "interleave",
2659d3a71033SLee Schermerhorn 	[MPOL_LOCAL]      = "local",
2660345ace9cSLee Schermerhorn };
26611a75a6c8SChristoph Lameter 
2662095f1fc4SLee Schermerhorn 
2663095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS
2664095f1fc4SLee Schermerhorn /**
2665f2a07f40SHugh Dickins  * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2666095f1fc4SLee Schermerhorn  * @str:  string containing mempolicy to parse
266771fe804bSLee Schermerhorn  * @mpol:  pointer to struct mempolicy pointer, returned on success.
2668095f1fc4SLee Schermerhorn  *
2669095f1fc4SLee Schermerhorn  * Format of input:
2670095f1fc4SLee Schermerhorn  *	<mode>[=<flags>][:<nodelist>]
2671095f1fc4SLee Schermerhorn  *
267271fe804bSLee Schermerhorn  * Returns 0 on success, 1 on failure.
2673095f1fc4SLee Schermerhorn  */
2674a7a88b23SHugh Dickins int mpol_parse_str(char *str, struct mempolicy **mpol)
2675095f1fc4SLee Schermerhorn {
267671fe804bSLee Schermerhorn 	struct mempolicy *new = NULL;
2677b4652e84SLee Schermerhorn 	unsigned short mode;
2678f2a07f40SHugh Dickins 	unsigned short mode_flags;
267971fe804bSLee Schermerhorn 	nodemask_t nodes;
2680095f1fc4SLee Schermerhorn 	char *nodelist = strchr(str, ':');
2681095f1fc4SLee Schermerhorn 	char *flags = strchr(str, '=');
2682095f1fc4SLee Schermerhorn 	int err = 1;
2683095f1fc4SLee Schermerhorn 
2684095f1fc4SLee Schermerhorn 	if (nodelist) {
2685095f1fc4SLee Schermerhorn 		/* NUL-terminate mode or flags string */
2686095f1fc4SLee Schermerhorn 		*nodelist++ = '\0';
268771fe804bSLee Schermerhorn 		if (nodelist_parse(nodelist, nodes))
2688095f1fc4SLee Schermerhorn 			goto out;
268901f13bd6SLai Jiangshan 		if (!nodes_subset(nodes, node_states[N_MEMORY]))
2690095f1fc4SLee Schermerhorn 			goto out;
269171fe804bSLee Schermerhorn 	} else
269271fe804bSLee Schermerhorn 		nodes_clear(nodes);
269371fe804bSLee Schermerhorn 
2694095f1fc4SLee Schermerhorn 	if (flags)
2695095f1fc4SLee Schermerhorn 		*flags++ = '\0';	/* terminate mode string */
2696095f1fc4SLee Schermerhorn 
2697479e2802SPeter Zijlstra 	for (mode = 0; mode < MPOL_MAX; mode++) {
2698345ace9cSLee Schermerhorn 		if (!strcmp(str, policy_modes[mode])) {
2699095f1fc4SLee Schermerhorn 			break;
2700095f1fc4SLee Schermerhorn 		}
2701095f1fc4SLee Schermerhorn 	}
2702a720094dSMel Gorman 	if (mode >= MPOL_MAX)
2703095f1fc4SLee Schermerhorn 		goto out;
2704095f1fc4SLee Schermerhorn 
270571fe804bSLee Schermerhorn 	switch (mode) {
2706095f1fc4SLee Schermerhorn 	case MPOL_PREFERRED:
270771fe804bSLee Schermerhorn 		/*
270871fe804bSLee Schermerhorn 		 * Insist on a nodelist of one node only
270971fe804bSLee Schermerhorn 		 */
2710095f1fc4SLee Schermerhorn 		if (nodelist) {
2711095f1fc4SLee Schermerhorn 			char *rest = nodelist;
2712095f1fc4SLee Schermerhorn 			while (isdigit(*rest))
2713095f1fc4SLee Schermerhorn 				rest++;
2714926f2ae0SKOSAKI Motohiro 			if (*rest)
2715926f2ae0SKOSAKI Motohiro 				goto out;
2716095f1fc4SLee Schermerhorn 		}
2717095f1fc4SLee Schermerhorn 		break;
2718095f1fc4SLee Schermerhorn 	case MPOL_INTERLEAVE:
2719095f1fc4SLee Schermerhorn 		/*
2720095f1fc4SLee Schermerhorn 		 * Default to online nodes with memory if no nodelist
2721095f1fc4SLee Schermerhorn 		 */
2722095f1fc4SLee Schermerhorn 		if (!nodelist)
272301f13bd6SLai Jiangshan 			nodes = node_states[N_MEMORY];
27243f226aa1SLee Schermerhorn 		break;
272571fe804bSLee Schermerhorn 	case MPOL_LOCAL:
27263f226aa1SLee Schermerhorn 		/*
272771fe804bSLee Schermerhorn 		 * Don't allow a nodelist;  mpol_new() checks flags
27283f226aa1SLee Schermerhorn 		 */
272971fe804bSLee Schermerhorn 		if (nodelist)
27303f226aa1SLee Schermerhorn 			goto out;
273171fe804bSLee Schermerhorn 		mode = MPOL_PREFERRED;
27323f226aa1SLee Schermerhorn 		break;
2733413b43deSRavikiran G Thirumalai 	case MPOL_DEFAULT:
2734413b43deSRavikiran G Thirumalai 		/*
2735413b43deSRavikiran G Thirumalai 		 * Insist on an empty nodelist
2736413b43deSRavikiran G Thirumalai 		 */
2737413b43deSRavikiran G Thirumalai 		if (!nodelist)
2738413b43deSRavikiran G Thirumalai 			err = 0;
2739413b43deSRavikiran G Thirumalai 		goto out;
2740d69b2e63SKOSAKI Motohiro 	case MPOL_BIND:
274171fe804bSLee Schermerhorn 		/*
2742d69b2e63SKOSAKI Motohiro 		 * Insist on a nodelist
274371fe804bSLee Schermerhorn 		 */
2744d69b2e63SKOSAKI Motohiro 		if (!nodelist)
2745d69b2e63SKOSAKI Motohiro 			goto out;
2746095f1fc4SLee Schermerhorn 	}
2747095f1fc4SLee Schermerhorn 
274871fe804bSLee Schermerhorn 	mode_flags = 0;
2749095f1fc4SLee Schermerhorn 	if (flags) {
2750095f1fc4SLee Schermerhorn 		/*
2751095f1fc4SLee Schermerhorn 		 * Currently, we only support two mutually exclusive
2752095f1fc4SLee Schermerhorn 		 * mode flags.
2753095f1fc4SLee Schermerhorn 		 */
2754095f1fc4SLee Schermerhorn 		if (!strcmp(flags, "static"))
275571fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_STATIC_NODES;
2756095f1fc4SLee Schermerhorn 		else if (!strcmp(flags, "relative"))
275771fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_RELATIVE_NODES;
2758095f1fc4SLee Schermerhorn 		else
2759926f2ae0SKOSAKI Motohiro 			goto out;
2760095f1fc4SLee Schermerhorn 	}
276171fe804bSLee Schermerhorn 
276271fe804bSLee Schermerhorn 	new = mpol_new(mode, mode_flags, &nodes);
276371fe804bSLee Schermerhorn 	if (IS_ERR(new))
2764926f2ae0SKOSAKI Motohiro 		goto out;
2765926f2ae0SKOSAKI Motohiro 
2766f2a07f40SHugh Dickins 	/*
2767f2a07f40SHugh Dickins 	 * Save nodes for mpol_to_str() to show the tmpfs mount options
2768f2a07f40SHugh Dickins 	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2769f2a07f40SHugh Dickins 	 */
2770f2a07f40SHugh Dickins 	if (mode != MPOL_PREFERRED)
2771f2a07f40SHugh Dickins 		new->v.nodes = nodes;
2772f2a07f40SHugh Dickins 	else if (nodelist)
2773f2a07f40SHugh Dickins 		new->v.preferred_node = first_node(nodes);
2774f2a07f40SHugh Dickins 	else
2775f2a07f40SHugh Dickins 		new->flags |= MPOL_F_LOCAL;
2776f2a07f40SHugh Dickins 
2777f2a07f40SHugh Dickins 	/*
2778f2a07f40SHugh Dickins 	 * Save nodes for contextualization: this will be used to "clone"
2779f2a07f40SHugh Dickins 	 * the mempolicy in a specific context [cpuset] at a later time.
2780f2a07f40SHugh Dickins 	 */
2781e17f74afSLee Schermerhorn 	new->w.user_nodemask = nodes;
2782f2a07f40SHugh Dickins 
2783926f2ae0SKOSAKI Motohiro 	err = 0;
278471fe804bSLee Schermerhorn 
2785095f1fc4SLee Schermerhorn out:
2786095f1fc4SLee Schermerhorn 	/* Restore string for error message */
2787095f1fc4SLee Schermerhorn 	if (nodelist)
2788095f1fc4SLee Schermerhorn 		*--nodelist = ':';
2789095f1fc4SLee Schermerhorn 	if (flags)
2790095f1fc4SLee Schermerhorn 		*--flags = '=';
279171fe804bSLee Schermerhorn 	if (!err)
279271fe804bSLee Schermerhorn 		*mpol = new;
2793095f1fc4SLee Schermerhorn 	return err;
2794095f1fc4SLee Schermerhorn }
2795095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */
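/*
 * Examples of the accepted <mode>[=<flags>][:<nodelist>] syntax, as used
 * by the tmpfs "mpol=" mount option (node numbers assume nodes 0-3
 * exist):
 *
 *	mount -t tmpfs -o size=1g,mpol=interleave:0-3 tmpfs /mnt
 *	mount -t tmpfs -o mpol=prefer=static:2 tmpfs /mnt
 *	mount -t tmpfs -o mpol=local tmpfs /mnt
 */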
2796095f1fc4SLee Schermerhorn 
279771fe804bSLee Schermerhorn /**
279871fe804bSLee Schermerhorn  * mpol_to_str - format a mempolicy structure for printing
279971fe804bSLee Schermerhorn  * @buffer:  to contain formatted mempolicy string
280071fe804bSLee Schermerhorn  * @maxlen:  length of @buffer
280171fe804bSLee Schermerhorn  * @pol:  pointer to mempolicy to be formatted
280271fe804bSLee Schermerhorn  *
2803948927eeSDavid Rientjes  * Convert @pol into a string.  If @buffer is too short, truncate the string.
2804948927eeSDavid Rientjes  * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
2805948927eeSDavid Rientjes  * longest flag, "relative", and to display at least a few node ids.
28061a75a6c8SChristoph Lameter  */
2807948927eeSDavid Rientjes void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
28081a75a6c8SChristoph Lameter {
28091a75a6c8SChristoph Lameter 	char *p = buffer;
2810948927eeSDavid Rientjes 	nodemask_t nodes = NODE_MASK_NONE;
2811948927eeSDavid Rientjes 	unsigned short mode = MPOL_DEFAULT;
2812948927eeSDavid Rientjes 	unsigned short flags = 0;
28131a75a6c8SChristoph Lameter 
28148790c71aSDavid Rientjes 	if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
2815bea904d5SLee Schermerhorn 		mode = pol->mode;
2816948927eeSDavid Rientjes 		flags = pol->flags;
2817948927eeSDavid Rientjes 	}
2818bea904d5SLee Schermerhorn 
28191a75a6c8SChristoph Lameter 	switch (mode) {
28201a75a6c8SChristoph Lameter 	case MPOL_DEFAULT:
28211a75a6c8SChristoph Lameter 		break;
28221a75a6c8SChristoph Lameter 	case MPOL_PREFERRED:
2823fc36b8d3SLee Schermerhorn 		if (flags & MPOL_F_LOCAL)
2824f2a07f40SHugh Dickins 			mode = MPOL_LOCAL;
282553f2556bSLee Schermerhorn 		else
2826fc36b8d3SLee Schermerhorn 			node_set(pol->v.preferred_node, nodes);
28271a75a6c8SChristoph Lameter 		break;
28281a75a6c8SChristoph Lameter 	case MPOL_BIND:
28291a75a6c8SChristoph Lameter 	case MPOL_INTERLEAVE:
28301a75a6c8SChristoph Lameter 		nodes = pol->v.nodes;
28311a75a6c8SChristoph Lameter 		break;
28321a75a6c8SChristoph Lameter 	default:
2833948927eeSDavid Rientjes 		WARN_ON_ONCE(1);
2834948927eeSDavid Rientjes 		snprintf(p, maxlen, "unknown");
2835948927eeSDavid Rientjes 		return;
28361a75a6c8SChristoph Lameter 	}
28371a75a6c8SChristoph Lameter 
2838b7a9f420SDavid Rientjes 	p += snprintf(p, maxlen, "%s", policy_modes[mode]);
28391a75a6c8SChristoph Lameter 
2840fc36b8d3SLee Schermerhorn 	if (flags & MPOL_MODE_FLAGS) {
2841948927eeSDavid Rientjes 		p += snprintf(p, buffer + maxlen - p, "=");
2842f5b087b5SDavid Rientjes 
28432291990aSLee Schermerhorn 		/*
28442291990aSLee Schermerhorn 		 * Currently, the only defined flags are mutually exclusive
28452291990aSLee Schermerhorn 		 */
2846f5b087b5SDavid Rientjes 		if (flags & MPOL_F_STATIC_NODES)
28472291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "static");
28482291990aSLee Schermerhorn 		else if (flags & MPOL_F_RELATIVE_NODES)
28492291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "relative");
2850f5b087b5SDavid Rientjes 	}
2851f5b087b5SDavid Rientjes 
28521a75a6c8SChristoph Lameter 	if (!nodes_empty(nodes)) {
2853948927eeSDavid Rientjes 		p += snprintf(p, buffer + maxlen - p, ":");
28541a75a6c8SChristoph Lameter 	 	p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
28551a75a6c8SChristoph Lameter 	}
28561a75a6c8SChristoph Lameter }
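/*
 * Example strings produced by the formatting above (as shown, for
 * instance, in /proc/<pid>/numa_maps or tmpfs mount options):
 *
 *	"default"
 *	"prefer:2"
 *	"bind=static:0-3"
 *	"interleave:0,2,4"
 *	"local"
 */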
2857