/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a per-process
 *                counter is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to the given memory nodes instead.
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case node -1 here means do the allocation
 *                on the local CPU's node. This is normally identical to
 *                default, but useful to set in a VMA when you have a
 *                non-default process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non-interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem, kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has the memory mapped.
 */
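
/*
 * Example (userspace, illustrative only -- not part of this file): the
 * modes above are selected through the set_mempolicy() and mbind()
 * system calls, e.g. via the <numaif.h> wrappers.  "addr" and "length"
 * below stand for some existing page-aligned mapping:
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 1);
 *
 *	// interleave this task's future allocations across nodes 0 and 1
 *	if (set_mempolicy(MPOL_INTERLEAVE, &mask, 8 * sizeof(mask)))
 *		perror("set_mempolicy");
 *
 *	// bind one existing mapping to node 0 only (sets a VMA policy)
 *	unsigned long node0 = 1UL << 0;
 *	if (mbind(addr, length, MPOL_BIND, &node0, 8 * sizeof(node0), 0))
 *		perror("mbind");
 */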

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always graceful about that.
*/

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <linux/random.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	/*
	 * If the read-side task has no lock to protect task->mempolicy,
	 * the write-side task will rebind task->mempolicy in two steps.
	 * The first step sets all the newly allowed nodes, and the second
	 * step clears all the disallowed nodes. This way a lockless reader
	 * can never observe an empty nodemask and fail to find a node to
	 * allocate a page from.
	 * If we have a lock to protect task->mempolicy on the read side,
	 * we rebind directly.
	 *
	 * step:
	 *	MPOL_REBIND_ONCE  - do the rebind work at once
	 *	MPOL_REBIND_STEP1 - set all the newly allowed nodes
	 *	MPOL_REBIND_STEP2 - clear all the disallowed nodes
	 */
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
			enum mpol_rebind_step step);
} mpol_ops[MPOL_MAX];

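/*
 * Worked example (illustrative): rebinding an MPOL_INTERLEAVE policy from
 * nodes {0,1} to nodes {2,3} without a read-side lock proceeds as
 *
 *	MPOL_REBIND_STEP1: pol->v.nodes = {0,1} | {2,3} = {0,1,2,3}
 *	MPOL_REBIND_STEP2: pol->v.nodes = {2,3}
 *
 * i.e. grow first, shrink second, so a concurrent lockless reader always
 * sees at least one allowed node at every point in time.
 */
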
/* Check that the nodemask contains at least one populated zone */
static int is_valid_nodemask(const nodemask_t *nodemask)
{
	int nd, k;

	for_each_node_mask(nd, *nodemask) {
		struct zone *z;

		for (k = 0; k <= policy_zone; k++) {
			z = &NODE_DATA(nd)->node_zones[k];
			if (z->present_pages > 0)
				return 1;
		}
	}

	return 0;
}

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}
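
/*
 * Worked example (illustrative): with a user-relative mask {0,2} and an
 * allowed set {1,3} (weight 2), nodes_fold() first wraps {0,2} modulo 2
 * into {0}, and nodes_onto() then maps relative bit 0 onto the first set
 * bit of the allowed set, yielding node {1}.
 */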

static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!nodes)
		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
	else if (nodes_empty(*nodes))
		return -EINVAL;			/* no allowed nodes */
	else
		pol->v.preferred_node = first_node(*nodes);
	return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!is_valid_nodemask(nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy.  mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.  But, we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy.  May also be called holding the mmap semaphore for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
	if (pol == NULL)
		return 0;
	/* Check N_HIGH_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_HIGH_MEMORY]);

	VM_BUG_ON(!nodes);
	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
		nodes = NULL;	/* explicit local allocation */
	else {
		if (pol->flags & MPOL_F_RELATIVE_NODES)
			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
		else
			nodes_and(nsc->mask2, *nodes, nsc->mask1);

		if (mpol_store_user_nodemask(pol))
			pol->w.user_nodemask = *nodes;
		else
			pol->w.cpuset_mems_allowed =
						cpuset_current_mems_allowed;
	}

	if (nodes)
		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	else
		ret = mpol_ops[pol->mode].create(pol, NULL);
	return ret;
}
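
/*
 * Typical call sequence (illustrative sketch, error handling elided; see
 * do_set_mempolicy() below for the real thing):
 *
 *	NODEMASK_SCRATCH(scratch);
 *	struct mempolicy *new = mpol_new(mode, flags, nodes);
 *
 *	task_lock(current);
 *	err = mpol_set_nodemask(new, nodes, scratch);
 *	task_unlock(current);
 *	NODEMASK_SCRATCH_FREE(scratch);
 */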

/*
 * This function just creates a new policy, does some checks and simple
 * initialization. You must invoke mpol_set_nodemask() to set the nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : -1);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;	/* simply delete any existing policy */
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);
		}
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	return policy;
}

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
				enum mpol_rebind_step step)
{
}

/*
 * step:
 *	MPOL_REBIND_ONCE  - do the rebind work at once
 *	MPOL_REBIND_STEP1 - set all the newly allowed nodes
 *	MPOL_REBIND_STEP2 - clear all the disallowed nodes
 */
static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
				 enum mpol_rebind_step step)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		/*
		 * if step == 1, we use ->w.cpuset_mems_allowed to cache the
		 * result
		 */
		if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
			nodes_remap(tmp, pol->v.nodes,
					pol->w.cpuset_mems_allowed, *nodes);
			pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
		} else if (step == MPOL_REBIND_STEP2) {
			tmp = pol->w.cpuset_mems_allowed;
			pol->w.cpuset_mems_allowed = *nodes;
		} else
			BUG();
	}

	if (nodes_empty(tmp))
		tmp = *nodes;

	if (step == MPOL_REBIND_STEP1)
		nodes_or(pol->v.nodes, pol->v.nodes, tmp);
	else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
		pol->v.nodes = tmp;
	else
		BUG();

	if (!node_isset(current->il_next, tmp)) {
		current->il_next = next_node(current->il_next, tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = first_node(tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = numa_node_id();
	}
}

static void mpol_rebind_preferred(struct mempolicy *pol,
				  const nodemask_t *nodes,
				  enum mpol_rebind_step step)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES) {
		int node = first_node(pol->w.user_nodemask);

		if (node_isset(node, *nodes)) {
			pol->v.preferred_node = node;
			pol->flags &= ~MPOL_F_LOCAL;
		} else
			pol->flags |= MPOL_F_LOCAL;
	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
		pol->v.preferred_node = first_node(tmp);
	} else if (!(pol->flags & MPOL_F_LOCAL)) {
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						   pol->w.cpuset_mems_allowed,
						   *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}
}

/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * If the read-side task has no lock to protect task->mempolicy, the
 * write-side task will rebind task->mempolicy in two steps. The first
 * step sets all the newly allowed nodes, and the second step clears all
 * the disallowed nodes. This way a lockless reader can never observe an
 * empty nodemask and fail to find a node to allocate a page from.
 * If we have a lock to protect task->mempolicy on the read side, we
 * rebind directly.
 *
 * step:
 *	MPOL_REBIND_ONCE  - do the rebind work at once
 *	MPOL_REBIND_STEP1 - set all the newly allowed nodes
 *	MPOL_REBIND_STEP2 - clear all the disallowed nodes
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
				enum mpol_rebind_step step)
{
	if (!pol)
		return;
	if (!mpol_store_user_nodemask(pol) && step == 0 &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;

	if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
		return;

	if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
		BUG();

	if (step == MPOL_REBIND_STEP1)
		pol->flags |= MPOL_F_REBINDING;
	else if (step == MPOL_REBIND_STEP2)
		pol->flags &= ~MPOL_F_REBINDING;
	else if (step >= MPOL_REBIND_NSTEP)
		BUG();

	mpol_ops[pol->mode].rebind(pol, newmask, step);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */

void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
			enum mpol_rebind_step step)
{
	mpol_rebind_policy(tsk->mempolicy, new, step);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
 */

void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
	up_write(&mm->mmap_sem);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_interleave,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,
	},
};

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

/* Scan through pages checking if pages follow certain conditions. */
static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pte_t *orig_pte;
	pte_t *pte;
	spinlock_t *ptl;

	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		struct page *page;
		int nid;

		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * vm_normal_page() filters out zero pages, but there might
		 * still be PageReserved pages to skip, perhaps in a VDSO.
		 * And we cannot move PageKsm pages sensibly or safely yet.
		 */
		if (PageReserved(page) || PageKsm(page))
			continue;
		nid = page_to_nid(page);
		if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
			continue;

		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
			migrate_page_add(page, private, flags);
		else
			break;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return addr != end;
}

static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		split_huge_page_pmd(vma->vm_mm, pmd);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		if (check_pte_range(vma, pmd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		if (check_pmd_range(vma, pud, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static inline int check_pgd_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		if (check_pud_range(vma, pgd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pgd++, addr = next, addr != end);
	return 0;
}

/*
 * Check if all pages in a range are on a set of nodes.
 * If pagelist != NULL then isolate pages from the LRU and
 * put them on the pagelist.
 */
static struct vm_area_struct *
check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		const nodemask_t *nodes, unsigned long flags, void *private)
{
	int err;
	struct vm_area_struct *first, *vma, *prev;

	first = find_vma(mm, start);
	if (!first)
		return ERR_PTR(-EFAULT);
	prev = NULL;
	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
			if (!vma->vm_next && vma->vm_end < end)
				return ERR_PTR(-EFAULT);
			if (prev && prev->vm_end < vma->vm_start)
				return ERR_PTR(-EFAULT);
		}
		if (!is_vm_hugetlb_page(vma) &&
		    ((flags & MPOL_MF_STRICT) ||
		     ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
				vma_migratable(vma)))) {
			unsigned long endvma = vma->vm_end;

			if (endvma > end)
				endvma = end;
			if (vma->vm_start > start)
				start = vma->vm_start;
			err = check_pgd_range(vma, start, endvma, nodes,
						flags, private);
			if (err) {
				first = ERR_PTR(err);
				break;
			}
		}
		prev = vma;
	}
	return first;
}

/* Apply policy to a single VMA */
static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
{
	int err = 0;
	struct mempolicy *old = vma->vm_policy;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	if (vma->vm_ops && vma->vm_ops->set_policy)
		err = vma->vm_ops->set_policy(vma, new);
	if (!err) {
		mpol_get(new);
		vma->vm_policy = new;
		mpol_put(old);
	}
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end, struct mempolicy *new_pol)
{
	struct vm_area_struct *next;
	struct vm_area_struct *prev;
	struct vm_area_struct *vma;
	int err = 0;
	pgoff_t pgoff;
	unsigned long vmstart;
	unsigned long vmend;

	vma = find_vma(mm, start);
	if (!vma || vma->vm_start > start)
		return -EFAULT;

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
		next = vma->vm_next;
		vmstart = max(start, vma->vm_start);
		vmend   = min(end, vma->vm_end);

		if (mpol_equal(vma_policy(vma), new_pol))
			continue;

		pgoff = vma->vm_pgoff +
			((vmstart - vma->vm_start) >> PAGE_SHIFT);
		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
				  vma->anon_vma, vma->vm_file, pgoff,
				  new_pol);
		if (prev) {
			vma = prev;
			next = vma->vm_next;
			continue;
		}
		if (vma->vm_start != vmstart) {
			err = split_vma(vma->vm_mm, vma, vmstart, 1);
			if (err)
				goto out;
		}
		if (vma->vm_end != vmend) {
			err = split_vma(vma->vm_mm, vma, vmend, 0);
			if (err)
				goto out;
		}
		err = policy_vma(vma, new_pol);
		if (err)
			goto out;
	}

 out:
	return err;
}

/*
 * Update task->flags PF_MEMPOLICY bit: set iff non-default
 * mempolicy.  Allows more rapid checking of this (combined perhaps
 * with other PF_* flag bits) on memory allocation hot code paths.
 *
 * If called from outside this file, the task 'p' should -only- be
 * a newly forked child not yet visible on the task list, because
 * manipulating the task flags of a visible task is not safe.
 *
 * The above limitation is why this routine has the funny name
 * mpol_fix_fork_child_flag().
 *
 * It is also safe to call this with a task pointer of current,
 * which the static wrapper mpol_set_task_struct_flag() does,
 * for use within this file.
 */

void mpol_fix_fork_child_flag(struct task_struct *p)
{
	if (p->mempolicy)
		p->flags |= PF_MEMPOLICY;
	else
		p->flags &= ~PF_MEMPOLICY;
}

static void mpol_set_task_struct_flag(void)
{
	mpol_fix_fork_child_flag(current);
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new, *old;
	struct mm_struct *mm = current->mm;
	NODEMASK_SCRATCH(scratch);
	int ret;

	if (!scratch)
		return -ENOMEM;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new)) {
		ret = PTR_ERR(new);
		goto out;
	}
	/*
	 * prevent changing our mempolicy while show_numa_maps()
	 * is using it.
	 * Note:  do_set_mempolicy() can be called at init time
	 * with no 'mm'.
	 */
	if (mm)
		down_write(&mm->mmap_sem);
	task_lock(current);
	ret = mpol_set_nodemask(new, nodes, scratch);
	if (ret) {
		task_unlock(current);
		if (mm)
			up_write(&mm->mmap_sem);
		mpol_put(new);
		goto out;
	}
	old = current->mempolicy;
	current->mempolicy = new;
	mpol_set_task_struct_flag();
	if (new && new->mode == MPOL_INTERLEAVE &&
	    nodes_weight(new->v.nodes))
		current->il_next = first_node(new->v.nodes);
	task_unlock(current);
	if (mm)
		up_write(&mm->mmap_sem);

	mpol_put(old);
	ret = 0;
out:
	NODEMASK_SCRATCH_FREE(scratch);
	return ret;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (p == &default_policy)
		return;

	switch (p->mode) {
	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		if (!(p->flags & MPOL_F_LOCAL))
			node_set(p->v.preferred_node, *nodes);
		/* else return empty node mask for local allocation */
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p;
	int err;

	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask  = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL.  We
		 * want to return MPOL_DEFAULT in this case.
		 */
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = current->il_next;
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	if (vma) {
		up_read(&current->mm->mmap_sem);
		vma = NULL;
	}

	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {
			*nmask = pol->w.user_nodemask;
		} else {
			task_lock(current);
			get_policy_nodemask(pol, nmask);
			task_unlock(current);
		}
	}

 out:
	mpol_cond_put(pol);
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}

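/*
 * Example (userspace, illustrative only): the MPOL_F_NODE | MPOL_F_ADDR
 * query above is reachable via the get_mempolicy() wrapper in <numaif.h>;
 * "ptr" below stands for any mapped address:
 *
 *	int node = -1;
 *	if (get_mempolicy(&node, NULL, 0, ptr, MPOL_F_NODE | MPOL_F_ADDR) == 0)
 *		printf("page at %p is on node %d\n", ptr, node);
 */
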
8991da177e4SLinus Torvalds 
900b20a3503SChristoph Lameter #ifdef CONFIG_MIGRATION
9018bccd85fSChristoph Lameter /*
9026ce3c4c0SChristoph Lameter  * page migration
9036ce3c4c0SChristoph Lameter  */
904fc301289SChristoph Lameter static void migrate_page_add(struct page *page, struct list_head *pagelist,
905fc301289SChristoph Lameter 				unsigned long flags)
9066ce3c4c0SChristoph Lameter {
9076ce3c4c0SChristoph Lameter 	/*
908fc301289SChristoph Lameter 	 * Avoid migrating a page that is shared with others.
9096ce3c4c0SChristoph Lameter 	 */
91062695a84SNick Piggin 	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
91162695a84SNick Piggin 		if (!isolate_lru_page(page)) {
91262695a84SNick Piggin 			list_add_tail(&page->lru, pagelist);
9136d9c285aSKOSAKI Motohiro 			inc_zone_page_state(page, NR_ISOLATED_ANON +
9146d9c285aSKOSAKI Motohiro 					    page_is_file_cache(page));
91562695a84SNick Piggin 		}
91662695a84SNick Piggin 	}
9176ce3c4c0SChristoph Lameter }
9186ce3c4c0SChristoph Lameter 
static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
	return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;
	struct vm_area_struct *vma;

	nodes_clear(nmask);
	node_set(source, nmask);

	vma = check_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, new_node_page, dest,
							false, MIGRATE_SYNC);
		if (err)
			putback_lru_pages(&pagelist);
	}

	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	int busy = 0;
	int err;
	nodemask_t tmp;

	err = migrate_prep();
	if (err)
		return err;

	down_read(&mm->mmap_sem);

	err = migrate_vmas(mm, from_nodes, to_nodes, flags);
	if (err)
		goto out;

	/*
	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
	 * bit in 'tmp', and return that <source, dest> pair for migration.
	 * The pair of nodemasks 'to' and 'from' define the map.
	 *
	 * If no pair of bits is found that way, fall back to picking some
	 * pair of 'source' and 'dest' bits that are not the same.  If the
	 * 'source' and 'dest' bits are the same, this represents a node
	 * that will be migrating to itself, so no pages need move.
	 *
	 * If no bits are left in 'tmp', or if all remaining bits left
	 * in 'tmp' correspond to the same bit in 'to', return false
	 * (nothing left to migrate).
	 *
	 * This lets us pick a pair of nodes to migrate between, such that
	 * if possible the dest node is not already occupied by some other
	 * source node, minimizing the risk of overloading the memory on a
	 * node that would happen if we migrated incoming memory to a node
	 * before migrating outgoing memory from that same node.
	 *
	 * A single scan of tmp is sufficient.  As we go, we remember the
	 * most recent <s, d> pair that moved (s != d).  If we find a pair
	 * that not only moved, but what's better, moved to an empty slot
	 * (d is not set in tmp), then we break out then, with that pair.
	 * Otherwise when we finish scanning tmp, we at least have the
	 * most recent <s, d> pair that moved.  If we get all the way through
	 * the scan of tmp without finding any node that moved, much less
	 * moved to an empty node, then there is nothing left worth migrating.
	 */
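
	/*
	 * Worked example (illustrative): from = {0,1}, to = {1,2},
	 * tmp starts as {0,1}.
	 *
	 *	scan 1: s=0 maps to d=1, but 1 is still in tmp (occupied);
	 *		s=1 maps to d=2, 2 is not in tmp -> migrate 1 -> 2
	 *	scan 2: tmp = {0}; s=0 maps to d=1, now free -> migrate 0 -> 1
	 *
	 * Node 1 is drained to node 2 before node 0's pages land on it.
	 */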

	tmp = *from_nodes;
	while (!nodes_empty(tmp)) {
		int s, d;
		int source = -1;
		int dest = 0;

		for_each_node_mask(s, tmp) {
			d = node_remap(s, *from_nodes, *to_nodes);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
				break;
		}
		if (source == -1)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;
		if (err < 0)
			break;
	}
out:
	up_read(&mm->mmap_sem);
	if (err < 0)
		return err;
	return busy;
}

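/*
 * Example (userspace, illustrative only): the loop above backs the
 * migrate_pages(2) system call, reachable via the <numaif.h> wrapper;
 * "pid" below stands for the target process:
 *
 *	unsigned long from = 1UL << 0, to = 1UL << 1;
 *
 *	// move the target process's pages from node 0 to node 1
 *	long left = migrate_pages(pid, 8 * sizeof(from), &from, &to);
 *
 * A positive "left" is the number of pages that could not be moved.
 */
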
/*
 * Allocate a new page for page migration based on vma policy.
 * Start assuming that page is mapped by vma pointed to by @private.
 * Search forward from there, if not.  N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
 */
static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	struct vm_area_struct *vma = (struct vm_area_struct *)private;
	unsigned long uninitialized_var(address);

	while (vma) {
		address = page_address_in_vma(page, vma);
		if (address != -EFAULT)
			break;
		vma = vma->vm_next;
	}

	/*
	 * if !vma, alloc_page_vma() will use task or system default policy
	 */
	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
}
1068b20a3503SChristoph Lameter #else
1069b20a3503SChristoph Lameter 
1070b20a3503SChristoph Lameter static void migrate_page_add(struct page *page, struct list_head *pagelist,
1071b20a3503SChristoph Lameter 				unsigned long flags)
1072b20a3503SChristoph Lameter {
1073b20a3503SChristoph Lameter }
1074b20a3503SChristoph Lameter 
1075b20a3503SChristoph Lameter int do_migrate_pages(struct mm_struct *mm,
1076b20a3503SChristoph Lameter 	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
1077b20a3503SChristoph Lameter {
1078b20a3503SChristoph Lameter 	return -ENOSYS;
1079b20a3503SChristoph Lameter }
108095a402c3SChristoph Lameter 
108169939749SKeith Owens static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
108295a402c3SChristoph Lameter {
108395a402c3SChristoph Lameter 	return NULL;
108495a402c3SChristoph Lameter }
1085b20a3503SChristoph Lameter #endif
1086b20a3503SChristoph Lameter 
1087dbcb0f19SAdrian Bunk static long do_mbind(unsigned long start, unsigned long len,
1088028fec41SDavid Rientjes 		     unsigned short mode, unsigned short mode_flags,
1089028fec41SDavid Rientjes 		     nodemask_t *nmask, unsigned long flags)
10906ce3c4c0SChristoph Lameter {
10916ce3c4c0SChristoph Lameter 	struct vm_area_struct *vma;
10926ce3c4c0SChristoph Lameter 	struct mm_struct *mm = current->mm;
10936ce3c4c0SChristoph Lameter 	struct mempolicy *new;
10946ce3c4c0SChristoph Lameter 	unsigned long end;
10956ce3c4c0SChristoph Lameter 	int err;
10966ce3c4c0SChristoph Lameter 	LIST_HEAD(pagelist);
10976ce3c4c0SChristoph Lameter 
1098a3b51e01SDavid Rientjes 	if (flags & ~(unsigned long)(MPOL_MF_STRICT |
10996ce3c4c0SChristoph Lameter 				     MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
11006ce3c4c0SChristoph Lameter 		return -EINVAL;
110174c00241SChristoph Lameter 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
11026ce3c4c0SChristoph Lameter 		return -EPERM;
11036ce3c4c0SChristoph Lameter 
11046ce3c4c0SChristoph Lameter 	if (start & ~PAGE_MASK)
11056ce3c4c0SChristoph Lameter 		return -EINVAL;
11066ce3c4c0SChristoph Lameter 
11076ce3c4c0SChristoph Lameter 	if (mode == MPOL_DEFAULT)
11086ce3c4c0SChristoph Lameter 		flags &= ~MPOL_MF_STRICT;
11096ce3c4c0SChristoph Lameter 
11106ce3c4c0SChristoph Lameter 	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
11116ce3c4c0SChristoph Lameter 	end = start + len;
11126ce3c4c0SChristoph Lameter 
11136ce3c4c0SChristoph Lameter 	if (end < start)
11146ce3c4c0SChristoph Lameter 		return -EINVAL;
11156ce3c4c0SChristoph Lameter 	if (end == start)
11166ce3c4c0SChristoph Lameter 		return 0;
11176ce3c4c0SChristoph Lameter 
1118028fec41SDavid Rientjes 	new = mpol_new(mode, mode_flags, nmask);
11196ce3c4c0SChristoph Lameter 	if (IS_ERR(new))
11206ce3c4c0SChristoph Lameter 		return PTR_ERR(new);
11216ce3c4c0SChristoph Lameter 
11226ce3c4c0SChristoph Lameter 	/*
11236ce3c4c0SChristoph Lameter 	 * If we are using the default policy then operation
11246ce3c4c0SChristoph Lameter 	 * on discontinuous address spaces is okay after all
11256ce3c4c0SChristoph Lameter 	 */
11266ce3c4c0SChristoph Lameter 	if (!new)
11276ce3c4c0SChristoph Lameter 		flags |= MPOL_MF_DISCONTIG_OK;
11286ce3c4c0SChristoph Lameter 
1129028fec41SDavid Rientjes 	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1130028fec41SDavid Rientjes 		 start, start + len, mode, mode_flags,
1131028fec41SDavid Rientjes 		 nmask ? nodes_addr(*nmask)[0] : -1);
11326ce3c4c0SChristoph Lameter 
11330aedadf9SChristoph Lameter 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
11340aedadf9SChristoph Lameter 
11350aedadf9SChristoph Lameter 		err = migrate_prep();
11360aedadf9SChristoph Lameter 		if (err)
1137b05ca738SKOSAKI Motohiro 			goto mpol_out;
11380aedadf9SChristoph Lameter 	}
11394bfc4495SKAMEZAWA Hiroyuki 	{
11404bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
11414bfc4495SKAMEZAWA Hiroyuki 		if (scratch) {
11426ce3c4c0SChristoph Lameter 			down_write(&mm->mmap_sem);
114358568d2aSMiao Xie 			task_lock(current);
11444bfc4495SKAMEZAWA Hiroyuki 			err = mpol_set_nodemask(new, nmask, scratch);
114558568d2aSMiao Xie 			task_unlock(current);
11464bfc4495SKAMEZAWA Hiroyuki 			if (err)
114758568d2aSMiao Xie 				up_write(&mm->mmap_sem);
11484bfc4495SKAMEZAWA Hiroyuki 		} else
11494bfc4495SKAMEZAWA Hiroyuki 			err = -ENOMEM;
11504bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
11514bfc4495SKAMEZAWA Hiroyuki 	}
1152b05ca738SKOSAKI Motohiro 	if (err)
1153b05ca738SKOSAKI Motohiro 		goto mpol_out;
1154b05ca738SKOSAKI Motohiro 
11556ce3c4c0SChristoph Lameter 	vma = check_range(mm, start, end, nmask,
11566ce3c4c0SChristoph Lameter 			  flags | MPOL_MF_INVERT, &pagelist);
11576ce3c4c0SChristoph Lameter 
11586ce3c4c0SChristoph Lameter 	err = PTR_ERR(vma);
11596ce3c4c0SChristoph Lameter 	if (!IS_ERR(vma)) {
11606ce3c4c0SChristoph Lameter 		int nr_failed = 0;
11616ce3c4c0SChristoph Lameter 
11629d8cebd4SKOSAKI Motohiro 		err = mbind_range(mm, start, end, new);
11637e2ab150SChristoph Lameter 
1164cf608ac1SMinchan Kim 		if (!list_empty(&pagelist)) {
116595a402c3SChristoph Lameter 			nr_failed = migrate_pages(&pagelist, new_vma_page,
11667f0f2496SMel Gorman 						(unsigned long)vma,
11677f0f2496SMel Gorman 						false, true);
1168cf608ac1SMinchan Kim 			if (nr_failed)
1169cf608ac1SMinchan Kim 				putback_lru_pages(&pagelist);
1170cf608ac1SMinchan Kim 		}
11716ce3c4c0SChristoph Lameter 
11726ce3c4c0SChristoph Lameter 		if (!err && nr_failed && (flags & MPOL_MF_STRICT))
11736ce3c4c0SChristoph Lameter 			err = -EIO;
1174ab8a3e14SKOSAKI Motohiro 	} else
1175ab8a3e14SKOSAKI Motohiro 		putback_lru_pages(&pagelist);
1176b20a3503SChristoph Lameter 
11776ce3c4c0SChristoph Lameter 	up_write(&mm->mmap_sem);
1178b05ca738SKOSAKI Motohiro  mpol_out:
1179f0be3d32SLee Schermerhorn 	mpol_put(new);
11806ce3c4c0SChristoph Lameter 	return err;
11816ce3c4c0SChristoph Lameter }
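/*
 * Illustrative userspace sketch of reaching do_mbind() via mbind(2);
 * assumes <numaif.h> and libnuma, not part of this file:
 *
 *	unsigned long nmask = 1UL << 0;		restrict to node 0
 *	size_t len = 16 * 4096;
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (p != MAP_FAILED &&
 *	    mbind(p, len, MPOL_BIND, &nmask, sizeof(nmask) * 8 + 1,
 *		  MPOL_MF_STRICT))
 *		perror("mbind");
 *
 * start must be page aligned (checked above); len is rounded up to
 * whole pages.
 */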
11826ce3c4c0SChristoph Lameter 
118339743889SChristoph Lameter /*
11848bccd85fSChristoph Lameter  * User space interface with variable sized bitmaps for nodelists.
11858bccd85fSChristoph Lameter  */
11868bccd85fSChristoph Lameter 
11878bccd85fSChristoph Lameter /* Copy a node mask from user space. */
118839743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
11898bccd85fSChristoph Lameter 		     unsigned long maxnode)
11908bccd85fSChristoph Lameter {
11918bccd85fSChristoph Lameter 	unsigned long k;
11928bccd85fSChristoph Lameter 	unsigned long nlongs;
11938bccd85fSChristoph Lameter 	unsigned long endmask;
11948bccd85fSChristoph Lameter 
11958bccd85fSChristoph Lameter 	--maxnode;
11968bccd85fSChristoph Lameter 	nodes_clear(*nodes);
11978bccd85fSChristoph Lameter 	if (maxnode == 0 || !nmask)
11988bccd85fSChristoph Lameter 		return 0;
1199a9c930baSAndi Kleen 	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1200636f13c1SChris Wright 		return -EINVAL;
12018bccd85fSChristoph Lameter 
12028bccd85fSChristoph Lameter 	nlongs = BITS_TO_LONGS(maxnode);
12038bccd85fSChristoph Lameter 	if ((maxnode % BITS_PER_LONG) == 0)
12048bccd85fSChristoph Lameter 		endmask = ~0UL;
12058bccd85fSChristoph Lameter 	else
12068bccd85fSChristoph Lameter 		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
12078bccd85fSChristoph Lameter 
12088bccd85fSChristoph Lameter 	/* When the user specified more nodes than supported, just check
12098bccd85fSChristoph Lameter 	   that the unsupported part is all zero. */
12108bccd85fSChristoph Lameter 	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
12118bccd85fSChristoph Lameter 		if (nlongs > PAGE_SIZE/sizeof(long))
12128bccd85fSChristoph Lameter 			return -EINVAL;
12138bccd85fSChristoph Lameter 		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
12148bccd85fSChristoph Lameter 			unsigned long t;
12158bccd85fSChristoph Lameter 			if (get_user(t, nmask + k))
12168bccd85fSChristoph Lameter 				return -EFAULT;
12178bccd85fSChristoph Lameter 			if (k == nlongs - 1) {
12188bccd85fSChristoph Lameter 				if (t & endmask)
12198bccd85fSChristoph Lameter 					return -EINVAL;
12208bccd85fSChristoph Lameter 			} else if (t)
12218bccd85fSChristoph Lameter 				return -EINVAL;
12228bccd85fSChristoph Lameter 		}
12238bccd85fSChristoph Lameter 		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
12248bccd85fSChristoph Lameter 		endmask = ~0UL;
12258bccd85fSChristoph Lameter 	}
12268bccd85fSChristoph Lameter 
12278bccd85fSChristoph Lameter 	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
12288bccd85fSChristoph Lameter 		return -EFAULT;
12298bccd85fSChristoph Lameter 	nodes_addr(*nodes)[nlongs-1] &= endmask;
12308bccd85fSChristoph Lameter 	return 0;
12318bccd85fSChristoph Lameter }
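/*
 * Example of the masking above: on a 64-bit kernel, a userspace
 * maxnode of 17 becomes 16 after the decrement, so nlongs = 1 and
 * endmask = (1UL << 16) - 1; only node bits 0..15 survive the copy.
 */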
12328bccd85fSChristoph Lameter 
12338bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */
12348bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
12358bccd85fSChristoph Lameter 			      nodemask_t *nodes)
12368bccd85fSChristoph Lameter {
12378bccd85fSChristoph Lameter 	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
12388bccd85fSChristoph Lameter 	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
12398bccd85fSChristoph Lameter 
12408bccd85fSChristoph Lameter 	if (copy > nbytes) {
12418bccd85fSChristoph Lameter 		if (copy > PAGE_SIZE)
12428bccd85fSChristoph Lameter 			return -EINVAL;
12438bccd85fSChristoph Lameter 		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
12448bccd85fSChristoph Lameter 			return -EFAULT;
12458bccd85fSChristoph Lameter 		copy = nbytes;
12468bccd85fSChristoph Lameter 	}
12478bccd85fSChristoph Lameter 	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
12488bccd85fSChristoph Lameter }
12498bccd85fSChristoph Lameter 
1250938bb9f5SHeiko Carstens SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1251938bb9f5SHeiko Carstens 		unsigned long, mode, unsigned long __user *, nmask,
1252938bb9f5SHeiko Carstens 		unsigned long, maxnode, unsigned, flags)
12538bccd85fSChristoph Lameter {
12548bccd85fSChristoph Lameter 	nodemask_t nodes;
12558bccd85fSChristoph Lameter 	int err;
1256028fec41SDavid Rientjes 	unsigned short mode_flags;
12578bccd85fSChristoph Lameter 
1258028fec41SDavid Rientjes 	mode_flags = mode & MPOL_MODE_FLAGS;
1259028fec41SDavid Rientjes 	mode &= ~MPOL_MODE_FLAGS;
1260a3b51e01SDavid Rientjes 	if (mode >= MPOL_MAX)
1261a3b51e01SDavid Rientjes 		return -EINVAL;
12624c50bc01SDavid Rientjes 	if ((mode_flags & MPOL_F_STATIC_NODES) &&
12634c50bc01SDavid Rientjes 	    (mode_flags & MPOL_F_RELATIVE_NODES))
12644c50bc01SDavid Rientjes 		return -EINVAL;
12658bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
12668bccd85fSChristoph Lameter 	if (err)
12678bccd85fSChristoph Lameter 		return err;
1268028fec41SDavid Rientjes 	return do_mbind(start, len, mode, mode_flags, &nodes, flags);
12698bccd85fSChristoph Lameter }
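/*
 * Example of the mode encoding decoded above (illustrative): callers
 * OR optional flags into the mode argument, e.g.
 *
 *	mbind(p, len, MPOL_INTERLEAVE | MPOL_F_STATIC_NODES,
 *	      &nmask, maxnode, 0);
 *
 * MPOL_F_STATIC_NODES and MPOL_F_RELATIVE_NODES are mutually
 * exclusive, as checked above.
 */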
12708bccd85fSChristoph Lameter 
12718bccd85fSChristoph Lameter /* Set the process memory policy */
1272938bb9f5SHeiko Carstens SYSCALL_DEFINE3(set_mempolicy, int, mode, unsigned long __user *, nmask,
1273938bb9f5SHeiko Carstens 		unsigned long, maxnode)
12748bccd85fSChristoph Lameter {
12758bccd85fSChristoph Lameter 	int err;
12768bccd85fSChristoph Lameter 	nodemask_t nodes;
1277028fec41SDavid Rientjes 	unsigned short flags;
12788bccd85fSChristoph Lameter 
1279028fec41SDavid Rientjes 	flags = mode & MPOL_MODE_FLAGS;
1280028fec41SDavid Rientjes 	mode &= ~MPOL_MODE_FLAGS;
1281028fec41SDavid Rientjes 	if ((unsigned int)mode >= MPOL_MAX)
12828bccd85fSChristoph Lameter 		return -EINVAL;
12834c50bc01SDavid Rientjes 	if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
12844c50bc01SDavid Rientjes 		return -EINVAL;
12858bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
12868bccd85fSChristoph Lameter 	if (err)
12878bccd85fSChristoph Lameter 		return err;
1288028fec41SDavid Rientjes 	return do_set_mempolicy(mode, flags, &nodes);
12898bccd85fSChristoph Lameter }
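/*
 * Illustrative userspace sketch (assumes <numaif.h> from libnuma):
 *
 *	unsigned long nmask = (1UL << 0) | (1UL << 1);
 *	if (set_mempolicy(MPOL_INTERLEAVE, &nmask, sizeof(nmask) * 8 + 1))
 *		perror("set_mempolicy");
 *
 * Subsequent allocations in this task then interleave over nodes 0
 * and 1.
 */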
12908bccd85fSChristoph Lameter 
1291938bb9f5SHeiko Carstens SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1292938bb9f5SHeiko Carstens 		const unsigned long __user *, old_nodes,
1293938bb9f5SHeiko Carstens 		const unsigned long __user *, new_nodes)
129439743889SChristoph Lameter {
1295c69e8d9cSDavid Howells 	const struct cred *cred = current_cred(), *tcred;
1296596d7cfaSKOSAKI Motohiro 	struct mm_struct *mm = NULL;
129739743889SChristoph Lameter 	struct task_struct *task;
129839743889SChristoph Lameter 	nodemask_t task_nodes;
129939743889SChristoph Lameter 	int err;
1300596d7cfaSKOSAKI Motohiro 	nodemask_t *old;
1301596d7cfaSKOSAKI Motohiro 	nodemask_t *new;
1302596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH(scratch);
130339743889SChristoph Lameter 
1304596d7cfaSKOSAKI Motohiro 	if (!scratch)
1305596d7cfaSKOSAKI Motohiro 		return -ENOMEM;
130639743889SChristoph Lameter 
1307596d7cfaSKOSAKI Motohiro 	old = &scratch->mask1;
1308596d7cfaSKOSAKI Motohiro 	new = &scratch->mask2;
1309596d7cfaSKOSAKI Motohiro 
1310596d7cfaSKOSAKI Motohiro 	err = get_nodes(old, old_nodes, maxnode);
131139743889SChristoph Lameter 	if (err)
1312596d7cfaSKOSAKI Motohiro 		goto out;
1313596d7cfaSKOSAKI Motohiro 
1314596d7cfaSKOSAKI Motohiro 	err = get_nodes(new, new_nodes, maxnode);
1315596d7cfaSKOSAKI Motohiro 	if (err)
1316596d7cfaSKOSAKI Motohiro 		goto out;
131739743889SChristoph Lameter 
131839743889SChristoph Lameter 	/* Find the mm_struct */
131955cfaa3cSZeng Zhaoming 	rcu_read_lock();
1320228ebcbeSPavel Emelyanov 	task = pid ? find_task_by_vpid(pid) : current;
132139743889SChristoph Lameter 	if (!task) {
132255cfaa3cSZeng Zhaoming 		rcu_read_unlock();
1323596d7cfaSKOSAKI Motohiro 		err = -ESRCH;
1324596d7cfaSKOSAKI Motohiro 		goto out;
132539743889SChristoph Lameter 	}
132639743889SChristoph Lameter 	mm = get_task_mm(task);
132755cfaa3cSZeng Zhaoming 	rcu_read_unlock();
132839743889SChristoph Lameter 
1329596d7cfaSKOSAKI Motohiro 	err = -EINVAL;
133039743889SChristoph Lameter 	if (!mm)
1331596d7cfaSKOSAKI Motohiro 		goto out;
133239743889SChristoph Lameter 
133339743889SChristoph Lameter 	/*
133439743889SChristoph Lameter 	 * Check if this process has the right to modify the specified
133539743889SChristoph Lameter 	 * process. The right exists if the process has administrative
13367f927fccSAlexey Dobriyan 	 * capabilities, superuser privileges or the same
133739743889SChristoph Lameter 	 * userid as the target process.
133839743889SChristoph Lameter 	 */
1339c69e8d9cSDavid Howells 	rcu_read_lock();
1340c69e8d9cSDavid Howells 	tcred = __task_cred(task);
1341b6dff3ecSDavid Howells 	if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
1342b6dff3ecSDavid Howells 	    cred->uid  != tcred->suid && cred->uid  != tcred->uid &&
134374c00241SChristoph Lameter 	    !capable(CAP_SYS_NICE)) {
1344c69e8d9cSDavid Howells 		rcu_read_unlock();
134539743889SChristoph Lameter 		err = -EPERM;
134639743889SChristoph Lameter 		goto out;
134739743889SChristoph Lameter 	}
1348c69e8d9cSDavid Howells 	rcu_read_unlock();
134939743889SChristoph Lameter 
135039743889SChristoph Lameter 	task_nodes = cpuset_mems_allowed(task);
135139743889SChristoph Lameter 	/* Is the user allowed to access the target nodes? */
1352596d7cfaSKOSAKI Motohiro 	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
135339743889SChristoph Lameter 		err = -EPERM;
135439743889SChristoph Lameter 		goto out;
135539743889SChristoph Lameter 	}
135639743889SChristoph Lameter 
1357596d7cfaSKOSAKI Motohiro 	if (!nodes_subset(*new, node_states[N_HIGH_MEMORY])) {
13583b42d28bSChristoph Lameter 		err = -EINVAL;
13593b42d28bSChristoph Lameter 		goto out;
13603b42d28bSChristoph Lameter 	}
13613b42d28bSChristoph Lameter 
136286c3a764SDavid Quigley 	err = security_task_movememory(task);
136386c3a764SDavid Quigley 	if (err)
136486c3a764SDavid Quigley 		goto out;
136586c3a764SDavid Quigley 
1366596d7cfaSKOSAKI Motohiro 	err = do_migrate_pages(mm, old, new,
136774c00241SChristoph Lameter 		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
136839743889SChristoph Lameter out:
1369596d7cfaSKOSAKI Motohiro 	if (mm)
137039743889SChristoph Lameter 		mmput(mm);
1371596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH_FREE(scratch);
1372596d7cfaSKOSAKI Motohiro 
137339743889SChristoph Lameter 	return err;
137439743889SChristoph Lameter }
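/*
 * Illustrative userspace sketch (migrate_pages(2) wrapper in libnuma):
 *
 *	unsigned long from = 1UL << 0, to = 1UL << 1;
 *	if (migrate_pages(pid, sizeof(from) * 8 + 1, &from, &to) < 0)
 *		perror("migrate_pages");
 *
 * This asks to move pid's pages from node 0 to node 1; the caller
 * needs matching credentials or CAP_SYS_NICE, as checked above.
 */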
137539743889SChristoph Lameter 
137639743889SChristoph Lameter 
13778bccd85fSChristoph Lameter /* Retrieve NUMA policy */
1378938bb9f5SHeiko Carstens SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1379938bb9f5SHeiko Carstens 		unsigned long __user *, nmask, unsigned long, maxnode,
1380938bb9f5SHeiko Carstens 		unsigned long, addr, unsigned long, flags)
13818bccd85fSChristoph Lameter {
1382dbcb0f19SAdrian Bunk 	int err;
1383dbcb0f19SAdrian Bunk 	int uninitialized_var(pval);
13848bccd85fSChristoph Lameter 	nodemask_t nodes;
13858bccd85fSChristoph Lameter 
13868bccd85fSChristoph Lameter 	if (nmask != NULL && maxnode < MAX_NUMNODES)
13878bccd85fSChristoph Lameter 		return -EINVAL;
13888bccd85fSChristoph Lameter 
13898bccd85fSChristoph Lameter 	err = do_get_mempolicy(&pval, &nodes, addr, flags);
13908bccd85fSChristoph Lameter 
13918bccd85fSChristoph Lameter 	if (err)
13928bccd85fSChristoph Lameter 		return err;
13938bccd85fSChristoph Lameter 
13948bccd85fSChristoph Lameter 	if (policy && put_user(pval, policy))
13958bccd85fSChristoph Lameter 		return -EFAULT;
13968bccd85fSChristoph Lameter 
13978bccd85fSChristoph Lameter 	if (nmask)
13988bccd85fSChristoph Lameter 		err = copy_nodes_to_user(nmask, maxnode, &nodes);
13998bccd85fSChristoph Lameter 
14008bccd85fSChristoph Lameter 	return err;
14018bccd85fSChristoph Lameter }
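/*
 * Illustrative userspace sketch; the nodemask buffer must cover at
 * least MAX_NUMNODES bits (1024 is assumed here):
 *
 *	int mode;
 *	unsigned long nmask[1024 / (8 * sizeof(unsigned long))] = { 0 };
 *	if (get_mempolicy(&mode, nmask, 1024, NULL, 0))
 *		perror("get_mempolicy");
 */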
14028bccd85fSChristoph Lameter 
14031da177e4SLinus Torvalds #ifdef CONFIG_COMPAT
14041da177e4SLinus Torvalds 
14051da177e4SLinus Torvalds asmlinkage long compat_sys_get_mempolicy(int __user *policy,
14061da177e4SLinus Torvalds 				     compat_ulong_t __user *nmask,
14071da177e4SLinus Torvalds 				     compat_ulong_t maxnode,
14081da177e4SLinus Torvalds 				     compat_ulong_t addr, compat_ulong_t flags)
14091da177e4SLinus Torvalds {
14101da177e4SLinus Torvalds 	long err;
14111da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
14121da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
14131da177e4SLinus Torvalds 	DECLARE_BITMAP(bm, MAX_NUMNODES);
14141da177e4SLinus Torvalds 
14151da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
14161da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
14171da177e4SLinus Torvalds 
14181da177e4SLinus Torvalds 	if (nmask)
14191da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
14201da177e4SLinus Torvalds 
14211da177e4SLinus Torvalds 	err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
14221da177e4SLinus Torvalds 
14231da177e4SLinus Torvalds 	if (!err && nmask) {
14242bbff6c7SKAMEZAWA Hiroyuki 		unsigned long copy_size;
14252bbff6c7SKAMEZAWA Hiroyuki 		copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
14262bbff6c7SKAMEZAWA Hiroyuki 		err = copy_from_user(bm, nm, copy_size);
14271da177e4SLinus Torvalds 		/* ensure entire bitmap is zeroed */
14281da177e4SLinus Torvalds 		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
14291da177e4SLinus Torvalds 		err |= compat_put_bitmap(nmask, bm, nr_bits);
14301da177e4SLinus Torvalds 	}
14311da177e4SLinus Torvalds 
14321da177e4SLinus Torvalds 	return err;
14331da177e4SLinus Torvalds }
14341da177e4SLinus Torvalds 
14351da177e4SLinus Torvalds asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
14361da177e4SLinus Torvalds 				     compat_ulong_t maxnode)
14371da177e4SLinus Torvalds {
14381da177e4SLinus Torvalds 	long err = 0;
14391da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
14401da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
14411da177e4SLinus Torvalds 	DECLARE_BITMAP(bm, MAX_NUMNODES);
14421da177e4SLinus Torvalds 
14431da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
14441da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
14451da177e4SLinus Torvalds 
14461da177e4SLinus Torvalds 	if (nmask) {
14471da177e4SLinus Torvalds 		err = compat_get_bitmap(bm, nmask, nr_bits);
14481da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
14491da177e4SLinus Torvalds 		err |= copy_to_user(nm, bm, alloc_size);
14501da177e4SLinus Torvalds 	}
14511da177e4SLinus Torvalds 
14521da177e4SLinus Torvalds 	if (err)
14531da177e4SLinus Torvalds 		return -EFAULT;
14541da177e4SLinus Torvalds 
14551da177e4SLinus Torvalds 	return sys_set_mempolicy(mode, nm, nr_bits+1);
14561da177e4SLinus Torvalds }
14571da177e4SLinus Torvalds 
14581da177e4SLinus Torvalds asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
14591da177e4SLinus Torvalds 			     compat_ulong_t mode, compat_ulong_t __user *nmask,
14601da177e4SLinus Torvalds 			     compat_ulong_t maxnode, compat_ulong_t flags)
14611da177e4SLinus Torvalds {
14621da177e4SLinus Torvalds 	long err = 0;
14631da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
14641da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
1465dfcd3c0dSAndi Kleen 	nodemask_t bm;
14661da177e4SLinus Torvalds 
14671da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
14681da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
14691da177e4SLinus Torvalds 
14701da177e4SLinus Torvalds 	if (nmask) {
1471dfcd3c0dSAndi Kleen 		err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
14721da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
1473dfcd3c0dSAndi Kleen 		err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
14741da177e4SLinus Torvalds 	}
14751da177e4SLinus Torvalds 
14761da177e4SLinus Torvalds 	if (err)
14771da177e4SLinus Torvalds 		return -EFAULT;
14781da177e4SLinus Torvalds 
14791da177e4SLinus Torvalds 	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
14801da177e4SLinus Torvalds }
14811da177e4SLinus Torvalds 
14821da177e4SLinus Torvalds #endif
14831da177e4SLinus Torvalds 
1484480eccf9SLee Schermerhorn /*
1485480eccf9SLee Schermerhorn  * get_vma_policy(@task, @vma, @addr)
1486480eccf9SLee Schermerhorn  * @task - task for fallback if vma policy == default
1487480eccf9SLee Schermerhorn  * @vma   - virtual memory area whose policy is sought
1488480eccf9SLee Schermerhorn  * @addr  - address in @vma for shared policy lookup
1489480eccf9SLee Schermerhorn  *
1490480eccf9SLee Schermerhorn  * Returns effective policy for a VMA at specified address.
1491480eccf9SLee Schermerhorn  * Falls back to @task or system default policy, as necessary.
149252cd3b07SLee Schermerhorn  * Current or other task's task mempolicy and non-shared vma policies
149352cd3b07SLee Schermerhorn  * are protected by the task's mmap_sem, which must be held for read by
149452cd3b07SLee Schermerhorn  * the caller.
149552cd3b07SLee Schermerhorn  * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
149652cd3b07SLee Schermerhorn  * count--added by the get_policy() vm_op, as appropriate--to protect against
149752cd3b07SLee Schermerhorn  * freeing by another task.  It is the caller's responsibility to free the
149852cd3b07SLee Schermerhorn  * extra reference for shared policies.
1499480eccf9SLee Schermerhorn  */
1500d98f6cb6SStephen Wilson struct mempolicy *get_vma_policy(struct task_struct *task,
150148fce342SChristoph Lameter 		struct vm_area_struct *vma, unsigned long addr)
15021da177e4SLinus Torvalds {
15036e21c8f1SChristoph Lameter 	struct mempolicy *pol = task->mempolicy;
15041da177e4SLinus Torvalds 
15051da177e4SLinus Torvalds 	if (vma) {
1506480eccf9SLee Schermerhorn 		if (vma->vm_ops && vma->vm_ops->get_policy) {
1507ae4d8c16SLee Schermerhorn 			struct mempolicy *vpol = vma->vm_ops->get_policy(vma,
1508ae4d8c16SLee Schermerhorn 									addr);
1509ae4d8c16SLee Schermerhorn 			if (vpol)
1510ae4d8c16SLee Schermerhorn 				pol = vpol;
1511bea904d5SLee Schermerhorn 		} else if (vma->vm_policy)
15121da177e4SLinus Torvalds 			pol = vma->vm_policy;
15131da177e4SLinus Torvalds 	}
15141da177e4SLinus Torvalds 	if (!pol)
15151da177e4SLinus Torvalds 		pol = &default_policy;
15161da177e4SLinus Torvalds 	return pol;
15171da177e4SLinus Torvalds }
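/*
 * Typical caller pattern (sketch): look up the policy, use it, then
 * drop the conditional reference a shared policy may carry:
 *
 *	struct mempolicy *pol = get_vma_policy(current, vma, addr);
 *	...use pol...
 *	mpol_cond_put(pol);
 */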
15181da177e4SLinus Torvalds 
151952cd3b07SLee Schermerhorn /*
152052cd3b07SLee Schermerhorn  * Return a nodemask representing a mempolicy for filtering nodes for
152152cd3b07SLee Schermerhorn  * page allocation
152252cd3b07SLee Schermerhorn  */
152352cd3b07SLee Schermerhorn static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
152419770b32SMel Gorman {
152519770b32SMel Gorman 	/* Lower zones don't get a nodemask applied for MPOL_BIND */
152645c4745aSLee Schermerhorn 	if (unlikely(policy->mode == MPOL_BIND) &&
152719770b32SMel Gorman 			gfp_zone(gfp) >= policy_zone &&
152819770b32SMel Gorman 			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
152919770b32SMel Gorman 		return &policy->v.nodes;
153019770b32SMel Gorman 
153119770b32SMel Gorman 	return NULL;
153219770b32SMel Gorman }
153319770b32SMel Gorman 
153452cd3b07SLee Schermerhorn /* Return a zonelist indicated by gfp for node representing a mempolicy */
15352f5f9486SAndi Kleen static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
15362f5f9486SAndi Kleen 	int nd)
15371da177e4SLinus Torvalds {
153845c4745aSLee Schermerhorn 	switch (policy->mode) {
15391da177e4SLinus Torvalds 	case MPOL_PREFERRED:
1540fc36b8d3SLee Schermerhorn 		if (!(policy->flags & MPOL_F_LOCAL))
15411da177e4SLinus Torvalds 			nd = policy->v.preferred_node;
15421da177e4SLinus Torvalds 		break;
15431da177e4SLinus Torvalds 	case MPOL_BIND:
154419770b32SMel Gorman 		/*
154552cd3b07SLee Schermerhorn 		 * Normally, MPOL_BIND allocations are node-local within the
154652cd3b07SLee Schermerhorn 		 * allowed nodemask.  However, if __GFP_THISNODE is set and the
15476eb27e1fSBob Liu 		 * current node isn't part of the mask, we use the zonelist for
154852cd3b07SLee Schermerhorn 		 * the first node in the mask instead.
154919770b32SMel Gorman 		 */
155019770b32SMel Gorman 		if (unlikely(gfp & __GFP_THISNODE) &&
155119770b32SMel Gorman 				unlikely(!node_isset(nd, policy->v.nodes)))
155219770b32SMel Gorman 			nd = first_node(policy->v.nodes);
155319770b32SMel Gorman 		break;
15541da177e4SLinus Torvalds 	default:
15551da177e4SLinus Torvalds 		BUG();
15561da177e4SLinus Torvalds 	}
15570e88460dSMel Gorman 	return node_zonelist(nd, gfp);
15581da177e4SLinus Torvalds }
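/*
 * Example: MPOL_BIND to {2,3} with __GFP_THISNODE while running on
 * node 0 rewrites nd to first_node({2,3}) = 2 before building the
 * zonelist.
 */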
15591da177e4SLinus Torvalds 
15601da177e4SLinus Torvalds /* Do dynamic interleaving for a process */
15611da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy)
15621da177e4SLinus Torvalds {
15631da177e4SLinus Torvalds 	unsigned nid, next;
15641da177e4SLinus Torvalds 	struct task_struct *me = current;
15651da177e4SLinus Torvalds 
15661da177e4SLinus Torvalds 	nid = me->il_next;
1567dfcd3c0dSAndi Kleen 	next = next_node(nid, policy->v.nodes);
15681da177e4SLinus Torvalds 	if (next >= MAX_NUMNODES)
1569dfcd3c0dSAndi Kleen 		next = first_node(policy->v.nodes);
1570f5b087b5SDavid Rientjes 	if (next < MAX_NUMNODES)
15711da177e4SLinus Torvalds 		me->il_next = next;
15721da177e4SLinus Torvalds 	return nid;
15731da177e4SLinus Torvalds }
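/*
 * Example: with v.nodes = {0,2,5} and il_next = 2, this returns 2 and
 * advances il_next to 5; the following call returns 5 and wraps
 * il_next back to 0 via first_node().
 */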
15741da177e4SLinus Torvalds 
1575dc85da15SChristoph Lameter /*
1576dc85da15SChristoph Lameter  * Depending on the memory policy provide a node from which to allocate the
1577dc85da15SChristoph Lameter  * next slab entry.
157852cd3b07SLee Schermerhorn  * @policy must be protected from freeing by the caller.  If @policy is
157952cd3b07SLee Schermerhorn  * the current task's mempolicy, this protection is implicit, as only the
158052cd3b07SLee Schermerhorn  * task can change its policy.  The system default policy requires no
158152cd3b07SLee Schermerhorn  * such protection.
1582dc85da15SChristoph Lameter  */
1583dc85da15SChristoph Lameter unsigned slab_node(struct mempolicy *policy)
1584dc85da15SChristoph Lameter {
1585fc36b8d3SLee Schermerhorn 	if (!policy || policy->flags & MPOL_F_LOCAL)
1586bea904d5SLee Schermerhorn 		return numa_node_id();
1587765c4507SChristoph Lameter 
1588bea904d5SLee Schermerhorn 	switch (policy->mode) {
1589bea904d5SLee Schermerhorn 	case MPOL_PREFERRED:
1590fc36b8d3SLee Schermerhorn 		/*
1591fc36b8d3SLee Schermerhorn 		 * handled MPOL_F_LOCAL above
1592fc36b8d3SLee Schermerhorn 		 */
1593bea904d5SLee Schermerhorn 		return policy->v.preferred_node;
1594bea904d5SLee Schermerhorn 
1595dc85da15SChristoph Lameter 	case MPOL_INTERLEAVE:
1596dc85da15SChristoph Lameter 		return interleave_nodes(policy);
1597dc85da15SChristoph Lameter 
1598dd1a239fSMel Gorman 	case MPOL_BIND: {
1599dc85da15SChristoph Lameter 		/*
1600dc85da15SChristoph Lameter 		 * Follow bind policy behavior and start allocation at the
1601dc85da15SChristoph Lameter 		 * first node.
1602dc85da15SChristoph Lameter 		 */
160319770b32SMel Gorman 		struct zonelist *zonelist;
160419770b32SMel Gorman 		struct zone *zone;
160519770b32SMel Gorman 		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
160619770b32SMel Gorman 		zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
160719770b32SMel Gorman 		(void)first_zones_zonelist(zonelist, highest_zoneidx,
160819770b32SMel Gorman 							&policy->v.nodes,
160919770b32SMel Gorman 							&zone);
1610800416f7SEric Dumazet 		return zone ? zone->node : numa_node_id();
1611dd1a239fSMel Gorman 	}
1612dc85da15SChristoph Lameter 
1613dc85da15SChristoph Lameter 	default:
1614bea904d5SLee Schermerhorn 		BUG();
1615dc85da15SChristoph Lameter 	}
1616dc85da15SChristoph Lameter }
1617dc85da15SChristoph Lameter 
16181da177e4SLinus Torvalds /* Do static interleaving for a VMA with known offset. */
16191da177e4SLinus Torvalds static unsigned offset_il_node(struct mempolicy *pol,
16201da177e4SLinus Torvalds 		struct vm_area_struct *vma, unsigned long off)
16211da177e4SLinus Torvalds {
1622dfcd3c0dSAndi Kleen 	unsigned nnodes = nodes_weight(pol->v.nodes);
1623f5b087b5SDavid Rientjes 	unsigned target;
16241da177e4SLinus Torvalds 	int c;
16251da177e4SLinus Torvalds 	int nid = -1;
16261da177e4SLinus Torvalds 
1627f5b087b5SDavid Rientjes 	if (!nnodes)
1628f5b087b5SDavid Rientjes 		return numa_node_id();
1629f5b087b5SDavid Rientjes 	target = (unsigned int)off % nnodes;
16301da177e4SLinus Torvalds 	c = 0;
16311da177e4SLinus Torvalds 	do {
1632dfcd3c0dSAndi Kleen 		nid = next_node(nid, pol->v.nodes);
16331da177e4SLinus Torvalds 		c++;
16341da177e4SLinus Torvalds 	} while (c <= target);
16351da177e4SLinus Torvalds 	return nid;
16361da177e4SLinus Torvalds }
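/*
 * Example: with pol->v.nodes = {1,3,6} and off = 7, nnodes = 3 and
 * target = 7 % 3 = 1, so the walk stops at the second set node and
 * returns nid = 3.
 */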
16371da177e4SLinus Torvalds 
16385da7ca86SChristoph Lameter /* Determine a node number for interleave */
16395da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol,
16405da7ca86SChristoph Lameter 		 struct vm_area_struct *vma, unsigned long addr, int shift)
16415da7ca86SChristoph Lameter {
16425da7ca86SChristoph Lameter 	if (vma) {
16435da7ca86SChristoph Lameter 		unsigned long off;
16445da7ca86SChristoph Lameter 
16453b98b087SNishanth Aravamudan 		/*
16463b98b087SNishanth Aravamudan 		 * for small pages, there is no difference between
16473b98b087SNishanth Aravamudan 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
16483b98b087SNishanth Aravamudan 		 * for huge pages, since vm_pgoff is in units of small
16493b98b087SNishanth Aravamudan 		 * pages, we need to shift off the always 0 bits to get
16503b98b087SNishanth Aravamudan 		 * a useful offset.
16513b98b087SNishanth Aravamudan 		 */
16523b98b087SNishanth Aravamudan 		BUG_ON(shift < PAGE_SHIFT);
16533b98b087SNishanth Aravamudan 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
16545da7ca86SChristoph Lameter 		off += (addr - vma->vm_start) >> shift;
16555da7ca86SChristoph Lameter 		return offset_il_node(pol, vma, off);
16565da7ca86SChristoph Lameter 	} else
16575da7ca86SChristoph Lameter 		return interleave_nodes(pol);
16585da7ca86SChristoph Lameter }
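/*
 * Example: for a 2MB huge page (shift = 21) vm_pgoff is in 4KB units,
 * so off = (vm_pgoff >> 9) + ((addr - vm_start) >> 21) counts whole
 * huge pages, keeping the interleave node stable for a given offset.
 */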
16595da7ca86SChristoph Lameter 
1660778d3b0fSMichal Hocko /*
1661778d3b0fSMichal Hocko  * Return the bit number of a random bit set in the nodemask.
1662778d3b0fSMichal Hocko  * (returns -1 if nodemask is empty)
1663778d3b0fSMichal Hocko  */
1664778d3b0fSMichal Hocko int node_random(const nodemask_t *maskp)
1665778d3b0fSMichal Hocko {
1666778d3b0fSMichal Hocko 	int w, bit = -1;
1667778d3b0fSMichal Hocko 
1668778d3b0fSMichal Hocko 	w = nodes_weight(*maskp);
1669778d3b0fSMichal Hocko 	if (w)
1670778d3b0fSMichal Hocko 		bit = bitmap_ord_to_pos(maskp->bits,
1671778d3b0fSMichal Hocko 			get_random_int() % w, MAX_NUMNODES);
1672778d3b0fSMichal Hocko 	return bit;
1673778d3b0fSMichal Hocko }
1674778d3b0fSMichal Hocko 
167500ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS
1676480eccf9SLee Schermerhorn /*
1677480eccf9SLee Schermerhorn  * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
1678480eccf9SLee Schermerhorn  * @vma = virtual memory area whose policy is sought
1679480eccf9SLee Schermerhorn  * @addr = address in @vma for shared policy lookup and interleave policy
1680480eccf9SLee Schermerhorn  * @gfp_flags = for requested zone
168119770b32SMel Gorman  * @mpol = pointer to mempolicy pointer for reference counted mempolicy
168219770b32SMel Gorman  * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
1683480eccf9SLee Schermerhorn  *
168452cd3b07SLee Schermerhorn  * Returns a zonelist suitable for a huge page allocation and a pointer
168552cd3b07SLee Schermerhorn  * to the struct mempolicy for conditional unref after allocation.
168652cd3b07SLee Schermerhorn  * If the effective policy is 'BIND', returns a pointer to the mempolicy's
168752cd3b07SLee Schermerhorn  * @nodemask for filtering the zonelist.
1688c0ff7453SMiao Xie  *
1689c0ff7453SMiao Xie  * Must be protected by get_mems_allowed()
1690480eccf9SLee Schermerhorn  */
1691396faf03SMel Gorman struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
169219770b32SMel Gorman 				gfp_t gfp_flags, struct mempolicy **mpol,
169319770b32SMel Gorman 				nodemask_t **nodemask)
16945da7ca86SChristoph Lameter {
1695480eccf9SLee Schermerhorn 	struct zonelist *zl;
16965da7ca86SChristoph Lameter 
169752cd3b07SLee Schermerhorn 	*mpol = get_vma_policy(current, vma, addr);
169819770b32SMel Gorman 	*nodemask = NULL;	/* assume !MPOL_BIND */
16995da7ca86SChristoph Lameter 
170052cd3b07SLee Schermerhorn 	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
170152cd3b07SLee Schermerhorn 		zl = node_zonelist(interleave_nid(*mpol, vma, addr,
1702a5516438SAndi Kleen 				huge_page_shift(hstate_vma(vma))), gfp_flags);
170352cd3b07SLee Schermerhorn 	} else {
17042f5f9486SAndi Kleen 		zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
170552cd3b07SLee Schermerhorn 		if ((*mpol)->mode == MPOL_BIND)
170652cd3b07SLee Schermerhorn 			*nodemask = &(*mpol)->v.nodes;
1707480eccf9SLee Schermerhorn 	}
1708480eccf9SLee Schermerhorn 	return zl;
17095da7ca86SChristoph Lameter }
171006808b08SLee Schermerhorn 
171106808b08SLee Schermerhorn /*
171206808b08SLee Schermerhorn  * init_nodemask_of_mempolicy
171306808b08SLee Schermerhorn  *
171406808b08SLee Schermerhorn  * If the current task's mempolicy is "default" [NULL], return 'false'
171506808b08SLee Schermerhorn  * to indicate default policy.  Otherwise, extract the policy nodemask
171606808b08SLee Schermerhorn  * for 'bind' or 'interleave' policy into the argument nodemask, or
171706808b08SLee Schermerhorn  * initialize the argument nodemask to contain the single node for
171806808b08SLee Schermerhorn  * 'preferred' or 'local' policy and return 'true' to indicate presence
171906808b08SLee Schermerhorn  * of non-default mempolicy.
172006808b08SLee Schermerhorn  *
172106808b08SLee Schermerhorn  * We don't bother with reference counting the mempolicy [mpol_get/put]
172206808b08SLee Schermerhorn  * because the current task is examining its own mempolicy and a task's
172306808b08SLee Schermerhorn  * mempolicy is only ever changed by the task itself.
172406808b08SLee Schermerhorn  *
172506808b08SLee Schermerhorn  * N.B., it is the caller's responsibility to free a returned nodemask.
172606808b08SLee Schermerhorn  */
172706808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask)
172806808b08SLee Schermerhorn {
172906808b08SLee Schermerhorn 	struct mempolicy *mempolicy;
173006808b08SLee Schermerhorn 	int nid;
173106808b08SLee Schermerhorn 
173206808b08SLee Schermerhorn 	if (!(mask && current->mempolicy))
173306808b08SLee Schermerhorn 		return false;
173406808b08SLee Schermerhorn 
1735c0ff7453SMiao Xie 	task_lock(current);
173606808b08SLee Schermerhorn 	mempolicy = current->mempolicy;
173706808b08SLee Schermerhorn 	switch (mempolicy->mode) {
173806808b08SLee Schermerhorn 	case MPOL_PREFERRED:
173906808b08SLee Schermerhorn 		if (mempolicy->flags & MPOL_F_LOCAL)
174006808b08SLee Schermerhorn 			nid = numa_node_id();
174106808b08SLee Schermerhorn 		else
174206808b08SLee Schermerhorn 			nid = mempolicy->v.preferred_node;
174306808b08SLee Schermerhorn 		init_nodemask_of_node(mask, nid);
174406808b08SLee Schermerhorn 		break;
174506808b08SLee Schermerhorn 
174606808b08SLee Schermerhorn 	case MPOL_BIND:
174706808b08SLee Schermerhorn 		/* Fall through */
174806808b08SLee Schermerhorn 	case MPOL_INTERLEAVE:
174906808b08SLee Schermerhorn 		*mask = mempolicy->v.nodes;
175006808b08SLee Schermerhorn 		break;
175106808b08SLee Schermerhorn 
175206808b08SLee Schermerhorn 	default:
175306808b08SLee Schermerhorn 		BUG();
175406808b08SLee Schermerhorn 	}
1755c0ff7453SMiao Xie 	task_unlock(current);
175606808b08SLee Schermerhorn 
175706808b08SLee Schermerhorn 	return true;
175806808b08SLee Schermerhorn }
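/*
 * Usage sketch (illustrative; do_something() is a placeholder for
 * per-node work):
 *
 *	int nid;
 *	NODEMASK_ALLOC(nodemask_t, mask, GFP_KERNEL);
 *	if (mask && init_nodemask_of_mempolicy(mask))
 *		for_each_node_mask(nid, *mask)
 *			do_something(nid);
 *	NODEMASK_FREE(mask);
 */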
175900ac59adSChen, Kenneth W #endif
17605da7ca86SChristoph Lameter 
17616f48d0ebSDavid Rientjes /*
17626f48d0ebSDavid Rientjes  * mempolicy_nodemask_intersects
17636f48d0ebSDavid Rientjes  *
17646f48d0ebSDavid Rientjes  * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
17656f48d0ebSDavid Rientjes  * policy.  Otherwise, check for intersection between mask and the policy
17666f48d0ebSDavid Rientjes  * nodemask for 'bind' or 'interleave' policy.  For 'preferred' or 'local'
17676f48d0ebSDavid Rientjes  * policy, always return true since it may allocate elsewhere on fallback.
17686f48d0ebSDavid Rientjes  *
17696f48d0ebSDavid Rientjes  * Takes task_lock(tsk) to prevent freeing of its mempolicy.
17706f48d0ebSDavid Rientjes  */
17716f48d0ebSDavid Rientjes bool mempolicy_nodemask_intersects(struct task_struct *tsk,
17726f48d0ebSDavid Rientjes 					const nodemask_t *mask)
17736f48d0ebSDavid Rientjes {
17746f48d0ebSDavid Rientjes 	struct mempolicy *mempolicy;
17756f48d0ebSDavid Rientjes 	bool ret = true;
17766f48d0ebSDavid Rientjes 
17776f48d0ebSDavid Rientjes 	if (!mask)
17786f48d0ebSDavid Rientjes 		return ret;
17796f48d0ebSDavid Rientjes 	task_lock(tsk);
17806f48d0ebSDavid Rientjes 	mempolicy = tsk->mempolicy;
17816f48d0ebSDavid Rientjes 	if (!mempolicy)
17826f48d0ebSDavid Rientjes 		goto out;
17836f48d0ebSDavid Rientjes 
17846f48d0ebSDavid Rientjes 	switch (mempolicy->mode) {
17856f48d0ebSDavid Rientjes 	case MPOL_PREFERRED:
17866f48d0ebSDavid Rientjes 		/*
17876f48d0ebSDavid Rientjes 		 * MPOL_PREFERRED and MPOL_F_LOCAL only express preferred nodes to
17886f48d0ebSDavid Rientjes 		 * allocate from; the task may fall back to other nodes when oom.
17896f48d0ebSDavid Rientjes 		 * Thus, it's possible for tsk to have allocated memory from
17906f48d0ebSDavid Rientjes 		 * nodes in mask.
17916f48d0ebSDavid Rientjes 		 */
17926f48d0ebSDavid Rientjes 		break;
17936f48d0ebSDavid Rientjes 	case MPOL_BIND:
17946f48d0ebSDavid Rientjes 	case MPOL_INTERLEAVE:
17956f48d0ebSDavid Rientjes 		ret = nodes_intersects(mempolicy->v.nodes, *mask);
17966f48d0ebSDavid Rientjes 		break;
17976f48d0ebSDavid Rientjes 	default:
17986f48d0ebSDavid Rientjes 		BUG();
17996f48d0ebSDavid Rientjes 	}
18006f48d0ebSDavid Rientjes out:
18016f48d0ebSDavid Rientjes 	task_unlock(tsk);
18026f48d0ebSDavid Rientjes 	return ret;
18036f48d0ebSDavid Rientjes }
18046f48d0ebSDavid Rientjes 
18051da177e4SLinus Torvalds /* Allocate a page in interleaved policy.
18061da177e4SLinus Torvalds    Own path because it needs to do special accounting. */
1807662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1808662f3a0bSAndi Kleen 					unsigned nid)
18091da177e4SLinus Torvalds {
18101da177e4SLinus Torvalds 	struct zonelist *zl;
18111da177e4SLinus Torvalds 	struct page *page;
18121da177e4SLinus Torvalds 
18130e88460dSMel Gorman 	zl = node_zonelist(nid, gfp);
18141da177e4SLinus Torvalds 	page = __alloc_pages(gfp, order, zl);
1815dd1a239fSMel Gorman 	if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
1816ca889e6cSChristoph Lameter 		inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
18171da177e4SLinus Torvalds 	return page;
18181da177e4SLinus Torvalds }
18191da177e4SLinus Torvalds 
18201da177e4SLinus Torvalds /**
18210bbbc0b3SAndrea Arcangeli  * 	alloc_pages_vma	- Allocate a page for a VMA.
18221da177e4SLinus Torvalds  *
18231da177e4SLinus Torvalds  * 	@gfp:
18241da177e4SLinus Torvalds  *      %GFP_USER    user allocation.
18251da177e4SLinus Torvalds  *      %GFP_KERNEL  kernel allocations,
18261da177e4SLinus Torvalds  *      %GFP_HIGHMEM highmem/user allocations,
18271da177e4SLinus Torvalds  *      %GFP_FS      allocation should not call back into a file system.
18281da177e4SLinus Torvalds  *      %GFP_ATOMIC  don't sleep.
18291da177e4SLinus Torvalds  *
18300bbbc0b3SAndrea Arcangeli  *	@order: Order of the GFP allocation.
18311da177e4SLinus Torvalds  * 	@vma:  Pointer to VMA or NULL if not available.
18321da177e4SLinus Torvalds  *	@addr: Virtual Address of the allocation. Must be inside the VMA.
18331da177e4SLinus Torvalds  *
18341da177e4SLinus Torvalds  * 	This function allocates a page from the kernel page pool and applies
18351da177e4SLinus Torvalds  *	a NUMA policy associated with the VMA or the current process.
18361da177e4SLinus Torvalds  *	When VMA is not NULL caller must hold down_read on the mmap_sem of the
18371da177e4SLinus Torvalds  *	mm_struct of the VMA to prevent it from going away. Should be used for
18381da177e4SLinus Torvalds  *	all allocations for pages that will be mapped into
18391da177e4SLinus Torvalds  * 	user space. Returns NULL when no page can be allocated.
18401da177e4SLinus Torvalds  *
18411da177e4SLinus Torvalds  *	Should be called with the mmap_sem of the vma held.
18421da177e4SLinus Torvalds  */
18431da177e4SLinus Torvalds struct page *
18440bbbc0b3SAndrea Arcangeli alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
18452f5f9486SAndi Kleen 		unsigned long addr, int node)
18461da177e4SLinus Torvalds {
18476e21c8f1SChristoph Lameter 	struct mempolicy *pol = get_vma_policy(current, vma, addr);
1848480eccf9SLee Schermerhorn 	struct zonelist *zl;
1849c0ff7453SMiao Xie 	struct page *page;
18501da177e4SLinus Torvalds 
1851c0ff7453SMiao Xie 	get_mems_allowed();
185245c4745aSLee Schermerhorn 	if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
18531da177e4SLinus Torvalds 		unsigned nid;
18545da7ca86SChristoph Lameter 
18558eac563cSAndi Kleen 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
185652cd3b07SLee Schermerhorn 		mpol_cond_put(pol);
18570bbbc0b3SAndrea Arcangeli 		page = alloc_page_interleave(gfp, order, nid);
1858c0ff7453SMiao Xie 		put_mems_allowed();
1859c0ff7453SMiao Xie 		return page;
18601da177e4SLinus Torvalds 	}
18612f5f9486SAndi Kleen 	zl = policy_zonelist(gfp, pol, node);
186252cd3b07SLee Schermerhorn 	if (unlikely(mpol_needs_cond_ref(pol))) {
1863480eccf9SLee Schermerhorn 		/*
186452cd3b07SLee Schermerhorn 		 * slow path: ref counted shared policy
1865480eccf9SLee Schermerhorn 		 */
18660bbbc0b3SAndrea Arcangeli 		struct page *page = __alloc_pages_nodemask(gfp, order,
186752cd3b07SLee Schermerhorn 						zl, policy_nodemask(gfp, pol));
1868f0be3d32SLee Schermerhorn 		__mpol_put(pol);
1869c0ff7453SMiao Xie 		put_mems_allowed();
1870480eccf9SLee Schermerhorn 		return page;
1871480eccf9SLee Schermerhorn 	}
1872480eccf9SLee Schermerhorn 	/*
1873480eccf9SLee Schermerhorn 	 * fast path:  default or task policy
1874480eccf9SLee Schermerhorn 	 */
18750bbbc0b3SAndrea Arcangeli 	page = __alloc_pages_nodemask(gfp, order, zl,
18760bbbc0b3SAndrea Arcangeli 				      policy_nodemask(gfp, pol));
1877c0ff7453SMiao Xie 	put_mems_allowed();
1878c0ff7453SMiao Xie 	return page;
18791da177e4SLinus Torvalds }
18801da177e4SLinus Torvalds 
18811da177e4SLinus Torvalds /**
18821da177e4SLinus Torvalds  * 	alloc_pages_current - Allocate pages.
18831da177e4SLinus Torvalds  *
18841da177e4SLinus Torvalds  *	@gfp:
18851da177e4SLinus Torvalds  *		%GFP_USER   user allocation,
18861da177e4SLinus Torvalds  *      	%GFP_KERNEL kernel allocation,
18871da177e4SLinus Torvalds  *      	%GFP_HIGHMEM highmem allocation,
18881da177e4SLinus Torvalds  *      	%GFP_FS     don't call back into a file system.
18891da177e4SLinus Torvalds  *      	%GFP_ATOMIC don't sleep.
18901da177e4SLinus Torvalds  *	@order: Power of two of allocation size in pages. 0 is a single page.
18911da177e4SLinus Torvalds  *
18921da177e4SLinus Torvalds  *	Allocate a page from the kernel page pool.  When not in
18931da177e4SLinus Torvalds  *	interrupt context, applies the current process' NUMA policy.
18941da177e4SLinus Torvalds  *	Returns NULL when no page can be allocated.
18951da177e4SLinus Torvalds  *
1896cf2a473cSPaul Jackson  *	Don't call cpuset_update_task_memory_state() unless
18971da177e4SLinus Torvalds  *	1) it's ok to take cpuset_sem (can WAIT), and
18981da177e4SLinus Torvalds  *	2) allocating for current task (not interrupt).
18991da177e4SLinus Torvalds  */
1900dd0fc66fSAl Viro struct page *alloc_pages_current(gfp_t gfp, unsigned order)
19011da177e4SLinus Torvalds {
19021da177e4SLinus Torvalds 	struct mempolicy *pol = current->mempolicy;
1903c0ff7453SMiao Xie 	struct page *page;
19041da177e4SLinus Torvalds 
19059b819d20SChristoph Lameter 	if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
19061da177e4SLinus Torvalds 		pol = &default_policy;
190752cd3b07SLee Schermerhorn 
1908c0ff7453SMiao Xie 	get_mems_allowed();
190952cd3b07SLee Schermerhorn 	/*
191052cd3b07SLee Schermerhorn 	 * No reference counting needed for current->mempolicy
191152cd3b07SLee Schermerhorn 	 * nor system default_policy
191252cd3b07SLee Schermerhorn 	 */
191345c4745aSLee Schermerhorn 	if (pol->mode == MPOL_INTERLEAVE)
1914c0ff7453SMiao Xie 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
1915c0ff7453SMiao Xie 	else
1916c0ff7453SMiao Xie 		page = __alloc_pages_nodemask(gfp, order,
19175c4b4be3SAndi Kleen 				policy_zonelist(gfp, pol, numa_node_id()),
19185c4b4be3SAndi Kleen 				policy_nodemask(gfp, pol));
1919c0ff7453SMiao Xie 	put_mems_allowed();
1920c0ff7453SMiao Xie 	return page;
19211da177e4SLinus Torvalds }
19221da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_pages_current);
19231da177e4SLinus Torvalds 
19244225399aSPaul Jackson /*
1925846a16bfSLee Schermerhorn  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
19264225399aSPaul Jackson  * rebinds the mempolicy its copying by calling mpol_rebind_policy()
19274225399aSPaul Jackson  * with the mems_allowed returned by cpuset_mems_allowed().  This
19284225399aSPaul Jackson  * keeps mempolicies cpuset relative after its cpuset moves.  See
19294225399aSPaul Jackson  * further kernel/cpuset.c update_nodemask().
1930708c1bbcSMiao Xie  *
1931708c1bbcSMiao Xie  * current's mempolicy may be rebinded by the other task(the task that changes
1932708c1bbcSMiao Xie  * cpuset's mems), so we needn't do rebind work for current task.
19334225399aSPaul Jackson  */
19344225399aSPaul Jackson 
1935846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */
1936846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old)
19371da177e4SLinus Torvalds {
19381da177e4SLinus Torvalds 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
19391da177e4SLinus Torvalds 
19401da177e4SLinus Torvalds 	if (!new)
19411da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
1942708c1bbcSMiao Xie 
1943708c1bbcSMiao Xie 	/* task's mempolicy is protected by alloc_lock */
1944708c1bbcSMiao Xie 	if (old == current->mempolicy) {
1945708c1bbcSMiao Xie 		task_lock(current);
1946708c1bbcSMiao Xie 		*new = *old;
1947708c1bbcSMiao Xie 		task_unlock(current);
1948708c1bbcSMiao Xie 	} else
1949708c1bbcSMiao Xie 		*new = *old;
1950708c1bbcSMiao Xie 
195199ee4ca7SPaul E. McKenney 	rcu_read_lock();
19524225399aSPaul Jackson 	if (current_cpuset_is_being_rebound()) {
19534225399aSPaul Jackson 		nodemask_t mems = cpuset_mems_allowed(current);
1954708c1bbcSMiao Xie 		if (new->flags & MPOL_F_REBINDING)
1955708c1bbcSMiao Xie 			mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
1956708c1bbcSMiao Xie 		else
1957708c1bbcSMiao Xie 			mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
19584225399aSPaul Jackson 	}
195999ee4ca7SPaul E. McKenney 	rcu_read_unlock();
19601da177e4SLinus Torvalds 	atomic_set(&new->refcnt, 1);
19611da177e4SLinus Torvalds 	return new;
19621da177e4SLinus Torvalds }
19631da177e4SLinus Torvalds 
196452cd3b07SLee Schermerhorn /*
196552cd3b07SLee Schermerhorn  * If *frompol needs [has] an extra ref, copy *frompol to *tompol,
196652cd3b07SLee Schermerhorn  * eliminate the MPOL_F_* flags that require conditional ref and
196752cd3b07SLee Schermerhorn  * [NOTE!!!] drop the extra ref.  Not safe to reference *frompol directly
196852cd3b07SLee Schermerhorn  * after return.  Use the returned value.
196952cd3b07SLee Schermerhorn  *
197052cd3b07SLee Schermerhorn  * Allows use of a mempolicy for, e.g., multiple allocations with a single
197152cd3b07SLee Schermerhorn  * policy lookup, even if the policy needs/has extra ref on lookup.
197252cd3b07SLee Schermerhorn  * shmem_readahead needs this.
197352cd3b07SLee Schermerhorn  */
197452cd3b07SLee Schermerhorn struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
197552cd3b07SLee Schermerhorn 						struct mempolicy *frompol)
197652cd3b07SLee Schermerhorn {
197752cd3b07SLee Schermerhorn 	if (!mpol_needs_cond_ref(frompol))
197852cd3b07SLee Schermerhorn 		return frompol;
197952cd3b07SLee Schermerhorn 
198052cd3b07SLee Schermerhorn 	*tompol = *frompol;
198152cd3b07SLee Schermerhorn 	tompol->flags &= ~MPOL_F_SHARED;	/* copy doesn't need unref */
198252cd3b07SLee Schermerhorn 	__mpol_put(frompol);
198352cd3b07SLee Schermerhorn 	return tompol;
198452cd3b07SLee Schermerhorn }
198552cd3b07SLee Schermerhorn 
19861da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */
1987fcfb4dccSKOSAKI Motohiro bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
19881da177e4SLinus Torvalds {
19891da177e4SLinus Torvalds 	if (!a || !b)
1990fcfb4dccSKOSAKI Motohiro 		return false;
199145c4745aSLee Schermerhorn 	if (a->mode != b->mode)
1992fcfb4dccSKOSAKI Motohiro 		return false;
199319800502SBob Liu 	if (a->flags != b->flags)
1994fcfb4dccSKOSAKI Motohiro 		return false;
199519800502SBob Liu 	if (mpol_store_user_nodemask(a))
199619800502SBob Liu 		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
1997fcfb4dccSKOSAKI Motohiro 			return false;
199819800502SBob Liu 
199945c4745aSLee Schermerhorn 	switch (a->mode) {
200019770b32SMel Gorman 	case MPOL_BIND:
200119770b32SMel Gorman 		/* Fall through */
20021da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
2003fcfb4dccSKOSAKI Motohiro 		return !!nodes_equal(a->v.nodes, b->v.nodes);
20041da177e4SLinus Torvalds 	case MPOL_PREFERRED:
200575719661SNamhyung Kim 		return a->v.preferred_node == b->v.preferred_node;
20061da177e4SLinus Torvalds 	default:
20071da177e4SLinus Torvalds 		BUG();
2008fcfb4dccSKOSAKI Motohiro 		return false;
20091da177e4SLinus Torvalds 	}
20101da177e4SLinus Torvalds }
20111da177e4SLinus Torvalds 
20121da177e4SLinus Torvalds /*
20131da177e4SLinus Torvalds  * Shared memory backing store policy support.
20141da177e4SLinus Torvalds  *
20151da177e4SLinus Torvalds  * Remember policies even when nobody has shared memory mapped.
20161da177e4SLinus Torvalds  * The policies are kept in Red-Black tree linked from the inode.
20171da177e4SLinus Torvalds  * They are protected by the sp->lock spinlock, which should be held
20181da177e4SLinus Torvalds  * for any accesses to the tree.
20191da177e4SLinus Torvalds  */
20201da177e4SLinus Torvalds 
20211da177e4SLinus Torvalds /* lookup first element intersecting start-end */
20221da177e4SLinus Torvalds /* Caller holds sp->lock */
20231da177e4SLinus Torvalds static struct sp_node *
20241da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
20251da177e4SLinus Torvalds {
20261da177e4SLinus Torvalds 	struct rb_node *n = sp->root.rb_node;
20271da177e4SLinus Torvalds 
20281da177e4SLinus Torvalds 	while (n) {
20291da177e4SLinus Torvalds 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
20301da177e4SLinus Torvalds 
20311da177e4SLinus Torvalds 		if (start >= p->end)
20321da177e4SLinus Torvalds 			n = n->rb_right;
20331da177e4SLinus Torvalds 		else if (end <= p->start)
20341da177e4SLinus Torvalds 			n = n->rb_left;
20351da177e4SLinus Torvalds 		else
20361da177e4SLinus Torvalds 			break;
20371da177e4SLinus Torvalds 	}
20381da177e4SLinus Torvalds 	if (!n)
20391da177e4SLinus Torvalds 		return NULL;
20401da177e4SLinus Torvalds 	for (;;) {
20411da177e4SLinus Torvalds 		struct sp_node *w = NULL;
20421da177e4SLinus Torvalds 		struct rb_node *prev = rb_prev(n);
20431da177e4SLinus Torvalds 		if (!prev)
20441da177e4SLinus Torvalds 			break;
20451da177e4SLinus Torvalds 		w = rb_entry(prev, struct sp_node, nd);
20461da177e4SLinus Torvalds 		if (w->end <= start)
20471da177e4SLinus Torvalds 			break;
20481da177e4SLinus Torvalds 		n = prev;
20491da177e4SLinus Torvalds 	}
20501da177e4SLinus Torvalds 	return rb_entry(n, struct sp_node, nd);
20511da177e4SLinus Torvalds }
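/*
 * Example: with ranges [0,2), [2,5) and [7,9) in the tree, a lookup
 * for start = 3, end = 8 may first hit [7,9); the backward walk then
 * settles on [2,5), the lowest intersecting range, because [0,2) ends
 * at or before start.
 */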
20521da177e4SLinus Torvalds 
20531da177e4SLinus Torvalds /* Insert a new shared policy into the list. */
20541da177e4SLinus Torvalds /* Caller holds sp->lock */
20551da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new)
20561da177e4SLinus Torvalds {
20571da177e4SLinus Torvalds 	struct rb_node **p = &sp->root.rb_node;
20581da177e4SLinus Torvalds 	struct rb_node *parent = NULL;
20591da177e4SLinus Torvalds 	struct sp_node *nd;
20601da177e4SLinus Torvalds 
20611da177e4SLinus Torvalds 	while (*p) {
20621da177e4SLinus Torvalds 		parent = *p;
20631da177e4SLinus Torvalds 		nd = rb_entry(parent, struct sp_node, nd);
20641da177e4SLinus Torvalds 		if (new->start < nd->start)
20651da177e4SLinus Torvalds 			p = &(*p)->rb_left;
20661da177e4SLinus Torvalds 		else if (new->end > nd->end)
20671da177e4SLinus Torvalds 			p = &(*p)->rb_right;
20681da177e4SLinus Torvalds 		else
20691da177e4SLinus Torvalds 			BUG();
20701da177e4SLinus Torvalds 	}
20711da177e4SLinus Torvalds 	rb_link_node(&new->nd, parent, p);
20721da177e4SLinus Torvalds 	rb_insert_color(&new->nd, &sp->root);
2073140d5a49SPaul Mundt 	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
207445c4745aSLee Schermerhorn 		 new->policy ? new->policy->mode : 0);
20751da177e4SLinus Torvalds }
20761da177e4SLinus Torvalds 
20771da177e4SLinus Torvalds /* Find shared policy intersecting idx */
20781da177e4SLinus Torvalds struct mempolicy *
20791da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
20801da177e4SLinus Torvalds {
20811da177e4SLinus Torvalds 	struct mempolicy *pol = NULL;
20821da177e4SLinus Torvalds 	struct sp_node *sn;
20831da177e4SLinus Torvalds 
20841da177e4SLinus Torvalds 	if (!sp->root.rb_node)
20851da177e4SLinus Torvalds 		return NULL;
20861da177e4SLinus Torvalds 	spin_lock(&sp->lock);
20871da177e4SLinus Torvalds 	sn = sp_lookup(sp, idx, idx+1);
20881da177e4SLinus Torvalds 	if (sn) {
20891da177e4SLinus Torvalds 		mpol_get(sn->policy);
20901da177e4SLinus Torvalds 		pol = sn->policy;
20911da177e4SLinus Torvalds 	}
20921da177e4SLinus Torvalds 	spin_unlock(&sp->lock);
20931da177e4SLinus Torvalds 	return pol;
20941da177e4SLinus Torvalds }
20951da177e4SLinus Torvalds 
20961da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n)
20971da177e4SLinus Torvalds {
2098140d5a49SPaul Mundt 	pr_debug("deleting %lx-%lx\n", n->start, n->end);
20991da177e4SLinus Torvalds 	rb_erase(&n->nd, &sp->root);
2100f0be3d32SLee Schermerhorn 	mpol_put(n->policy);
21011da177e4SLinus Torvalds 	kmem_cache_free(sn_cache, n);
21021da177e4SLinus Torvalds }
21031da177e4SLinus Torvalds 
2104dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2105dbcb0f19SAdrian Bunk 				struct mempolicy *pol)
21061da177e4SLinus Torvalds {
21071da177e4SLinus Torvalds 	struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
21081da177e4SLinus Torvalds 
21091da177e4SLinus Torvalds 	if (!n)
21101da177e4SLinus Torvalds 		return NULL;
21111da177e4SLinus Torvalds 	n->start = start;
21121da177e4SLinus Torvalds 	n->end = end;
21131da177e4SLinus Torvalds 	mpol_get(pol);
2114aab0b102SLee Schermerhorn 	pol->flags |= MPOL_F_SHARED;	/* for unref */
21151da177e4SLinus Torvalds 	n->policy = pol;
21161da177e4SLinus Torvalds 	return n;
21171da177e4SLinus Torvalds }
21181da177e4SLinus Torvalds 
21191da177e4SLinus Torvalds /* Replace a policy range. */
21201da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
21211da177e4SLinus Torvalds 				 unsigned long end, struct sp_node *new)
21221da177e4SLinus Torvalds {
21231da177e4SLinus Torvalds 	struct sp_node *n, *new2 = NULL;
21241da177e4SLinus Torvalds 
21251da177e4SLinus Torvalds restart:
21261da177e4SLinus Torvalds 	spin_lock(&sp->lock);
21271da177e4SLinus Torvalds 	n = sp_lookup(sp, start, end);
21281da177e4SLinus Torvalds 	/* Take care of old policies in the same range. */
21291da177e4SLinus Torvalds 	while (n && n->start < end) {
21301da177e4SLinus Torvalds 		struct rb_node *next = rb_next(&n->nd);
21311da177e4SLinus Torvalds 		if (n->start >= start) {
21321da177e4SLinus Torvalds 			if (n->end <= end)
21331da177e4SLinus Torvalds 				sp_delete(sp, n);
21341da177e4SLinus Torvalds 			else
21351da177e4SLinus Torvalds 				n->start = end;
21361da177e4SLinus Torvalds 		} else {
21371da177e4SLinus Torvalds 			/* Old policy spanning whole new range. */
21381da177e4SLinus Torvalds 			if (n->end > end) {
21391da177e4SLinus Torvalds 				if (!new2) {
21401da177e4SLinus Torvalds 					spin_unlock(&sp->lock);
21411da177e4SLinus Torvalds 					new2 = sp_alloc(end, n->end, n->policy);
21421da177e4SLinus Torvalds 					if (!new2)
21431da177e4SLinus Torvalds 						return -ENOMEM;
21441da177e4SLinus Torvalds 					goto restart;
21451da177e4SLinus Torvalds 				}
21461da177e4SLinus Torvalds 				n->end = start;
21471da177e4SLinus Torvalds 				sp_insert(sp, new2);
21481da177e4SLinus Torvalds 				new2 = NULL;
21491da177e4SLinus Torvalds 				break;
21501da177e4SLinus Torvalds 			} else
21511da177e4SLinus Torvalds 				n->end = start;
21521da177e4SLinus Torvalds 		}
21531da177e4SLinus Torvalds 		if (!next)
21541da177e4SLinus Torvalds 			break;
21551da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
21561da177e4SLinus Torvalds 	}
21571da177e4SLinus Torvalds 	if (new)
21581da177e4SLinus Torvalds 		sp_insert(sp, new);
21591da177e4SLinus Torvalds 	spin_unlock(&sp->lock);
21601da177e4SLinus Torvalds 	if (new2) {
2161f0be3d32SLee Schermerhorn 		mpol_put(new2->policy);
21621da177e4SLinus Torvalds 		kmem_cache_free(sn_cache, new2);
21631da177e4SLinus Torvalds 	}
21641da177e4SLinus Torvalds 	return 0;
21651da177e4SLinus Torvalds }
21661da177e4SLinus Torvalds 
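/*
 * Worked example of the spanning case above (hypothetical ranges, in
 * page units): replacing [4,8) in a tree holding [2,10) splits the
 * old node -- its tail [8,10) becomes new2, its head is truncated to
 * [2,4), and the new [4,8) node lands between them. Since sp_alloc()
 * sleeps (GFP_KERNEL), the spinlock is dropped for the allocation
 * and the scan restarts from sp_lookup().
 */
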
216771fe804bSLee Schermerhorn /**
216871fe804bSLee Schermerhorn  * mpol_shared_policy_init - initialize shared policy for inode
216971fe804bSLee Schermerhorn  * @sp: pointer to inode shared policy
217071fe804bSLee Schermerhorn  * @mpol:  struct mempolicy to install
217171fe804bSLee Schermerhorn  *
217271fe804bSLee Schermerhorn  * Install non-NULL @mpol in inode's shared policy rb-tree.
217371fe804bSLee Schermerhorn  * On entry, the current task has a reference on a non-NULL @mpol.
217471fe804bSLee Schermerhorn  * This must be released on exit.
21754bfc4495SKAMEZAWA Hiroyuki  * This is called at get_inode() time, so we can use GFP_KERNEL.
217671fe804bSLee Schermerhorn  */
217771fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
21787339ff83SRobin Holt {
217958568d2aSMiao Xie 	int ret;
218058568d2aSMiao Xie 
218171fe804bSLee Schermerhorn 	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
218271fe804bSLee Schermerhorn 	spin_lock_init(&sp->lock);
21837339ff83SRobin Holt 
218471fe804bSLee Schermerhorn 	if (mpol) {
21857339ff83SRobin Holt 		struct vm_area_struct pvma;
218671fe804bSLee Schermerhorn 		struct mempolicy *new;
21874bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
21887339ff83SRobin Holt 
21894bfc4495SKAMEZAWA Hiroyuki 		if (!scratch)
21905c0c1654SLee Schermerhorn 			goto put_mpol;
219171fe804bSLee Schermerhorn 		/* contextualize the tmpfs mount point mempolicy */
219271fe804bSLee Schermerhorn 		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
219315d77835SLee Schermerhorn 		if (IS_ERR(new))
21940cae3457SDan Carpenter 			goto free_scratch; /* no valid nodemask intersection */
219558568d2aSMiao Xie 
219658568d2aSMiao Xie 		task_lock(current);
21974bfc4495SKAMEZAWA Hiroyuki 		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
219858568d2aSMiao Xie 		task_unlock(current);
219915d77835SLee Schermerhorn 		if (ret)
22005c0c1654SLee Schermerhorn 			goto put_new;
220171fe804bSLee Schermerhorn 
220271fe804bSLee Schermerhorn 		/* Create pseudo-vma that contains just the policy */
22037339ff83SRobin Holt 		memset(&pvma, 0, sizeof(struct vm_area_struct));
220471fe804bSLee Schermerhorn 		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
220571fe804bSLee Schermerhorn 		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
220615d77835SLee Schermerhorn 
22075c0c1654SLee Schermerhorn put_new:
220871fe804bSLee Schermerhorn 		mpol_put(new);			/* drop initial ref */
22090cae3457SDan Carpenter free_scratch:
22104bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
22115c0c1654SLee Schermerhorn put_mpol:
22125c0c1654SLee Schermerhorn 		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
22137339ff83SRobin Holt 	}
22147339ff83SRobin Holt }
22157339ff83SRobin Holt 
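/*
 * Illustrative call site (modeled on tmpfs inode creation; names
 * approximate): the caller hands over its reference on the
 * superblock mempolicy, which this function consumes:
 *
 *	mpol_shared_policy_init(&info->policy, shmem_get_sbmpol(sbinfo));
 *
 * Passing a NULL mpol leaves the tree empty, i.e. default policy.
 */
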
22161da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info,
22171da177e4SLinus Torvalds 			struct vm_area_struct *vma, struct mempolicy *npol)
22181da177e4SLinus Torvalds {
22191da177e4SLinus Torvalds 	int err;
22201da177e4SLinus Torvalds 	struct sp_node *new = NULL;
22211da177e4SLinus Torvalds 	unsigned long sz = vma_pages(vma);
22221da177e4SLinus Torvalds 
2223028fec41SDavid Rientjes 	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
22241da177e4SLinus Torvalds 		 vma->vm_pgoff,
222545c4745aSLee Schermerhorn 		 sz, npol ? npol->mode : -1,
2226028fec41SDavid Rientjes 		 npol ? npol->flags : -1,
2227dfcd3c0dSAndi Kleen 		 npol ? nodes_addr(npol->v.nodes)[0] : -1);
22281da177e4SLinus Torvalds 
22291da177e4SLinus Torvalds 	if (npol) {
22301da177e4SLinus Torvalds 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
22311da177e4SLinus Torvalds 		if (!new)
22321da177e4SLinus Torvalds 			return -ENOMEM;
22331da177e4SLinus Torvalds 	}
22341da177e4SLinus Torvalds 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
22351da177e4SLinus Torvalds 	if (err && new)
22361da177e4SLinus Torvalds 		kmem_cache_free(sn_cache, new);
22371da177e4SLinus Torvalds 	return err;
22381da177e4SLinus Torvalds }
22391da177e4SLinus Torvalds 
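/*
 * Illustrative caller (hypothetical sketch; shmem's set_policy vm_op
 * is the in-tree user): only @vma's pgoff and size are consulted, so
 * a pseudo-vma works as well, as mpol_shared_policy_init() shows:
 *
 *	err = mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, new);
 *
 * A NULL @npol instead removes any policy covering the range.
 */
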
22401da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */
22411da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p)
22421da177e4SLinus Torvalds {
22431da177e4SLinus Torvalds 	struct sp_node *n;
22441da177e4SLinus Torvalds 	struct rb_node *next;
22451da177e4SLinus Torvalds 
22461da177e4SLinus Torvalds 	if (!p->root.rb_node)
22471da177e4SLinus Torvalds 		return;
22481da177e4SLinus Torvalds 	spin_lock(&p->lock);
22491da177e4SLinus Torvalds 	next = rb_first(&p->root);
22501da177e4SLinus Torvalds 	while (next) {
22511da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
22521da177e4SLinus Torvalds 		next = rb_next(&n->nd);
225390c5029eSAndi Kleen 		rb_erase(&n->nd, &p->root);
2254f0be3d32SLee Schermerhorn 		mpol_put(n->policy);
22551da177e4SLinus Torvalds 		kmem_cache_free(sn_cache, n);
22561da177e4SLinus Torvalds 	}
22571da177e4SLinus Torvalds 	spin_unlock(&p->lock);
22581da177e4SLinus Torvalds }
22591da177e4SLinus Torvalds 
22601da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */
22611da177e4SLinus Torvalds void __init numa_policy_init(void)
22621da177e4SLinus Torvalds {
2263b71636e2SPaul Mundt 	nodemask_t interleave_nodes;
2264b71636e2SPaul Mundt 	unsigned long largest = 0;
2265b71636e2SPaul Mundt 	int nid, prefer = 0;
2266b71636e2SPaul Mundt 
22671da177e4SLinus Torvalds 	policy_cache = kmem_cache_create("numa_policy",
22681da177e4SLinus Torvalds 					 sizeof(struct mempolicy),
226920c2df83SPaul Mundt 					 0, SLAB_PANIC, NULL);
22701da177e4SLinus Torvalds 
22711da177e4SLinus Torvalds 	sn_cache = kmem_cache_create("shared_policy_node",
22721da177e4SLinus Torvalds 				     sizeof(struct sp_node),
227320c2df83SPaul Mundt 				     0, SLAB_PANIC, NULL);
22741da177e4SLinus Torvalds 
2275b71636e2SPaul Mundt 	/*
2276b71636e2SPaul Mundt 	 * Set interleaving policy for system init. Interleaving is only
2277b71636e2SPaul Mundt 	 * enabled across suitably sized nodes (default is >= 16MB), or
2278b71636e2SPaul Mundt 	 * fall back to the largest node if they're all smaller.
2279b71636e2SPaul Mundt 	 */
2280b71636e2SPaul Mundt 	nodes_clear(interleave_nodes);
228156bbd65dSChristoph Lameter 	for_each_node_state(nid, N_HIGH_MEMORY) {
2282b71636e2SPaul Mundt 		unsigned long total_pages = node_present_pages(nid);
22831da177e4SLinus Torvalds 
2284b71636e2SPaul Mundt 		/* Preserve the largest node */
2285b71636e2SPaul Mundt 		if (largest < total_pages) {
2286b71636e2SPaul Mundt 			largest = total_pages;
2287b71636e2SPaul Mundt 			prefer = nid;
2288b71636e2SPaul Mundt 		}
2289b71636e2SPaul Mundt 
2290b71636e2SPaul Mundt 		/* Interleave this node? */
2291b71636e2SPaul Mundt 		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2292b71636e2SPaul Mundt 			node_set(nid, interleave_nodes);
2293b71636e2SPaul Mundt 	}
2294b71636e2SPaul Mundt 
2295b71636e2SPaul Mundt 	/* All too small, use the largest */
2296b71636e2SPaul Mundt 	if (unlikely(nodes_empty(interleave_nodes)))
2297b71636e2SPaul Mundt 		node_set(prefer, interleave_nodes);
2298b71636e2SPaul Mundt 
2299028fec41SDavid Rientjes 	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
23001da177e4SLinus Torvalds 		printk(KERN_ERR "numa_policy_init: interleaving failed\n");
23011da177e4SLinus Torvalds }
23021da177e4SLinus Torvalds 
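/*
 * Threshold arithmetic, worked out: with 4KB pages (PAGE_SHIFT == 12)
 * a node joins interleave_nodes once it has at least
 * (16 << 20) >> 12 == 4096 present pages, i.e. 16MB of memory.
 * Smaller nodes are skipped so that boot-time interleaving does not
 * exhaust them.
 */
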
23038bccd85fSChristoph Lameter /* Reset policy of current process to default */
23041da177e4SLinus Torvalds void numa_default_policy(void)
23051da177e4SLinus Torvalds {
2306028fec41SDavid Rientjes 	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
23071da177e4SLinus Torvalds }
230868860ec1SPaul Jackson 
23094225399aSPaul Jackson /*
2310095f1fc4SLee Schermerhorn  * Parse and format mempolicy from/to strings
2311095f1fc4SLee Schermerhorn  */
2312095f1fc4SLee Schermerhorn 
2313095f1fc4SLee Schermerhorn /*
2314fc36b8d3SLee Schermerhorn  * "local" is a pseudo-policy:  MPOL_PREFERRED with the MPOL_F_LOCAL flag
23153f226aa1SLee Schermerhorn  * Used only for mpol_parse_str() and mpol_to_str()
23161a75a6c8SChristoph Lameter  */
2317345ace9cSLee Schermerhorn #define MPOL_LOCAL MPOL_MAX
2318345ace9cSLee Schermerhorn static const char * const policy_modes[] =
2319345ace9cSLee Schermerhorn {
2320345ace9cSLee Schermerhorn 	[MPOL_DEFAULT]    = "default",
2321345ace9cSLee Schermerhorn 	[MPOL_PREFERRED]  = "prefer",
2322345ace9cSLee Schermerhorn 	[MPOL_BIND]       = "bind",
2323345ace9cSLee Schermerhorn 	[MPOL_INTERLEAVE] = "interleave",
2324345ace9cSLee Schermerhorn 	[MPOL_LOCAL]      = "local"
2325345ace9cSLee Schermerhorn };
23261a75a6c8SChristoph Lameter 
2327095f1fc4SLee Schermerhorn 
2328095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS
2329095f1fc4SLee Schermerhorn /**
2330095f1fc4SLee Schermerhorn  * mpol_parse_str - parse string to mempolicy
2331095f1fc4SLee Schermerhorn  * @str:  string containing mempolicy to parse
233271fe804bSLee Schermerhorn  * @mpol:  pointer to struct mempolicy pointer, returned on success.
233371fe804bSLee Schermerhorn  * @no_context:  flag whether to "contextualize" the mempolicy
2334095f1fc4SLee Schermerhorn  *
2335095f1fc4SLee Schermerhorn  * Format of input:
2336095f1fc4SLee Schermerhorn  *	<mode>[=<flags>][:<nodelist>]
2337095f1fc4SLee Schermerhorn  *
233871fe804bSLee Schermerhorn  * if @no_context is true, save the input nodemask in w.user_nodemask in
233971fe804bSLee Schermerhorn  * the returned mempolicy.  This will be used to "clone" the mempolicy in
234071fe804bSLee Schermerhorn  * a specific context [cpuset] at a later time.  Used to parse tmpfs mpol
234171fe804bSLee Schermerhorn  * mount option.  Note that if 'static' or 'relative' mode flags were
234271fe804bSLee Schermerhorn  * specified, the input nodemask will already have been saved.  Saving
234371fe804bSLee Schermerhorn  * it again is redundant, but safe.
234471fe804bSLee Schermerhorn  *
234571fe804bSLee Schermerhorn  * On success, returns 0, else 1
2346095f1fc4SLee Schermerhorn  */
234771fe804bSLee Schermerhorn int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
2348095f1fc4SLee Schermerhorn {
234971fe804bSLee Schermerhorn 	struct mempolicy *new = NULL;
2350b4652e84SLee Schermerhorn 	unsigned short mode;
235171fe804bSLee Schermerhorn 	unsigned short uninitialized_var(mode_flags);
235271fe804bSLee Schermerhorn 	nodemask_t nodes;
2353095f1fc4SLee Schermerhorn 	char *nodelist = strchr(str, ':');
2354095f1fc4SLee Schermerhorn 	char *flags = strchr(str, '=');
2355095f1fc4SLee Schermerhorn 	int err = 1;
2356095f1fc4SLee Schermerhorn 
2357095f1fc4SLee Schermerhorn 	if (nodelist) {
2358095f1fc4SLee Schermerhorn 		/* NUL-terminate mode or flags string */
2359095f1fc4SLee Schermerhorn 		*nodelist++ = '\0';
236071fe804bSLee Schermerhorn 		if (nodelist_parse(nodelist, nodes))
2361095f1fc4SLee Schermerhorn 			goto out;
236271fe804bSLee Schermerhorn 		if (!nodes_subset(nodes, node_states[N_HIGH_MEMORY]))
2363095f1fc4SLee Schermerhorn 			goto out;
236471fe804bSLee Schermerhorn 	} else
236571fe804bSLee Schermerhorn 		nodes_clear(nodes);
236671fe804bSLee Schermerhorn 
2367095f1fc4SLee Schermerhorn 	if (flags)
2368095f1fc4SLee Schermerhorn 		*flags++ = '\0';	/* terminate mode string */
2369095f1fc4SLee Schermerhorn 
2370b4652e84SLee Schermerhorn 	for (mode = 0; mode <= MPOL_LOCAL; mode++) {
2371345ace9cSLee Schermerhorn 		if (!strcmp(str, policy_modes[mode]))
2372095f1fc4SLee Schermerhorn 			break;
2374095f1fc4SLee Schermerhorn 	}
2375b4652e84SLee Schermerhorn 	if (mode > MPOL_LOCAL)
2376095f1fc4SLee Schermerhorn 		goto out;
2377095f1fc4SLee Schermerhorn 
237871fe804bSLee Schermerhorn 	switch (mode) {
2379095f1fc4SLee Schermerhorn 	case MPOL_PREFERRED:
238071fe804bSLee Schermerhorn 		/*
238171fe804bSLee Schermerhorn 		 * Insist on a nodelist of one node only
238271fe804bSLee Schermerhorn 		 */
2383095f1fc4SLee Schermerhorn 		if (nodelist) {
2384095f1fc4SLee Schermerhorn 			char *rest = nodelist;
2385095f1fc4SLee Schermerhorn 			while (isdigit(*rest))
2386095f1fc4SLee Schermerhorn 				rest++;
2387926f2ae0SKOSAKI Motohiro 			if (*rest)
2388926f2ae0SKOSAKI Motohiro 				goto out;
2389095f1fc4SLee Schermerhorn 		}
2390095f1fc4SLee Schermerhorn 		break;
2391095f1fc4SLee Schermerhorn 	case MPOL_INTERLEAVE:
2392095f1fc4SLee Schermerhorn 		/*
2393095f1fc4SLee Schermerhorn 		 * Default to online nodes with memory if no nodelist
2394095f1fc4SLee Schermerhorn 		 */
2395095f1fc4SLee Schermerhorn 		if (!nodelist)
239671fe804bSLee Schermerhorn 			nodes = node_states[N_HIGH_MEMORY];
23973f226aa1SLee Schermerhorn 		break;
239871fe804bSLee Schermerhorn 	case MPOL_LOCAL:
23993f226aa1SLee Schermerhorn 		/*
240071fe804bSLee Schermerhorn 		 * Don't allow a nodelist;  mpol_new() checks flags
24013f226aa1SLee Schermerhorn 		 */
240271fe804bSLee Schermerhorn 		if (nodelist)
24033f226aa1SLee Schermerhorn 			goto out;
240471fe804bSLee Schermerhorn 		mode = MPOL_PREFERRED;
24053f226aa1SLee Schermerhorn 		break;
2406413b43deSRavikiran G Thirumalai 	case MPOL_DEFAULT:
2407413b43deSRavikiran G Thirumalai 		/*
2408413b43deSRavikiran G Thirumalai 		 * Insist on an empty nodelist
2409413b43deSRavikiran G Thirumalai 		 */
2410413b43deSRavikiran G Thirumalai 		if (!nodelist)
2411413b43deSRavikiran G Thirumalai 			err = 0;
2412413b43deSRavikiran G Thirumalai 		goto out;
2413d69b2e63SKOSAKI Motohiro 	case MPOL_BIND:
241471fe804bSLee Schermerhorn 		/*
2415d69b2e63SKOSAKI Motohiro 		 * Insist on a nodelist
241671fe804bSLee Schermerhorn 		 */
2417d69b2e63SKOSAKI Motohiro 		if (!nodelist)
2418d69b2e63SKOSAKI Motohiro 			goto out;
2419095f1fc4SLee Schermerhorn 	}
2420095f1fc4SLee Schermerhorn 
242171fe804bSLee Schermerhorn 	mode_flags = 0;
2422095f1fc4SLee Schermerhorn 	if (flags) {
2423095f1fc4SLee Schermerhorn 		/*
2424095f1fc4SLee Schermerhorn 		 * Currently, we only support two mutually exclusive
2425095f1fc4SLee Schermerhorn 		 * mode flags.
2426095f1fc4SLee Schermerhorn 		 */
2427095f1fc4SLee Schermerhorn 		if (!strcmp(flags, "static"))
242871fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_STATIC_NODES;
2429095f1fc4SLee Schermerhorn 		else if (!strcmp(flags, "relative"))
243071fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_RELATIVE_NODES;
2431095f1fc4SLee Schermerhorn 		else
2432926f2ae0SKOSAKI Motohiro 			goto out;
2433095f1fc4SLee Schermerhorn 	}
243471fe804bSLee Schermerhorn 
243571fe804bSLee Schermerhorn 	new = mpol_new(mode, mode_flags, &nodes);
243671fe804bSLee Schermerhorn 	if (IS_ERR(new))
2437926f2ae0SKOSAKI Motohiro 		goto out;
2438926f2ae0SKOSAKI Motohiro 
2439e17f74afSLee Schermerhorn 	if (no_context) {
2440e17f74afSLee Schermerhorn 		/* save for contextualization */
2441e17f74afSLee Schermerhorn 		new->w.user_nodemask = nodes;
2442e17f74afSLee Schermerhorn 	} else {
244358568d2aSMiao Xie 		int ret;
24444bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
24454bfc4495SKAMEZAWA Hiroyuki 		if (scratch) {
244658568d2aSMiao Xie 			task_lock(current);
24474bfc4495SKAMEZAWA Hiroyuki 			ret = mpol_set_nodemask(new, &nodes, scratch);
244858568d2aSMiao Xie 			task_unlock(current);
24494bfc4495SKAMEZAWA Hiroyuki 		} else
24504bfc4495SKAMEZAWA Hiroyuki 			ret = -ENOMEM;
24514bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
24524bfc4495SKAMEZAWA Hiroyuki 		if (ret) {
24534bfc4495SKAMEZAWA Hiroyuki 			mpol_put(new);
2454926f2ae0SKOSAKI Motohiro 			goto out;
2455926f2ae0SKOSAKI Motohiro 		}
2456926f2ae0SKOSAKI Motohiro 	}
2457926f2ae0SKOSAKI Motohiro 	err = 0;
245871fe804bSLee Schermerhorn 
2459095f1fc4SLee Schermerhorn out:
2460095f1fc4SLee Schermerhorn 	/* Restore string for error message */
2461095f1fc4SLee Schermerhorn 	if (nodelist)
2462095f1fc4SLee Schermerhorn 		*--nodelist = ':';
2463095f1fc4SLee Schermerhorn 	if (flags)
2464095f1fc4SLee Schermerhorn 		*--flags = '=';
246571fe804bSLee Schermerhorn 	if (!err)
246671fe804bSLee Schermerhorn 		*mpol = new;
2467095f1fc4SLee Schermerhorn 	return err;
2468095f1fc4SLee Schermerhorn }
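
/*
 * Example strings accepted above, as used by the tmpfs "mpol=" mount
 * option (illustrative, not exhaustive):
 *	"default"		-> *mpol set to NULL (default policy)
 *	"prefer:2"		-> MPOL_PREFERRED on node 2
 *	"bind=static:0-3"	-> MPOL_BIND over nodes 0-3, static flag
 *	"interleave"		-> interleave across all memory nodes
 *	"local"			-> MPOL_PREFERRED, local allocation
 */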
2469095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */
2470095f1fc4SLee Schermerhorn 
247171fe804bSLee Schermerhorn /**
247271fe804bSLee Schermerhorn  * mpol_to_str - format a mempolicy structure for printing
247371fe804bSLee Schermerhorn  * @buffer:  to contain formatted mempolicy string
247471fe804bSLee Schermerhorn  * @maxlen:  length of @buffer
247571fe804bSLee Schermerhorn  * @pol:  pointer to mempolicy to be formatted
247671fe804bSLee Schermerhorn  * @no_context:  "context free" mempolicy - use nodemask in w.user_nodemask
247771fe804bSLee Schermerhorn  *
24781a75a6c8SChristoph Lameter  * Convert a mempolicy into a string.
24791a75a6c8SChristoph Lameter  * Returns the number of characters in buffer (if positive)
24801a75a6c8SChristoph Lameter  * or an error (negative)
24811a75a6c8SChristoph Lameter  */
248271fe804bSLee Schermerhorn int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
24831a75a6c8SChristoph Lameter {
24841a75a6c8SChristoph Lameter 	char *p = buffer;
24851a75a6c8SChristoph Lameter 	int l;
24861a75a6c8SChristoph Lameter 	nodemask_t nodes;
2487bea904d5SLee Schermerhorn 	unsigned short mode;
2488f5b087b5SDavid Rientjes 	unsigned short flags = pol ? pol->flags : 0;
24891a75a6c8SChristoph Lameter 
24902291990aSLee Schermerhorn 	/*
24912291990aSLee Schermerhorn 	 * Sanity check:  room for longest mode, flag and some nodes
24922291990aSLee Schermerhorn 	 */
24932291990aSLee Schermerhorn 	VM_BUG_ON(maxlen < strlen("interleave") + strlen("relative") + 16);
24942291990aSLee Schermerhorn 
2495bea904d5SLee Schermerhorn 	if (!pol || pol == &default_policy)
2496bea904d5SLee Schermerhorn 		mode = MPOL_DEFAULT;
2497bea904d5SLee Schermerhorn 	else
2498bea904d5SLee Schermerhorn 		mode = pol->mode;
2499bea904d5SLee Schermerhorn 
25001a75a6c8SChristoph Lameter 	switch (mode) {
25011a75a6c8SChristoph Lameter 	case MPOL_DEFAULT:
25021a75a6c8SChristoph Lameter 		nodes_clear(nodes);
25031a75a6c8SChristoph Lameter 		break;
25041a75a6c8SChristoph Lameter 
25051a75a6c8SChristoph Lameter 	case MPOL_PREFERRED:
25061a75a6c8SChristoph Lameter 		nodes_clear(nodes);
2507fc36b8d3SLee Schermerhorn 		if (flags & MPOL_F_LOCAL)
250853f2556bSLee Schermerhorn 			mode = MPOL_LOCAL;	/* pseudo-policy */
250953f2556bSLee Schermerhorn 		else
2510fc36b8d3SLee Schermerhorn 			node_set(pol->v.preferred_node, nodes);
25111a75a6c8SChristoph Lameter 		break;
25121a75a6c8SChristoph Lameter 
25131a75a6c8SChristoph Lameter 	case MPOL_BIND:
251419770b32SMel Gorman 		/* Fall through */
25151a75a6c8SChristoph Lameter 	case MPOL_INTERLEAVE:
251671fe804bSLee Schermerhorn 		if (no_context)
251771fe804bSLee Schermerhorn 			nodes = pol->w.user_nodemask;
251871fe804bSLee Schermerhorn 		else
25191a75a6c8SChristoph Lameter 			nodes = pol->v.nodes;
25201a75a6c8SChristoph Lameter 		break;
25211a75a6c8SChristoph Lameter 
25221a75a6c8SChristoph Lameter 	default:
25231a75a6c8SChristoph Lameter 		BUG();
25241a75a6c8SChristoph Lameter 	}
25251a75a6c8SChristoph Lameter 
2526345ace9cSLee Schermerhorn 	l = strlen(policy_modes[mode]);
25271a75a6c8SChristoph Lameter 	if (buffer + maxlen < p + l + 1)
25281a75a6c8SChristoph Lameter 		return -ENOSPC;
25291a75a6c8SChristoph Lameter 
2530345ace9cSLee Schermerhorn 	strcpy(p, policy_modes[mode]);
25311a75a6c8SChristoph Lameter 	p += l;
25321a75a6c8SChristoph Lameter 
2533fc36b8d3SLee Schermerhorn 	if (flags & MPOL_MODE_FLAGS) {
2534f5b087b5SDavid Rientjes 		if (buffer + maxlen < p + 2)
2535f5b087b5SDavid Rientjes 			return -ENOSPC;
2536f5b087b5SDavid Rientjes 		*p++ = '=';
2537f5b087b5SDavid Rientjes 
25382291990aSLee Schermerhorn 		/*
25392291990aSLee Schermerhorn 		 * Currently, the only defined flags are mutually exclusive
25402291990aSLee Schermerhorn 		 */
2541f5b087b5SDavid Rientjes 		if (flags & MPOL_F_STATIC_NODES)
25422291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "static");
25432291990aSLee Schermerhorn 		else if (flags & MPOL_F_RELATIVE_NODES)
25442291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "relative");
2545f5b087b5SDavid Rientjes 	}
2546f5b087b5SDavid Rientjes 
25471a75a6c8SChristoph Lameter 	if (!nodes_empty(nodes)) {
25481a75a6c8SChristoph Lameter 		if (buffer + maxlen < p + 2)
25491a75a6c8SChristoph Lameter 			return -ENOSPC;
2550095f1fc4SLee Schermerhorn 		*p++ = ':';
25511a75a6c8SChristoph Lameter 		p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
25521a75a6c8SChristoph Lameter 	}
25531a75a6c8SChristoph Lameter 	return p - buffer;
25541a75a6c8SChristoph Lameter }
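
/*
 * Example output, mirroring the parse format above (illustrative):
 * an interleave policy over nodes 0-3 formats as "interleave:0-3", a
 * preferred policy on node 1 with the static flag as
 * "prefer=static:1", and local allocation as "local".
 */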
2555