/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints as to which node(s) memory
 * should be allocated on.
 *
 * Supports four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a per-process
 *                counter is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case node -1 here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *		  in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non-interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem, kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
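
/*
 * Illustrative userspace sketch (not part of this file): how a process
 * might request the policies described above through the set_mempolicy(2)
 * and mbind(2) system calls.  Assumes the <numaif.h> header from libnuma;
 * error handling is elided.
 */
#if 0	/* example only, never compiled as part of the kernel */
#include <numaif.h>		/* MPOL_* constants, set_mempolicy(), mbind() */
#include <sys/mman.h>

static void mempolicy_example(void)
{
	unsigned long interleave_mask = 0x3;	/* nodes 0 and 1 */
	unsigned long bind_mask = 0x2;		/* node 1 only */
	size_t len = 1UL << 20;
	void *buf;

	/* Process policy: interleave future allocations across nodes 0-1. */
	set_mempolicy(MPOL_INTERLEAVE, &interleave_mask,
		      8 * sizeof(interleave_mask));

	/* VMA policy: bind one mapping to node 1, overriding the above. */
	buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	mbind(buf, len, MPOL_BIND, &bind_mask, 8 * sizeof(bind_mask),
	      MPOL_MF_STRICT);
}
#endif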

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always graceful about that.
*/

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
#define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2)		/* Gather statistics */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
} mpol_ops[MPOL_MAX];

/* Check that the nodemask contains at least one populated zone */
static int is_valid_nodemask(const nodemask_t *nodemask)
{
	int nd, k;

	/* Check that there is something useful in this mask */
	k = policy_zone;

	for_each_node_mask(nd, *nodemask) {
		struct zone *z;

		for (k = 0; k <= policy_zone; k++) {
			z = &NODE_DATA(nd)->node_zones[k];
			if (z->present_pages > 0)
				return 1;
		}
	}

	return 0;
}

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & (MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES);
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}
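
/*
 * Illustrative example (not from the original source): with orig = {0,2}
 * and rel = {4,5,6}, nodes_weight(*rel) == 3, so nodes_fold() keeps {0,2}
 * (both bits already below 3) and nodes_onto() maps bit 0 -> node 4 and
 * bit 2 -> node 6, yielding ret = {4,6}.
 */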

static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!nodes)
		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
	else if (nodes_empty(*nodes))
		return -EINVAL;			/* no allowed nodes */
	else
		pol->v.preferred_node = first_node(*nodes);
	return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!is_valid_nodemask(nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy.  mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.  But, we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy.  May also be called holding the mmap_semaphore for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
{
	nodemask_t cpuset_context_nmask;
	int ret;

	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
	if (pol == NULL)
		return 0;

	VM_BUG_ON(!nodes);
	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
		nodes = NULL;	/* explicit local allocation */
	else {
		if (pol->flags & MPOL_F_RELATIVE_NODES)
			mpol_relative_nodemask(&cpuset_context_nmask, nodes,
					       &cpuset_current_mems_allowed);
		else
			nodes_and(cpuset_context_nmask, *nodes,
				  cpuset_current_mems_allowed);
		if (mpol_store_user_nodemask(pol))
			pol->w.user_nodemask = *nodes;
		else
			pol->w.cpuset_mems_allowed =
						cpuset_current_mems_allowed;
	}

	ret = mpol_ops[pol->mode].create(pol,
				nodes ? &cpuset_context_nmask : NULL);
	return ret;
}
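
/*
 * Illustrative example (not from the original source): assume the current
 * cpuset allows mems {2,3}.  With MPOL_F_STATIC_NODES, a user nodemask of
 * {1,2} is stored as given and intersected down to an effective mask of
 * {2}.  With MPOL_F_RELATIVE_NODES, a user nodemask of {0,1} is remapped
 * onto the allowed set, yielding an effective mask of {2,3}.
 */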

/*
 * This function just creates a new policy, does some checks and simple
 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : -1);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;	/* simply delete any existing policy */
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);
		}
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	return policy;
}

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
{
}

static void mpol_rebind_nodemask(struct mempolicy *pol,
				 const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed,
			    *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}

	pol->v.nodes = tmp;
	if (!node_isset(current->il_next, tmp)) {
		current->il_next = next_node(current->il_next, tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = first_node(tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = numa_node_id();
	}
}
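
/*
 * Illustrative example (not from the original source): an interleave
 * policy over {0,1} created while the cpuset allowed {0,1}, with no
 * static/relative flags, takes the nodes_remap() branch when the cpuset
 * moves to {2,3}: node 0 maps to 2 and node 1 maps to 3, so the policy
 * becomes {2,3}, and il_next is pushed into the new mask if it no
 * longer lies within it.
 */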

static void mpol_rebind_preferred(struct mempolicy *pol,
				  const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES) {
		int node = first_node(pol->w.user_nodemask);

		if (node_isset(node, *nodes)) {
			pol->v.preferred_node = node;
			pol->flags &= ~MPOL_F_LOCAL;
		} else
			pol->flags |= MPOL_F_LOCAL;
	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
		pol->v.preferred_node = first_node(tmp);
	} else if (!(pol->flags & MPOL_F_LOCAL)) {
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						   pol->w.cpuset_mems_allowed,
						   *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}
}

/* Migrate a policy to a different set of nodes */
static void mpol_rebind_policy(struct mempolicy *pol,
			       const nodemask_t *newmask)
{
	if (!pol)
		return;
	if (!mpol_store_user_nodemask(pol) &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;
	mpol_ops[pol->mode].rebind(pol, newmask);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */

void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
{
	mpol_rebind_policy(tsk->mempolicy, new);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
 */

void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new);
	up_write(&mm->mmap_sem);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_interleave,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,
	},
};

static void gather_stats(struct page *, void *, int pte_dirty);
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

/* Scan through pages checking if pages follow certain conditions. */
static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pte_t *orig_pte;
	pte_t *pte;
	spinlock_t *ptl;

	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		struct page *page;
		int nid;

		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * The check for PageReserved here is important to avoid
		 * handling zero pages and other pages that may have been
		 * marked special by the system.
		 *
		 * If the PageReserved would not be checked here then f.e.
		 * the location of the zero page could have an influence
		 * on MPOL_MF_STRICT, zero pages would be counted for
		 * the per node stats, and there would be useless attempts
		 * to put zero pages on the migration list.
		 */
		if (PageReserved(page))
			continue;
		nid = page_to_nid(page);
		if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
			continue;

		if (flags & MPOL_MF_STATS)
			gather_stats(page, private, pte_dirty(*pte));
		else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
			migrate_page_add(page, private, flags);
		else
			break;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return addr != end;
}

static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		if (check_pte_range(vma, pmd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		if (check_pmd_range(vma, pud, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static inline int check_pgd_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		if (check_pud_range(vma, pgd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pgd++, addr = next, addr != end);
	return 0;
}

/*
 * Check if all pages in a range are on a set of nodes.
 * If pagelist != NULL then isolate pages from the LRU and
 * put them on the pagelist.
 */
static struct vm_area_struct *
check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		const nodemask_t *nodes, unsigned long flags, void *private)
{
	int err;
	struct vm_area_struct *first, *vma, *prev;

	first = find_vma(mm, start);
	if (!first)
		return ERR_PTR(-EFAULT);
	prev = NULL;
	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
			if (!vma->vm_next && vma->vm_end < end)
				return ERR_PTR(-EFAULT);
			if (prev && prev->vm_end < vma->vm_start)
				return ERR_PTR(-EFAULT);
		}
		if (!is_vm_hugetlb_page(vma) &&
		    ((flags & MPOL_MF_STRICT) ||
		     ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
				vma_migratable(vma)))) {
			unsigned long endvma = vma->vm_end;

			if (endvma > end)
				endvma = end;
			if (vma->vm_start > start)
				start = vma->vm_start;
			err = check_pgd_range(vma, start, endvma, nodes,
						flags, private);
			if (err) {
				first = ERR_PTR(err);
				break;
			}
		}
		prev = vma;
	}
	return first;
}

/* Apply policy to a single VMA */
static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
{
	int err = 0;
	struct mempolicy *old = vma->vm_policy;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	if (vma->vm_ops && vma->vm_ops->set_policy)
		err = vma->vm_ops->set_policy(vma, new);
	if (!err) {
		mpol_get(new);
		vma->vm_policy = new;
		mpol_put(old);
	}
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, struct mempolicy *new)
{
	struct vm_area_struct *next;
	int err;

	err = 0;
	for (; vma && vma->vm_start < end; vma = next) {
		next = vma->vm_next;
		if (vma->vm_start < start)
			err = split_vma(vma->vm_mm, vma, start, 1);
		if (!err && vma->vm_end > end)
			err = split_vma(vma->vm_mm, vma, end, 0);
		if (!err)
			err = policy_vma(vma, new);
		if (err)
			break;
	}
	return err;
}

/*
 * Update task->flags PF_MEMPOLICY bit: set iff non-default
 * mempolicy.  Allows more rapid checking of this (combined perhaps
 * with other PF_* flag bits) on memory allocation hot code paths.
 *
 * If called from outside this file, the task 'p' should -only- be
 * a newly forked child not yet visible on the task list, because
 * manipulating the task flags of a visible task is not safe.
 *
 * The above limitation is why this routine has the funny name
 * mpol_fix_fork_child_flag().
 *
 * It is also safe to call this with a task pointer of current,
 * which the static wrapper mpol_set_task_struct_flag() does,
 * for use within this file.
 */

void mpol_fix_fork_child_flag(struct task_struct *p)
{
	if (p->mempolicy)
		p->flags |= PF_MEMPOLICY;
	else
		p->flags &= ~PF_MEMPOLICY;
}

static void mpol_set_task_struct_flag(void)
{
	mpol_fix_fork_child_flag(current);
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new, *old;
	struct mm_struct *mm = current->mm;
	int ret;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new))
		return PTR_ERR(new);

	/*
	 * prevent changing our mempolicy while show_numa_maps()
	 * is using it.
	 * Note:  do_set_mempolicy() can be called at init time
	 * with no 'mm'.
	 */
	if (mm)
		down_write(&mm->mmap_sem);
	task_lock(current);
	ret = mpol_set_nodemask(new, nodes);
	if (ret) {
		task_unlock(current);
		if (mm)
			up_write(&mm->mmap_sem);
		mpol_put(new);
		return ret;
	}
	old = current->mempolicy;
	current->mempolicy = new;
	mpol_set_task_struct_flag();
	if (new && new->mode == MPOL_INTERLEAVE &&
	    nodes_weight(new->v.nodes))
		current->il_next = first_node(new->v.nodes);
	task_unlock(current);
	if (mm)
		up_write(&mm->mmap_sem);

	mpol_put(old);
	return 0;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (p == &default_policy)
		return;

	switch (p->mode) {
	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		if (!(p->flags & MPOL_F_LOCAL))
			node_set(p->v.preferred_node, *nodes);
		/* else return empty node mask for local allocation */
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p;
	int err;

	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL.  We
		 * want to return MPOL_DEFAULT in this case.
		 */
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = current->il_next;
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	if (vma) {
		up_read(&current->mm->mmap_sem);
		vma = NULL;
	}

	err = 0;
	if (nmask) {
		task_lock(current);
		get_policy_nodemask(pol, nmask);
		task_unlock(current);
	}

 out:
	mpol_cond_put(pol);
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}

#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
		if (!isolate_lru_page(page)) {
			list_add_tail(&page->lru, pagelist);
		}
	}
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
	return alloc_pages_node(node, GFP_HIGHUSER_MOVABLE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;

	nodes_clear(nmask);
	node_set(source, nmask);

	check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist))
		err = migrate_pages(&pagelist, new_node_page, dest);

	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	int busy = 0;
	int err;
	nodemask_t tmp;

	err = migrate_prep();
	if (err)
		return err;

	down_read(&mm->mmap_sem);

	err = migrate_vmas(mm, from_nodes, to_nodes, flags);
	if (err)
		goto out;

/*
 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
 * bit in 'tmp', and return that <source, dest> pair for migration.
 * The pair of nodemasks 'to' and 'from' define the map.
 *
 * If no pair of bits is found that way, fall back to picking some
 * pair of 'source' and 'dest' bits that are not the same.  If the
 * 'source' and 'dest' bits are the same, this represents a node
 * that will be migrating to itself, so no pages need move.
 *
 * If no bits are left in 'tmp', or if all remaining bits left
 * in 'tmp' correspond to the same bit in 'to', return false
 * (nothing left to migrate).
 *
 * This lets us pick a pair of nodes to migrate between, such that
 * if possible the dest node is not already occupied by some other
 * source node, minimizing the risk of overloading the memory on a
 * node that would happen if we migrated incoming memory to a node
 * before migrating outgoing memory from that same node.
 *
 * A single scan of tmp is sufficient.  As we go, we remember the
 * most recent <s, d> pair that moved (s != d).  If we find a pair
 * that not only moved, but what's better, moved to an empty slot
 * (d is not set in tmp), then we break out then, with that pair.
 * Otherwise when we finish scanning tmp, we at least have the
 * most recent <s, d> pair that moved.  If we get all the way through
 * the scan of tmp without finding any node that moved, much less
 * moved to an empty node, then there is nothing left worth migrating.
 */
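
/*
 * Illustrative walk-through (not from the original source):
 * from_nodes = {0,1}, to_nodes = {1,2}, so tmp starts as {0,1}.
 * The scan sees s=0 -> d=1, but d=1 is still set in tmp (node 1 has
 * pages of its own to move), so it keeps looking and finds s=1 -> d=2,
 * where d=2 is not in tmp.  Node 1 is therefore migrated to node 2
 * first; only then does node 0 move into the now-vacated node 1,
 * preserving the relative layout.
 */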

	tmp = *from_nodes;
	while (!nodes_empty(tmp)) {
		int s, d;
		int source = -1;
		int dest = 0;

		for_each_node_mask(s, tmp) {
			d = node_remap(s, *from_nodes, *to_nodes);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
				break;
		}
		if (source == -1)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;
		if (err < 0)
			break;
	}
out:
	up_read(&mm->mmap_sem);
	if (err < 0)
		return err;
	return busy;
}

/*
 * Allocate a new page for page migration based on vma policy.
 * Start assuming that page is mapped by vma pointed to by @private.
 * Search forward from there, if not.  N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
 */
static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	struct vm_area_struct *vma = (struct vm_area_struct *)private;
	unsigned long uninitialized_var(address);

	while (vma) {
		address = page_address_in_vma(page, vma);
		if (address != -EFAULT)
			break;
		vma = vma->vm_next;
	}

	/*
	 * if !vma, alloc_page_vma() will use task or system default policy
	 */
	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
}
#else

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
}

int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	return -ENOSYS;
}

static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	return NULL;
}
#endif

static long do_mbind(unsigned long start, unsigned long len,
		     unsigned short mode, unsigned short mode_flags,
		     nodemask_t *nmask, unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	struct mempolicy *new;
	unsigned long end;
	int err;
	LIST_HEAD(pagelist);

	if (flags & ~(unsigned long)(MPOL_MF_STRICT |
				     MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
		return -EINVAL;
	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	if (start & ~PAGE_MASK)
		return -EINVAL;

	if (mode == MPOL_DEFAULT)
		flags &= ~MPOL_MF_STRICT;

	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
	end = start + len;

	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;

	new = mpol_new(mode, mode_flags, nmask);
	if (IS_ERR(new))
		return PTR_ERR(new);

	/*
	 * If we are using the default policy then operation
	 * on discontinuous address spaces is okay after all
	 */
	if (!new)
		flags |= MPOL_MF_DISCONTIG_OK;

	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
		 start, start + len, mode, mode_flags,
		 nmask ? nodes_addr(*nmask)[0] : -1);

	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {

		err = migrate_prep();
		if (err)
			return err;
	}
	down_write(&mm->mmap_sem);
	task_lock(current);
	err = mpol_set_nodemask(new, nmask);
	task_unlock(current);
	if (err) {
		up_write(&mm->mmap_sem);
		mpol_put(new);
		return err;
	}
	vma = check_range(mm, start, end, nmask,
			  flags | MPOL_MF_INVERT, &pagelist);

	err = PTR_ERR(vma);
	if (!IS_ERR(vma)) {
		int nr_failed = 0;

		err = mbind_range(vma, start, end, new);

		if (!list_empty(&pagelist))
			nr_failed = migrate_pages(&pagelist, new_vma_page,
						(unsigned long)vma);

		if (!err && nr_failed && (flags & MPOL_MF_STRICT))
			err = -EIO;
	}

	up_write(&mm->mmap_sem);
	mpol_put(new);
	return err;
}

104839743889SChristoph Lameter /*
10498bccd85fSChristoph Lameter  * User space interface with variable sized bitmaps for nodelists.
10508bccd85fSChristoph Lameter  */
10518bccd85fSChristoph Lameter 
10528bccd85fSChristoph Lameter /* Copy a node mask from user space. */
105339743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
10548bccd85fSChristoph Lameter 		     unsigned long maxnode)
10558bccd85fSChristoph Lameter {
10568bccd85fSChristoph Lameter 	unsigned long k;
10578bccd85fSChristoph Lameter 	unsigned long nlongs;
10588bccd85fSChristoph Lameter 	unsigned long endmask;
10598bccd85fSChristoph Lameter 
10608bccd85fSChristoph Lameter 	--maxnode;
10618bccd85fSChristoph Lameter 	nodes_clear(*nodes);
10628bccd85fSChristoph Lameter 	if (maxnode == 0 || !nmask)
10638bccd85fSChristoph Lameter 		return 0;
1064a9c930baSAndi Kleen 	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1065636f13c1SChris Wright 		return -EINVAL;
10668bccd85fSChristoph Lameter 
10678bccd85fSChristoph Lameter 	nlongs = BITS_TO_LONGS(maxnode);
10688bccd85fSChristoph Lameter 	if ((maxnode % BITS_PER_LONG) == 0)
10698bccd85fSChristoph Lameter 		endmask = ~0UL;
10708bccd85fSChristoph Lameter 	else
10718bccd85fSChristoph Lameter 		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
10728bccd85fSChristoph Lameter 
10738bccd85fSChristoph Lameter 	/* When the user specifies more nodes than supported, just check
10748bccd85fSChristoph Lameter 	   that the unsupported part is all zero. */
10758bccd85fSChristoph Lameter 	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
10768bccd85fSChristoph Lameter 		if (nlongs > PAGE_SIZE/sizeof(long))
10778bccd85fSChristoph Lameter 			return -EINVAL;
10788bccd85fSChristoph Lameter 		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
10798bccd85fSChristoph Lameter 			unsigned long t;
10808bccd85fSChristoph Lameter 			if (get_user(t, nmask + k))
10818bccd85fSChristoph Lameter 				return -EFAULT;
10828bccd85fSChristoph Lameter 			if (k == nlongs - 1) {
10838bccd85fSChristoph Lameter 				if (t & endmask)
10848bccd85fSChristoph Lameter 					return -EINVAL;
10858bccd85fSChristoph Lameter 			} else if (t)
10868bccd85fSChristoph Lameter 				return -EINVAL;
10878bccd85fSChristoph Lameter 		}
10888bccd85fSChristoph Lameter 		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
10898bccd85fSChristoph Lameter 		endmask = ~0UL;
10908bccd85fSChristoph Lameter 	}
10918bccd85fSChristoph Lameter 
10928bccd85fSChristoph Lameter 	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
10938bccd85fSChristoph Lameter 		return -EFAULT;
10948bccd85fSChristoph Lameter 	nodes_addr(*nodes)[nlongs-1] &= endmask;
10958bccd85fSChristoph Lameter 	return 0;
10968bccd85fSChristoph Lameter }
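/*
 * Illustrative userspace sketch (not part of this file; the helper name
 * is hypothetical): building the variable-sized bitmap that get_nodes()
 * above decodes.  Bit k of the unsigned-long array selects node k, and
 * maxnode counts bits, not longs.
 */
#include <stdlib.h>

static unsigned long *make_nodemask(const int *wanted, int count,
				    unsigned long maxnode)
{
	unsigned long bits_per_long = 8 * sizeof(unsigned long);
	unsigned long *mask;
	int i;

	/* one bit per node, rounded up to whole longs */
	mask = calloc((maxnode + bits_per_long - 1) / bits_per_long,
		      sizeof(unsigned long));
	if (!mask)
		return NULL;
	for (i = 0; i < count; i++)
		mask[wanted[i] / bits_per_long] |=
			1UL << (wanted[i] % bits_per_long);
	return mask;
}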
10978bccd85fSChristoph Lameter 
10988bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */
10998bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
11008bccd85fSChristoph Lameter 			      nodemask_t *nodes)
11018bccd85fSChristoph Lameter {
11028bccd85fSChristoph Lameter 	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
11038bccd85fSChristoph Lameter 	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
11048bccd85fSChristoph Lameter 
11058bccd85fSChristoph Lameter 	if (copy > nbytes) {
11068bccd85fSChristoph Lameter 		if (copy > PAGE_SIZE)
11078bccd85fSChristoph Lameter 			return -EINVAL;
11088bccd85fSChristoph Lameter 		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
11098bccd85fSChristoph Lameter 			return -EFAULT;
11108bccd85fSChristoph Lameter 		copy = nbytes;
11118bccd85fSChristoph Lameter 	}
11128bccd85fSChristoph Lameter 	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
11138bccd85fSChristoph Lameter }
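/*
 * Worked example: a userspace maxnode of 1025 means 1024 usable bits,
 * so copy = ALIGN(1024, 64) / 8 = 128 bytes; any user bytes beyond
 * this kernel's nodemask are cleared rather than left stale.
 */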
11148bccd85fSChristoph Lameter 
1115938bb9f5SHeiko Carstens SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1116938bb9f5SHeiko Carstens 		unsigned long, mode, unsigned long __user *, nmask,
1117938bb9f5SHeiko Carstens 		unsigned long, maxnode, unsigned, flags)
11188bccd85fSChristoph Lameter {
11198bccd85fSChristoph Lameter 	nodemask_t nodes;
11208bccd85fSChristoph Lameter 	int err;
1121028fec41SDavid Rientjes 	unsigned short mode_flags;
11228bccd85fSChristoph Lameter 
1123028fec41SDavid Rientjes 	mode_flags = mode & MPOL_MODE_FLAGS;
1124028fec41SDavid Rientjes 	mode &= ~MPOL_MODE_FLAGS;
1125a3b51e01SDavid Rientjes 	if (mode >= MPOL_MAX)
1126a3b51e01SDavid Rientjes 		return -EINVAL;
11274c50bc01SDavid Rientjes 	if ((mode_flags & MPOL_F_STATIC_NODES) &&
11284c50bc01SDavid Rientjes 	    (mode_flags & MPOL_F_RELATIVE_NODES))
11294c50bc01SDavid Rientjes 		return -EINVAL;
11308bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
11318bccd85fSChristoph Lameter 	if (err)
11328bccd85fSChristoph Lameter 		return err;
1133028fec41SDavid Rientjes 	return do_mbind(start, len, mode, mode_flags, &nodes, flags);
11348bccd85fSChristoph Lameter }
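/*
 * Userspace sketch (assumes libnuma's <numaif.h> wrapper; the region
 * and nodes are hypothetical): bind a mapping to nodes 0-1 and ask for
 * strict reporting of pages that already violate the policy.
 */
#include <numaif.h>

static long bind_region_to_nodes01(void *addr, unsigned long len)
{
	unsigned long nodemask = (1UL << 0) | (1UL << 1);

	/* maxnode counts bits; one long's worth is plenty here */
	return mbind(addr, len, MPOL_BIND, &nodemask,
		     8 * sizeof(nodemask), MPOL_MF_STRICT);
}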
11358bccd85fSChristoph Lameter 
11368bccd85fSChristoph Lameter /* Set the process memory policy */
1137938bb9f5SHeiko Carstens SYSCALL_DEFINE3(set_mempolicy, int, mode, unsigned long __user *, nmask,
1138938bb9f5SHeiko Carstens 		unsigned long, maxnode)
11398bccd85fSChristoph Lameter {
11408bccd85fSChristoph Lameter 	int err;
11418bccd85fSChristoph Lameter 	nodemask_t nodes;
1142028fec41SDavid Rientjes 	unsigned short flags;
11438bccd85fSChristoph Lameter 
1144028fec41SDavid Rientjes 	flags = mode & MPOL_MODE_FLAGS;
1145028fec41SDavid Rientjes 	mode &= ~MPOL_MODE_FLAGS;
1146028fec41SDavid Rientjes 	if ((unsigned int)mode >= MPOL_MAX)
11478bccd85fSChristoph Lameter 		return -EINVAL;
11484c50bc01SDavid Rientjes 	if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
11494c50bc01SDavid Rientjes 		return -EINVAL;
11508bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
11518bccd85fSChristoph Lameter 	if (err)
11528bccd85fSChristoph Lameter 		return err;
1153028fec41SDavid Rientjes 	return do_set_mempolicy(mode, flags, &nodes);
11548bccd85fSChristoph Lameter }
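/*
 * Userspace sketch (assumes <numaif.h>): the optional mode flags are
 * OR-ed into the high bits of @mode, exactly as they are split apart
 * at the top of the syscall above.
 */
#include <numaif.h>

static long interleave_statically(unsigned long *mask, unsigned long maxnode)
{
	return set_mempolicy(MPOL_INTERLEAVE | MPOL_F_STATIC_NODES,
			     mask, maxnode);
}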
11558bccd85fSChristoph Lameter 
1156938bb9f5SHeiko Carstens SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1157938bb9f5SHeiko Carstens 		const unsigned long __user *, old_nodes,
1158938bb9f5SHeiko Carstens 		const unsigned long __user *, new_nodes)
115939743889SChristoph Lameter {
1160c69e8d9cSDavid Howells 	const struct cred *cred = current_cred(), *tcred;
116139743889SChristoph Lameter 	struct mm_struct *mm;
116239743889SChristoph Lameter 	struct task_struct *task;
116339743889SChristoph Lameter 	nodemask_t old;
116439743889SChristoph Lameter 	nodemask_t new;
116539743889SChristoph Lameter 	nodemask_t task_nodes;
116639743889SChristoph Lameter 	int err;
116739743889SChristoph Lameter 
116839743889SChristoph Lameter 	err = get_nodes(&old, old_nodes, maxnode);
116939743889SChristoph Lameter 	if (err)
117039743889SChristoph Lameter 		return err;
117139743889SChristoph Lameter 
117239743889SChristoph Lameter 	err = get_nodes(&new, new_nodes, maxnode);
117339743889SChristoph Lameter 	if (err)
117439743889SChristoph Lameter 		return err;
117539743889SChristoph Lameter 
117639743889SChristoph Lameter 	/* Find the mm_struct */
117739743889SChristoph Lameter 	read_lock(&tasklist_lock);
1178228ebcbeSPavel Emelyanov 	task = pid ? find_task_by_vpid(pid) : current;
117939743889SChristoph Lameter 	if (!task) {
118039743889SChristoph Lameter 		read_unlock(&tasklist_lock);
118139743889SChristoph Lameter 		return -ESRCH;
118239743889SChristoph Lameter 	}
118339743889SChristoph Lameter 	mm = get_task_mm(task);
118439743889SChristoph Lameter 	read_unlock(&tasklist_lock);
118539743889SChristoph Lameter 
118639743889SChristoph Lameter 	if (!mm)
118739743889SChristoph Lameter 		return -EINVAL;
118839743889SChristoph Lameter 
118939743889SChristoph Lameter 	/*
119039743889SChristoph Lameter 	 * Check if this process has the right to modify the specified
119139743889SChristoph Lameter 	 * process. The right exists if the process has administrative
11927f927fccSAlexey Dobriyan 	 * capabilities, superuser privileges or the same
119339743889SChristoph Lameter 	 * userid as the target process.
119439743889SChristoph Lameter 	 */
1195c69e8d9cSDavid Howells 	rcu_read_lock();
1196c69e8d9cSDavid Howells 	tcred = __task_cred(task);
1197b6dff3ecSDavid Howells 	if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
1198b6dff3ecSDavid Howells 	    cred->uid  != tcred->suid && cred->uid  != tcred->uid &&
119974c00241SChristoph Lameter 	    !capable(CAP_SYS_NICE)) {
1200c69e8d9cSDavid Howells 		rcu_read_unlock();
120139743889SChristoph Lameter 		err = -EPERM;
120239743889SChristoph Lameter 		goto out;
120339743889SChristoph Lameter 	}
1204c69e8d9cSDavid Howells 	rcu_read_unlock();
120539743889SChristoph Lameter 
120639743889SChristoph Lameter 	task_nodes = cpuset_mems_allowed(task);
120739743889SChristoph Lameter 	/* Is the user allowed to access the target nodes? */
120874c00241SChristoph Lameter 	if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_NICE)) {
120939743889SChristoph Lameter 		err = -EPERM;
121039743889SChristoph Lameter 		goto out;
121139743889SChristoph Lameter 	}
121239743889SChristoph Lameter 
121337b07e41SLee Schermerhorn 	if (!nodes_subset(new, node_states[N_HIGH_MEMORY])) {
12143b42d28bSChristoph Lameter 		err = -EINVAL;
12153b42d28bSChristoph Lameter 		goto out;
12163b42d28bSChristoph Lameter 	}
12173b42d28bSChristoph Lameter 
121886c3a764SDavid Quigley 	err = security_task_movememory(task);
121986c3a764SDavid Quigley 	if (err)
122086c3a764SDavid Quigley 		goto out;
122186c3a764SDavid Quigley 
1222511030bcSChristoph Lameter 	err = do_migrate_pages(mm, &old, &new,
122374c00241SChristoph Lameter 		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
122439743889SChristoph Lameter out:
122539743889SChristoph Lameter 	mmput(mm);
122639743889SChristoph Lameter 	return err;
122739743889SChristoph Lameter }
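/*
 * Userspace sketch (assumes <numaif.h>; the pid and nodes are
 * hypothetical): move a task's pages from node 0 to node 1, subject to
 * the permission and cpuset checks above.
 */
#include <numaif.h>

static long drain_node0_to_node1(int pid)
{
	unsigned long old_nodes = 1UL << 0;
	unsigned long new_nodes = 1UL << 1;

	return migrate_pages(pid, 8 * sizeof(unsigned long),
			     &old_nodes, &new_nodes);
}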
122839743889SChristoph Lameter 
122939743889SChristoph Lameter 
12308bccd85fSChristoph Lameter /* Retrieve NUMA policy */
1231938bb9f5SHeiko Carstens SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1232938bb9f5SHeiko Carstens 		unsigned long __user *, nmask, unsigned long, maxnode,
1233938bb9f5SHeiko Carstens 		unsigned long, addr, unsigned long, flags)
12348bccd85fSChristoph Lameter {
1235dbcb0f19SAdrian Bunk 	int err;
1236dbcb0f19SAdrian Bunk 	int uninitialized_var(pval);
12378bccd85fSChristoph Lameter 	nodemask_t nodes;
12388bccd85fSChristoph Lameter 
12398bccd85fSChristoph Lameter 	if (nmask != NULL && maxnode < MAX_NUMNODES)
12408bccd85fSChristoph Lameter 		return -EINVAL;
12418bccd85fSChristoph Lameter 
12428bccd85fSChristoph Lameter 	err = do_get_mempolicy(&pval, &nodes, addr, flags);
12438bccd85fSChristoph Lameter 
12448bccd85fSChristoph Lameter 	if (err)
12458bccd85fSChristoph Lameter 		return err;
12468bccd85fSChristoph Lameter 
12478bccd85fSChristoph Lameter 	if (policy && put_user(pval, policy))
12488bccd85fSChristoph Lameter 		return -EFAULT;
12498bccd85fSChristoph Lameter 
12508bccd85fSChristoph Lameter 	if (nmask)
12518bccd85fSChristoph Lameter 		err = copy_nodes_to_user(nmask, maxnode, &nodes);
12528bccd85fSChristoph Lameter 
12538bccd85fSChristoph Lameter 	return err;
12548bccd85fSChristoph Lameter }
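/*
 * Userspace sketch (assumes <numaif.h>): with MPOL_F_NODE | MPOL_F_ADDR,
 * get_mempolicy() reports the node backing the page at @addr instead of
 * the policy mode.
 */
#include <numaif.h>

static int node_of(void *addr)
{
	int node;

	if (get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR))
		return -1;	/* errno has the reason */
	return node;
}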
12558bccd85fSChristoph Lameter 
12561da177e4SLinus Torvalds #ifdef CONFIG_COMPAT
12571da177e4SLinus Torvalds 
12581da177e4SLinus Torvalds asmlinkage long compat_sys_get_mempolicy(int __user *policy,
12591da177e4SLinus Torvalds 				     compat_ulong_t __user *nmask,
12601da177e4SLinus Torvalds 				     compat_ulong_t maxnode,
12611da177e4SLinus Torvalds 				     compat_ulong_t addr, compat_ulong_t flags)
12621da177e4SLinus Torvalds {
12631da177e4SLinus Torvalds 	long err;
12641da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
12651da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
12661da177e4SLinus Torvalds 	DECLARE_BITMAP(bm, MAX_NUMNODES);
12671da177e4SLinus Torvalds 
12681da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
12691da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
12701da177e4SLinus Torvalds 
12711da177e4SLinus Torvalds 	if (nmask)
12721da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
12731da177e4SLinus Torvalds 
12741da177e4SLinus Torvalds 	err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
12751da177e4SLinus Torvalds 
12761da177e4SLinus Torvalds 	if (!err && nmask) {
12771da177e4SLinus Torvalds 		err = copy_from_user(bm, nm, alloc_size);
12781da177e4SLinus Torvalds 		/* ensure entire bitmap is zeroed */
12791da177e4SLinus Torvalds 		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
12801da177e4SLinus Torvalds 		err |= compat_put_bitmap(nmask, bm, nr_bits);
12811da177e4SLinus Torvalds 	}
12821da177e4SLinus Torvalds 
12831da177e4SLinus Torvalds 	return err;
12841da177e4SLinus Torvalds }
12851da177e4SLinus Torvalds 
12861da177e4SLinus Torvalds asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
12871da177e4SLinus Torvalds 				     compat_ulong_t maxnode)
12881da177e4SLinus Torvalds {
12891da177e4SLinus Torvalds 	long err = 0;
12901da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
12911da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
12921da177e4SLinus Torvalds 	DECLARE_BITMAP(bm, MAX_NUMNODES);
12931da177e4SLinus Torvalds 
12941da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
12951da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
12961da177e4SLinus Torvalds 
12971da177e4SLinus Torvalds 	if (nmask) {
12981da177e4SLinus Torvalds 		err = compat_get_bitmap(bm, nmask, nr_bits);
12991da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
13001da177e4SLinus Torvalds 		err |= copy_to_user(nm, bm, alloc_size);
13011da177e4SLinus Torvalds 	}
13021da177e4SLinus Torvalds 
13031da177e4SLinus Torvalds 	if (err)
13041da177e4SLinus Torvalds 		return -EFAULT;
13051da177e4SLinus Torvalds 
13061da177e4SLinus Torvalds 	return sys_set_mempolicy(mode, nm, nr_bits+1);
13071da177e4SLinus Torvalds }
13081da177e4SLinus Torvalds 
13091da177e4SLinus Torvalds asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
13101da177e4SLinus Torvalds 			     compat_ulong_t mode, compat_ulong_t __user *nmask,
13111da177e4SLinus Torvalds 			     compat_ulong_t maxnode, compat_ulong_t flags)
13121da177e4SLinus Torvalds {
13131da177e4SLinus Torvalds 	long err = 0;
13141da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
13151da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
1316dfcd3c0dSAndi Kleen 	nodemask_t bm;
13171da177e4SLinus Torvalds 
13181da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
13191da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
13201da177e4SLinus Torvalds 
13211da177e4SLinus Torvalds 	if (nmask) {
1322dfcd3c0dSAndi Kleen 		err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
13231da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
1324dfcd3c0dSAndi Kleen 		err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
13251da177e4SLinus Torvalds 	}
13261da177e4SLinus Torvalds 
13271da177e4SLinus Torvalds 	if (err)
13281da177e4SLinus Torvalds 		return -EFAULT;
13291da177e4SLinus Torvalds 
13301da177e4SLinus Torvalds 	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
13311da177e4SLinus Torvalds }
13321da177e4SLinus Torvalds 
13331da177e4SLinus Torvalds #endif
13341da177e4SLinus Torvalds 
1335480eccf9SLee Schermerhorn /*
1336480eccf9SLee Schermerhorn  * get_vma_policy(@task, @vma, @addr)
1337480eccf9SLee Schermerhorn  * @task - task for fallback if vma policy == default
1338480eccf9SLee Schermerhorn  * @vma   - virtual memory area whose policy is sought
1339480eccf9SLee Schermerhorn  * @addr  - address in @vma for shared policy lookup
1340480eccf9SLee Schermerhorn  *
1341480eccf9SLee Schermerhorn  * Returns effective policy for a VMA at specified address.
1342480eccf9SLee Schermerhorn  * Falls back to @task or system default policy, as necessary.
134352cd3b07SLee Schermerhorn  * Current or other task's task mempolicy and non-shared vma policies
134452cd3b07SLee Schermerhorn  * are protected by the task's mmap_sem, which must be held for read by
134552cd3b07SLee Schermerhorn  * the caller.
134652cd3b07SLee Schermerhorn  * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
134752cd3b07SLee Schermerhorn  * count--added by the get_policy() vm_op, as appropriate--to protect against
134852cd3b07SLee Schermerhorn  * freeing by another task.  It is the caller's responsibility to free the
134952cd3b07SLee Schermerhorn  * extra reference for shared policies.
1350480eccf9SLee Schermerhorn  */
135148fce342SChristoph Lameter static struct mempolicy *get_vma_policy(struct task_struct *task,
135248fce342SChristoph Lameter 		struct vm_area_struct *vma, unsigned long addr)
13531da177e4SLinus Torvalds {
13546e21c8f1SChristoph Lameter 	struct mempolicy *pol = task->mempolicy;
13551da177e4SLinus Torvalds 
13561da177e4SLinus Torvalds 	if (vma) {
1357480eccf9SLee Schermerhorn 		if (vma->vm_ops && vma->vm_ops->get_policy) {
1358ae4d8c16SLee Schermerhorn 			struct mempolicy *vpol = vma->vm_ops->get_policy(vma,
1359ae4d8c16SLee Schermerhorn 									addr);
1360ae4d8c16SLee Schermerhorn 			if (vpol)
1361ae4d8c16SLee Schermerhorn 				pol = vpol;
1362bea904d5SLee Schermerhorn 		} else if (vma->vm_policy)
13631da177e4SLinus Torvalds 			pol = vma->vm_policy;
13641da177e4SLinus Torvalds 	}
13651da177e4SLinus Torvalds 	if (!pol)
13661da177e4SLinus Torvalds 		pol = &default_policy;
13671da177e4SLinus Torvalds 	return pol;
13681da177e4SLinus Torvalds }
13691da177e4SLinus Torvalds 
137052cd3b07SLee Schermerhorn /*
137152cd3b07SLee Schermerhorn  * Return a nodemask representing a mempolicy for filtering nodes for
137252cd3b07SLee Schermerhorn  * page allocation
137352cd3b07SLee Schermerhorn  */
137452cd3b07SLee Schermerhorn static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
137519770b32SMel Gorman {
137619770b32SMel Gorman 	/* Lower zones don't get a nodemask applied for MPOL_BIND */
137745c4745aSLee Schermerhorn 	if (unlikely(policy->mode == MPOL_BIND) &&
137819770b32SMel Gorman 			gfp_zone(gfp) >= policy_zone &&
137919770b32SMel Gorman 			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
138019770b32SMel Gorman 		return &policy->v.nodes;
138119770b32SMel Gorman 
138219770b32SMel Gorman 	return NULL;
138319770b32SMel Gorman }
138419770b32SMel Gorman 
138552cd3b07SLee Schermerhorn /* Return a zonelist indicated by gfp for node representing a mempolicy */
138652cd3b07SLee Schermerhorn static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy)
13871da177e4SLinus Torvalds {
1388fc36b8d3SLee Schermerhorn 	int nd = numa_node_id();
13891da177e4SLinus Torvalds 
139045c4745aSLee Schermerhorn 	switch (policy->mode) {
13911da177e4SLinus Torvalds 	case MPOL_PREFERRED:
1392fc36b8d3SLee Schermerhorn 		if (!(policy->flags & MPOL_F_LOCAL))
13931da177e4SLinus Torvalds 			nd = policy->v.preferred_node;
13941da177e4SLinus Torvalds 		break;
13951da177e4SLinus Torvalds 	case MPOL_BIND:
139619770b32SMel Gorman 		/*
139752cd3b07SLee Schermerhorn 		 * Normally, MPOL_BIND allocations are node-local within the
139852cd3b07SLee Schermerhorn 		 * allowed nodemask.  However, if __GFP_THISNODE is set and the
139952cd3b07SLee Schermerhorn 		 * current node is part of the mask, we use the zonelist for
140052cd3b07SLee Schermerhorn 		 * the first node in the mask instead.
140119770b32SMel Gorman 		 */
140219770b32SMel Gorman 		if (unlikely(gfp & __GFP_THISNODE) &&
140319770b32SMel Gorman 				unlikely(!node_isset(nd, policy->v.nodes)))
140419770b32SMel Gorman 			nd = first_node(policy->v.nodes);
140519770b32SMel Gorman 		break;
14061da177e4SLinus Torvalds 	case MPOL_INTERLEAVE: /* should not happen */
14071da177e4SLinus Torvalds 		break;
14081da177e4SLinus Torvalds 	default:
14091da177e4SLinus Torvalds 		BUG();
14101da177e4SLinus Torvalds 	}
14110e88460dSMel Gorman 	return node_zonelist(nd, gfp);
14121da177e4SLinus Torvalds }
14131da177e4SLinus Torvalds 
14141da177e4SLinus Torvalds /* Do dynamic interleaving for a process */
14151da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy)
14161da177e4SLinus Torvalds {
14171da177e4SLinus Torvalds 	unsigned nid, next;
14181da177e4SLinus Torvalds 	struct task_struct *me = current;
14191da177e4SLinus Torvalds 
14201da177e4SLinus Torvalds 	nid = me->il_next;
1421dfcd3c0dSAndi Kleen 	next = next_node(nid, policy->v.nodes);
14221da177e4SLinus Torvalds 	if (next >= MAX_NUMNODES)
1423dfcd3c0dSAndi Kleen 		next = first_node(policy->v.nodes);
1424f5b087b5SDavid Rientjes 	if (next < MAX_NUMNODES)
14251da177e4SLinus Torvalds 		me->il_next = next;
14261da177e4SLinus Torvalds 	return nid;
14271da177e4SLinus Torvalds }
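/*
 * Worked example (hypothetical policy): with v.nodes = {0,2} and
 * il_next = 2, next_node() runs past the last set bit, first_node()
 * wraps back to 0, and successive calls hand out 2, 0, 2, 0, ...
 */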
14281da177e4SLinus Torvalds 
1429dc85da15SChristoph Lameter /*
1430dc85da15SChristoph Lameter  * Depending on the memory policy provide a node from which to allocate the
1431dc85da15SChristoph Lameter  * next slab entry.
143252cd3b07SLee Schermerhorn  * @policy must be protected from freeing by the caller.  If @policy is
143352cd3b07SLee Schermerhorn  * the current task's mempolicy, this protection is implicit, as only the
143452cd3b07SLee Schermerhorn  * task can change its policy.  The system default policy requires no
143552cd3b07SLee Schermerhorn  * such protection.
1436dc85da15SChristoph Lameter  */
1437dc85da15SChristoph Lameter unsigned slab_node(struct mempolicy *policy)
1438dc85da15SChristoph Lameter {
1439fc36b8d3SLee Schermerhorn 	if (!policy || policy->flags & MPOL_F_LOCAL)
1440bea904d5SLee Schermerhorn 		return numa_node_id();
1441765c4507SChristoph Lameter 
1442bea904d5SLee Schermerhorn 	switch (policy->mode) {
1443bea904d5SLee Schermerhorn 	case MPOL_PREFERRED:
1444fc36b8d3SLee Schermerhorn 		/*
1445fc36b8d3SLee Schermerhorn 		 * handled MPOL_F_LOCAL above
1446fc36b8d3SLee Schermerhorn 		 */
1447bea904d5SLee Schermerhorn 		return policy->v.preferred_node;
1448bea904d5SLee Schermerhorn 
1449dc85da15SChristoph Lameter 	case MPOL_INTERLEAVE:
1450dc85da15SChristoph Lameter 		return interleave_nodes(policy);
1451dc85da15SChristoph Lameter 
1452dd1a239fSMel Gorman 	case MPOL_BIND: {
1453dc85da15SChristoph Lameter 		/*
1454dc85da15SChristoph Lameter 		 * Follow bind policy behavior and start allocation at the
1455dc85da15SChristoph Lameter 		 * first node.
1456dc85da15SChristoph Lameter 		 */
145719770b32SMel Gorman 		struct zonelist *zonelist;
145819770b32SMel Gorman 		struct zone *zone;
145919770b32SMel Gorman 		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
146019770b32SMel Gorman 		zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
146119770b32SMel Gorman 		(void)first_zones_zonelist(zonelist, highest_zoneidx,
146219770b32SMel Gorman 							&policy->v.nodes,
146319770b32SMel Gorman 							&zone);
146419770b32SMel Gorman 		return zone->node;
1465dd1a239fSMel Gorman 	}
1466dc85da15SChristoph Lameter 
1467dc85da15SChristoph Lameter 	default:
1468bea904d5SLee Schermerhorn 		BUG();
1469dc85da15SChristoph Lameter 	}
1470dc85da15SChristoph Lameter }
1471dc85da15SChristoph Lameter 
14721da177e4SLinus Torvalds /* Do static interleaving for a VMA with known offset. */
14731da177e4SLinus Torvalds static unsigned offset_il_node(struct mempolicy *pol,
14741da177e4SLinus Torvalds 		struct vm_area_struct *vma, unsigned long off)
14751da177e4SLinus Torvalds {
1476dfcd3c0dSAndi Kleen 	unsigned nnodes = nodes_weight(pol->v.nodes);
1477f5b087b5SDavid Rientjes 	unsigned target;
14781da177e4SLinus Torvalds 	int c;
14791da177e4SLinus Torvalds 	int nid = -1;
14801da177e4SLinus Torvalds 
1481f5b087b5SDavid Rientjes 	if (!nnodes)
1482f5b087b5SDavid Rientjes 		return numa_node_id();
1483f5b087b5SDavid Rientjes 	target = (unsigned int)off % nnodes;
14841da177e4SLinus Torvalds 	c = 0;
14851da177e4SLinus Torvalds 	do {
1486dfcd3c0dSAndi Kleen 		nid = next_node(nid, pol->v.nodes);
14871da177e4SLinus Torvalds 		c++;
14881da177e4SLinus Torvalds 	} while (c <= target);
14891da177e4SLinus Torvalds 	return nid;
14901da177e4SLinus Torvalds }
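/*
 * Worked example (hypothetical policy): with v.nodes = {1,3,6} and
 * off = 7, nnodes = 3 and target = 7 % 3 = 1, so the next_node()
 * walk stops on the second set bit and returns node 3.
 */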
14911da177e4SLinus Torvalds 
14925da7ca86SChristoph Lameter /* Determine a node number for interleave */
14935da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol,
14945da7ca86SChristoph Lameter 		 struct vm_area_struct *vma, unsigned long addr, int shift)
14955da7ca86SChristoph Lameter {
14965da7ca86SChristoph Lameter 	if (vma) {
14975da7ca86SChristoph Lameter 		unsigned long off;
14985da7ca86SChristoph Lameter 
14993b98b087SNishanth Aravamudan 		/*
15003b98b087SNishanth Aravamudan 		 * for small pages, there is no difference between
15013b98b087SNishanth Aravamudan 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
15023b98b087SNishanth Aravamudan 		 * for huge pages, since vm_pgoff is in units of small
15033b98b087SNishanth Aravamudan 		 * pages, we need to shift off the always 0 bits to get
15043b98b087SNishanth Aravamudan 		 * a useful offset.
15053b98b087SNishanth Aravamudan 		 */
15063b98b087SNishanth Aravamudan 		BUG_ON(shift < PAGE_SHIFT);
15073b98b087SNishanth Aravamudan 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
15085da7ca86SChristoph Lameter 		off += (addr - vma->vm_start) >> shift;
15095da7ca86SChristoph Lameter 		return offset_il_node(pol, vma, off);
15105da7ca86SChristoph Lameter 	} else
15115da7ca86SChristoph Lameter 		return interleave_nodes(pol);
15125da7ca86SChristoph Lameter }
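/*
 * Worked example (assumes 2MB huge pages on a 4KB-page arch): shift =
 * 21 and PAGE_SHIFT = 12, so a vma with vm_pgoff = 0x200 (in small
 * pages) contributes 0x200 >> 9 = 1 to the interleave offset, plus
 * (addr - vm_start) >> 21 for the position inside the mapping.
 */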
15135da7ca86SChristoph Lameter 
151400ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS
1515480eccf9SLee Schermerhorn /*
1516480eccf9SLee Schermerhorn  * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
1517480eccf9SLee Schermerhorn  * @vma = virtual memory area whose policy is sought
1518480eccf9SLee Schermerhorn  * @addr = address in @vma for shared policy lookup and interleave policy
1519480eccf9SLee Schermerhorn  * @gfp_flags = for requested zone
152019770b32SMel Gorman  * @mpol = pointer to mempolicy pointer for reference counted mempolicy
152119770b32SMel Gorman  * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
1522480eccf9SLee Schermerhorn  *
152352cd3b07SLee Schermerhorn  * Returns a zonelist suitable for a huge page allocation and a pointer
152452cd3b07SLee Schermerhorn  * to the struct mempolicy for conditional unref after allocation.
152552cd3b07SLee Schermerhorn  * If the effective policy is 'BIND', returns a pointer to the mempolicy's
152652cd3b07SLee Schermerhorn  * @nodemask for filtering the zonelist.
1527480eccf9SLee Schermerhorn  */
1528396faf03SMel Gorman struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
152919770b32SMel Gorman 				gfp_t gfp_flags, struct mempolicy **mpol,
153019770b32SMel Gorman 				nodemask_t **nodemask)
15315da7ca86SChristoph Lameter {
1532480eccf9SLee Schermerhorn 	struct zonelist *zl;
15335da7ca86SChristoph Lameter 
153452cd3b07SLee Schermerhorn 	*mpol = get_vma_policy(current, vma, addr);
153519770b32SMel Gorman 	*nodemask = NULL;	/* assume !MPOL_BIND */
15365da7ca86SChristoph Lameter 
153752cd3b07SLee Schermerhorn 	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
153852cd3b07SLee Schermerhorn 		zl = node_zonelist(interleave_nid(*mpol, vma, addr,
1539a5516438SAndi Kleen 				huge_page_shift(hstate_vma(vma))), gfp_flags);
154052cd3b07SLee Schermerhorn 	} else {
154152cd3b07SLee Schermerhorn 		zl = policy_zonelist(gfp_flags, *mpol);
154252cd3b07SLee Schermerhorn 		if ((*mpol)->mode == MPOL_BIND)
154352cd3b07SLee Schermerhorn 			*nodemask = &(*mpol)->v.nodes;
1544480eccf9SLee Schermerhorn 	}
1545480eccf9SLee Schermerhorn 	return zl;
15465da7ca86SChristoph Lameter }
154700ac59adSChen, Kenneth W #endif
15485da7ca86SChristoph Lameter 
15491da177e4SLinus Torvalds /* Allocate a page in interleaved policy.
15501da177e4SLinus Torvalds    Own path because it needs to do special accounting. */
1551662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1552662f3a0bSAndi Kleen 					unsigned nid)
15531da177e4SLinus Torvalds {
15541da177e4SLinus Torvalds 	struct zonelist *zl;
15551da177e4SLinus Torvalds 	struct page *page;
15561da177e4SLinus Torvalds 
15570e88460dSMel Gorman 	zl = node_zonelist(nid, gfp);
15581da177e4SLinus Torvalds 	page = __alloc_pages(gfp, order, zl);
1559dd1a239fSMel Gorman 	if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
1560ca889e6cSChristoph Lameter 		inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
15611da177e4SLinus Torvalds 	return page;
15621da177e4SLinus Torvalds }
15631da177e4SLinus Torvalds 
15641da177e4SLinus Torvalds /**
15651da177e4SLinus Torvalds  * 	alloc_page_vma	- Allocate a page for a VMA.
15661da177e4SLinus Torvalds  *
15671da177e4SLinus Torvalds  * 	@gfp:
15681da177e4SLinus Torvalds  *      %GFP_USER    user allocation.
15691da177e4SLinus Torvalds  *      %GFP_KERNEL  kernel allocations,
15701da177e4SLinus Torvalds  *      %GFP_HIGHMEM highmem/user allocations,
15711da177e4SLinus Torvalds  *      %GFP_FS      allocation should not call back into a file system.
15721da177e4SLinus Torvalds  *      %GFP_ATOMIC  don't sleep.
15731da177e4SLinus Torvalds  *
15741da177e4SLinus Torvalds  * 	@vma:  Pointer to VMA or NULL if not available.
15751da177e4SLinus Torvalds  *	@addr: Virtual Address of the allocation. Must be inside the VMA.
15761da177e4SLinus Torvalds  *
15771da177e4SLinus Torvalds  * 	This function allocates a page from the kernel page pool and applies
15781da177e4SLinus Torvalds  *	a NUMA policy associated with the VMA or the current process.
15791da177e4SLinus Torvalds  *	When VMA is not NULL caller must hold down_read on the mmap_sem of the
15801da177e4SLinus Torvalds  *	mm_struct of the VMA to prevent it from going away. Should be used for
15811da177e4SLinus Torvalds  *	all allocations for pages that will be mapped into
15821da177e4SLinus Torvalds  * 	user space. Returns NULL when no page can be allocated.
15831da177e4SLinus Torvalds  *
15841da177e4SLinus Torvalds  *	Should be called with the mmap_sem of the vma's mm held.
15851da177e4SLinus Torvalds  */
15861da177e4SLinus Torvalds struct page *
1587dd0fc66fSAl Viro alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
15881da177e4SLinus Torvalds {
15896e21c8f1SChristoph Lameter 	struct mempolicy *pol = get_vma_policy(current, vma, addr);
1590480eccf9SLee Schermerhorn 	struct zonelist *zl;
15911da177e4SLinus Torvalds 
159245c4745aSLee Schermerhorn 	if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
15931da177e4SLinus Torvalds 		unsigned nid;
15945da7ca86SChristoph Lameter 
15955da7ca86SChristoph Lameter 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
159652cd3b07SLee Schermerhorn 		mpol_cond_put(pol);
15971da177e4SLinus Torvalds 		return alloc_page_interleave(gfp, 0, nid);
15981da177e4SLinus Torvalds 	}
159952cd3b07SLee Schermerhorn 	zl = policy_zonelist(gfp, pol);
160052cd3b07SLee Schermerhorn 	if (unlikely(mpol_needs_cond_ref(pol))) {
1601480eccf9SLee Schermerhorn 		/*
160252cd3b07SLee Schermerhorn 		 * slow path: ref counted shared policy
1603480eccf9SLee Schermerhorn 		 */
160419770b32SMel Gorman 		struct page *page =  __alloc_pages_nodemask(gfp, 0,
160552cd3b07SLee Schermerhorn 						zl, policy_nodemask(gfp, pol));
1606f0be3d32SLee Schermerhorn 		__mpol_put(pol);
1607480eccf9SLee Schermerhorn 		return page;
1608480eccf9SLee Schermerhorn 	}
1609480eccf9SLee Schermerhorn 	/*
1610480eccf9SLee Schermerhorn 	 * fast path:  default or task policy
1611480eccf9SLee Schermerhorn 	 */
161252cd3b07SLee Schermerhorn 	return __alloc_pages_nodemask(gfp, 0, zl, policy_nodemask(gfp, pol));
16131da177e4SLinus Torvalds }
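/*
 * Typical use (sketch, as in the fault handlers elsewhere in mm/):
 *
 *	page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
 *	if (!page)
 *		return VM_FAULT_OOM;
 */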
16141da177e4SLinus Torvalds 
16151da177e4SLinus Torvalds /**
16161da177e4SLinus Torvalds  * 	alloc_pages_current - Allocate pages.
16171da177e4SLinus Torvalds  *
16181da177e4SLinus Torvalds  *	@gfp:
16191da177e4SLinus Torvalds  *		%GFP_USER   user allocation,
16201da177e4SLinus Torvalds  *      	%GFP_KERNEL kernel allocation,
16211da177e4SLinus Torvalds  *      	%GFP_HIGHMEM highmem allocation,
16221da177e4SLinus Torvalds  *      	%GFP_FS     don't call back into a file system.
16231da177e4SLinus Torvalds  *      	%GFP_ATOMIC don't sleep.
16241da177e4SLinus Torvalds  *	@order: Power of two of allocation size in pages. 0 is a single page.
16251da177e4SLinus Torvalds  *
16261da177e4SLinus Torvalds  *	Allocate a page from the kernel page pool.  When not in
16271da177e4SLinus Torvalds  *	interrupt context, apply the current process' NUMA policy.
16281da177e4SLinus Torvalds  *	Returns NULL when no page can be allocated.
16291da177e4SLinus Torvalds  *
1630cf2a473cSPaul Jackson  *	Don't call cpuset_update_task_memory_state() unless
16311da177e4SLinus Torvalds  *	1) it's ok to take cpuset_sem (can WAIT), and
16321da177e4SLinus Torvalds  *	2) allocating for current task (not interrupt).
16331da177e4SLinus Torvalds  */
1634dd0fc66fSAl Viro struct page *alloc_pages_current(gfp_t gfp, unsigned order)
16351da177e4SLinus Torvalds {
16361da177e4SLinus Torvalds 	struct mempolicy *pol = current->mempolicy;
16371da177e4SLinus Torvalds 
16389b819d20SChristoph Lameter 	if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
16391da177e4SLinus Torvalds 		pol = &default_policy;
164052cd3b07SLee Schermerhorn 
164152cd3b07SLee Schermerhorn 	/*
164252cd3b07SLee Schermerhorn 	 * No reference counting needed for current->mempolicy
164352cd3b07SLee Schermerhorn 	 * nor system default_policy
164452cd3b07SLee Schermerhorn 	 */
164545c4745aSLee Schermerhorn 	if (pol->mode == MPOL_INTERLEAVE)
16461da177e4SLinus Torvalds 		return alloc_page_interleave(gfp, order, interleave_nodes(pol));
164719770b32SMel Gorman 	return __alloc_pages_nodemask(gfp, order,
164852cd3b07SLee Schermerhorn 			policy_zonelist(gfp, pol), policy_nodemask(gfp, pol));
16491da177e4SLinus Torvalds }
16501da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_pages_current);
16511da177e4SLinus Torvalds 
16524225399aSPaul Jackson /*
1653846a16bfSLee Schermerhorn  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
16544225399aSPaul Jackson  * rebinds the mempolicy its copying by calling mpol_rebind_policy()
16554225399aSPaul Jackson  * with the mems_allowed returned by cpuset_mems_allowed().  This
16564225399aSPaul Jackson  * keeps mempolicies cpuset relative after its cpuset moves.  See
16574225399aSPaul Jackson  * further kernel/cpuset.c update_nodemask().
16584225399aSPaul Jackson  */
16594225399aSPaul Jackson 
1660846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */
1661846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old)
16621da177e4SLinus Torvalds {
16631da177e4SLinus Torvalds 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
16641da177e4SLinus Torvalds 
16651da177e4SLinus Torvalds 	if (!new)
16661da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
16674225399aSPaul Jackson 	if (current_cpuset_is_being_rebound()) {
16684225399aSPaul Jackson 		nodemask_t mems = cpuset_mems_allowed(current);
16694225399aSPaul Jackson 		mpol_rebind_policy(old, &mems);
16704225399aSPaul Jackson 	}
16711da177e4SLinus Torvalds 	*new = *old;
16721da177e4SLinus Torvalds 	atomic_set(&new->refcnt, 1);
16731da177e4SLinus Torvalds 	return new;
16741da177e4SLinus Torvalds }
16751da177e4SLinus Torvalds 
167652cd3b07SLee Schermerhorn /*
167752cd3b07SLee Schermerhorn  * If *frompol needs [has] an extra ref, copy *frompol to *tompol,
167852cd3b07SLee Schermerhorn  * eliminate the MPOL_F_* flags that require conditional ref and
167952cd3b07SLee Schermerhorn  * [NOTE!!!] drop the extra ref.  Not safe to reference *frompol directly
168052cd3b07SLee Schermerhorn  * after return.  Use the returned value.
168152cd3b07SLee Schermerhorn  *
168252cd3b07SLee Schermerhorn  * Allows use of a mempolicy for, e.g., multiple allocations with a single
168352cd3b07SLee Schermerhorn  * policy lookup, even if the policy needs/has extra ref on lookup.
168452cd3b07SLee Schermerhorn  * shmem_readahead needs this.
168552cd3b07SLee Schermerhorn  */
168652cd3b07SLee Schermerhorn struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
168752cd3b07SLee Schermerhorn 						struct mempolicy *frompol)
168852cd3b07SLee Schermerhorn {
168952cd3b07SLee Schermerhorn 	if (!mpol_needs_cond_ref(frompol))
169052cd3b07SLee Schermerhorn 		return frompol;
169152cd3b07SLee Schermerhorn 
169252cd3b07SLee Schermerhorn 	*tompol = *frompol;
169352cd3b07SLee Schermerhorn 	tompol->flags &= ~MPOL_F_SHARED;	/* copy doesn't need unref */
169452cd3b07SLee Schermerhorn 	__mpol_put(frompol);
169552cd3b07SLee Schermerhorn 	return tompol;
169652cd3b07SLee Schermerhorn }
169752cd3b07SLee Schermerhorn 
1698f5b087b5SDavid Rientjes static int mpol_match_intent(const struct mempolicy *a,
1699f5b087b5SDavid Rientjes 			     const struct mempolicy *b)
1700f5b087b5SDavid Rientjes {
1701f5b087b5SDavid Rientjes 	if (a->flags != b->flags)
1702f5b087b5SDavid Rientjes 		return 0;
1703f5b087b5SDavid Rientjes 	if (!mpol_store_user_nodemask(a))
1704f5b087b5SDavid Rientjes 		return 1;
1705f5b087b5SDavid Rientjes 	return nodes_equal(a->w.user_nodemask, b->w.user_nodemask);
1706f5b087b5SDavid Rientjes }
1707f5b087b5SDavid Rientjes 
17081da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */
17091da177e4SLinus Torvalds int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
17101da177e4SLinus Torvalds {
17111da177e4SLinus Torvalds 	if (!a || !b)
17121da177e4SLinus Torvalds 		return 0;
171345c4745aSLee Schermerhorn 	if (a->mode != b->mode)
17141da177e4SLinus Torvalds 		return 0;
171545c4745aSLee Schermerhorn 	if (a->mode != MPOL_DEFAULT && !mpol_match_intent(a, b))
1716f5b087b5SDavid Rientjes 		return 0;
171745c4745aSLee Schermerhorn 	switch (a->mode) {
171819770b32SMel Gorman 	case MPOL_BIND:
171919770b32SMel Gorman 		/* Fall through */
17201da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
1721dfcd3c0dSAndi Kleen 		return nodes_equal(a->v.nodes, b->v.nodes);
17221da177e4SLinus Torvalds 	case MPOL_PREFERRED:
1723fc36b8d3SLee Schermerhorn 		return a->v.preferred_node == b->v.preferred_node &&
1724fc36b8d3SLee Schermerhorn 			a->flags == b->flags;
17251da177e4SLinus Torvalds 	default:
17261da177e4SLinus Torvalds 		BUG();
17271da177e4SLinus Torvalds 		return 0;
17281da177e4SLinus Torvalds 	}
17291da177e4SLinus Torvalds }
17301da177e4SLinus Torvalds 
17311da177e4SLinus Torvalds /*
17321da177e4SLinus Torvalds  * Shared memory backing store policy support.
17331da177e4SLinus Torvalds  *
17341da177e4SLinus Torvalds  * Remember policies even when nobody has shared memory mapped.
17351da177e4SLinus Torvalds  * The policies are kept in Red-Black tree linked from the inode.
17361da177e4SLinus Torvalds  * They are protected by the sp->lock spinlock, which should be held
17371da177e4SLinus Torvalds  * for any accesses to the tree.
17381da177e4SLinus Torvalds  */
17391da177e4SLinus Torvalds 
17401da177e4SLinus Torvalds /* lookup first element intersecting start-end */
17411da177e4SLinus Torvalds /* Caller holds sp->lock */
17421da177e4SLinus Torvalds static struct sp_node *
17431da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
17441da177e4SLinus Torvalds {
17451da177e4SLinus Torvalds 	struct rb_node *n = sp->root.rb_node;
17461da177e4SLinus Torvalds 
17471da177e4SLinus Torvalds 	while (n) {
17481da177e4SLinus Torvalds 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
17491da177e4SLinus Torvalds 
17501da177e4SLinus Torvalds 		if (start >= p->end)
17511da177e4SLinus Torvalds 			n = n->rb_right;
17521da177e4SLinus Torvalds 		else if (end <= p->start)
17531da177e4SLinus Torvalds 			n = n->rb_left;
17541da177e4SLinus Torvalds 		else
17551da177e4SLinus Torvalds 			break;
17561da177e4SLinus Torvalds 	}
17571da177e4SLinus Torvalds 	if (!n)
17581da177e4SLinus Torvalds 		return NULL;
17591da177e4SLinus Torvalds 	for (;;) {
17601da177e4SLinus Torvalds 		struct sp_node *w = NULL;
17611da177e4SLinus Torvalds 		struct rb_node *prev = rb_prev(n);
17621da177e4SLinus Torvalds 		if (!prev)
17631da177e4SLinus Torvalds 			break;
17641da177e4SLinus Torvalds 		w = rb_entry(prev, struct sp_node, nd);
17651da177e4SLinus Torvalds 		if (w->end <= start)
17661da177e4SLinus Torvalds 			break;
17671da177e4SLinus Torvalds 		n = prev;
17681da177e4SLinus Torvalds 	}
17691da177e4SLinus Torvalds 	return rb_entry(n, struct sp_node, nd);
17701da177e4SLinus Torvalds }
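/*
 * Example (hypothetical tree): with stored ranges [0,4) [4,8) [8,12),
 * a lookup of [5,9) first breaks out of the descent on one of the two
 * overlapping nodes; the backward walk then rewinds to [4,8), the
 * first node intersecting the range.
 */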
17711da177e4SLinus Torvalds 
17721da177e4SLinus Torvalds /* Insert a new shared policy into the list. */
17731da177e4SLinus Torvalds /* Caller holds sp->lock */
17741da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new)
17751da177e4SLinus Torvalds {
17761da177e4SLinus Torvalds 	struct rb_node **p = &sp->root.rb_node;
17771da177e4SLinus Torvalds 	struct rb_node *parent = NULL;
17781da177e4SLinus Torvalds 	struct sp_node *nd;
17791da177e4SLinus Torvalds 
17801da177e4SLinus Torvalds 	while (*p) {
17811da177e4SLinus Torvalds 		parent = *p;
17821da177e4SLinus Torvalds 		nd = rb_entry(parent, struct sp_node, nd);
17831da177e4SLinus Torvalds 		if (new->start < nd->start)
17841da177e4SLinus Torvalds 			p = &(*p)->rb_left;
17851da177e4SLinus Torvalds 		else if (new->end > nd->end)
17861da177e4SLinus Torvalds 			p = &(*p)->rb_right;
17871da177e4SLinus Torvalds 		else
17881da177e4SLinus Torvalds 			BUG();
17891da177e4SLinus Torvalds 	}
17901da177e4SLinus Torvalds 	rb_link_node(&new->nd, parent, p);
17911da177e4SLinus Torvalds 	rb_insert_color(&new->nd, &sp->root);
1792140d5a49SPaul Mundt 	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
179345c4745aSLee Schermerhorn 		 new->policy ? new->policy->mode : 0);
17941da177e4SLinus Torvalds }
17951da177e4SLinus Torvalds 
17961da177e4SLinus Torvalds /* Find shared policy intersecting idx */
17971da177e4SLinus Torvalds struct mempolicy *
17981da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
17991da177e4SLinus Torvalds {
18001da177e4SLinus Torvalds 	struct mempolicy *pol = NULL;
18011da177e4SLinus Torvalds 	struct sp_node *sn;
18021da177e4SLinus Torvalds 
18031da177e4SLinus Torvalds 	if (!sp->root.rb_node)
18041da177e4SLinus Torvalds 		return NULL;
18051da177e4SLinus Torvalds 	spin_lock(&sp->lock);
18061da177e4SLinus Torvalds 	sn = sp_lookup(sp, idx, idx+1);
18071da177e4SLinus Torvalds 	if (sn) {
18081da177e4SLinus Torvalds 		mpol_get(sn->policy);
18091da177e4SLinus Torvalds 		pol = sn->policy;
18101da177e4SLinus Torvalds 	}
18111da177e4SLinus Torvalds 	spin_unlock(&sp->lock);
18121da177e4SLinus Torvalds 	return pol;
18131da177e4SLinus Torvalds }
18141da177e4SLinus Torvalds 
18151da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n)
18161da177e4SLinus Torvalds {
1817140d5a49SPaul Mundt 	pr_debug("deleting %lx-%lx\n", n->start, n->end);
18181da177e4SLinus Torvalds 	rb_erase(&n->nd, &sp->root);
1819f0be3d32SLee Schermerhorn 	mpol_put(n->policy);
18201da177e4SLinus Torvalds 	kmem_cache_free(sn_cache, n);
18211da177e4SLinus Torvalds }
18221da177e4SLinus Torvalds 
1823dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
1824dbcb0f19SAdrian Bunk 				struct mempolicy *pol)
18251da177e4SLinus Torvalds {
18261da177e4SLinus Torvalds 	struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
18271da177e4SLinus Torvalds 
18281da177e4SLinus Torvalds 	if (!n)
18291da177e4SLinus Torvalds 		return NULL;
18301da177e4SLinus Torvalds 	n->start = start;
18311da177e4SLinus Torvalds 	n->end = end;
18321da177e4SLinus Torvalds 	mpol_get(pol);
1833aab0b102SLee Schermerhorn 	pol->flags |= MPOL_F_SHARED;	/* for unref */
18341da177e4SLinus Torvalds 	n->policy = pol;
18351da177e4SLinus Torvalds 	return n;
18361da177e4SLinus Torvalds }
18371da177e4SLinus Torvalds 
18381da177e4SLinus Torvalds /* Replace a policy range. */
18391da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
18401da177e4SLinus Torvalds 				 unsigned long end, struct sp_node *new)
18411da177e4SLinus Torvalds {
18421da177e4SLinus Torvalds 	struct sp_node *n, *new2 = NULL;
18431da177e4SLinus Torvalds 
18441da177e4SLinus Torvalds restart:
18451da177e4SLinus Torvalds 	spin_lock(&sp->lock);
18461da177e4SLinus Torvalds 	n = sp_lookup(sp, start, end);
18471da177e4SLinus Torvalds 	/* Take care of old policies in the same range. */
18481da177e4SLinus Torvalds 	while (n && n->start < end) {
18491da177e4SLinus Torvalds 		struct rb_node *next = rb_next(&n->nd);
18501da177e4SLinus Torvalds 		if (n->start >= start) {
18511da177e4SLinus Torvalds 			if (n->end <= end)
18521da177e4SLinus Torvalds 				sp_delete(sp, n);
18531da177e4SLinus Torvalds 			else
18541da177e4SLinus Torvalds 				n->start = end;
18551da177e4SLinus Torvalds 		} else {
18561da177e4SLinus Torvalds 			/* Old policy spanning whole new range. */
18571da177e4SLinus Torvalds 			if (n->end > end) {
18581da177e4SLinus Torvalds 				if (!new2) {
18591da177e4SLinus Torvalds 					spin_unlock(&sp->lock);
18601da177e4SLinus Torvalds 					new2 = sp_alloc(end, n->end, n->policy);
18611da177e4SLinus Torvalds 					if (!new2)
18621da177e4SLinus Torvalds 						return -ENOMEM;
18631da177e4SLinus Torvalds 					goto restart;
18641da177e4SLinus Torvalds 				}
18651da177e4SLinus Torvalds 				n->end = start;
18661da177e4SLinus Torvalds 				sp_insert(sp, new2);
18671da177e4SLinus Torvalds 				new2 = NULL;
18681da177e4SLinus Torvalds 				break;
18691da177e4SLinus Torvalds 			} else
18701da177e4SLinus Torvalds 				n->end = start;
18711da177e4SLinus Torvalds 		}
18721da177e4SLinus Torvalds 		if (!next)
18731da177e4SLinus Torvalds 			break;
18741da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
18751da177e4SLinus Torvalds 	}
18761da177e4SLinus Torvalds 	if (new)
18771da177e4SLinus Torvalds 		sp_insert(sp, new);
18781da177e4SLinus Torvalds 	spin_unlock(&sp->lock);
18791da177e4SLinus Torvalds 	if (new2) {
1880f0be3d32SLee Schermerhorn 		mpol_put(new2->policy);
18811da177e4SLinus Torvalds 		kmem_cache_free(sn_cache, new2);
18821da177e4SLinus Torvalds 	}
18831da177e4SLinus Torvalds 	return 0;
18841da177e4SLinus Torvalds }
18851da177e4SLinus Torvalds 
188671fe804bSLee Schermerhorn /**
188771fe804bSLee Schermerhorn  * mpol_shared_policy_init - initialize shared policy for inode
188871fe804bSLee Schermerhorn  * @sp: pointer to inode shared policy
188971fe804bSLee Schermerhorn  * @mpol:  struct mempolicy to install
189071fe804bSLee Schermerhorn  *
189171fe804bSLee Schermerhorn  * Install non-NULL @mpol in inode's shared policy rb-tree.
189271fe804bSLee Schermerhorn  * On entry, the current task has a reference on a non-NULL @mpol.
189371fe804bSLee Schermerhorn  * This must be released on exit.
189471fe804bSLee Schermerhorn  */
189571fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
18967339ff83SRobin Holt {
1897*58568d2aSMiao Xie 	int ret;
1898*58568d2aSMiao Xie 
189971fe804bSLee Schermerhorn 	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
190071fe804bSLee Schermerhorn 	spin_lock_init(&sp->lock);
19017339ff83SRobin Holt 
190271fe804bSLee Schermerhorn 	if (mpol) {
19037339ff83SRobin Holt 		struct vm_area_struct pvma;
190471fe804bSLee Schermerhorn 		struct mempolicy *new;
19057339ff83SRobin Holt 
190671fe804bSLee Schermerhorn 		/* contextualize the tmpfs mount point mempolicy */
190771fe804bSLee Schermerhorn 		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
1908*58568d2aSMiao Xie 		if (IS_ERR(new)) {
190971fe804bSLee Schermerhorn 			mpol_put(mpol);	/* drop our ref on sb mpol */
191071fe804bSLee Schermerhorn 			return;		/* no valid nodemask intersection */
1911*58568d2aSMiao Xie 		}
1912*58568d2aSMiao Xie 
1913*58568d2aSMiao Xie 		task_lock(current);
1914*58568d2aSMiao Xie 		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask);
1915*58568d2aSMiao Xie 		task_unlock(current);
1916*58568d2aSMiao Xie 		mpol_put(mpol);	/* drop our ref on sb mpol */
1917*58568d2aSMiao Xie 		if (ret) {
1918*58568d2aSMiao Xie 			mpol_put(new);
1919*58568d2aSMiao Xie 			return;
1920*58568d2aSMiao Xie 		}
192171fe804bSLee Schermerhorn 
192271fe804bSLee Schermerhorn 		/* Create pseudo-vma that contains just the policy */
19237339ff83SRobin Holt 		memset(&pvma, 0, sizeof(struct vm_area_struct));
192471fe804bSLee Schermerhorn 		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
192571fe804bSLee Schermerhorn 		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
192671fe804bSLee Schermerhorn 		mpol_put(new);			/* drop initial ref */
19277339ff83SRobin Holt 	}
19287339ff83SRobin Holt }
19297339ff83SRobin Holt 
19301da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info,
19311da177e4SLinus Torvalds 			struct vm_area_struct *vma, struct mempolicy *npol)
19321da177e4SLinus Torvalds {
19331da177e4SLinus Torvalds 	int err;
19341da177e4SLinus Torvalds 	struct sp_node *new = NULL;
19351da177e4SLinus Torvalds 	unsigned long sz = vma_pages(vma);
19361da177e4SLinus Torvalds 
1937028fec41SDavid Rientjes 	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
19381da177e4SLinus Torvalds 		 vma->vm_pgoff,
193945c4745aSLee Schermerhorn 		 sz, npol ? npol->mode : -1,
1940028fec41SDavid Rientjes 		 npol ? npol->flags : -1,
1941dfcd3c0dSAndi Kleen 		 npol ? nodes_addr(npol->v.nodes)[0] : -1);
19421da177e4SLinus Torvalds 
19431da177e4SLinus Torvalds 	if (npol) {
19441da177e4SLinus Torvalds 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
19451da177e4SLinus Torvalds 		if (!new)
19461da177e4SLinus Torvalds 			return -ENOMEM;
19471da177e4SLinus Torvalds 	}
19481da177e4SLinus Torvalds 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
19491da177e4SLinus Torvalds 	if (err && new)
19501da177e4SLinus Torvalds 		kmem_cache_free(sn_cache, new);
19511da177e4SLinus Torvalds 	return err;
19521da177e4SLinus Torvalds }
19531da177e4SLinus Torvalds 
19541da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */
19551da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p)
19561da177e4SLinus Torvalds {
19571da177e4SLinus Torvalds 	struct sp_node *n;
19581da177e4SLinus Torvalds 	struct rb_node *next;
19591da177e4SLinus Torvalds 
19601da177e4SLinus Torvalds 	if (!p->root.rb_node)
19611da177e4SLinus Torvalds 		return;
19621da177e4SLinus Torvalds 	spin_lock(&p->lock);
19631da177e4SLinus Torvalds 	next = rb_first(&p->root);
19641da177e4SLinus Torvalds 	while (next) {
19651da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
19661da177e4SLinus Torvalds 		next = rb_next(&n->nd);
196790c5029eSAndi Kleen 		rb_erase(&n->nd, &p->root);
1968f0be3d32SLee Schermerhorn 		mpol_put(n->policy);
19691da177e4SLinus Torvalds 		kmem_cache_free(sn_cache, n);
19701da177e4SLinus Torvalds 	}
19711da177e4SLinus Torvalds 	spin_unlock(&p->lock);
19721da177e4SLinus Torvalds }
19731da177e4SLinus Torvalds 
19741da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */
19751da177e4SLinus Torvalds void __init numa_policy_init(void)
19761da177e4SLinus Torvalds {
1977b71636e2SPaul Mundt 	nodemask_t interleave_nodes;
1978b71636e2SPaul Mundt 	unsigned long largest = 0;
1979b71636e2SPaul Mundt 	int nid, prefer = 0;
1980b71636e2SPaul Mundt 
19811da177e4SLinus Torvalds 	policy_cache = kmem_cache_create("numa_policy",
19821da177e4SLinus Torvalds 					 sizeof(struct mempolicy),
198320c2df83SPaul Mundt 					 0, SLAB_PANIC, NULL);
19841da177e4SLinus Torvalds 
19851da177e4SLinus Torvalds 	sn_cache = kmem_cache_create("shared_policy_node",
19861da177e4SLinus Torvalds 				     sizeof(struct sp_node),
198720c2df83SPaul Mundt 				     0, SLAB_PANIC, NULL);
19881da177e4SLinus Torvalds 
1989b71636e2SPaul Mundt 	/*
1990b71636e2SPaul Mundt 	 * Set interleaving policy for system init. Interleaving is only
1991b71636e2SPaul Mundt 	 * enabled across suitably sized nodes (default is >= 16MB); if
1992b71636e2SPaul Mundt 	 * they're all smaller, fall back to the largest node.
1993b71636e2SPaul Mundt 	 */
1994b71636e2SPaul Mundt 	nodes_clear(interleave_nodes);
199556bbd65dSChristoph Lameter 	for_each_node_state(nid, N_HIGH_MEMORY) {
1996b71636e2SPaul Mundt 		unsigned long total_pages = node_present_pages(nid);
19971da177e4SLinus Torvalds 
1998b71636e2SPaul Mundt 		/* Preserve the largest node */
1999b71636e2SPaul Mundt 		if (largest < total_pages) {
2000b71636e2SPaul Mundt 			largest = total_pages;
2001b71636e2SPaul Mundt 			prefer = nid;
2002b71636e2SPaul Mundt 		}
2003b71636e2SPaul Mundt 
2004b71636e2SPaul Mundt 		/* Interleave this node? */
2005b71636e2SPaul Mundt 		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2006b71636e2SPaul Mundt 			node_set(nid, interleave_nodes);
2007b71636e2SPaul Mundt 	}
2008b71636e2SPaul Mundt 
2009b71636e2SPaul Mundt 	/* All too small, use the largest */
2010b71636e2SPaul Mundt 	if (unlikely(nodes_empty(interleave_nodes)))
2011b71636e2SPaul Mundt 		node_set(prefer, interleave_nodes);
2012b71636e2SPaul Mundt 
2013028fec41SDavid Rientjes 	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
20141da177e4SLinus Torvalds 		printk("numa_policy_init: interleaving failed\n");
20151da177e4SLinus Torvalds }
20161da177e4SLinus Torvalds 
20178bccd85fSChristoph Lameter /* Reset policy of current process to default */
20181da177e4SLinus Torvalds void numa_default_policy(void)
20191da177e4SLinus Torvalds {
2020028fec41SDavid Rientjes 	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
20211da177e4SLinus Torvalds }
202268860ec1SPaul Jackson 
20234225399aSPaul Jackson /*
2024095f1fc4SLee Schermerhorn  * Parse and format mempolicy from/to strings
2025095f1fc4SLee Schermerhorn  */
2026095f1fc4SLee Schermerhorn 
2027095f1fc4SLee Schermerhorn /*
2028fc36b8d3SLee Schermerhorn  * "local" is a pseudo-policy:  MPOL_PREFERRED with the MPOL_F_LOCAL flag
20293f226aa1SLee Schermerhorn  * Used only for mpol_parse_str() and mpol_to_str()
20301a75a6c8SChristoph Lameter  */
203153f2556bSLee Schermerhorn #define MPOL_LOCAL (MPOL_INTERLEAVE + 1)
203215ad7cdcSHelge Deller static const char * const policy_types[] =
203353f2556bSLee Schermerhorn 	{ "default", "prefer", "bind", "interleave", "local" };
20341a75a6c8SChristoph Lameter 
2035095f1fc4SLee Schermerhorn 
2036095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS
2037095f1fc4SLee Schermerhorn /**
2038095f1fc4SLee Schermerhorn  * mpol_parse_str - parse string to mempolicy
2039095f1fc4SLee Schermerhorn  * @str:  string containing mempolicy to parse
204071fe804bSLee Schermerhorn  * @mpol:  pointer to struct mempolicy pointer, returned on success.
204171fe804bSLee Schermerhorn  * @no_context:  flag whether to "contextualize" the mempolicy
2042095f1fc4SLee Schermerhorn  *
2043095f1fc4SLee Schermerhorn  * Format of input:
2044095f1fc4SLee Schermerhorn  *	<mode>[=<flags>][:<nodelist>]
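 *	e.g. "interleave:0-3", "prefer=static:1", "bind:1,3", "local"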
2045095f1fc4SLee Schermerhorn  *
204671fe804bSLee Schermerhorn  * if @no_context is true, save the input nodemask in w.user_nodemask in
204771fe804bSLee Schermerhorn  * the returned mempolicy.  This will be used to "clone" the mempolicy in
204871fe804bSLee Schermerhorn  * a specific context [cpuset] at a later time.  Used to parse tmpfs mpol
204971fe804bSLee Schermerhorn  * mount option.  Note that if 'static' or 'relative' mode flags were
205071fe804bSLee Schermerhorn  * specified, the input nodemask will already have been saved.  Saving
205171fe804bSLee Schermerhorn  * it again is redundant, but safe.
205271fe804bSLee Schermerhorn  *
205371fe804bSLee Schermerhorn  * On success, returns 0, else 1
2054095f1fc4SLee Schermerhorn  */
int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
{
	struct mempolicy *new = NULL;
	unsigned short uninitialized_var(mode);
	unsigned short uninitialized_var(mode_flags);
	nodemask_t nodes;
	char *nodelist = strchr(str, ':');
	char *flags = strchr(str, '=');
	int i;
	int err = 1;

	if (nodelist) {
		/* NUL-terminate mode or flags string */
		*nodelist++ = '\0';
		if (nodelist_parse(nodelist, nodes))
			goto out;
		if (!nodes_subset(nodes, node_states[N_HIGH_MEMORY]))
			goto out;
	} else
		nodes_clear(nodes);

	if (flags)
		*flags++ = '\0';	/* terminate mode string */

	for (i = 0; i <= MPOL_LOCAL; i++) {
		if (!strcmp(str, policy_types[i])) {
			mode = i;
			break;
		}
	}
	if (i > MPOL_LOCAL)
		goto out;

	switch (mode) {
	case MPOL_PREFERRED:
		/*
		 * Insist on a nodelist of one node only
		 */
		if (nodelist) {
			char *rest = nodelist;
			while (isdigit(*rest))
				rest++;
			if (!*rest)
				err = 0;
		}
		break;
	case MPOL_INTERLEAVE:
		/*
		 * Default to online nodes with memory if no nodelist
		 */
		if (!nodelist)
			nodes = node_states[N_HIGH_MEMORY];
		err = 0;
		break;
	case MPOL_LOCAL:
		/*
		 * Don't allow a nodelist; mpol_new() checks flags
		 */
		if (nodelist)
			goto out;
		mode = MPOL_PREFERRED;
		break;

	/*
	 * case MPOL_BIND:    mpol_new() enforces non-empty nodemask.
	 * case MPOL_DEFAULT: mpol_new() enforces empty nodemask, ignores flags.
	 */
	}

	mode_flags = 0;
	if (flags) {
		/*
		 * Currently, we only support two mutually exclusive
		 * mode flags.
		 */
		if (!strcmp(flags, "static"))
			mode_flags |= MPOL_F_STATIC_NODES;
		else if (!strcmp(flags, "relative"))
			mode_flags |= MPOL_F_RELATIVE_NODES;
		else
			err = 1;
	}

	new = mpol_new(mode, mode_flags, &nodes);
	if (IS_ERR(new))
		err = 1;
	else {
		int ret;

		task_lock(current);
		ret = mpol_set_nodemask(new, &nodes);
		task_unlock(current);
		if (ret)
			err = 1;
		else if (no_context) {
			/* save for contextualization */
			new->w.user_nodemask = nodes;
		}
	}

out:
	/* Restore string for error message */
	if (nodelist)
		*--nodelist = ':';
	if (flags)
		*--flags = '=';
	if (!err)
		*mpol = new;
	return err;
}
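
/*
 * Illustrative usage sketch, not part of the original source: how a
 * tmpfs-style caller might hand a mount option such as
 * "interleave=relative:0-3" to mpol_parse_str().  The wrapper name is
 * hypothetical; mpol_parse_str() and mpol_put() are the real interfaces.
 */
static inline int example_parse_mpol_option(char *opt,
					    struct mempolicy **result)
{
	/* @opt is NUL-terminated in place while parsing, then restored */
	if (mpol_parse_str(opt, result, 1))
		return -EINVAL;	/* unrecognized mode, flags or nodelist */
	/* caller now owns a reference; drop it with mpol_put(*result) */
	return 0;
}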
#endif /* CONFIG_TMPFS */

/**
 * mpol_to_str - format a mempolicy structure for printing
 * @buffer:  to contain formatted mempolicy string
 * @maxlen:  length of @buffer
 * @pol:  pointer to mempolicy to be formatted
 * @no_context:  "context free" mempolicy - use nodemask in w.user_nodemask
 *
 * Convert a mempolicy into a string.
 * Returns the number of characters written to @buffer on success,
 * or a negative errno on failure.
 */
int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
{
	char *p = buffer;
	int l;
	nodemask_t nodes;
	unsigned short mode;
	unsigned short flags = pol ? pol->flags : 0;

	/*
	 * Sanity check: room for longest mode, flag and some nodes
	 */
	VM_BUG_ON(maxlen < strlen("interleave") + strlen("relative") + 16);

	if (!pol || pol == &default_policy)
		mode = MPOL_DEFAULT;
	else
		mode = pol->mode;

	switch (mode) {
	case MPOL_DEFAULT:
		nodes_clear(nodes);
		break;

	case MPOL_PREFERRED:
		nodes_clear(nodes);
		if (flags & MPOL_F_LOCAL)
			mode = MPOL_LOCAL;	/* pseudo-policy */
		else
			node_set(pol->v.preferred_node, nodes);
		break;

	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		if (no_context)
			nodes = pol->w.user_nodemask;
		else
			nodes = pol->v.nodes;
		break;

	default:
		BUG();
	}

	l = strlen(policy_types[mode]);
	if (buffer + maxlen < p + l + 1)
		return -ENOSPC;

	strcpy(p, policy_types[mode]);
	p += l;

	if (flags & MPOL_MODE_FLAGS) {
		if (buffer + maxlen < p + 2)
			return -ENOSPC;
		*p++ = '=';

		/*
		 * Currently, the only defined flags are mutually exclusive
		 */
		if (flags & MPOL_F_STATIC_NODES)
			p += snprintf(p, buffer + maxlen - p, "static");
		else if (flags & MPOL_F_RELATIVE_NODES)
			p += snprintf(p, buffer + maxlen - p, "relative");
	}

	if (!nodes_empty(nodes)) {
		if (buffer + maxlen < p + 2)
			return -ENOSPC;
		*p++ = ':';
		p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
	}
	return p - buffer;
}
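
/*
 * Illustrative sketch, not part of the original source: formatting a
 * policy for display, much as show_numa_map() below does for
 * /proc/<pid>/numa_maps.  The function name is hypothetical; the buffer
 * comfortably exceeds the VM_BUG_ON() minimum in mpol_to_str().
 */
static inline void example_show_mpol(struct seq_file *m,
				     struct mempolicy *pol)
{
	char buf[64];	/* room for mode, "=flags" and a short nodelist */

	if (mpol_to_str(buf, sizeof(buf), pol, 0) >= 0)
		seq_printf(m, "policy=%s\n", buf);
}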

struct numa_maps {
	unsigned long pages;
	unsigned long anon;
	unsigned long active;
	unsigned long writeback;
	unsigned long mapcount_max;
	unsigned long dirty;
	unsigned long swapcache;
	unsigned long node[MAX_NUMNODES];
};

static void gather_stats(struct page *page, void *private, int pte_dirty)
{
	struct numa_maps *md = private;
	int count = page_mapcount(page);

	md->pages++;
	if (pte_dirty || PageDirty(page))
		md->dirty++;

	if (PageSwapCache(page))
		md->swapcache++;

	if (PageActive(page) || PageUnevictable(page))
		md->active++;

	if (PageWriteback(page))
		md->writeback++;

	if (PageAnon(page))
		md->anon++;

	if (count > md->mapcount_max)
		md->mapcount_max = count;

	md->node[page_to_nid(page)]++;
}

#ifdef CONFIG_HUGETLB_PAGE
static void check_huge_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct numa_maps *md)
{
	unsigned long addr;
	struct page *page;
	struct hstate *h = hstate_vma(vma);
	unsigned long sz = huge_page_size(h);

	for (addr = start; addr < end; addr += sz) {
		pte_t *ptep = huge_pte_offset(vma->vm_mm,
						addr & huge_page_mask(h));
		pte_t pte;

		if (!ptep)
			continue;

		pte = *ptep;
		if (pte_none(pte))
			continue;

		page = pte_page(pte);
		if (!page)
			continue;

		gather_stats(page, md, pte_dirty(*ptep));
	}
}
#else
static inline void check_huge_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct numa_maps *md)
{
}
#endif

/*
 * Display pages allocated per node and memory policy via /proc.
 */
int show_numa_map(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;
	struct numa_maps *md;
	struct file *file = vma->vm_file;
	struct mm_struct *mm = vma->vm_mm;
	struct mempolicy *pol;
	int n;
	char buffer[50];

	if (!mm)
		return 0;

	md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
	if (!md)
		return 0;

	pol = get_vma_policy(priv->task, vma, vma->vm_start);
	mpol_to_str(buffer, sizeof(buffer), pol, 0);
	mpol_cond_put(pol);

	seq_printf(m, "%08lx %s", vma->vm_start, buffer);

	if (file) {
		seq_printf(m, " file=");
		seq_path(m, &file->f_path, "\n\t= ");
	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
		seq_printf(m, " heap");
	} else if (vma->vm_start <= mm->start_stack &&
			vma->vm_end >= mm->start_stack) {
		seq_printf(m, " stack");
	}

	if (is_vm_hugetlb_page(vma)) {
		check_huge_range(vma, vma->vm_start, vma->vm_end, md);
		seq_printf(m, " huge");
	} else {
		check_pgd_range(vma, vma->vm_start, vma->vm_end,
			&node_states[N_HIGH_MEMORY], MPOL_MF_STATS, md);
	}

	if (!md->pages)
		goto out;

	if (md->anon)
		seq_printf(m, " anon=%lu", md->anon);

	if (md->dirty)
		seq_printf(m, " dirty=%lu", md->dirty);

	if (md->pages != md->anon && md->pages != md->dirty)
		seq_printf(m, " mapped=%lu", md->pages);

	if (md->mapcount_max > 1)
		seq_printf(m, " mapmax=%lu", md->mapcount_max);

	if (md->swapcache)
		seq_printf(m, " swapcache=%lu", md->swapcache);

	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
		seq_printf(m, " active=%lu", md->active);

	if (md->writeback)
		seq_printf(m, " writeback=%lu", md->writeback);

	for_each_node_state(n, N_HIGH_MEMORY)
		if (md->node[n])
			seq_printf(m, " N%d=%lu", n, md->node[n]);
out:
	seq_putc(m, '\n');
	kfree(md);

	if (m->count < m->size)
		m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
	return 0;
}
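
/*
 * Example of a line emitted by show_numa_map() above (illustrative only;
 * the address, path and counts are hypothetical):
 *
 *	2aaaaac00000 interleave=relative:0-3 file=/lib/libtest.so mapped=12 mapmax=3 N0=3 N1=3 N2=3 N3=3
 */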