xref: /openbmc/linux/mm/mempolicy.c (revision 2291990ab36b4b2d8a81b1f92e7a046e51632a60)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  * Simple NUMA memory policy for the Linux kernel.
31da177e4SLinus Torvalds  *
41da177e4SLinus Torvalds  * Copyright 2003,2004 Andi Kleen, SuSE Labs.
58bccd85fSChristoph Lameter  * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
61da177e4SLinus Torvalds  * Subject to the GNU Public License, version 2.
71da177e4SLinus Torvalds  *
81da177e4SLinus Torvalds  * NUMA policy allows the user to give hints about which node(s) memory should
91da177e4SLinus Torvalds  * be allocated.
101da177e4SLinus Torvalds  *
111da177e4SLinus Torvalds  * Support four policies per VMA and per process:
121da177e4SLinus Torvalds  *
131da177e4SLinus Torvalds  * The VMA policy has priority over the process policy for a page fault.
141da177e4SLinus Torvalds  *
151da177e4SLinus Torvalds  * interleave     Allocate memory interleaved over a set of nodes,
161da177e4SLinus Torvalds  *                with normal fallback if it fails.
171da177e4SLinus Torvalds  *                For VMA based allocations this interleaves based on the
181da177e4SLinus Torvalds  *                offset into the backing object or offset into the mapping
191da177e4SLinus Torvalds  *                for anonymous memory. For process policy a process counter
201da177e4SLinus Torvalds  *                is used.
218bccd85fSChristoph Lameter  *
221da177e4SLinus Torvalds  * bind           Only allocate memory on a specific set of nodes,
231da177e4SLinus Torvalds  *                no fallback.
248bccd85fSChristoph Lameter  *                FIXME: memory is allocated starting with the first node
258bccd85fSChristoph Lameter  *                and proceeding to the last. It would be better if bind truly
268bccd85fSChristoph Lameter  *                restricted the allocation to the given memory nodes instead.
278bccd85fSChristoph Lameter  *
281da177e4SLinus Torvalds  * preferred       Try a specific node first before normal fallback.
291da177e4SLinus Torvalds  *                As a special case node -1 here means do the allocation
301da177e4SLinus Torvalds  *                on the local CPU. This is normally identical to default,
311da177e4SLinus Torvalds  *                but useful to set in a VMA when you have a non default
321da177e4SLinus Torvalds  *                process policy.
338bccd85fSChristoph Lameter  *
341da177e4SLinus Torvalds  * default        Allocate on the local node first, or when on a VMA
351da177e4SLinus Torvalds  *                use the process policy. This is what Linux always did
361da177e4SLinus Torvalds  *		  in a NUMA aware kernel and still does by, ahem, default.
371da177e4SLinus Torvalds  *
381da177e4SLinus Torvalds  * The process policy is applied for most non-interrupt memory allocations
391da177e4SLinus Torvalds  * in that process' context. Interrupts ignore the policies and always
401da177e4SLinus Torvalds  * try to allocate on the local CPU. The VMA policy is only applied for memory
411da177e4SLinus Torvalds  * allocations for a VMA in the VM.
421da177e4SLinus Torvalds  *
431da177e4SLinus Torvalds  * Currently there are a few corner cases in swapping where the policy
441da177e4SLinus Torvalds  * is not applied, but the majority should be handled. When process policy
451da177e4SLinus Torvalds  * is used it is not remembered over swap outs/swap ins.
461da177e4SLinus Torvalds  *
471da177e4SLinus Torvalds  * Only the highest zone in the zone hierarchy gets policied. Allocations
481da177e4SLinus Torvalds  * requesting a lower zone just use default policy. This implies that
491da177e4SLinus Torvalds  * on systems with highmem, kernel lowmem allocations don't get policied.
501da177e4SLinus Torvalds  * Same with GFP_DMA allocations.
511da177e4SLinus Torvalds  *
521da177e4SLinus Torvalds  * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
531da177e4SLinus Torvalds  * all users and remembered even when nobody has memory mapped.
541da177e4SLinus Torvalds  */
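
/*
 * Illustrative sketch (userspace, not part of this file): how the policies
 * described above are typically selected through the set_mempolicy(2) and
 * mbind(2) system calls.  The node numbers, buf and len are assumptions
 * made only for the example.
 *
 *	#include <numaif.h>		// libnuma's syscall wrappers
 *
 *	unsigned long interleave_nodes = (1UL << 0) | (1UL << 1);
 *	unsigned long bind_node = 1UL << 1;
 *
 *	// process policy: interleave future allocations across nodes 0 and 1
 *	set_mempolicy(MPOL_INTERLEAVE, &interleave_nodes,
 *		      sizeof(interleave_nodes) * 8);
 *
 *	// VMA policy: restrict an existing mapping to node 1, no fallback
 *	mbind(buf, len, MPOL_BIND, &bind_node, sizeof(bind_node) * 8,
 *	      MPOL_MF_STRICT);
 *
 *	// revert to default (local) allocation for the process
 *	set_mempolicy(MPOL_DEFAULT, NULL, 0);
 */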
551da177e4SLinus Torvalds 
561da177e4SLinus Torvalds /* Notebook:
571da177e4SLinus Torvalds    fix mmap readahead to honour policy and enable policy for any page cache
581da177e4SLinus Torvalds    object
591da177e4SLinus Torvalds    statistics for bigpages
601da177e4SLinus Torvalds    global policy for page cache? currently it uses process policy. Requires
611da177e4SLinus Torvalds    first item above.
621da177e4SLinus Torvalds    handle mremap for shared memory (currently ignored for the policy)
631da177e4SLinus Torvalds    grows down?
641da177e4SLinus Torvalds    make bind policy root only? It can trigger oom much faster and the
651da177e4SLinus Torvalds    kernel is not always graceful about that.
661da177e4SLinus Torvalds */
671da177e4SLinus Torvalds 
681da177e4SLinus Torvalds #include <linux/mempolicy.h>
691da177e4SLinus Torvalds #include <linux/mm.h>
701da177e4SLinus Torvalds #include <linux/highmem.h>
711da177e4SLinus Torvalds #include <linux/hugetlb.h>
721da177e4SLinus Torvalds #include <linux/kernel.h>
731da177e4SLinus Torvalds #include <linux/sched.h>
741da177e4SLinus Torvalds #include <linux/nodemask.h>
751da177e4SLinus Torvalds #include <linux/cpuset.h>
761da177e4SLinus Torvalds #include <linux/gfp.h>
771da177e4SLinus Torvalds #include <linux/slab.h>
781da177e4SLinus Torvalds #include <linux/string.h>
791da177e4SLinus Torvalds #include <linux/module.h>
80b488893aSPavel Emelyanov #include <linux/nsproxy.h>
811da177e4SLinus Torvalds #include <linux/interrupt.h>
821da177e4SLinus Torvalds #include <linux/init.h>
831da177e4SLinus Torvalds #include <linux/compat.h>
84dc9aa5b9SChristoph Lameter #include <linux/swap.h>
851a75a6c8SChristoph Lameter #include <linux/seq_file.h>
861a75a6c8SChristoph Lameter #include <linux/proc_fs.h>
87b20a3503SChristoph Lameter #include <linux/migrate.h>
8895a402c3SChristoph Lameter #include <linux/rmap.h>
8986c3a764SDavid Quigley #include <linux/security.h>
90dbcb0f19SAdrian Bunk #include <linux/syscalls.h>
91dc9aa5b9SChristoph Lameter 
921da177e4SLinus Torvalds #include <asm/tlbflush.h>
931da177e4SLinus Torvalds #include <asm/uaccess.h>
941da177e4SLinus Torvalds 
9538e35860SChristoph Lameter /* Internal flags */
96dc9aa5b9SChristoph Lameter #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
9738e35860SChristoph Lameter #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
981a75a6c8SChristoph Lameter #define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2)		/* Gather statistics */
99dc9aa5b9SChristoph Lameter 
100fcc234f8SPekka Enberg static struct kmem_cache *policy_cache;
101fcc234f8SPekka Enberg static struct kmem_cache *sn_cache;
1021da177e4SLinus Torvalds 
1031da177e4SLinus Torvalds /* Highest zone. A specific allocation for a zone below that is not
1041da177e4SLinus Torvalds    policied. */
1056267276fSChristoph Lameter enum zone_type policy_zone = 0;
1061da177e4SLinus Torvalds 
107bea904d5SLee Schermerhorn /*
108bea904d5SLee Schermerhorn  * run-time system-wide default policy => local allocation
109bea904d5SLee Schermerhorn  */
110d42c6997SAndi Kleen struct mempolicy default_policy = {
1111da177e4SLinus Torvalds 	.refcnt = ATOMIC_INIT(1), /* never free it */
112bea904d5SLee Schermerhorn 	.mode = MPOL_PREFERRED,
113fc36b8d3SLee Schermerhorn 	.flags = MPOL_F_LOCAL,
1141da177e4SLinus Torvalds };
1151da177e4SLinus Torvalds 
11637012946SDavid Rientjes static const struct mempolicy_operations {
11737012946SDavid Rientjes 	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
11837012946SDavid Rientjes 	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
11937012946SDavid Rientjes } mpol_ops[MPOL_MAX];
12037012946SDavid Rientjes 
12119770b32SMel Gorman /* Check that the nodemask contains at least one populated zone */
12237012946SDavid Rientjes static int is_valid_nodemask(const nodemask_t *nodemask)
1231da177e4SLinus Torvalds {
12419770b32SMel Gorman 	int nd, k;
1251da177e4SLinus Torvalds 
12619770b32SMel Gorman 	/* Check that there is something useful in this mask */
12719770b32SMel Gorman 	k = policy_zone;
12819770b32SMel Gorman 
12919770b32SMel Gorman 	for_each_node_mask(nd, *nodemask) {
13019770b32SMel Gorman 		struct zone *z;
13119770b32SMel Gorman 
13219770b32SMel Gorman 		for (k = 0; k <= policy_zone; k++) {
13319770b32SMel Gorman 			z = &NODE_DATA(nd)->node_zones[k];
134dd942ae3SAndi Kleen 			if (z->present_pages > 0)
13519770b32SMel Gorman 				return 1;
136dd942ae3SAndi Kleen 		}
137dd942ae3SAndi Kleen 	}
13819770b32SMel Gorman 
13919770b32SMel Gorman 	return 0;
1401da177e4SLinus Torvalds }
1411da177e4SLinus Torvalds 
142f5b087b5SDavid Rientjes static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
143f5b087b5SDavid Rientjes {
1444c50bc01SDavid Rientjes 	return pol->flags & (MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES);
1454c50bc01SDavid Rientjes }
1464c50bc01SDavid Rientjes 
1474c50bc01SDavid Rientjes static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
1484c50bc01SDavid Rientjes 				   const nodemask_t *rel)
1494c50bc01SDavid Rientjes {
1504c50bc01SDavid Rientjes 	nodemask_t tmp;
1514c50bc01SDavid Rientjes 	nodes_fold(tmp, *orig, nodes_weight(*rel));
1524c50bc01SDavid Rientjes 	nodes_onto(*ret, tmp, *rel);
153f5b087b5SDavid Rientjes }
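
/*
 * Worked example (illustrative): with a user mask of {0,2} and a relative
 * ("allowed") mask of {4,5,6}, the weight is 3, nodes_fold() leaves {0,2},
 * and nodes_onto() maps bit 0 to node 4 and bit 2 to node 6, giving {4,6}.
 * A user bit at or beyond the weight wraps around: {3} folds to {0} and
 * therefore maps to node 4.
 */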
154f5b087b5SDavid Rientjes 
15537012946SDavid Rientjes static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
15637012946SDavid Rientjes {
15737012946SDavid Rientjes 	if (nodes_empty(*nodes))
15837012946SDavid Rientjes 		return -EINVAL;
15937012946SDavid Rientjes 	pol->v.nodes = *nodes;
16037012946SDavid Rientjes 	return 0;
16137012946SDavid Rientjes }
16237012946SDavid Rientjes 
16337012946SDavid Rientjes static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
16437012946SDavid Rientjes {
16537012946SDavid Rientjes 	if (!nodes)
166fc36b8d3SLee Schermerhorn 		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
16737012946SDavid Rientjes 	else if (nodes_empty(*nodes))
16837012946SDavid Rientjes 		return -EINVAL;			/*  no allowed nodes */
16937012946SDavid Rientjes 	else
17037012946SDavid Rientjes 		pol->v.preferred_node = first_node(*nodes);
17137012946SDavid Rientjes 	return 0;
17237012946SDavid Rientjes }
17337012946SDavid Rientjes 
17437012946SDavid Rientjes static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
17537012946SDavid Rientjes {
17637012946SDavid Rientjes 	if (!is_valid_nodemask(nodes))
17737012946SDavid Rientjes 		return -EINVAL;
17837012946SDavid Rientjes 	pol->v.nodes = *nodes;
17937012946SDavid Rientjes 	return 0;
18037012946SDavid Rientjes }
18137012946SDavid Rientjes 
1821da177e4SLinus Torvalds /* Create a new policy */
183028fec41SDavid Rientjes static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
184028fec41SDavid Rientjes 				  nodemask_t *nodes)
1851da177e4SLinus Torvalds {
1861da177e4SLinus Torvalds 	struct mempolicy *policy;
187f5b087b5SDavid Rientjes 	nodemask_t cpuset_context_nmask;
18837012946SDavid Rientjes 	int ret;
1891da177e4SLinus Torvalds 
190028fec41SDavid Rientjes 	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
191028fec41SDavid Rientjes 		 mode, flags, nodes ? nodes_addr(*nodes)[0] : -1);
192140d5a49SPaul Mundt 
1933e1f0645SDavid Rientjes 	if (mode == MPOL_DEFAULT) {
1943e1f0645SDavid Rientjes 		if (nodes && !nodes_empty(*nodes))
19537012946SDavid Rientjes 			return ERR_PTR(-EINVAL);
196bea904d5SLee Schermerhorn 		return NULL;	/* simply delete any existing policy */
19737012946SDavid Rientjes 	}
1983e1f0645SDavid Rientjes 	VM_BUG_ON(!nodes);
1993e1f0645SDavid Rientjes 
2003e1f0645SDavid Rientjes 	/*
2013e1f0645SDavid Rientjes 	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
2023e1f0645SDavid Rientjes 	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
2033e1f0645SDavid Rientjes 	 * All other modes require a valid pointer to a non-empty nodemask.
2043e1f0645SDavid Rientjes 	 */
2053e1f0645SDavid Rientjes 	if (mode == MPOL_PREFERRED) {
2063e1f0645SDavid Rientjes 		if (nodes_empty(*nodes)) {
2073e1f0645SDavid Rientjes 			if (((flags & MPOL_F_STATIC_NODES) ||
2083e1f0645SDavid Rientjes 			     (flags & MPOL_F_RELATIVE_NODES)))
2093e1f0645SDavid Rientjes 				return ERR_PTR(-EINVAL);
2103e1f0645SDavid Rientjes 			nodes = NULL;	/* flag local alloc */
2113e1f0645SDavid Rientjes 		}
2123e1f0645SDavid Rientjes 	} else if (nodes_empty(*nodes))
2133e1f0645SDavid Rientjes 		return ERR_PTR(-EINVAL);
2141da177e4SLinus Torvalds 	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2151da177e4SLinus Torvalds 	if (!policy)
2161da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
2171da177e4SLinus Torvalds 	atomic_set(&policy->refcnt, 1);
21845c4745aSLee Schermerhorn 	policy->mode = mode;
21937012946SDavid Rientjes 	policy->flags = flags;
2203e1f0645SDavid Rientjes 
2213e1f0645SDavid Rientjes 	if (nodes) {
2223e1f0645SDavid Rientjes 		/*
2233e1f0645SDavid Rientjes 		 * cpuset related setup doesn't apply to local allocation
2243e1f0645SDavid Rientjes 		 */
225f5b087b5SDavid Rientjes 		cpuset_update_task_memory_state();
2264c50bc01SDavid Rientjes 		if (flags & MPOL_F_RELATIVE_NODES)
2274c50bc01SDavid Rientjes 			mpol_relative_nodemask(&cpuset_context_nmask, nodes,
2284c50bc01SDavid Rientjes 					       &cpuset_current_mems_allowed);
2294c50bc01SDavid Rientjes 		else
2304c50bc01SDavid Rientjes 			nodes_and(cpuset_context_nmask, *nodes,
2314c50bc01SDavid Rientjes 				  cpuset_current_mems_allowed);
232f5b087b5SDavid Rientjes 		if (mpol_store_user_nodemask(policy))
233f5b087b5SDavid Rientjes 			policy->w.user_nodemask = *nodes;
234f5b087b5SDavid Rientjes 		else
23537012946SDavid Rientjes 			policy->w.cpuset_mems_allowed =
23637012946SDavid Rientjes 						cpuset_mems_allowed(current);
2371da177e4SLinus Torvalds 	}
2381da177e4SLinus Torvalds 
23937012946SDavid Rientjes 	ret = mpol_ops[mode].create(policy,
2403e1f0645SDavid Rientjes 				nodes ? &cpuset_context_nmask : NULL);
24137012946SDavid Rientjes 	if (ret < 0) {
24237012946SDavid Rientjes 		kmem_cache_free(policy_cache, policy);
24337012946SDavid Rientjes 		return ERR_PTR(ret);
24437012946SDavid Rientjes 	}
24537012946SDavid Rientjes 	return policy;
24637012946SDavid Rientjes }
24737012946SDavid Rientjes 
24852cd3b07SLee Schermerhorn /* Slow path of a mpol destructor. */
24952cd3b07SLee Schermerhorn void __mpol_put(struct mempolicy *p)
25052cd3b07SLee Schermerhorn {
25152cd3b07SLee Schermerhorn 	if (!atomic_dec_and_test(&p->refcnt))
25252cd3b07SLee Schermerhorn 		return;
25352cd3b07SLee Schermerhorn 	kmem_cache_free(policy_cache, p);
25452cd3b07SLee Schermerhorn }
25552cd3b07SLee Schermerhorn 
25637012946SDavid Rientjes static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
25737012946SDavid Rientjes {
25837012946SDavid Rientjes }
25937012946SDavid Rientjes 
26037012946SDavid Rientjes static void mpol_rebind_nodemask(struct mempolicy *pol,
26137012946SDavid Rientjes 				 const nodemask_t *nodes)
2621d0d2680SDavid Rientjes {
2631d0d2680SDavid Rientjes 	nodemask_t tmp;
2641d0d2680SDavid Rientjes 
26537012946SDavid Rientjes 	if (pol->flags & MPOL_F_STATIC_NODES)
26637012946SDavid Rientjes 		nodes_and(tmp, pol->w.user_nodemask, *nodes);
26737012946SDavid Rientjes 	else if (pol->flags & MPOL_F_RELATIVE_NODES)
26837012946SDavid Rientjes 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
2691d0d2680SDavid Rientjes 	else {
27037012946SDavid Rientjes 		nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed,
27137012946SDavid Rientjes 			    *nodes);
27237012946SDavid Rientjes 		pol->w.cpuset_mems_allowed = *nodes;
2731d0d2680SDavid Rientjes 	}
27437012946SDavid Rientjes 
2751d0d2680SDavid Rientjes 	pol->v.nodes = tmp;
2761d0d2680SDavid Rientjes 	if (!node_isset(current->il_next, tmp)) {
2771d0d2680SDavid Rientjes 		current->il_next = next_node(current->il_next, tmp);
2781d0d2680SDavid Rientjes 		if (current->il_next >= MAX_NUMNODES)
2791d0d2680SDavid Rientjes 			current->il_next = first_node(tmp);
2801d0d2680SDavid Rientjes 		if (current->il_next >= MAX_NUMNODES)
2811d0d2680SDavid Rientjes 			current->il_next = numa_node_id();
2821d0d2680SDavid Rientjes 	}
28337012946SDavid Rientjes }
28437012946SDavid Rientjes 
28537012946SDavid Rientjes static void mpol_rebind_preferred(struct mempolicy *pol,
28637012946SDavid Rientjes 				  const nodemask_t *nodes)
28737012946SDavid Rientjes {
28837012946SDavid Rientjes 	nodemask_t tmp;
28937012946SDavid Rientjes 
29037012946SDavid Rientjes 	if (pol->flags & MPOL_F_STATIC_NODES) {
2911d0d2680SDavid Rientjes 		int node = first_node(pol->w.user_nodemask);
2921d0d2680SDavid Rientjes 
293fc36b8d3SLee Schermerhorn 		if (node_isset(node, *nodes)) {
2941d0d2680SDavid Rientjes 			pol->v.preferred_node = node;
295fc36b8d3SLee Schermerhorn 			pol->flags &= ~MPOL_F_LOCAL;
296fc36b8d3SLee Schermerhorn 		} else
297fc36b8d3SLee Schermerhorn 			pol->flags |= MPOL_F_LOCAL;
29837012946SDavid Rientjes 	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
29937012946SDavid Rientjes 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
3001d0d2680SDavid Rientjes 		pol->v.preferred_node = first_node(tmp);
301fc36b8d3SLee Schermerhorn 	} else if (!(pol->flags & MPOL_F_LOCAL)) {
3021d0d2680SDavid Rientjes 		pol->v.preferred_node = node_remap(pol->v.preferred_node,
30337012946SDavid Rientjes 						   pol->w.cpuset_mems_allowed,
30437012946SDavid Rientjes 						   *nodes);
30537012946SDavid Rientjes 		pol->w.cpuset_mems_allowed = *nodes;
3061d0d2680SDavid Rientjes 	}
3071d0d2680SDavid Rientjes }
30837012946SDavid Rientjes 
30937012946SDavid Rientjes /* Migrate a policy to a different set of nodes */
31037012946SDavid Rientjes static void mpol_rebind_policy(struct mempolicy *pol,
31137012946SDavid Rientjes 			       const nodemask_t *newmask)
31237012946SDavid Rientjes {
31337012946SDavid Rientjes 	if (!pol)
31437012946SDavid Rientjes 		return;
31537012946SDavid Rientjes 	if (!mpol_store_user_nodemask(pol) &&
31637012946SDavid Rientjes 	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
31737012946SDavid Rientjes 		return;
31845c4745aSLee Schermerhorn 	mpol_ops[pol->mode].rebind(pol, newmask);
3191d0d2680SDavid Rientjes }
3201d0d2680SDavid Rientjes 
3211d0d2680SDavid Rientjes /*
3221d0d2680SDavid Rientjes  * Wrapper for mpol_rebind_policy() that just requires task
3231d0d2680SDavid Rientjes  * pointer, and updates task mempolicy.
3241d0d2680SDavid Rientjes  */
3251d0d2680SDavid Rientjes 
3261d0d2680SDavid Rientjes void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
3271d0d2680SDavid Rientjes {
3281d0d2680SDavid Rientjes 	mpol_rebind_policy(tsk->mempolicy, new);
3291d0d2680SDavid Rientjes }
3301d0d2680SDavid Rientjes 
3311d0d2680SDavid Rientjes /*
3321d0d2680SDavid Rientjes  * Rebind each vma in mm to new nodemask.
3331d0d2680SDavid Rientjes  *
3341d0d2680SDavid Rientjes  * Call holding a reference to mm.  Takes mm->mmap_sem during call.
3351d0d2680SDavid Rientjes  */
3361d0d2680SDavid Rientjes 
3371d0d2680SDavid Rientjes void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
3381d0d2680SDavid Rientjes {
3391d0d2680SDavid Rientjes 	struct vm_area_struct *vma;
3401d0d2680SDavid Rientjes 
3411d0d2680SDavid Rientjes 	down_write(&mm->mmap_sem);
3421d0d2680SDavid Rientjes 	for (vma = mm->mmap; vma; vma = vma->vm_next)
3431d0d2680SDavid Rientjes 		mpol_rebind_policy(vma->vm_policy, new);
3441d0d2680SDavid Rientjes 	up_write(&mm->mmap_sem);
3451d0d2680SDavid Rientjes }
3461d0d2680SDavid Rientjes 
34737012946SDavid Rientjes static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
34837012946SDavid Rientjes 	[MPOL_DEFAULT] = {
34937012946SDavid Rientjes 		.rebind = mpol_rebind_default,
35037012946SDavid Rientjes 	},
35137012946SDavid Rientjes 	[MPOL_INTERLEAVE] = {
35237012946SDavid Rientjes 		.create = mpol_new_interleave,
35337012946SDavid Rientjes 		.rebind = mpol_rebind_nodemask,
35437012946SDavid Rientjes 	},
35537012946SDavid Rientjes 	[MPOL_PREFERRED] = {
35637012946SDavid Rientjes 		.create = mpol_new_preferred,
35737012946SDavid Rientjes 		.rebind = mpol_rebind_preferred,
35837012946SDavid Rientjes 	},
35937012946SDavid Rientjes 	[MPOL_BIND] = {
36037012946SDavid Rientjes 		.create = mpol_new_bind,
36137012946SDavid Rientjes 		.rebind = mpol_rebind_nodemask,
36237012946SDavid Rientjes 	},
36337012946SDavid Rientjes };
36437012946SDavid Rientjes 
365397874dfSChristoph Lameter static void gather_stats(struct page *, void *, int pte_dirty);
366fc301289SChristoph Lameter static void migrate_page_add(struct page *page, struct list_head *pagelist,
367fc301289SChristoph Lameter 				unsigned long flags);
3681a75a6c8SChristoph Lameter 
36938e35860SChristoph Lameter /* Scan through pages checking if pages follow certain conditions. */
370b5810039SNick Piggin static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
371dc9aa5b9SChristoph Lameter 		unsigned long addr, unsigned long end,
372dc9aa5b9SChristoph Lameter 		const nodemask_t *nodes, unsigned long flags,
37338e35860SChristoph Lameter 		void *private)
3741da177e4SLinus Torvalds {
37591612e0dSHugh Dickins 	pte_t *orig_pte;
37691612e0dSHugh Dickins 	pte_t *pte;
377705e87c0SHugh Dickins 	spinlock_t *ptl;
378941150a3SHugh Dickins 
379705e87c0SHugh Dickins 	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
38091612e0dSHugh Dickins 	do {
3816aab341eSLinus Torvalds 		struct page *page;
38225ba77c1SAndy Whitcroft 		int nid;
38391612e0dSHugh Dickins 
38491612e0dSHugh Dickins 		if (!pte_present(*pte))
38591612e0dSHugh Dickins 			continue;
3866aab341eSLinus Torvalds 		page = vm_normal_page(vma, addr, *pte);
3876aab341eSLinus Torvalds 		if (!page)
38891612e0dSHugh Dickins 			continue;
389053837fcSNick Piggin 		/*
390053837fcSNick Piggin 		 * The check for PageReserved here is important to avoid
391053837fcSNick Piggin 		 * handling zero pages and other pages that may have been
392053837fcSNick Piggin 		 * marked special by the system.
393053837fcSNick Piggin 		 *
394053837fcSNick Piggin 		 * If PageReserved were not checked here then e.g.
395053837fcSNick Piggin 		 * the location of the zero page could have an influence
396053837fcSNick Piggin 		 * on MPOL_MF_STRICT, zero pages would be counted for
397053837fcSNick Piggin 		 * the per node stats, and there would be useless attempts
398053837fcSNick Piggin 		 * to put zero pages on the migration list.
399053837fcSNick Piggin 		 */
400f4598c8bSChristoph Lameter 		if (PageReserved(page))
401f4598c8bSChristoph Lameter 			continue;
4026aab341eSLinus Torvalds 		nid = page_to_nid(page);
40338e35860SChristoph Lameter 		if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
40438e35860SChristoph Lameter 			continue;
40538e35860SChristoph Lameter 
4061a75a6c8SChristoph Lameter 		if (flags & MPOL_MF_STATS)
407397874dfSChristoph Lameter 			gather_stats(page, private, pte_dirty(*pte));
408053837fcSNick Piggin 		else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
409fc301289SChristoph Lameter 			migrate_page_add(page, private, flags);
410dc9aa5b9SChristoph Lameter 		else
4111da177e4SLinus Torvalds 			break;
41291612e0dSHugh Dickins 	} while (pte++, addr += PAGE_SIZE, addr != end);
413705e87c0SHugh Dickins 	pte_unmap_unlock(orig_pte, ptl);
41491612e0dSHugh Dickins 	return addr != end;
41591612e0dSHugh Dickins }
41691612e0dSHugh Dickins 
417b5810039SNick Piggin static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
418dc9aa5b9SChristoph Lameter 		unsigned long addr, unsigned long end,
419dc9aa5b9SChristoph Lameter 		const nodemask_t *nodes, unsigned long flags,
42038e35860SChristoph Lameter 		void *private)
42191612e0dSHugh Dickins {
42291612e0dSHugh Dickins 	pmd_t *pmd;
42391612e0dSHugh Dickins 	unsigned long next;
42491612e0dSHugh Dickins 
42591612e0dSHugh Dickins 	pmd = pmd_offset(pud, addr);
42691612e0dSHugh Dickins 	do {
42791612e0dSHugh Dickins 		next = pmd_addr_end(addr, end);
42891612e0dSHugh Dickins 		if (pmd_none_or_clear_bad(pmd))
42991612e0dSHugh Dickins 			continue;
430dc9aa5b9SChristoph Lameter 		if (check_pte_range(vma, pmd, addr, next, nodes,
43138e35860SChristoph Lameter 				    flags, private))
43291612e0dSHugh Dickins 			return -EIO;
43391612e0dSHugh Dickins 	} while (pmd++, addr = next, addr != end);
43491612e0dSHugh Dickins 	return 0;
43591612e0dSHugh Dickins }
43691612e0dSHugh Dickins 
437b5810039SNick Piggin static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
438dc9aa5b9SChristoph Lameter 		unsigned long addr, unsigned long end,
439dc9aa5b9SChristoph Lameter 		const nodemask_t *nodes, unsigned long flags,
44038e35860SChristoph Lameter 		void *private)
44191612e0dSHugh Dickins {
44291612e0dSHugh Dickins 	pud_t *pud;
44391612e0dSHugh Dickins 	unsigned long next;
44491612e0dSHugh Dickins 
44591612e0dSHugh Dickins 	pud = pud_offset(pgd, addr);
44691612e0dSHugh Dickins 	do {
44791612e0dSHugh Dickins 		next = pud_addr_end(addr, end);
44891612e0dSHugh Dickins 		if (pud_none_or_clear_bad(pud))
44991612e0dSHugh Dickins 			continue;
450dc9aa5b9SChristoph Lameter 		if (check_pmd_range(vma, pud, addr, next, nodes,
45138e35860SChristoph Lameter 				    flags, private))
45291612e0dSHugh Dickins 			return -EIO;
45391612e0dSHugh Dickins 	} while (pud++, addr = next, addr != end);
45491612e0dSHugh Dickins 	return 0;
45591612e0dSHugh Dickins }
45691612e0dSHugh Dickins 
457b5810039SNick Piggin static inline int check_pgd_range(struct vm_area_struct *vma,
458dc9aa5b9SChristoph Lameter 		unsigned long addr, unsigned long end,
459dc9aa5b9SChristoph Lameter 		const nodemask_t *nodes, unsigned long flags,
46038e35860SChristoph Lameter 		void *private)
46191612e0dSHugh Dickins {
46291612e0dSHugh Dickins 	pgd_t *pgd;
46391612e0dSHugh Dickins 	unsigned long next;
46491612e0dSHugh Dickins 
465b5810039SNick Piggin 	pgd = pgd_offset(vma->vm_mm, addr);
46691612e0dSHugh Dickins 	do {
46791612e0dSHugh Dickins 		next = pgd_addr_end(addr, end);
46891612e0dSHugh Dickins 		if (pgd_none_or_clear_bad(pgd))
46991612e0dSHugh Dickins 			continue;
470dc9aa5b9SChristoph Lameter 		if (check_pud_range(vma, pgd, addr, next, nodes,
47138e35860SChristoph Lameter 				    flags, private))
47291612e0dSHugh Dickins 			return -EIO;
47391612e0dSHugh Dickins 	} while (pgd++, addr = next, addr != end);
47491612e0dSHugh Dickins 	return 0;
4751da177e4SLinus Torvalds }
4761da177e4SLinus Torvalds 
477dc9aa5b9SChristoph Lameter /*
478dc9aa5b9SChristoph Lameter  * Check if all pages in a range are on a set of nodes.
479dc9aa5b9SChristoph Lameter  * If pagelist != NULL then isolate pages from the LRU and
480dc9aa5b9SChristoph Lameter  * put them on the pagelist.
481dc9aa5b9SChristoph Lameter  */
4821da177e4SLinus Torvalds static struct vm_area_struct *
4831da177e4SLinus Torvalds check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
48438e35860SChristoph Lameter 		const nodemask_t *nodes, unsigned long flags, void *private)
4851da177e4SLinus Torvalds {
4861da177e4SLinus Torvalds 	int err;
4871da177e4SLinus Torvalds 	struct vm_area_struct *first, *vma, *prev;
4881da177e4SLinus Torvalds 
48990036ee5SChristoph Lameter 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
49090036ee5SChristoph Lameter 
491b20a3503SChristoph Lameter 		err = migrate_prep();
492b20a3503SChristoph Lameter 		if (err)
493b20a3503SChristoph Lameter 			return ERR_PTR(err);
49490036ee5SChristoph Lameter 	}
495053837fcSNick Piggin 
4961da177e4SLinus Torvalds 	first = find_vma(mm, start);
4971da177e4SLinus Torvalds 	if (!first)
4981da177e4SLinus Torvalds 		return ERR_PTR(-EFAULT);
4991da177e4SLinus Torvalds 	prev = NULL;
5001da177e4SLinus Torvalds 	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
501dc9aa5b9SChristoph Lameter 		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
5021da177e4SLinus Torvalds 			if (!vma->vm_next && vma->vm_end < end)
5031da177e4SLinus Torvalds 				return ERR_PTR(-EFAULT);
5041da177e4SLinus Torvalds 			if (prev && prev->vm_end < vma->vm_start)
5051da177e4SLinus Torvalds 				return ERR_PTR(-EFAULT);
506dc9aa5b9SChristoph Lameter 		}
507dc9aa5b9SChristoph Lameter 		if (!is_vm_hugetlb_page(vma) &&
508dc9aa5b9SChristoph Lameter 		    ((flags & MPOL_MF_STRICT) ||
509dc9aa5b9SChristoph Lameter 		     ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
510dc9aa5b9SChristoph Lameter 				vma_migratable(vma)))) {
5115b952b3cSAndi Kleen 			unsigned long endvma = vma->vm_end;
512dc9aa5b9SChristoph Lameter 
5135b952b3cSAndi Kleen 			if (endvma > end)
5145b952b3cSAndi Kleen 				endvma = end;
5155b952b3cSAndi Kleen 			if (vma->vm_start > start)
5165b952b3cSAndi Kleen 				start = vma->vm_start;
517dc9aa5b9SChristoph Lameter 			err = check_pgd_range(vma, start, endvma, nodes,
51838e35860SChristoph Lameter 						flags, private);
5191da177e4SLinus Torvalds 			if (err) {
5201da177e4SLinus Torvalds 				first = ERR_PTR(err);
5211da177e4SLinus Torvalds 				break;
5221da177e4SLinus Torvalds 			}
5231da177e4SLinus Torvalds 		}
5241da177e4SLinus Torvalds 		prev = vma;
5251da177e4SLinus Torvalds 	}
5261da177e4SLinus Torvalds 	return first;
5271da177e4SLinus Torvalds }
5281da177e4SLinus Torvalds 
5291da177e4SLinus Torvalds /* Apply policy to a single VMA */
5301da177e4SLinus Torvalds static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
5311da177e4SLinus Torvalds {
5321da177e4SLinus Torvalds 	int err = 0;
5331da177e4SLinus Torvalds 	struct mempolicy *old = vma->vm_policy;
5341da177e4SLinus Torvalds 
535140d5a49SPaul Mundt 	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
5361da177e4SLinus Torvalds 		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
5371da177e4SLinus Torvalds 		 vma->vm_ops, vma->vm_file,
5381da177e4SLinus Torvalds 		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
5391da177e4SLinus Torvalds 
5401da177e4SLinus Torvalds 	if (vma->vm_ops && vma->vm_ops->set_policy)
5411da177e4SLinus Torvalds 		err = vma->vm_ops->set_policy(vma, new);
5421da177e4SLinus Torvalds 	if (!err) {
5431da177e4SLinus Torvalds 		mpol_get(new);
5441da177e4SLinus Torvalds 		vma->vm_policy = new;
545f0be3d32SLee Schermerhorn 		mpol_put(old);
5461da177e4SLinus Torvalds 	}
5471da177e4SLinus Torvalds 	return err;
5481da177e4SLinus Torvalds }
5491da177e4SLinus Torvalds 
5501da177e4SLinus Torvalds /* Step 2: apply policy to a range and do splits. */
5511da177e4SLinus Torvalds static int mbind_range(struct vm_area_struct *vma, unsigned long start,
5521da177e4SLinus Torvalds 		       unsigned long end, struct mempolicy *new)
5531da177e4SLinus Torvalds {
5541da177e4SLinus Torvalds 	struct vm_area_struct *next;
5551da177e4SLinus Torvalds 	int err;
5561da177e4SLinus Torvalds 
5571da177e4SLinus Torvalds 	err = 0;
5581da177e4SLinus Torvalds 	for (; vma && vma->vm_start < end; vma = next) {
5591da177e4SLinus Torvalds 		next = vma->vm_next;
5601da177e4SLinus Torvalds 		if (vma->vm_start < start)
5611da177e4SLinus Torvalds 			err = split_vma(vma->vm_mm, vma, start, 1);
5621da177e4SLinus Torvalds 		if (!err && vma->vm_end > end)
5631da177e4SLinus Torvalds 			err = split_vma(vma->vm_mm, vma, end, 0);
5641da177e4SLinus Torvalds 		if (!err)
5651da177e4SLinus Torvalds 			err = policy_vma(vma, new);
5661da177e4SLinus Torvalds 		if (err)
5671da177e4SLinus Torvalds 			break;
5681da177e4SLinus Torvalds 	}
5691da177e4SLinus Torvalds 	return err;
5701da177e4SLinus Torvalds }
5711da177e4SLinus Torvalds 
572c61afb18SPaul Jackson /*
573c61afb18SPaul Jackson  * Update task->flags PF_MEMPOLICY bit: set iff non-default
574c61afb18SPaul Jackson  * mempolicy.  Allows more rapid checking of this (combined perhaps
575c61afb18SPaul Jackson  * with other PF_* flag bits) on memory allocation hot code paths.
576c61afb18SPaul Jackson  *
577c61afb18SPaul Jackson  * If called from outside this file, the task 'p' should -only- be
578c61afb18SPaul Jackson  * a newly forked child not yet visible on the task list, because
579c61afb18SPaul Jackson  * manipulating the task flags of a visible task is not safe.
580c61afb18SPaul Jackson  *
581c61afb18SPaul Jackson  * The above limitation is why this routine has the funny name
582c61afb18SPaul Jackson  * mpol_fix_fork_child_flag().
583c61afb18SPaul Jackson  *
584c61afb18SPaul Jackson  * It is also safe to call this with a task pointer of current,
585c61afb18SPaul Jackson  * which the static wrapper mpol_set_task_struct_flag() does,
586c61afb18SPaul Jackson  * for use within this file.
587c61afb18SPaul Jackson  */
588c61afb18SPaul Jackson 
589c61afb18SPaul Jackson void mpol_fix_fork_child_flag(struct task_struct *p)
590c61afb18SPaul Jackson {
591c61afb18SPaul Jackson 	if (p->mempolicy)
592c61afb18SPaul Jackson 		p->flags |= PF_MEMPOLICY;
593c61afb18SPaul Jackson 	else
594c61afb18SPaul Jackson 		p->flags &= ~PF_MEMPOLICY;
595c61afb18SPaul Jackson }
596c61afb18SPaul Jackson 
597c61afb18SPaul Jackson static void mpol_set_task_struct_flag(void)
598c61afb18SPaul Jackson {
599c61afb18SPaul Jackson 	mpol_fix_fork_child_flag(current);
600c61afb18SPaul Jackson }
601c61afb18SPaul Jackson 
6021da177e4SLinus Torvalds /* Set the process memory policy */
603028fec41SDavid Rientjes static long do_set_mempolicy(unsigned short mode, unsigned short flags,
604028fec41SDavid Rientjes 			     nodemask_t *nodes)
6051da177e4SLinus Torvalds {
6061da177e4SLinus Torvalds 	struct mempolicy *new;
607f4e53d91SLee Schermerhorn 	struct mm_struct *mm = current->mm;
6081da177e4SLinus Torvalds 
609028fec41SDavid Rientjes 	new = mpol_new(mode, flags, nodes);
6101da177e4SLinus Torvalds 	if (IS_ERR(new))
6111da177e4SLinus Torvalds 		return PTR_ERR(new);
612f4e53d91SLee Schermerhorn 
613f4e53d91SLee Schermerhorn 	/*
614f4e53d91SLee Schermerhorn 	 * prevent changing our mempolicy while show_numa_maps()
615f4e53d91SLee Schermerhorn 	 * is using it.
616f4e53d91SLee Schermerhorn 	 * Note:  do_set_mempolicy() can be called at init time
617f4e53d91SLee Schermerhorn 	 * with no 'mm'.
618f4e53d91SLee Schermerhorn 	 */
619f4e53d91SLee Schermerhorn 	if (mm)
620f4e53d91SLee Schermerhorn 		down_write(&mm->mmap_sem);
621f0be3d32SLee Schermerhorn 	mpol_put(current->mempolicy);
6221da177e4SLinus Torvalds 	current->mempolicy = new;
623c61afb18SPaul Jackson 	mpol_set_task_struct_flag();
62445c4745aSLee Schermerhorn 	if (new && new->mode == MPOL_INTERLEAVE &&
625f5b087b5SDavid Rientjes 	    nodes_weight(new->v.nodes))
626dfcd3c0dSAndi Kleen 		current->il_next = first_node(new->v.nodes);
627f4e53d91SLee Schermerhorn 	if (mm)
628f4e53d91SLee Schermerhorn 		up_write(&mm->mmap_sem);
629f4e53d91SLee Schermerhorn 
6301da177e4SLinus Torvalds 	return 0;
6311da177e4SLinus Torvalds }
6321da177e4SLinus Torvalds 
633bea904d5SLee Schermerhorn /*
634bea904d5SLee Schermerhorn  * Return nodemask for policy for get_mempolicy() query
635bea904d5SLee Schermerhorn  */
636bea904d5SLee Schermerhorn static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
6371da177e4SLinus Torvalds {
638dfcd3c0dSAndi Kleen 	nodes_clear(*nodes);
639bea904d5SLee Schermerhorn 	if (p == &default_policy)
640bea904d5SLee Schermerhorn 		return;
641bea904d5SLee Schermerhorn 
64245c4745aSLee Schermerhorn 	switch (p->mode) {
64319770b32SMel Gorman 	case MPOL_BIND:
64419770b32SMel Gorman 		/* Fall through */
6451da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
646dfcd3c0dSAndi Kleen 		*nodes = p->v.nodes;
6471da177e4SLinus Torvalds 		break;
6481da177e4SLinus Torvalds 	case MPOL_PREFERRED:
649fc36b8d3SLee Schermerhorn 		if (!(p->flags & MPOL_F_LOCAL))
650dfcd3c0dSAndi Kleen 			node_set(p->v.preferred_node, *nodes);
65153f2556bSLee Schermerhorn 		/* else return empty node mask for local allocation */
6521da177e4SLinus Torvalds 		break;
6531da177e4SLinus Torvalds 	default:
6541da177e4SLinus Torvalds 		BUG();
6551da177e4SLinus Torvalds 	}
6561da177e4SLinus Torvalds }
6571da177e4SLinus Torvalds 
6581da177e4SLinus Torvalds static int lookup_node(struct mm_struct *mm, unsigned long addr)
6591da177e4SLinus Torvalds {
6601da177e4SLinus Torvalds 	struct page *p;
6611da177e4SLinus Torvalds 	int err;
6621da177e4SLinus Torvalds 
6631da177e4SLinus Torvalds 	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
6641da177e4SLinus Torvalds 	if (err >= 0) {
6651da177e4SLinus Torvalds 		err = page_to_nid(p);
6661da177e4SLinus Torvalds 		put_page(p);
6671da177e4SLinus Torvalds 	}
6681da177e4SLinus Torvalds 	return err;
6691da177e4SLinus Torvalds }
6701da177e4SLinus Torvalds 
6711da177e4SLinus Torvalds /* Retrieve NUMA policy */
672dbcb0f19SAdrian Bunk static long do_get_mempolicy(int *policy, nodemask_t *nmask,
6731da177e4SLinus Torvalds 			     unsigned long addr, unsigned long flags)
6741da177e4SLinus Torvalds {
6758bccd85fSChristoph Lameter 	int err;
6761da177e4SLinus Torvalds 	struct mm_struct *mm = current->mm;
6771da177e4SLinus Torvalds 	struct vm_area_struct *vma = NULL;
6781da177e4SLinus Torvalds 	struct mempolicy *pol = current->mempolicy;
6791da177e4SLinus Torvalds 
680cf2a473cSPaul Jackson 	cpuset_update_task_memory_state();
681754af6f5SLee Schermerhorn 	if (flags &
682754af6f5SLee Schermerhorn 		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
6831da177e4SLinus Torvalds 		return -EINVAL;
684754af6f5SLee Schermerhorn 
685754af6f5SLee Schermerhorn 	if (flags & MPOL_F_MEMS_ALLOWED) {
686754af6f5SLee Schermerhorn 		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
687754af6f5SLee Schermerhorn 			return -EINVAL;
688754af6f5SLee Schermerhorn 		*policy = 0;	/* just so it's initialized */
689754af6f5SLee Schermerhorn 		*nmask  = cpuset_current_mems_allowed;
690754af6f5SLee Schermerhorn 		return 0;
691754af6f5SLee Schermerhorn 	}
692754af6f5SLee Schermerhorn 
6931da177e4SLinus Torvalds 	if (flags & MPOL_F_ADDR) {
694bea904d5SLee Schermerhorn 		/*
695bea904d5SLee Schermerhorn 		 * Do NOT fall back to task policy if the
696bea904d5SLee Schermerhorn 		 * vma/shared policy at addr is NULL.  We
697bea904d5SLee Schermerhorn 		 * want to return MPOL_DEFAULT in this case.
698bea904d5SLee Schermerhorn 		 */
6991da177e4SLinus Torvalds 		down_read(&mm->mmap_sem);
7001da177e4SLinus Torvalds 		vma = find_vma_intersection(mm, addr, addr+1);
7011da177e4SLinus Torvalds 		if (!vma) {
7021da177e4SLinus Torvalds 			up_read(&mm->mmap_sem);
7031da177e4SLinus Torvalds 			return -EFAULT;
7041da177e4SLinus Torvalds 		}
7051da177e4SLinus Torvalds 		if (vma->vm_ops && vma->vm_ops->get_policy)
7061da177e4SLinus Torvalds 			pol = vma->vm_ops->get_policy(vma, addr);
7071da177e4SLinus Torvalds 		else
7081da177e4SLinus Torvalds 			pol = vma->vm_policy;
7091da177e4SLinus Torvalds 	} else if (addr)
7101da177e4SLinus Torvalds 		return -EINVAL;
7111da177e4SLinus Torvalds 
7121da177e4SLinus Torvalds 	if (!pol)
713bea904d5SLee Schermerhorn 		pol = &default_policy;	/* indicates default behavior */
7141da177e4SLinus Torvalds 
7151da177e4SLinus Torvalds 	if (flags & MPOL_F_NODE) {
7161da177e4SLinus Torvalds 		if (flags & MPOL_F_ADDR) {
7171da177e4SLinus Torvalds 			err = lookup_node(mm, addr);
7181da177e4SLinus Torvalds 			if (err < 0)
7191da177e4SLinus Torvalds 				goto out;
7208bccd85fSChristoph Lameter 			*policy = err;
7211da177e4SLinus Torvalds 		} else if (pol == current->mempolicy &&
72245c4745aSLee Schermerhorn 				pol->mode == MPOL_INTERLEAVE) {
7238bccd85fSChristoph Lameter 			*policy = current->il_next;
7241da177e4SLinus Torvalds 		} else {
7251da177e4SLinus Torvalds 			err = -EINVAL;
7261da177e4SLinus Torvalds 			goto out;
7271da177e4SLinus Torvalds 		}
728bea904d5SLee Schermerhorn 	} else {
729bea904d5SLee Schermerhorn 		*policy = pol == &default_policy ? MPOL_DEFAULT :
730bea904d5SLee Schermerhorn 						pol->mode;
731bea904d5SLee Schermerhorn 		*policy |= pol->flags;
732bea904d5SLee Schermerhorn 	}
7331da177e4SLinus Torvalds 
7341da177e4SLinus Torvalds 	if (vma) {
7351da177e4SLinus Torvalds 		up_read(&current->mm->mmap_sem);
7361da177e4SLinus Torvalds 		vma = NULL;
7371da177e4SLinus Torvalds 	}
7381da177e4SLinus Torvalds 
7391da177e4SLinus Torvalds 	err = 0;
7408bccd85fSChristoph Lameter 	if (nmask)
741bea904d5SLee Schermerhorn 		get_policy_nodemask(pol, nmask);
7421da177e4SLinus Torvalds 
7431da177e4SLinus Torvalds  out:
74452cd3b07SLee Schermerhorn 	mpol_cond_put(pol);
7451da177e4SLinus Torvalds 	if (vma)
7461da177e4SLinus Torvalds 		up_read(&current->mm->mmap_sem);
7471da177e4SLinus Torvalds 	return err;
7481da177e4SLinus Torvalds }
7491da177e4SLinus Torvalds 
750b20a3503SChristoph Lameter #ifdef CONFIG_MIGRATION
7518bccd85fSChristoph Lameter /*
7526ce3c4c0SChristoph Lameter  * page migration
7536ce3c4c0SChristoph Lameter  */
754fc301289SChristoph Lameter static void migrate_page_add(struct page *page, struct list_head *pagelist,
755fc301289SChristoph Lameter 				unsigned long flags)
7566ce3c4c0SChristoph Lameter {
7576ce3c4c0SChristoph Lameter 	/*
758fc301289SChristoph Lameter 	 * Avoid migrating a page that is shared with others.
7596ce3c4c0SChristoph Lameter 	 */
760b20a3503SChristoph Lameter 	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1)
761b20a3503SChristoph Lameter 		isolate_lru_page(page, pagelist);
7626ce3c4c0SChristoph Lameter }
7636ce3c4c0SChristoph Lameter 
764742755a1SChristoph Lameter static struct page *new_node_page(struct page *page, unsigned long node, int **x)
76595a402c3SChristoph Lameter {
766769848c0SMel Gorman 	return alloc_pages_node(node, GFP_HIGHUSER_MOVABLE, 0);
76795a402c3SChristoph Lameter }
76895a402c3SChristoph Lameter 
7696ce3c4c0SChristoph Lameter /*
7707e2ab150SChristoph Lameter  * Migrate pages from one node to a target node.
7717e2ab150SChristoph Lameter  * Returns error or the number of pages not migrated.
7727e2ab150SChristoph Lameter  */
773dbcb0f19SAdrian Bunk static int migrate_to_node(struct mm_struct *mm, int source, int dest,
774dbcb0f19SAdrian Bunk 			   int flags)
7757e2ab150SChristoph Lameter {
7767e2ab150SChristoph Lameter 	nodemask_t nmask;
7777e2ab150SChristoph Lameter 	LIST_HEAD(pagelist);
7787e2ab150SChristoph Lameter 	int err = 0;
7797e2ab150SChristoph Lameter 
7807e2ab150SChristoph Lameter 	nodes_clear(nmask);
7817e2ab150SChristoph Lameter 	node_set(source, nmask);
7827e2ab150SChristoph Lameter 
7837e2ab150SChristoph Lameter 	check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nmask,
7847e2ab150SChristoph Lameter 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
7857e2ab150SChristoph Lameter 
7867e2ab150SChristoph Lameter 	if (!list_empty(&pagelist))
78795a402c3SChristoph Lameter 		err = migrate_pages(&pagelist, new_node_page, dest);
78895a402c3SChristoph Lameter 
7897e2ab150SChristoph Lameter 	return err;
7907e2ab150SChristoph Lameter }
7917e2ab150SChristoph Lameter 
7927e2ab150SChristoph Lameter /*
7937e2ab150SChristoph Lameter  * Move pages between the two nodesets so as to preserve the physical
7947e2ab150SChristoph Lameter  * layout as much as possible.
79539743889SChristoph Lameter  *
79639743889SChristoph Lameter  * Returns the number of page that could not be moved.
79739743889SChristoph Lameter  * Returns the number of pages that could not be moved.
79839743889SChristoph Lameter int do_migrate_pages(struct mm_struct *mm,
79939743889SChristoph Lameter 	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
80039743889SChristoph Lameter {
80139743889SChristoph Lameter 	LIST_HEAD(pagelist);
8027e2ab150SChristoph Lameter 	int busy = 0;
8037e2ab150SChristoph Lameter 	int err = 0;
8047e2ab150SChristoph Lameter 	nodemask_t tmp;
80539743889SChristoph Lameter 
80639743889SChristoph Lameter 	down_read(&mm->mmap_sem);
807d4984711SChristoph Lameter 
8087b2259b3SChristoph Lameter 	err = migrate_vmas(mm, from_nodes, to_nodes, flags);
8097b2259b3SChristoph Lameter 	if (err)
8107b2259b3SChristoph Lameter 		goto out;
8117b2259b3SChristoph Lameter 
8127e2ab150SChristoph Lameter /*
8137e2ab150SChristoph Lameter  * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
8147e2ab150SChristoph Lameter  * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
8157e2ab150SChristoph Lameter  * bit in 'tmp', and return that <source, dest> pair for migration.
8167e2ab150SChristoph Lameter  * The pair of nodemasks 'to' and 'from' define the map.
8177e2ab150SChristoph Lameter  *
8187e2ab150SChristoph Lameter  * If no pair of bits is found that way, fallback to picking some
8197e2ab150SChristoph Lameter  * pair of 'source' and 'dest' bits that are not the same.  If the
8207e2ab150SChristoph Lameter  * 'source' and 'dest' bits are the same, this represents a node
8217e2ab150SChristoph Lameter  * that will be migrating to itself, so no pages need move.
8227e2ab150SChristoph Lameter  *
8237e2ab150SChristoph Lameter  * If no bits are left in 'tmp', or if all remaining bits left
8247e2ab150SChristoph Lameter  * in 'tmp' correspond to the same bit in 'to', return false
8257e2ab150SChristoph Lameter  * (nothing left to migrate).
8267e2ab150SChristoph Lameter  *
8277e2ab150SChristoph Lameter  * This lets us pick a pair of nodes to migrate between, such that
8287e2ab150SChristoph Lameter  * if possible the dest node is not already occupied by some other
8297e2ab150SChristoph Lameter  * source node, minimizing the risk of overloading the memory on a
8307e2ab150SChristoph Lameter  * node that would happen if we migrated incoming memory to a node
8317e2ab150SChristoph Lameter  * before migrating outgoing memory source that same node.
8327e2ab150SChristoph Lameter  *
8337e2ab150SChristoph Lameter  * A single scan of tmp is sufficient.  As we go, we remember the
8347e2ab150SChristoph Lameter  * most recent <s, d> pair that moved (s != d).  If we find a pair
8357e2ab150SChristoph Lameter  * that not only moved, but what's better, moved to an empty slot
8367e2ab150SChristoph Lameter  * (d is not set in tmp), then we break out then, with that pair.
8377e2ab150SChristoph Lameter  * Otherwise when we finish scanning tmp, we at least have the
8387e2ab150SChristoph Lameter  * most recent <s, d> pair that moved.  If we get all the way through
8397e2ab150SChristoph Lameter  * the scan of tmp without finding any node that moved, much less
8407e2ab150SChristoph Lameter  * moved to an empty node, then there is nothing left worth migrating.
8417e2ab150SChristoph Lameter  */
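
/*
 * Worked example (illustrative): migrating from nodes {0,1,2} to {2,3,4}
 * remaps 0->2, 1->3 and 2->4.  The first scan skips 0->2 because node 2 is
 * still a pending source and picks 1->3, since node 3 is empty; subsequent
 * passes pick 2->4 and finally 0->2, so each transfer targets a node that
 * is no longer waiting to be emptied.
 */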
8427e2ab150SChristoph Lameter 
8437e2ab150SChristoph Lameter 	tmp = *from_nodes;
8447e2ab150SChristoph Lameter 	while (!nodes_empty(tmp)) {
8457e2ab150SChristoph Lameter 		int s,d;
8467e2ab150SChristoph Lameter 		int source = -1;
8477e2ab150SChristoph Lameter 		int dest = 0;
8487e2ab150SChristoph Lameter 
8497e2ab150SChristoph Lameter 		for_each_node_mask(s, tmp) {
8507e2ab150SChristoph Lameter 			d = node_remap(s, *from_nodes, *to_nodes);
8517e2ab150SChristoph Lameter 			if (s == d)
8527e2ab150SChristoph Lameter 				continue;
8537e2ab150SChristoph Lameter 
8547e2ab150SChristoph Lameter 			source = s;	/* Node moved. Memorize */
8557e2ab150SChristoph Lameter 			dest = d;
8567e2ab150SChristoph Lameter 
8577e2ab150SChristoph Lameter 			/* dest not in remaining from nodes? */
8587e2ab150SChristoph Lameter 			if (!node_isset(dest, tmp))
8597e2ab150SChristoph Lameter 				break;
8607e2ab150SChristoph Lameter 		}
8617e2ab150SChristoph Lameter 		if (source == -1)
8627e2ab150SChristoph Lameter 			break;
8637e2ab150SChristoph Lameter 
8647e2ab150SChristoph Lameter 		node_clear(source, tmp);
8657e2ab150SChristoph Lameter 		err = migrate_to_node(mm, source, dest, flags);
8667e2ab150SChristoph Lameter 		if (err > 0)
8677e2ab150SChristoph Lameter 			busy += err;
8687e2ab150SChristoph Lameter 		if (err < 0)
8697e2ab150SChristoph Lameter 			break;
87039743889SChristoph Lameter 	}
8717b2259b3SChristoph Lameter out:
87239743889SChristoph Lameter 	up_read(&mm->mmap_sem);
8737e2ab150SChristoph Lameter 	if (err < 0)
8747e2ab150SChristoph Lameter 		return err;
8757e2ab150SChristoph Lameter 	return busy;
876b20a3503SChristoph Lameter 
87739743889SChristoph Lameter }
87839743889SChristoph Lameter 
8793ad33b24SLee Schermerhorn /*
8803ad33b24SLee Schermerhorn  * Allocate a new page for page migration based on vma policy.
8813ad33b24SLee Schermerhorn  * Start assuming that page is mapped by vma pointed to by @private.
8823ad33b24SLee Schermerhorn  * Search forward from there, if not.  N.B., this assumes that the
8833ad33b24SLee Schermerhorn  * list of pages handed to migrate_pages()--which is how we get here--
8843ad33b24SLee Schermerhorn  * is in virtual address order.
8853ad33b24SLee Schermerhorn  */
886742755a1SChristoph Lameter static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
88795a402c3SChristoph Lameter {
88895a402c3SChristoph Lameter 	struct vm_area_struct *vma = (struct vm_area_struct *)private;
8893ad33b24SLee Schermerhorn 	unsigned long uninitialized_var(address);
89095a402c3SChristoph Lameter 
8913ad33b24SLee Schermerhorn 	while (vma) {
8923ad33b24SLee Schermerhorn 		address = page_address_in_vma(page, vma);
8933ad33b24SLee Schermerhorn 		if (address != -EFAULT)
8943ad33b24SLee Schermerhorn 			break;
8953ad33b24SLee Schermerhorn 		vma = vma->vm_next;
8963ad33b24SLee Schermerhorn 	}
8973ad33b24SLee Schermerhorn 
8983ad33b24SLee Schermerhorn 	/*
8993ad33b24SLee Schermerhorn 	 * if !vma, alloc_page_vma() will use task or system default policy
9003ad33b24SLee Schermerhorn 	 */
9013ad33b24SLee Schermerhorn 	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
90295a402c3SChristoph Lameter }
903b20a3503SChristoph Lameter #else
904b20a3503SChristoph Lameter 
905b20a3503SChristoph Lameter static void migrate_page_add(struct page *page, struct list_head *pagelist,
906b20a3503SChristoph Lameter 				unsigned long flags)
907b20a3503SChristoph Lameter {
908b20a3503SChristoph Lameter }
909b20a3503SChristoph Lameter 
910b20a3503SChristoph Lameter int do_migrate_pages(struct mm_struct *mm,
911b20a3503SChristoph Lameter 	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
912b20a3503SChristoph Lameter {
913b20a3503SChristoph Lameter 	return -ENOSYS;
914b20a3503SChristoph Lameter }
91595a402c3SChristoph Lameter 
91669939749SKeith Owens static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
91795a402c3SChristoph Lameter {
91895a402c3SChristoph Lameter 	return NULL;
91995a402c3SChristoph Lameter }
920b20a3503SChristoph Lameter #endif
921b20a3503SChristoph Lameter 
922dbcb0f19SAdrian Bunk static long do_mbind(unsigned long start, unsigned long len,
923028fec41SDavid Rientjes 		     unsigned short mode, unsigned short mode_flags,
924028fec41SDavid Rientjes 		     nodemask_t *nmask, unsigned long flags)
9256ce3c4c0SChristoph Lameter {
9266ce3c4c0SChristoph Lameter 	struct vm_area_struct *vma;
9276ce3c4c0SChristoph Lameter 	struct mm_struct *mm = current->mm;
9286ce3c4c0SChristoph Lameter 	struct mempolicy *new;
9296ce3c4c0SChristoph Lameter 	unsigned long end;
9306ce3c4c0SChristoph Lameter 	int err;
9316ce3c4c0SChristoph Lameter 	LIST_HEAD(pagelist);
9326ce3c4c0SChristoph Lameter 
933a3b51e01SDavid Rientjes 	if (flags & ~(unsigned long)(MPOL_MF_STRICT |
9346ce3c4c0SChristoph Lameter 				     MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
9356ce3c4c0SChristoph Lameter 		return -EINVAL;
93674c00241SChristoph Lameter 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
9376ce3c4c0SChristoph Lameter 		return -EPERM;
9386ce3c4c0SChristoph Lameter 
9396ce3c4c0SChristoph Lameter 	if (start & ~PAGE_MASK)
9406ce3c4c0SChristoph Lameter 		return -EINVAL;
9416ce3c4c0SChristoph Lameter 
9426ce3c4c0SChristoph Lameter 	if (mode == MPOL_DEFAULT)
9436ce3c4c0SChristoph Lameter 		flags &= ~MPOL_MF_STRICT;
9446ce3c4c0SChristoph Lameter 
9456ce3c4c0SChristoph Lameter 	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
9466ce3c4c0SChristoph Lameter 	end = start + len;
9476ce3c4c0SChristoph Lameter 
9486ce3c4c0SChristoph Lameter 	if (end < start)
9496ce3c4c0SChristoph Lameter 		return -EINVAL;
9506ce3c4c0SChristoph Lameter 	if (end == start)
9516ce3c4c0SChristoph Lameter 		return 0;
9526ce3c4c0SChristoph Lameter 
953028fec41SDavid Rientjes 	new = mpol_new(mode, mode_flags, nmask);
9546ce3c4c0SChristoph Lameter 	if (IS_ERR(new))
9556ce3c4c0SChristoph Lameter 		return PTR_ERR(new);
9566ce3c4c0SChristoph Lameter 
9576ce3c4c0SChristoph Lameter 	/*
9586ce3c4c0SChristoph Lameter 	 * If we are using the default policy then operation
9596ce3c4c0SChristoph Lameter 	 * on discontinuous address spaces is okay after all
9606ce3c4c0SChristoph Lameter 	 */
9616ce3c4c0SChristoph Lameter 	if (!new)
9626ce3c4c0SChristoph Lameter 		flags |= MPOL_MF_DISCONTIG_OK;
9636ce3c4c0SChristoph Lameter 
964028fec41SDavid Rientjes 	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
965028fec41SDavid Rientjes 		 start, start + len, mode, mode_flags,
966028fec41SDavid Rientjes 		 nmask ? nodes_addr(*nmask)[0] : -1);
9676ce3c4c0SChristoph Lameter 
9686ce3c4c0SChristoph Lameter 	down_write(&mm->mmap_sem);
9696ce3c4c0SChristoph Lameter 	vma = check_range(mm, start, end, nmask,
9706ce3c4c0SChristoph Lameter 			  flags | MPOL_MF_INVERT, &pagelist);
9716ce3c4c0SChristoph Lameter 
9726ce3c4c0SChristoph Lameter 	err = PTR_ERR(vma);
9736ce3c4c0SChristoph Lameter 	if (!IS_ERR(vma)) {
9746ce3c4c0SChristoph Lameter 		int nr_failed = 0;
9756ce3c4c0SChristoph Lameter 
9766ce3c4c0SChristoph Lameter 		err = mbind_range(vma, start, end, new);
9777e2ab150SChristoph Lameter 
9786ce3c4c0SChristoph Lameter 		if (!list_empty(&pagelist))
97995a402c3SChristoph Lameter 			nr_failed = migrate_pages(&pagelist, new_vma_page,
98095a402c3SChristoph Lameter 						(unsigned long)vma);
9816ce3c4c0SChristoph Lameter 
9826ce3c4c0SChristoph Lameter 		if (!err && nr_failed && (flags & MPOL_MF_STRICT))
9836ce3c4c0SChristoph Lameter 			err = -EIO;
9846ce3c4c0SChristoph Lameter 	}
985b20a3503SChristoph Lameter 
9866ce3c4c0SChristoph Lameter 	up_write(&mm->mmap_sem);
987f0be3d32SLee Schermerhorn 	mpol_put(new);
9886ce3c4c0SChristoph Lameter 	return err;
9896ce3c4c0SChristoph Lameter }
9906ce3c4c0SChristoph Lameter 
99139743889SChristoph Lameter /*
9928bccd85fSChristoph Lameter  * User space interface with variable sized bitmaps for nodelists.
9938bccd85fSChristoph Lameter  */
9948bccd85fSChristoph Lameter 
9958bccd85fSChristoph Lameter /* Copy a node mask from user space. */
99639743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
9978bccd85fSChristoph Lameter 		     unsigned long maxnode)
9988bccd85fSChristoph Lameter {
9998bccd85fSChristoph Lameter 	unsigned long k;
10008bccd85fSChristoph Lameter 	unsigned long nlongs;
10018bccd85fSChristoph Lameter 	unsigned long endmask;
10028bccd85fSChristoph Lameter 
10038bccd85fSChristoph Lameter 	--maxnode;
10048bccd85fSChristoph Lameter 	nodes_clear(*nodes);
10058bccd85fSChristoph Lameter 	if (maxnode == 0 || !nmask)
10068bccd85fSChristoph Lameter 		return 0;
1007a9c930baSAndi Kleen 	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1008636f13c1SChris Wright 		return -EINVAL;
10098bccd85fSChristoph Lameter 
10108bccd85fSChristoph Lameter 	nlongs = BITS_TO_LONGS(maxnode);
10118bccd85fSChristoph Lameter 	if ((maxnode % BITS_PER_LONG) == 0)
10128bccd85fSChristoph Lameter 		endmask = ~0UL;
10138bccd85fSChristoph Lameter 	else
10148bccd85fSChristoph Lameter 		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
10158bccd85fSChristoph Lameter 
10168bccd85fSChristoph Lameter 	/* If the user specified more nodes than supported, just check
10178bccd85fSChristoph Lameter 	   that the unsupported part is all zero. */
10188bccd85fSChristoph Lameter 	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
10198bccd85fSChristoph Lameter 		if (nlongs > PAGE_SIZE/sizeof(long))
10208bccd85fSChristoph Lameter 			return -EINVAL;
10218bccd85fSChristoph Lameter 		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
10228bccd85fSChristoph Lameter 			unsigned long t;
10238bccd85fSChristoph Lameter 			if (get_user(t, nmask + k))
10248bccd85fSChristoph Lameter 				return -EFAULT;
10258bccd85fSChristoph Lameter 			if (k == nlongs - 1) {
10268bccd85fSChristoph Lameter 				if (t & endmask)
10278bccd85fSChristoph Lameter 					return -EINVAL;
10288bccd85fSChristoph Lameter 			} else if (t)
10298bccd85fSChristoph Lameter 				return -EINVAL;
10308bccd85fSChristoph Lameter 		}
10318bccd85fSChristoph Lameter 		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
10328bccd85fSChristoph Lameter 		endmask = ~0UL;
10338bccd85fSChristoph Lameter 	}
10348bccd85fSChristoph Lameter 
10358bccd85fSChristoph Lameter 	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
10368bccd85fSChristoph Lameter 		return -EFAULT;
10378bccd85fSChristoph Lameter 	nodes_addr(*nodes)[nlongs-1] &= endmask;
10388bccd85fSChristoph Lameter 	return 0;
10398bccd85fSChristoph Lameter }
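
/*
 * Worked example (illustrative, 64-bit kernel): a caller passing
 * maxnode == 70 ends up with maxnode = 69 after the decrement, so
 * nlongs = 2 and endmask = 0x1f; only bits 64-68 of the second word are
 * considered part of the mask.  With MAX_NUMNODES == 64 that second word
 * must be clear within endmask, otherwise the call fails with -EINVAL.
 */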
10408bccd85fSChristoph Lameter 
10418bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */
10428bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
10438bccd85fSChristoph Lameter 			      nodemask_t *nodes)
10448bccd85fSChristoph Lameter {
10458bccd85fSChristoph Lameter 	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
10468bccd85fSChristoph Lameter 	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
10478bccd85fSChristoph Lameter 
10488bccd85fSChristoph Lameter 	if (copy > nbytes) {
10498bccd85fSChristoph Lameter 		if (copy > PAGE_SIZE)
10508bccd85fSChristoph Lameter 			return -EINVAL;
10518bccd85fSChristoph Lameter 		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
10528bccd85fSChristoph Lameter 			return -EFAULT;
10538bccd85fSChristoph Lameter 		copy = nbytes;
10548bccd85fSChristoph Lameter 	}
10558bccd85fSChristoph Lameter 	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
10568bccd85fSChristoph Lameter }
10578bccd85fSChristoph Lameter 
10588bccd85fSChristoph Lameter asmlinkage long sys_mbind(unsigned long start, unsigned long len,
10598bccd85fSChristoph Lameter 			unsigned long mode,
10608bccd85fSChristoph Lameter 			unsigned long __user *nmask, unsigned long maxnode,
10618bccd85fSChristoph Lameter 			unsigned flags)
10628bccd85fSChristoph Lameter {
10638bccd85fSChristoph Lameter 	nodemask_t nodes;
10648bccd85fSChristoph Lameter 	int err;
1065028fec41SDavid Rientjes 	unsigned short mode_flags;
10668bccd85fSChristoph Lameter 
1067028fec41SDavid Rientjes 	mode_flags = mode & MPOL_MODE_FLAGS;
1068028fec41SDavid Rientjes 	mode &= ~MPOL_MODE_FLAGS;
1069a3b51e01SDavid Rientjes 	if (mode >= MPOL_MAX)
1070a3b51e01SDavid Rientjes 		return -EINVAL;
10714c50bc01SDavid Rientjes 	if ((mode_flags & MPOL_F_STATIC_NODES) &&
10724c50bc01SDavid Rientjes 	    (mode_flags & MPOL_F_RELATIVE_NODES))
10734c50bc01SDavid Rientjes 		return -EINVAL;
10748bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
10758bccd85fSChristoph Lameter 	if (err)
10768bccd85fSChristoph Lameter 		return err;
1077028fec41SDavid Rientjes 	return do_mbind(start, len, mode, mode_flags, &nodes, flags);
10788bccd85fSChristoph Lameter }
10798bccd85fSChristoph Lameter 
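/*
 * Editor's illustration (hypothetical user-space snippet, not kernel code):
 * sys_mbind() is normally reached through the mbind(2) wrapper declared in
 * <numaif.h>, e.g. to restrict an anonymous mapping to node 0:
 *
 *	size_t len = 4UL << 20;
 *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	unsigned long nodemask = 1UL << 0;		// node 0 only
 *	if (mbind(buf, len, MPOL_BIND, &nodemask,
 *		  sizeof(nodemask) * 8, MPOL_MF_STRICT))
 *		perror("mbind");
 */
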
10808bccd85fSChristoph Lameter /* Set the process memory policy */
10818bccd85fSChristoph Lameter asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
10828bccd85fSChristoph Lameter 		unsigned long maxnode)
10838bccd85fSChristoph Lameter {
10848bccd85fSChristoph Lameter 	int err;
10858bccd85fSChristoph Lameter 	nodemask_t nodes;
1086028fec41SDavid Rientjes 	unsigned short flags;
10878bccd85fSChristoph Lameter 
1088028fec41SDavid Rientjes 	flags = mode & MPOL_MODE_FLAGS;
1089028fec41SDavid Rientjes 	mode &= ~MPOL_MODE_FLAGS;
1090028fec41SDavid Rientjes 	if ((unsigned int)mode >= MPOL_MAX)
10918bccd85fSChristoph Lameter 		return -EINVAL;
10924c50bc01SDavid Rientjes 	if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
10934c50bc01SDavid Rientjes 		return -EINVAL;
10948bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
10958bccd85fSChristoph Lameter 	if (err)
10968bccd85fSChristoph Lameter 		return err;
1097028fec41SDavid Rientjes 	return do_set_mempolicy(mode, flags, &nodes);
10988bccd85fSChristoph Lameter }
10998bccd85fSChristoph Lameter 
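/*
 * Editor's illustration (hypothetical user-space snippet): the matching
 * wrapper for the process-wide policy, here interleaving future allocations
 * across nodes 0 and 1:
 *
 *	unsigned long nodemask = (1UL << 0) | (1UL << 1);
 *	if (set_mempolicy(MPOL_INTERLEAVE, &nodemask, sizeof(nodemask) * 8))
 *		perror("set_mempolicy");
 */
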
110039743889SChristoph Lameter asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
110139743889SChristoph Lameter 		const unsigned long __user *old_nodes,
110239743889SChristoph Lameter 		const unsigned long __user *new_nodes)
110339743889SChristoph Lameter {
110439743889SChristoph Lameter 	struct mm_struct *mm;
110539743889SChristoph Lameter 	struct task_struct *task;
110639743889SChristoph Lameter 	nodemask_t old;
110739743889SChristoph Lameter 	nodemask_t new;
110839743889SChristoph Lameter 	nodemask_t task_nodes;
110939743889SChristoph Lameter 	int err;
111039743889SChristoph Lameter 
111139743889SChristoph Lameter 	err = get_nodes(&old, old_nodes, maxnode);
111239743889SChristoph Lameter 	if (err)
111339743889SChristoph Lameter 		return err;
111439743889SChristoph Lameter 
111539743889SChristoph Lameter 	err = get_nodes(&new, new_nodes, maxnode);
111639743889SChristoph Lameter 	if (err)
111739743889SChristoph Lameter 		return err;
111839743889SChristoph Lameter 
111939743889SChristoph Lameter 	/* Find the mm_struct */
112039743889SChristoph Lameter 	read_lock(&tasklist_lock);
1121228ebcbeSPavel Emelyanov 	task = pid ? find_task_by_vpid(pid) : current;
112239743889SChristoph Lameter 	if (!task) {
112339743889SChristoph Lameter 		read_unlock(&tasklist_lock);
112439743889SChristoph Lameter 		return -ESRCH;
112539743889SChristoph Lameter 	}
112639743889SChristoph Lameter 	mm = get_task_mm(task);
112739743889SChristoph Lameter 	read_unlock(&tasklist_lock);
112839743889SChristoph Lameter 
112939743889SChristoph Lameter 	if (!mm)
113039743889SChristoph Lameter 		return -EINVAL;
113139743889SChristoph Lameter 
113239743889SChristoph Lameter 	/*
113339743889SChristoph Lameter 	 * Check if this process has the right to modify the specified
113439743889SChristoph Lameter 	 * process. The right exists if the process has administrative
11357f927fccSAlexey Dobriyan 	 * capabilities, superuser privileges or the same
113639743889SChristoph Lameter 	 * userid as the target process.
113739743889SChristoph Lameter 	 */
113839743889SChristoph Lameter 	if ((current->euid != task->suid) && (current->euid != task->uid) &&
113939743889SChristoph Lameter 	    (current->uid != task->suid) && (current->uid != task->uid) &&
114074c00241SChristoph Lameter 	    !capable(CAP_SYS_NICE)) {
114139743889SChristoph Lameter 		err = -EPERM;
114239743889SChristoph Lameter 		goto out;
114339743889SChristoph Lameter 	}
114439743889SChristoph Lameter 
114539743889SChristoph Lameter 	task_nodes = cpuset_mems_allowed(task);
114639743889SChristoph Lameter 	/* Is the user allowed to access the target nodes? */
114774c00241SChristoph Lameter 	if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_NICE)) {
114839743889SChristoph Lameter 		err = -EPERM;
114939743889SChristoph Lameter 		goto out;
115039743889SChristoph Lameter 	}
115139743889SChristoph Lameter 
115237b07e41SLee Schermerhorn 	if (!nodes_subset(new, node_states[N_HIGH_MEMORY])) {
11533b42d28bSChristoph Lameter 		err = -EINVAL;
11543b42d28bSChristoph Lameter 		goto out;
11553b42d28bSChristoph Lameter 	}
11563b42d28bSChristoph Lameter 
115786c3a764SDavid Quigley 	err = security_task_movememory(task);
115886c3a764SDavid Quigley 	if (err)
115986c3a764SDavid Quigley 		goto out;
116086c3a764SDavid Quigley 
1161511030bcSChristoph Lameter 	err = do_migrate_pages(mm, &old, &new,
116274c00241SChristoph Lameter 		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
116339743889SChristoph Lameter out:
116439743889SChristoph Lameter 	mmput(mm);
116539743889SChristoph Lameter 	return err;
116639743889SChristoph Lameter }
116739743889SChristoph Lameter 
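/*
 * Editor's illustration (hypothetical user-space snippet): moving another
 * task's pages from node 0 to node 1 through the migrate_pages(2) wrapper;
 * the caller needs CAP_SYS_NICE or matching uids, as checked above:
 *
 *	unsigned long old_nodes = 1UL << 0;
 *	unsigned long new_nodes = 1UL << 1;
 *	if (migrate_pages(pid, sizeof(old_nodes) * 8,
 *			  &old_nodes, &new_nodes) < 0)
 *		perror("migrate_pages");
 */
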
116839743889SChristoph Lameter 
11698bccd85fSChristoph Lameter /* Retrieve NUMA policy */
11708bccd85fSChristoph Lameter asmlinkage long sys_get_mempolicy(int __user *policy,
11718bccd85fSChristoph Lameter 				unsigned long __user *nmask,
11728bccd85fSChristoph Lameter 				unsigned long maxnode,
11738bccd85fSChristoph Lameter 				unsigned long addr, unsigned long flags)
11748bccd85fSChristoph Lameter {
1175dbcb0f19SAdrian Bunk 	int err;
1176dbcb0f19SAdrian Bunk 	int uninitialized_var(pval);
11778bccd85fSChristoph Lameter 	nodemask_t nodes;
11788bccd85fSChristoph Lameter 
11798bccd85fSChristoph Lameter 	if (nmask != NULL && maxnode < MAX_NUMNODES)
11808bccd85fSChristoph Lameter 		return -EINVAL;
11818bccd85fSChristoph Lameter 
11828bccd85fSChristoph Lameter 	err = do_get_mempolicy(&pval, &nodes, addr, flags);
11838bccd85fSChristoph Lameter 
11848bccd85fSChristoph Lameter 	if (err)
11858bccd85fSChristoph Lameter 		return err;
11868bccd85fSChristoph Lameter 
11878bccd85fSChristoph Lameter 	if (policy && put_user(pval, policy))
11888bccd85fSChristoph Lameter 		return -EFAULT;
11898bccd85fSChristoph Lameter 
11908bccd85fSChristoph Lameter 	if (nmask)
11918bccd85fSChristoph Lameter 		err = copy_nodes_to_user(nmask, maxnode, &nodes);
11928bccd85fSChristoph Lameter 
11938bccd85fSChristoph Lameter 	return err;
11948bccd85fSChristoph Lameter }
11958bccd85fSChristoph Lameter 
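/*
 * Editor's illustration (hypothetical user-space snippet): querying the
 * policy in effect at an address.  Note the check above: when a nodemask is
 * requested, maxnode must cover MAX_NUMNODES of the running kernel, so a
 * single unsigned long only suffices if MAX_NUMNODES <= 64.
 *
 *	int mode;
 *	unsigned long nodemask;
 *	if (get_mempolicy(&mode, &nodemask, sizeof(nodemask) * 8,
 *			  buf, MPOL_F_ADDR))
 *		perror("get_mempolicy");
 */
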
11961da177e4SLinus Torvalds #ifdef CONFIG_COMPAT
11971da177e4SLinus Torvalds 
11981da177e4SLinus Torvalds asmlinkage long compat_sys_get_mempolicy(int __user *policy,
11991da177e4SLinus Torvalds 				     compat_ulong_t __user *nmask,
12001da177e4SLinus Torvalds 				     compat_ulong_t maxnode,
12011da177e4SLinus Torvalds 				     compat_ulong_t addr, compat_ulong_t flags)
12021da177e4SLinus Torvalds {
12031da177e4SLinus Torvalds 	long err;
12041da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
12051da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
12061da177e4SLinus Torvalds 	DECLARE_BITMAP(bm, MAX_NUMNODES);
12071da177e4SLinus Torvalds 
12081da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
12091da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
12101da177e4SLinus Torvalds 
12111da177e4SLinus Torvalds 	if (nmask)
12121da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
12131da177e4SLinus Torvalds 
12141da177e4SLinus Torvalds 	err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
12151da177e4SLinus Torvalds 
12161da177e4SLinus Torvalds 	if (!err && nmask) {
12171da177e4SLinus Torvalds 		err = copy_from_user(bm, nm, alloc_size);
12181da177e4SLinus Torvalds 		/* ensure entire bitmap is zeroed */
12191da177e4SLinus Torvalds 		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
12201da177e4SLinus Torvalds 		err |= compat_put_bitmap(nmask, bm, nr_bits);
12211da177e4SLinus Torvalds 	}
12221da177e4SLinus Torvalds 
12231da177e4SLinus Torvalds 	return err;
12241da177e4SLinus Torvalds }
12251da177e4SLinus Torvalds 
12261da177e4SLinus Torvalds asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
12271da177e4SLinus Torvalds 				     compat_ulong_t maxnode)
12281da177e4SLinus Torvalds {
12291da177e4SLinus Torvalds 	long err = 0;
12301da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
12311da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
12321da177e4SLinus Torvalds 	DECLARE_BITMAP(bm, MAX_NUMNODES);
12331da177e4SLinus Torvalds 
12341da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
12351da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
12361da177e4SLinus Torvalds 
12371da177e4SLinus Torvalds 	if (nmask) {
12381da177e4SLinus Torvalds 		err = compat_get_bitmap(bm, nmask, nr_bits);
12391da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
12401da177e4SLinus Torvalds 		err |= copy_to_user(nm, bm, alloc_size);
12411da177e4SLinus Torvalds 	}
12421da177e4SLinus Torvalds 
12431da177e4SLinus Torvalds 	if (err)
12441da177e4SLinus Torvalds 		return -EFAULT;
12451da177e4SLinus Torvalds 
12461da177e4SLinus Torvalds 	return sys_set_mempolicy(mode, nm, nr_bits+1);
12471da177e4SLinus Torvalds }
12481da177e4SLinus Torvalds 
12491da177e4SLinus Torvalds asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
12501da177e4SLinus Torvalds 			     compat_ulong_t mode, compat_ulong_t __user *nmask,
12511da177e4SLinus Torvalds 			     compat_ulong_t maxnode, compat_ulong_t flags)
12521da177e4SLinus Torvalds {
12531da177e4SLinus Torvalds 	long err = 0;
12541da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
12551da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
1256dfcd3c0dSAndi Kleen 	nodemask_t bm;
12571da177e4SLinus Torvalds 
12581da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
12591da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
12601da177e4SLinus Torvalds 
12611da177e4SLinus Torvalds 	if (nmask) {
1262dfcd3c0dSAndi Kleen 		err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
12631da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
1264dfcd3c0dSAndi Kleen 		err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
12651da177e4SLinus Torvalds 	}
12661da177e4SLinus Torvalds 
12671da177e4SLinus Torvalds 	if (err)
12681da177e4SLinus Torvalds 		return -EFAULT;
12691da177e4SLinus Torvalds 
12701da177e4SLinus Torvalds 	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
12711da177e4SLinus Torvalds }
12721da177e4SLinus Torvalds 
12731da177e4SLinus Torvalds #endif
12741da177e4SLinus Torvalds 
1275480eccf9SLee Schermerhorn /*
1276480eccf9SLee Schermerhorn  * get_vma_policy(@task, @vma, @addr)
1277480eccf9SLee Schermerhorn  * @task - task for fallback if vma policy == default
1278480eccf9SLee Schermerhorn  * @vma   - virtual memory area whose policy is sought
1279480eccf9SLee Schermerhorn  * @addr  - address in @vma for shared policy lookup
1280480eccf9SLee Schermerhorn  *
1281480eccf9SLee Schermerhorn  * Returns effective policy for a VMA at specified address.
1282480eccf9SLee Schermerhorn  * Falls back to @task or system default policy, as necessary.
128352cd3b07SLee Schermerhorn  * Current or other task's task mempolicy and non-shared vma policies
128452cd3b07SLee Schermerhorn  * are protected by the task's mmap_sem, which must be held for read by
128552cd3b07SLee Schermerhorn  * the caller.
128652cd3b07SLee Schermerhorn  * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
128752cd3b07SLee Schermerhorn  * count--added by the get_policy() vm_op, as appropriate--to protect against
128852cd3b07SLee Schermerhorn  * freeing by another task.  It is the caller's responsibility to free the
128952cd3b07SLee Schermerhorn  * extra reference for shared policies.
1290480eccf9SLee Schermerhorn  */
129148fce342SChristoph Lameter static struct mempolicy *get_vma_policy(struct task_struct *task,
129248fce342SChristoph Lameter 		struct vm_area_struct *vma, unsigned long addr)
12931da177e4SLinus Torvalds {
12946e21c8f1SChristoph Lameter 	struct mempolicy *pol = task->mempolicy;
12951da177e4SLinus Torvalds 
12961da177e4SLinus Torvalds 	if (vma) {
1297480eccf9SLee Schermerhorn 		if (vma->vm_ops && vma->vm_ops->get_policy) {
1298ae4d8c16SLee Schermerhorn 			struct mempolicy *vpol = vma->vm_ops->get_policy(vma,
1299ae4d8c16SLee Schermerhorn 									addr);
1300ae4d8c16SLee Schermerhorn 			if (vpol)
1301ae4d8c16SLee Schermerhorn 				pol = vpol;
1302bea904d5SLee Schermerhorn 		} else if (vma->vm_policy)
13031da177e4SLinus Torvalds 			pol = vma->vm_policy;
13041da177e4SLinus Torvalds 	}
13051da177e4SLinus Torvalds 	if (!pol)
13061da177e4SLinus Torvalds 		pol = &default_policy;
13071da177e4SLinus Torvalds 	return pol;
13081da177e4SLinus Torvalds }
13091da177e4SLinus Torvalds 
131052cd3b07SLee Schermerhorn /*
131152cd3b07SLee Schermerhorn  * Return a nodemask representing a mempolicy for filtering nodes for
131252cd3b07SLee Schermerhorn  * page allocation
131352cd3b07SLee Schermerhorn  */
131452cd3b07SLee Schermerhorn static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
131519770b32SMel Gorman {
131619770b32SMel Gorman 	/* Lower zones don't get a nodemask applied for MPOL_BIND */
131745c4745aSLee Schermerhorn 	if (unlikely(policy->mode == MPOL_BIND) &&
131819770b32SMel Gorman 			gfp_zone(gfp) >= policy_zone &&
131919770b32SMel Gorman 			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
132019770b32SMel Gorman 		return &policy->v.nodes;
132119770b32SMel Gorman 
132219770b32SMel Gorman 	return NULL;
132319770b32SMel Gorman }
132419770b32SMel Gorman 
132552cd3b07SLee Schermerhorn /* Return a zonelist indicated by gfp for node representing a mempolicy */
132652cd3b07SLee Schermerhorn static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy)
13271da177e4SLinus Torvalds {
1328fc36b8d3SLee Schermerhorn 	int nd = numa_node_id();
13291da177e4SLinus Torvalds 
133045c4745aSLee Schermerhorn 	switch (policy->mode) {
13311da177e4SLinus Torvalds 	case MPOL_PREFERRED:
1332fc36b8d3SLee Schermerhorn 		if (!(policy->flags & MPOL_F_LOCAL))
13331da177e4SLinus Torvalds 			nd = policy->v.preferred_node;
13341da177e4SLinus Torvalds 		break;
13351da177e4SLinus Torvalds 	case MPOL_BIND:
133619770b32SMel Gorman 		/*
133752cd3b07SLee Schermerhorn 		 * Normally, MPOL_BIND allocations are node-local within the
133852cd3b07SLee Schermerhorn 		 * allowed nodemask.  However, if __GFP_THISNODE is set and the
133952cd3b07SLee Schermerhorn 		 * current node is part of the mask, we use the zonelist for
134052cd3b07SLee Schermerhorn 		 * the first node in the mask instead.
134119770b32SMel Gorman 		 */
134219770b32SMel Gorman 		if (unlikely(gfp & __GFP_THISNODE) &&
134319770b32SMel Gorman 				unlikely(!node_isset(nd, policy->v.nodes)))
134419770b32SMel Gorman 			nd = first_node(policy->v.nodes);
134519770b32SMel Gorman 		break;
13461da177e4SLinus Torvalds 	case MPOL_INTERLEAVE: /* should not happen */
13471da177e4SLinus Torvalds 		break;
13481da177e4SLinus Torvalds 	default:
13491da177e4SLinus Torvalds 		BUG();
13501da177e4SLinus Torvalds 	}
13510e88460dSMel Gorman 	return node_zonelist(nd, gfp);
13521da177e4SLinus Torvalds }
13531da177e4SLinus Torvalds 
13541da177e4SLinus Torvalds /* Do dynamic interleaving for a process */
13551da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy)
13561da177e4SLinus Torvalds {
13571da177e4SLinus Torvalds 	unsigned nid, next;
13581da177e4SLinus Torvalds 	struct task_struct *me = current;
13591da177e4SLinus Torvalds 
13601da177e4SLinus Torvalds 	nid = me->il_next;
1361dfcd3c0dSAndi Kleen 	next = next_node(nid, policy->v.nodes);
13621da177e4SLinus Torvalds 	if (next >= MAX_NUMNODES)
1363dfcd3c0dSAndi Kleen 		next = first_node(policy->v.nodes);
1364f5b087b5SDavid Rientjes 	if (next < MAX_NUMNODES)
13651da177e4SLinus Torvalds 		me->il_next = next;
13661da177e4SLinus Torvalds 	return nid;
13671da177e4SLinus Torvalds }
13681da177e4SLinus Torvalds 
1369dc85da15SChristoph Lameter /*
1370dc85da15SChristoph Lameter  * Depending on the memory policy provide a node from which to allocate the
1371dc85da15SChristoph Lameter  * next slab entry.
137252cd3b07SLee Schermerhorn  * @policy must be protected from freeing by the caller.  If @policy is
137352cd3b07SLee Schermerhorn  * the current task's mempolicy, this protection is implicit, as only the
137452cd3b07SLee Schermerhorn  * task can change its policy.  The system default policy requires no
137552cd3b07SLee Schermerhorn  * such protection.
1376dc85da15SChristoph Lameter  */
1377dc85da15SChristoph Lameter unsigned slab_node(struct mempolicy *policy)
1378dc85da15SChristoph Lameter {
1379fc36b8d3SLee Schermerhorn 	if (!policy || policy->flags & MPOL_F_LOCAL)
1380bea904d5SLee Schermerhorn 		return numa_node_id();
1381765c4507SChristoph Lameter 
1382bea904d5SLee Schermerhorn 	switch (policy->mode) {
1383bea904d5SLee Schermerhorn 	case MPOL_PREFERRED:
1384fc36b8d3SLee Schermerhorn 		/*
1385fc36b8d3SLee Schermerhorn 		 * handled MPOL_F_LOCAL above
1386fc36b8d3SLee Schermerhorn 		 */
1387bea904d5SLee Schermerhorn 		return policy->v.preferred_node;
1388bea904d5SLee Schermerhorn 
1389dc85da15SChristoph Lameter 	case MPOL_INTERLEAVE:
1390dc85da15SChristoph Lameter 		return interleave_nodes(policy);
1391dc85da15SChristoph Lameter 
1392dd1a239fSMel Gorman 	case MPOL_BIND: {
1393dc85da15SChristoph Lameter 		/*
1394dc85da15SChristoph Lameter 		 * Follow bind policy behavior and start allocation at the
1395dc85da15SChristoph Lameter 		 * first node.
1396dc85da15SChristoph Lameter 		 */
139719770b32SMel Gorman 		struct zonelist *zonelist;
139819770b32SMel Gorman 		struct zone *zone;
139919770b32SMel Gorman 		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
140019770b32SMel Gorman 		zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
140119770b32SMel Gorman 		(void)first_zones_zonelist(zonelist, highest_zoneidx,
140219770b32SMel Gorman 							&policy->v.nodes,
140319770b32SMel Gorman 							&zone);
140419770b32SMel Gorman 		return zone->node;
1405dd1a239fSMel Gorman 	}
1406dc85da15SChristoph Lameter 
1407dc85da15SChristoph Lameter 	default:
1408bea904d5SLee Schermerhorn 		BUG();
1409dc85da15SChristoph Lameter 	}
1410dc85da15SChristoph Lameter }
1411dc85da15SChristoph Lameter 
14121da177e4SLinus Torvalds /* Do static interleaving for a VMA with known offset. */
14131da177e4SLinus Torvalds static unsigned offset_il_node(struct mempolicy *pol,
14141da177e4SLinus Torvalds 		struct vm_area_struct *vma, unsigned long off)
14151da177e4SLinus Torvalds {
1416dfcd3c0dSAndi Kleen 	unsigned nnodes = nodes_weight(pol->v.nodes);
1417f5b087b5SDavid Rientjes 	unsigned target;
14181da177e4SLinus Torvalds 	int c;
14191da177e4SLinus Torvalds 	int nid = -1;
14201da177e4SLinus Torvalds 
1421f5b087b5SDavid Rientjes 	if (!nnodes)
1422f5b087b5SDavid Rientjes 		return numa_node_id();
1423f5b087b5SDavid Rientjes 	target = (unsigned int)off % nnodes;
14241da177e4SLinus Torvalds 	c = 0;
14251da177e4SLinus Torvalds 	do {
1426dfcd3c0dSAndi Kleen 		nid = next_node(nid, pol->v.nodes);
14271da177e4SLinus Torvalds 		c++;
14281da177e4SLinus Torvalds 	} while (c <= target);
14291da177e4SLinus Torvalds 	return nid;
14301da177e4SLinus Torvalds }
14311da177e4SLinus Torvalds 
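/*
 * Worked example (editor's addition): with pol->v.nodes = {1,3,5} and
 * off == 7, nnodes == 3 and target == 7 % 3 == 1, so the loop advances to
 * the second set node and the page at that offset lands on node 3.
 */
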
14325da7ca86SChristoph Lameter /* Determine a node number for interleave */
14335da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol,
14345da7ca86SChristoph Lameter 		 struct vm_area_struct *vma, unsigned long addr, int shift)
14355da7ca86SChristoph Lameter {
14365da7ca86SChristoph Lameter 	if (vma) {
14375da7ca86SChristoph Lameter 		unsigned long off;
14385da7ca86SChristoph Lameter 
14393b98b087SNishanth Aravamudan 		/*
14403b98b087SNishanth Aravamudan 		 * for small pages, there is no difference between
14413b98b087SNishanth Aravamudan 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
14423b98b087SNishanth Aravamudan 		 * for huge pages, since vm_pgoff is in units of small
14433b98b087SNishanth Aravamudan 		 * pages, we need to shift off the always 0 bits to get
14443b98b087SNishanth Aravamudan 		 * a useful offset.
14453b98b087SNishanth Aravamudan 		 */
14463b98b087SNishanth Aravamudan 		BUG_ON(shift < PAGE_SHIFT);
14473b98b087SNishanth Aravamudan 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
14485da7ca86SChristoph Lameter 		off += (addr - vma->vm_start) >> shift;
14495da7ca86SChristoph Lameter 		return offset_il_node(pol, vma, off);
14505da7ca86SChristoph Lameter 	} else
14515da7ca86SChristoph Lameter 		return interleave_nodes(pol);
14525da7ca86SChristoph Lameter }
14535da7ca86SChristoph Lameter 
145400ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS
1455480eccf9SLee Schermerhorn /*
1456480eccf9SLee Schermerhorn  * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
1457480eccf9SLee Schermerhorn  * @vma = virtual memory area whose policy is sought
1458480eccf9SLee Schermerhorn  * @addr = address in @vma for shared policy lookup and interleave policy
1459480eccf9SLee Schermerhorn  * @gfp_flags = for requested zone
146019770b32SMel Gorman  * @mpol = pointer to mempolicy pointer for reference counted mempolicy
146119770b32SMel Gorman  * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
1462480eccf9SLee Schermerhorn  *
146352cd3b07SLee Schermerhorn  * Returns a zonelist suitable for a huge page allocation and a pointer
146452cd3b07SLee Schermerhorn  * to the struct mempolicy for conditional unref after allocation.
146552cd3b07SLee Schermerhorn  * If the effective policy is 'BIND', returns a pointer to the mempolicy's
146652cd3b07SLee Schermerhorn  * @nodemask for filtering the zonelist.
1467480eccf9SLee Schermerhorn  */
1468396faf03SMel Gorman struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
146919770b32SMel Gorman 				gfp_t gfp_flags, struct mempolicy **mpol,
147019770b32SMel Gorman 				nodemask_t **nodemask)
14715da7ca86SChristoph Lameter {
1472480eccf9SLee Schermerhorn 	struct zonelist *zl;
14735da7ca86SChristoph Lameter 
147452cd3b07SLee Schermerhorn 	*mpol = get_vma_policy(current, vma, addr);
147519770b32SMel Gorman 	*nodemask = NULL;	/* assume !MPOL_BIND */
14765da7ca86SChristoph Lameter 
147752cd3b07SLee Schermerhorn 	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
147852cd3b07SLee Schermerhorn 		zl = node_zonelist(interleave_nid(*mpol, vma, addr,
147952cd3b07SLee Schermerhorn 						HPAGE_SHIFT), gfp_flags);
148052cd3b07SLee Schermerhorn 	} else {
148152cd3b07SLee Schermerhorn 		zl = policy_zonelist(gfp_flags, *mpol);
148252cd3b07SLee Schermerhorn 		if ((*mpol)->mode == MPOL_BIND)
148352cd3b07SLee Schermerhorn 			*nodemask = &(*mpol)->v.nodes;
1484480eccf9SLee Schermerhorn 	}
1485480eccf9SLee Schermerhorn 	return zl;
14865da7ca86SChristoph Lameter }
148700ac59adSChen, Kenneth W #endif
14885da7ca86SChristoph Lameter 
14891da177e4SLinus Torvalds /* Allocate a page under the interleave policy.
14901da177e4SLinus Torvalds    Separate path because it needs to do special accounting. */
1491662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1492662f3a0bSAndi Kleen 					unsigned nid)
14931da177e4SLinus Torvalds {
14941da177e4SLinus Torvalds 	struct zonelist *zl;
14951da177e4SLinus Torvalds 	struct page *page;
14961da177e4SLinus Torvalds 
14970e88460dSMel Gorman 	zl = node_zonelist(nid, gfp);
14981da177e4SLinus Torvalds 	page = __alloc_pages(gfp, order, zl);
1499dd1a239fSMel Gorman 	if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
1500ca889e6cSChristoph Lameter 		inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
15011da177e4SLinus Torvalds 	return page;
15021da177e4SLinus Torvalds }
15031da177e4SLinus Torvalds 
15041da177e4SLinus Torvalds /**
15051da177e4SLinus Torvalds  * 	alloc_page_vma	- Allocate a page for a VMA.
15061da177e4SLinus Torvalds  *
15071da177e4SLinus Torvalds  * 	@gfp:
15081da177e4SLinus Torvalds  *      %GFP_USER    user allocation.
15091da177e4SLinus Torvalds  *      %GFP_KERNEL  kernel allocations,
15101da177e4SLinus Torvalds  *      %GFP_HIGHMEM highmem/user allocations,
15111da177e4SLinus Torvalds  *      %GFP_FS      allocation should not call back into a file system.
15121da177e4SLinus Torvalds  *      %GFP_ATOMIC  don't sleep.
15131da177e4SLinus Torvalds  *
15141da177e4SLinus Torvalds  * 	@vma:  Pointer to VMA or NULL if not available.
15151da177e4SLinus Torvalds  *	@addr: Virtual Address of the allocation. Must be inside the VMA.
15161da177e4SLinus Torvalds  *
15171da177e4SLinus Torvalds  * 	This function allocates a page from the kernel page pool and applies
15181da177e4SLinus Torvalds  *	a NUMA policy associated with the VMA or the current process.
15191da177e4SLinus Torvalds  *	When VMA is not NULL caller must hold down_read on the mmap_sem of the
15201da177e4SLinus Torvalds  *	mm_struct of the VMA to prevent it from going away. Should be used for
15211da177e4SLinus Torvalds  *	all allocations for pages that will be mapped into
15221da177e4SLinus Torvalds  * 	user space. Returns NULL when no page can be allocated.
15231da177e4SLinus Torvalds  *
15241da177e4SLinus Torvalds  *	Should be called with the mmap_sem of the vma held.
15251da177e4SLinus Torvalds  */
15261da177e4SLinus Torvalds struct page *
1527dd0fc66fSAl Viro alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
15281da177e4SLinus Torvalds {
15296e21c8f1SChristoph Lameter 	struct mempolicy *pol = get_vma_policy(current, vma, addr);
1530480eccf9SLee Schermerhorn 	struct zonelist *zl;
15311da177e4SLinus Torvalds 
1532cf2a473cSPaul Jackson 	cpuset_update_task_memory_state();
15331da177e4SLinus Torvalds 
153445c4745aSLee Schermerhorn 	if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
15351da177e4SLinus Torvalds 		unsigned nid;
15365da7ca86SChristoph Lameter 
15375da7ca86SChristoph Lameter 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
153852cd3b07SLee Schermerhorn 		mpol_cond_put(pol);
15391da177e4SLinus Torvalds 		return alloc_page_interleave(gfp, 0, nid);
15401da177e4SLinus Torvalds 	}
154152cd3b07SLee Schermerhorn 	zl = policy_zonelist(gfp, pol);
154252cd3b07SLee Schermerhorn 	if (unlikely(mpol_needs_cond_ref(pol))) {
1543480eccf9SLee Schermerhorn 		/*
154452cd3b07SLee Schermerhorn 		 * slow path: ref counted shared policy
1545480eccf9SLee Schermerhorn 		 */
154619770b32SMel Gorman 		struct page *page =  __alloc_pages_nodemask(gfp, 0,
154752cd3b07SLee Schermerhorn 						zl, policy_nodemask(gfp, pol));
1548f0be3d32SLee Schermerhorn 		__mpol_put(pol);
1549480eccf9SLee Schermerhorn 		return page;
1550480eccf9SLee Schermerhorn 	}
1551480eccf9SLee Schermerhorn 	/*
1552480eccf9SLee Schermerhorn 	 * fast path:  default or task policy
1553480eccf9SLee Schermerhorn 	 */
155452cd3b07SLee Schermerhorn 	return __alloc_pages_nodemask(gfp, 0, zl, policy_nodemask(gfp, pol));
15551da177e4SLinus Torvalds }
15561da177e4SLinus Torvalds 
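/*
 * Editor's illustration (hypothetical kernel caller): a fault path that
 * already holds mmap_sem for read might use it along these lines:
 *
 *	struct page *page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
 *	if (!page)
 *		return VM_FAULT_OOM;
 */
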
15571da177e4SLinus Torvalds /**
15581da177e4SLinus Torvalds  * 	alloc_pages_current - Allocate pages.
15591da177e4SLinus Torvalds  *
15601da177e4SLinus Torvalds  *	@gfp:
15611da177e4SLinus Torvalds  *		%GFP_USER   user allocation,
15621da177e4SLinus Torvalds  *      	%GFP_KERNEL kernel allocation,
15631da177e4SLinus Torvalds  *      	%GFP_HIGHMEM highmem allocation,
15641da177e4SLinus Torvalds  *      	%GFP_FS     don't call back into a file system.
15651da177e4SLinus Torvalds  *      	%GFP_ATOMIC don't sleep.
15661da177e4SLinus Torvalds  *	@order: Power of two of allocation size in pages. 0 is a single page.
15671da177e4SLinus Torvalds  *
15681da177e4SLinus Torvalds  *	Allocate a page from the kernel page pool.  When not in
15691da177e4SLinus Torvalds  *	interrupt context, apply the current process' NUMA policy.
15701da177e4SLinus Torvalds  *	Returns NULL when no page can be allocated.
15711da177e4SLinus Torvalds  *
1572cf2a473cSPaul Jackson  *	Don't call cpuset_update_task_memory_state() unless
15731da177e4SLinus Torvalds  *	1) it's ok to take cpuset_sem (can WAIT), and
15741da177e4SLinus Torvalds  *	2) allocating for current task (not interrupt).
15751da177e4SLinus Torvalds  */
1576dd0fc66fSAl Viro struct page *alloc_pages_current(gfp_t gfp, unsigned order)
15771da177e4SLinus Torvalds {
15781da177e4SLinus Torvalds 	struct mempolicy *pol = current->mempolicy;
15791da177e4SLinus Torvalds 
15801da177e4SLinus Torvalds 	if ((gfp & __GFP_WAIT) && !in_interrupt())
1581cf2a473cSPaul Jackson 		cpuset_update_task_memory_state();
15829b819d20SChristoph Lameter 	if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
15831da177e4SLinus Torvalds 		pol = &default_policy;
158452cd3b07SLee Schermerhorn 
158552cd3b07SLee Schermerhorn 	/*
158652cd3b07SLee Schermerhorn 	 * No reference counting needed for current->mempolicy
158752cd3b07SLee Schermerhorn 	 * nor system default_policy
158852cd3b07SLee Schermerhorn 	 */
158945c4745aSLee Schermerhorn 	if (pol->mode == MPOL_INTERLEAVE)
15901da177e4SLinus Torvalds 		return alloc_page_interleave(gfp, order, interleave_nodes(pol));
159119770b32SMel Gorman 	return __alloc_pages_nodemask(gfp, order,
159252cd3b07SLee Schermerhorn 			policy_zonelist(gfp, pol), policy_nodemask(gfp, pol));
15931da177e4SLinus Torvalds }
15941da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_pages_current);
15951da177e4SLinus Torvalds 
15964225399aSPaul Jackson /*
1597846a16bfSLee Schermerhorn  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
15984225399aSPaul Jackson  * rebinds the mempolicy its copying by calling mpol_rebind_policy()
15994225399aSPaul Jackson  * with the mems_allowed returned by cpuset_mems_allowed().  This
16004225399aSPaul Jackson  * keeps mempolicies cpuset relative after its cpuset moves.  See
16014225399aSPaul Jackson  * further kernel/cpuset.c update_nodemask().
16024225399aSPaul Jackson  */
16034225399aSPaul Jackson 
1604846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */
1605846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old)
16061da177e4SLinus Torvalds {
16071da177e4SLinus Torvalds 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
16081da177e4SLinus Torvalds 
16091da177e4SLinus Torvalds 	if (!new)
16101da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
16114225399aSPaul Jackson 	if (current_cpuset_is_being_rebound()) {
16124225399aSPaul Jackson 		nodemask_t mems = cpuset_mems_allowed(current);
16134225399aSPaul Jackson 		mpol_rebind_policy(old, &mems);
16144225399aSPaul Jackson 	}
16151da177e4SLinus Torvalds 	*new = *old;
16161da177e4SLinus Torvalds 	atomic_set(&new->refcnt, 1);
16171da177e4SLinus Torvalds 	return new;
16181da177e4SLinus Torvalds }
16191da177e4SLinus Torvalds 
162052cd3b07SLee Schermerhorn /*
162152cd3b07SLee Schermerhorn  * If *frompol needs [has] an extra ref, copy *frompol to *tompol,
162252cd3b07SLee Schermerhorn  * eliminate the MPOL_F_* flags that require a conditional ref and
162352cd3b07SLee Schermerhorn  * [NOTE!!!] drop the extra ref.  It is not safe to reference *frompol
162452cd3b07SLee Schermerhorn  * directly after return; use the returned value.
162552cd3b07SLee Schermerhorn  *
162652cd3b07SLee Schermerhorn  * Allows use of a mempolicy for, e.g., multiple allocations with a single
162752cd3b07SLee Schermerhorn  * policy lookup, even if the policy needs/has extra ref on lookup.
162852cd3b07SLee Schermerhorn  * shmem_readahead needs this.
162952cd3b07SLee Schermerhorn  */
163052cd3b07SLee Schermerhorn struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
163152cd3b07SLee Schermerhorn 						struct mempolicy *frompol)
163252cd3b07SLee Schermerhorn {
163352cd3b07SLee Schermerhorn 	if (!mpol_needs_cond_ref(frompol))
163452cd3b07SLee Schermerhorn 		return frompol;
163552cd3b07SLee Schermerhorn 
163652cd3b07SLee Schermerhorn 	*tompol = *frompol;
163752cd3b07SLee Schermerhorn 	tompol->flags &= ~MPOL_F_SHARED;	/* copy doesn't need unref */
163852cd3b07SLee Schermerhorn 	__mpol_put(frompol);
163952cd3b07SLee Schermerhorn 	return tompol;
164052cd3b07SLee Schermerhorn }
164152cd3b07SLee Schermerhorn 
1642f5b087b5SDavid Rientjes static int mpol_match_intent(const struct mempolicy *a,
1643f5b087b5SDavid Rientjes 			     const struct mempolicy *b)
1644f5b087b5SDavid Rientjes {
1645f5b087b5SDavid Rientjes 	if (a->flags != b->flags)
1646f5b087b5SDavid Rientjes 		return 0;
1647f5b087b5SDavid Rientjes 	if (!mpol_store_user_nodemask(a))
1648f5b087b5SDavid Rientjes 		return 1;
1649f5b087b5SDavid Rientjes 	return nodes_equal(a->w.user_nodemask, b->w.user_nodemask);
1650f5b087b5SDavid Rientjes }
1651f5b087b5SDavid Rientjes 
16521da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */
16531da177e4SLinus Torvalds int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
16541da177e4SLinus Torvalds {
16551da177e4SLinus Torvalds 	if (!a || !b)
16561da177e4SLinus Torvalds 		return 0;
165745c4745aSLee Schermerhorn 	if (a->mode != b->mode)
16581da177e4SLinus Torvalds 		return 0;
165945c4745aSLee Schermerhorn 	if (a->mode != MPOL_DEFAULT && !mpol_match_intent(a, b))
1660f5b087b5SDavid Rientjes 		return 0;
166145c4745aSLee Schermerhorn 	switch (a->mode) {
166219770b32SMel Gorman 	case MPOL_BIND:
166319770b32SMel Gorman 		/* Fall through */
16641da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
1665dfcd3c0dSAndi Kleen 		return nodes_equal(a->v.nodes, b->v.nodes);
16661da177e4SLinus Torvalds 	case MPOL_PREFERRED:
1667fc36b8d3SLee Schermerhorn 		return a->v.preferred_node == b->v.preferred_node &&
1668fc36b8d3SLee Schermerhorn 			a->flags == b->flags;
16691da177e4SLinus Torvalds 	default:
16701da177e4SLinus Torvalds 		BUG();
16711da177e4SLinus Torvalds 		return 0;
16721da177e4SLinus Torvalds 	}
16731da177e4SLinus Torvalds }
16741da177e4SLinus Torvalds 
16751da177e4SLinus Torvalds /*
16761da177e4SLinus Torvalds  * Shared memory backing store policy support.
16771da177e4SLinus Torvalds  *
16781da177e4SLinus Torvalds  * Remember policies even when nobody has shared memory mapped.
16791da177e4SLinus Torvalds  * The policies are kept in Red-Black tree linked from the inode.
16801da177e4SLinus Torvalds  * They are protected by the sp->lock spinlock, which should be held
16811da177e4SLinus Torvalds  * for any accesses to the tree.
16821da177e4SLinus Torvalds  */
16831da177e4SLinus Torvalds 
16841da177e4SLinus Torvalds /* lookup first element intersecting start-end */
16851da177e4SLinus Torvalds /* Caller holds sp->lock */
16861da177e4SLinus Torvalds static struct sp_node *
16871da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
16881da177e4SLinus Torvalds {
16891da177e4SLinus Torvalds 	struct rb_node *n = sp->root.rb_node;
16901da177e4SLinus Torvalds 
16911da177e4SLinus Torvalds 	while (n) {
16921da177e4SLinus Torvalds 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
16931da177e4SLinus Torvalds 
16941da177e4SLinus Torvalds 		if (start >= p->end)
16951da177e4SLinus Torvalds 			n = n->rb_right;
16961da177e4SLinus Torvalds 		else if (end <= p->start)
16971da177e4SLinus Torvalds 			n = n->rb_left;
16981da177e4SLinus Torvalds 		else
16991da177e4SLinus Torvalds 			break;
17001da177e4SLinus Torvalds 	}
17011da177e4SLinus Torvalds 	if (!n)
17021da177e4SLinus Torvalds 		return NULL;
17031da177e4SLinus Torvalds 	for (;;) {
17041da177e4SLinus Torvalds 		struct sp_node *w = NULL;
17051da177e4SLinus Torvalds 		struct rb_node *prev = rb_prev(n);
17061da177e4SLinus Torvalds 		if (!prev)
17071da177e4SLinus Torvalds 			break;
17081da177e4SLinus Torvalds 		w = rb_entry(prev, struct sp_node, nd);
17091da177e4SLinus Torvalds 		if (w->end <= start)
17101da177e4SLinus Torvalds 			break;
17111da177e4SLinus Torvalds 		n = prev;
17121da177e4SLinus Torvalds 	}
17131da177e4SLinus Torvalds 	return rb_entry(n, struct sp_node, nd);
17141da177e4SLinus Torvalds }
17151da177e4SLinus Torvalds 
17161da177e4SLinus Torvalds /* Insert a new shared policy into the list. */
17171da177e4SLinus Torvalds /* Caller holds sp->lock */
17181da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new)
17191da177e4SLinus Torvalds {
17201da177e4SLinus Torvalds 	struct rb_node **p = &sp->root.rb_node;
17211da177e4SLinus Torvalds 	struct rb_node *parent = NULL;
17221da177e4SLinus Torvalds 	struct sp_node *nd;
17231da177e4SLinus Torvalds 
17241da177e4SLinus Torvalds 	while (*p) {
17251da177e4SLinus Torvalds 		parent = *p;
17261da177e4SLinus Torvalds 		nd = rb_entry(parent, struct sp_node, nd);
17271da177e4SLinus Torvalds 		if (new->start < nd->start)
17281da177e4SLinus Torvalds 			p = &(*p)->rb_left;
17291da177e4SLinus Torvalds 		else if (new->end > nd->end)
17301da177e4SLinus Torvalds 			p = &(*p)->rb_right;
17311da177e4SLinus Torvalds 		else
17321da177e4SLinus Torvalds 			BUG();
17331da177e4SLinus Torvalds 	}
17341da177e4SLinus Torvalds 	rb_link_node(&new->nd, parent, p);
17351da177e4SLinus Torvalds 	rb_insert_color(&new->nd, &sp->root);
1736140d5a49SPaul Mundt 	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
173745c4745aSLee Schermerhorn 		 new->policy ? new->policy->mode : 0);
17381da177e4SLinus Torvalds }
17391da177e4SLinus Torvalds 
17401da177e4SLinus Torvalds /* Find shared policy intersecting idx */
17411da177e4SLinus Torvalds struct mempolicy *
17421da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
17431da177e4SLinus Torvalds {
17441da177e4SLinus Torvalds 	struct mempolicy *pol = NULL;
17451da177e4SLinus Torvalds 	struct sp_node *sn;
17461da177e4SLinus Torvalds 
17471da177e4SLinus Torvalds 	if (!sp->root.rb_node)
17481da177e4SLinus Torvalds 		return NULL;
17491da177e4SLinus Torvalds 	spin_lock(&sp->lock);
17501da177e4SLinus Torvalds 	sn = sp_lookup(sp, idx, idx+1);
17511da177e4SLinus Torvalds 	if (sn) {
17521da177e4SLinus Torvalds 		mpol_get(sn->policy);
17531da177e4SLinus Torvalds 		pol = sn->policy;
17541da177e4SLinus Torvalds 	}
17551da177e4SLinus Torvalds 	spin_unlock(&sp->lock);
17561da177e4SLinus Torvalds 	return pol;
17571da177e4SLinus Torvalds }
17581da177e4SLinus Torvalds 
17591da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n)
17601da177e4SLinus Torvalds {
1761140d5a49SPaul Mundt 	pr_debug("deleting %lx-%lx\n", n->start, n->end);
17621da177e4SLinus Torvalds 	rb_erase(&n->nd, &sp->root);
1763f0be3d32SLee Schermerhorn 	mpol_put(n->policy);
17641da177e4SLinus Torvalds 	kmem_cache_free(sn_cache, n);
17651da177e4SLinus Torvalds }
17661da177e4SLinus Torvalds 
1767dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
1768dbcb0f19SAdrian Bunk 				struct mempolicy *pol)
17691da177e4SLinus Torvalds {
17701da177e4SLinus Torvalds 	struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
17711da177e4SLinus Torvalds 
17721da177e4SLinus Torvalds 	if (!n)
17731da177e4SLinus Torvalds 		return NULL;
17741da177e4SLinus Torvalds 	n->start = start;
17751da177e4SLinus Torvalds 	n->end = end;
17761da177e4SLinus Torvalds 	mpol_get(pol);
1777aab0b102SLee Schermerhorn 	pol->flags |= MPOL_F_SHARED;	/* for unref */
17781da177e4SLinus Torvalds 	n->policy = pol;
17791da177e4SLinus Torvalds 	return n;
17801da177e4SLinus Torvalds }
17811da177e4SLinus Torvalds 
17821da177e4SLinus Torvalds /* Replace a policy range. */
17831da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
17841da177e4SLinus Torvalds 				 unsigned long end, struct sp_node *new)
17851da177e4SLinus Torvalds {
17861da177e4SLinus Torvalds 	struct sp_node *n, *new2 = NULL;
17871da177e4SLinus Torvalds 
17881da177e4SLinus Torvalds restart:
17891da177e4SLinus Torvalds 	spin_lock(&sp->lock);
17901da177e4SLinus Torvalds 	n = sp_lookup(sp, start, end);
17911da177e4SLinus Torvalds 	/* Take care of old policies in the same range. */
17921da177e4SLinus Torvalds 	while (n && n->start < end) {
17931da177e4SLinus Torvalds 		struct rb_node *next = rb_next(&n->nd);
17941da177e4SLinus Torvalds 		if (n->start >= start) {
17951da177e4SLinus Torvalds 			if (n->end <= end)
17961da177e4SLinus Torvalds 				sp_delete(sp, n);
17971da177e4SLinus Torvalds 			else
17981da177e4SLinus Torvalds 				n->start = end;
17991da177e4SLinus Torvalds 		} else {
18001da177e4SLinus Torvalds 			/* Old policy spanning whole new range. */
18011da177e4SLinus Torvalds 			if (n->end > end) {
18021da177e4SLinus Torvalds 				if (!new2) {
18031da177e4SLinus Torvalds 					spin_unlock(&sp->lock);
18041da177e4SLinus Torvalds 					new2 = sp_alloc(end, n->end, n->policy);
18051da177e4SLinus Torvalds 					if (!new2)
18061da177e4SLinus Torvalds 						return -ENOMEM;
18071da177e4SLinus Torvalds 					goto restart;
18081da177e4SLinus Torvalds 				}
18091da177e4SLinus Torvalds 				n->end = start;
18101da177e4SLinus Torvalds 				sp_insert(sp, new2);
18111da177e4SLinus Torvalds 				new2 = NULL;
18121da177e4SLinus Torvalds 				break;
18131da177e4SLinus Torvalds 			} else
18141da177e4SLinus Torvalds 				n->end = start;
18151da177e4SLinus Torvalds 		}
18161da177e4SLinus Torvalds 		if (!next)
18171da177e4SLinus Torvalds 			break;
18181da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
18191da177e4SLinus Torvalds 	}
18201da177e4SLinus Torvalds 	if (new)
18211da177e4SLinus Torvalds 		sp_insert(sp, new);
18221da177e4SLinus Torvalds 	spin_unlock(&sp->lock);
18231da177e4SLinus Torvalds 	if (new2) {
1824f0be3d32SLee Schermerhorn 		mpol_put(new2->policy);
18251da177e4SLinus Torvalds 		kmem_cache_free(sn_cache, new2);
18261da177e4SLinus Torvalds 	}
18271da177e4SLinus Torvalds 	return 0;
18281da177e4SLinus Torvalds }
18291da177e4SLinus Torvalds 
1830a3b51e01SDavid Rientjes void mpol_shared_policy_init(struct shared_policy *info, unsigned short policy,
1831028fec41SDavid Rientjes 			unsigned short flags, nodemask_t *policy_nodes)
18327339ff83SRobin Holt {
18337339ff83SRobin Holt 	info->root = RB_ROOT;
18347339ff83SRobin Holt 	spin_lock_init(&info->lock);
18357339ff83SRobin Holt 
18367339ff83SRobin Holt 	if (policy != MPOL_DEFAULT) {
18377339ff83SRobin Holt 		struct mempolicy *newpol;
18387339ff83SRobin Holt 
1839bea904d5SLee Schermerhorn 		/* Falls back to NULL policy [MPOL_DEFAULT] on any error */
1840028fec41SDavid Rientjes 		newpol = mpol_new(policy, flags, policy_nodes);
18417339ff83SRobin Holt 		if (!IS_ERR(newpol)) {
18427339ff83SRobin Holt 			/* Create pseudo-vma that contains just the policy */
18437339ff83SRobin Holt 			struct vm_area_struct pvma;
18447339ff83SRobin Holt 
18457339ff83SRobin Holt 			memset(&pvma, 0, sizeof(struct vm_area_struct));
18467339ff83SRobin Holt 			/* Policy covers entire file */
18477339ff83SRobin Holt 			pvma.vm_end = TASK_SIZE;
18487339ff83SRobin Holt 			mpol_set_shared_policy(info, &pvma, newpol);
1849f0be3d32SLee Schermerhorn 			mpol_put(newpol);
18507339ff83SRobin Holt 		}
18517339ff83SRobin Holt 	}
18527339ff83SRobin Holt }
18537339ff83SRobin Holt 
18541da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info,
18551da177e4SLinus Torvalds 			struct vm_area_struct *vma, struct mempolicy *npol)
18561da177e4SLinus Torvalds {
18571da177e4SLinus Torvalds 	int err;
18581da177e4SLinus Torvalds 	struct sp_node *new = NULL;
18591da177e4SLinus Torvalds 	unsigned long sz = vma_pages(vma);
18601da177e4SLinus Torvalds 
1861028fec41SDavid Rientjes 	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
18621da177e4SLinus Torvalds 		 vma->vm_pgoff,
186345c4745aSLee Schermerhorn 		 sz, npol ? npol->mode : -1,
1864028fec41SDavid Rientjes 		 npol ? npol->flags : -1,
1865dfcd3c0dSAndi Kleen 		 npol ? nodes_addr(npol->v.nodes)[0] : -1);
18661da177e4SLinus Torvalds 
18671da177e4SLinus Torvalds 	if (npol) {
18681da177e4SLinus Torvalds 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
18691da177e4SLinus Torvalds 		if (!new)
18701da177e4SLinus Torvalds 			return -ENOMEM;
18711da177e4SLinus Torvalds 	}
18721da177e4SLinus Torvalds 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
18731da177e4SLinus Torvalds 	if (err && new)
18741da177e4SLinus Torvalds 		kmem_cache_free(sn_cache, new);
18751da177e4SLinus Torvalds 	return err;
18761da177e4SLinus Torvalds }
18771da177e4SLinus Torvalds 
18781da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */
18791da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p)
18801da177e4SLinus Torvalds {
18811da177e4SLinus Torvalds 	struct sp_node *n;
18821da177e4SLinus Torvalds 	struct rb_node *next;
18831da177e4SLinus Torvalds 
18841da177e4SLinus Torvalds 	if (!p->root.rb_node)
18851da177e4SLinus Torvalds 		return;
18861da177e4SLinus Torvalds 	spin_lock(&p->lock);
18871da177e4SLinus Torvalds 	next = rb_first(&p->root);
18881da177e4SLinus Torvalds 	while (next) {
18891da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
18901da177e4SLinus Torvalds 		next = rb_next(&n->nd);
189190c5029eSAndi Kleen 		rb_erase(&n->nd, &p->root);
1892f0be3d32SLee Schermerhorn 		mpol_put(n->policy);
18931da177e4SLinus Torvalds 		kmem_cache_free(sn_cache, n);
18941da177e4SLinus Torvalds 	}
18951da177e4SLinus Torvalds 	spin_unlock(&p->lock);
18961da177e4SLinus Torvalds }
18971da177e4SLinus Torvalds 
18981da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */
18991da177e4SLinus Torvalds void __init numa_policy_init(void)
19001da177e4SLinus Torvalds {
1901b71636e2SPaul Mundt 	nodemask_t interleave_nodes;
1902b71636e2SPaul Mundt 	unsigned long largest = 0;
1903b71636e2SPaul Mundt 	int nid, prefer = 0;
1904b71636e2SPaul Mundt 
19051da177e4SLinus Torvalds 	policy_cache = kmem_cache_create("numa_policy",
19061da177e4SLinus Torvalds 					 sizeof(struct mempolicy),
190720c2df83SPaul Mundt 					 0, SLAB_PANIC, NULL);
19081da177e4SLinus Torvalds 
19091da177e4SLinus Torvalds 	sn_cache = kmem_cache_create("shared_policy_node",
19101da177e4SLinus Torvalds 				     sizeof(struct sp_node),
191120c2df83SPaul Mundt 				     0, SLAB_PANIC, NULL);
19121da177e4SLinus Torvalds 
1913b71636e2SPaul Mundt 	/*
1914b71636e2SPaul Mundt 	 * Set interleaving policy for system init. Interleaving is only
1915b71636e2SPaul Mundt 	 * enabled across suitably sized nodes (default is >= 16MB), falling
1916b71636e2SPaul Mundt 	 * back to the largest node if they're all smaller.
1917b71636e2SPaul Mundt 	 */
1918b71636e2SPaul Mundt 	nodes_clear(interleave_nodes);
191956bbd65dSChristoph Lameter 	for_each_node_state(nid, N_HIGH_MEMORY) {
1920b71636e2SPaul Mundt 		unsigned long total_pages = node_present_pages(nid);
19211da177e4SLinus Torvalds 
1922b71636e2SPaul Mundt 		/* Preserve the largest node */
1923b71636e2SPaul Mundt 		if (largest < total_pages) {
1924b71636e2SPaul Mundt 			largest = total_pages;
1925b71636e2SPaul Mundt 			prefer = nid;
1926b71636e2SPaul Mundt 		}
1927b71636e2SPaul Mundt 
1928b71636e2SPaul Mundt 		/* Interleave this node? */
1929b71636e2SPaul Mundt 		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
1930b71636e2SPaul Mundt 			node_set(nid, interleave_nodes);
1931b71636e2SPaul Mundt 	}
1932b71636e2SPaul Mundt 
1933b71636e2SPaul Mundt 	/* All too small, use the largest */
1934b71636e2SPaul Mundt 	if (unlikely(nodes_empty(interleave_nodes)))
1935b71636e2SPaul Mundt 		node_set(prefer, interleave_nodes);
1936b71636e2SPaul Mundt 
1937028fec41SDavid Rientjes 	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
19381da177e4SLinus Torvalds 		printk(KERN_WARNING "numa_policy_init: interleaving failed\n");
19391da177e4SLinus Torvalds }
19401da177e4SLinus Torvalds 
19418bccd85fSChristoph Lameter /* Reset policy of current process to default */
19421da177e4SLinus Torvalds void numa_default_policy(void)
19431da177e4SLinus Torvalds {
1944028fec41SDavid Rientjes 	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
19451da177e4SLinus Torvalds }
194668860ec1SPaul Jackson 
19474225399aSPaul Jackson /*
1948fc36b8d3SLee Schermerhorn  * "local" is a pseudo-policy:  MPOL_PREFERRED with the MPOL_F_LOCAL flag
194953f2556bSLee Schermerhorn  * Used only for mpol_to_str()
19501a75a6c8SChristoph Lameter  */
195153f2556bSLee Schermerhorn #define MPOL_LOCAL (MPOL_INTERLEAVE + 1)
195215ad7cdcSHelge Deller static const char * const policy_types[] =
195353f2556bSLee Schermerhorn 	{ "default", "prefer", "bind", "interleave", "local" };
19541a75a6c8SChristoph Lameter 
19551a75a6c8SChristoph Lameter /*
19561a75a6c8SChristoph Lameter  * Convert a mempolicy into a string.
19571a75a6c8SChristoph Lameter  * Returns the number of characters in buffer (if positive)
19581a75a6c8SChristoph Lameter  * or an error (negative)
19591a75a6c8SChristoph Lameter  */
19601a75a6c8SChristoph Lameter static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
19611a75a6c8SChristoph Lameter {
19621a75a6c8SChristoph Lameter 	char *p = buffer;
19631a75a6c8SChristoph Lameter 	int l;
19641a75a6c8SChristoph Lameter 	nodemask_t nodes;
1965bea904d5SLee Schermerhorn 	unsigned short mode;
1966f5b087b5SDavid Rientjes 	unsigned short flags = pol ? pol->flags : 0;
19671a75a6c8SChristoph Lameter 
1968*2291990aSLee Schermerhorn 	/*
1969*2291990aSLee Schermerhorn 	 * Sanity check:  room for longest mode, flag and some nodes
1970*2291990aSLee Schermerhorn 	 */
1971*2291990aSLee Schermerhorn 	VM_BUG_ON(maxlen < strlen("interleave") + strlen("relative") + 16);
1972*2291990aSLee Schermerhorn 
1973bea904d5SLee Schermerhorn 	if (!pol || pol == &default_policy)
1974bea904d5SLee Schermerhorn 		mode = MPOL_DEFAULT;
1975bea904d5SLee Schermerhorn 	else
1976bea904d5SLee Schermerhorn 		mode = pol->mode;
1977bea904d5SLee Schermerhorn 
19781a75a6c8SChristoph Lameter 	switch (mode) {
19791a75a6c8SChristoph Lameter 	case MPOL_DEFAULT:
19801a75a6c8SChristoph Lameter 		nodes_clear(nodes);
19811a75a6c8SChristoph Lameter 		break;
19821a75a6c8SChristoph Lameter 
19831a75a6c8SChristoph Lameter 	case MPOL_PREFERRED:
19841a75a6c8SChristoph Lameter 		nodes_clear(nodes);
1985fc36b8d3SLee Schermerhorn 		if (flags & MPOL_F_LOCAL)
198653f2556bSLee Schermerhorn 			mode = MPOL_LOCAL;	/* pseudo-policy */
198753f2556bSLee Schermerhorn 		else
1988fc36b8d3SLee Schermerhorn 			node_set(pol->v.preferred_node, nodes);
19891a75a6c8SChristoph Lameter 		break;
19901a75a6c8SChristoph Lameter 
19911a75a6c8SChristoph Lameter 	case MPOL_BIND:
199219770b32SMel Gorman 		/* Fall through */
19931a75a6c8SChristoph Lameter 	case MPOL_INTERLEAVE:
19941a75a6c8SChristoph Lameter 		nodes = pol->v.nodes;
19951a75a6c8SChristoph Lameter 		break;
19961a75a6c8SChristoph Lameter 
19971a75a6c8SChristoph Lameter 	default:
19981a75a6c8SChristoph Lameter 		BUG();
19991a75a6c8SChristoph Lameter 	}
20001a75a6c8SChristoph Lameter 
20011a75a6c8SChristoph Lameter 	l = strlen(policy_types[mode]);
20021a75a6c8SChristoph Lameter 	if (buffer + maxlen < p + l + 1)
20031a75a6c8SChristoph Lameter 		return -ENOSPC;
20041a75a6c8SChristoph Lameter 
20051a75a6c8SChristoph Lameter 	strcpy(p, policy_types[mode]);
20061a75a6c8SChristoph Lameter 	p += l;
20071a75a6c8SChristoph Lameter 
2008fc36b8d3SLee Schermerhorn 	if (flags & MPOL_MODE_FLAGS) {
2009f5b087b5SDavid Rientjes 		if (buffer + maxlen < p + 2)
2010f5b087b5SDavid Rientjes 			return -ENOSPC;
2011f5b087b5SDavid Rientjes 		*p++ = '=';
2012f5b087b5SDavid Rientjes 
2013*2291990aSLee Schermerhorn 		/*
2014*2291990aSLee Schermerhorn 		 * Currently, the only defined flags are mutually exclusive
2015*2291990aSLee Schermerhorn 		 */
2016f5b087b5SDavid Rientjes 		if (flags & MPOL_F_STATIC_NODES)
2017*2291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "static");
2018*2291990aSLee Schermerhorn 		else if (flags & MPOL_F_RELATIVE_NODES)
2019*2291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "relative");
2020f5b087b5SDavid Rientjes 	}
2021f5b087b5SDavid Rientjes 
20221a75a6c8SChristoph Lameter 	if (!nodes_empty(nodes)) {
20231a75a6c8SChristoph Lameter 		if (buffer + maxlen < p + 2)
20241a75a6c8SChristoph Lameter 			return -ENOSPC;
20251a75a6c8SChristoph Lameter 		*p++ = '=';
20261a75a6c8SChristoph Lameter 	 	p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
20271a75a6c8SChristoph Lameter 	}
20281a75a6c8SChristoph Lameter 	return p - buffer;
20291a75a6c8SChristoph Lameter }
20301a75a6c8SChristoph Lameter 
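/*
 * Example output (editor's addition), tracing the code above: "default",
 * "prefer=3", "interleave=0-3", and with a mode flag "bind=static=0,2".
 * The mode comes first, then the optional flag, then the nodelist, each
 * introduced by '='.
 */
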
20311a75a6c8SChristoph Lameter struct numa_maps {
20321a75a6c8SChristoph Lameter 	unsigned long pages;
20331a75a6c8SChristoph Lameter 	unsigned long anon;
2034397874dfSChristoph Lameter 	unsigned long active;
2035397874dfSChristoph Lameter 	unsigned long writeback;
20361a75a6c8SChristoph Lameter 	unsigned long mapcount_max;
2037397874dfSChristoph Lameter 	unsigned long dirty;
2038397874dfSChristoph Lameter 	unsigned long swapcache;
20391a75a6c8SChristoph Lameter 	unsigned long node[MAX_NUMNODES];
20401a75a6c8SChristoph Lameter };
20411a75a6c8SChristoph Lameter 
2042397874dfSChristoph Lameter static void gather_stats(struct page *page, void *private, int pte_dirty)
20431a75a6c8SChristoph Lameter {
20441a75a6c8SChristoph Lameter 	struct numa_maps *md = private;
20451a75a6c8SChristoph Lameter 	int count = page_mapcount(page);
20461a75a6c8SChristoph Lameter 
20471a75a6c8SChristoph Lameter 	md->pages++;
2048397874dfSChristoph Lameter 	if (pte_dirty || PageDirty(page))
2049397874dfSChristoph Lameter 		md->dirty++;
2050397874dfSChristoph Lameter 
2051397874dfSChristoph Lameter 	if (PageSwapCache(page))
2052397874dfSChristoph Lameter 		md->swapcache++;
2053397874dfSChristoph Lameter 
2054397874dfSChristoph Lameter 	if (PageActive(page))
2055397874dfSChristoph Lameter 		md->active++;
2056397874dfSChristoph Lameter 
2057397874dfSChristoph Lameter 	if (PageWriteback(page))
2058397874dfSChristoph Lameter 		md->writeback++;
20591a75a6c8SChristoph Lameter 
20601a75a6c8SChristoph Lameter 	if (PageAnon(page))
20611a75a6c8SChristoph Lameter 		md->anon++;
20621a75a6c8SChristoph Lameter 
2063397874dfSChristoph Lameter 	if (count > md->mapcount_max)
2064397874dfSChristoph Lameter 		md->mapcount_max = count;
2065397874dfSChristoph Lameter 
20661a75a6c8SChristoph Lameter 	md->node[page_to_nid(page)]++;
20671a75a6c8SChristoph Lameter }
20681a75a6c8SChristoph Lameter 
20697f709ed0SAndrew Morton #ifdef CONFIG_HUGETLB_PAGE
2070397874dfSChristoph Lameter static void check_huge_range(struct vm_area_struct *vma,
2071397874dfSChristoph Lameter 		unsigned long start, unsigned long end,
2072397874dfSChristoph Lameter 		struct numa_maps *md)
2073397874dfSChristoph Lameter {
2074397874dfSChristoph Lameter 	unsigned long addr;
2075397874dfSChristoph Lameter 	struct page *page;
2076397874dfSChristoph Lameter 
2077397874dfSChristoph Lameter 	for (addr = start; addr < end; addr += HPAGE_SIZE) {
2078397874dfSChristoph Lameter 		pte_t *ptep = huge_pte_offset(vma->vm_mm, addr & HPAGE_MASK);
2079397874dfSChristoph Lameter 		pte_t pte;
2080397874dfSChristoph Lameter 
2081397874dfSChristoph Lameter 		if (!ptep)
2082397874dfSChristoph Lameter 			continue;
2083397874dfSChristoph Lameter 
2084397874dfSChristoph Lameter 		pte = *ptep;
2085397874dfSChristoph Lameter 		if (pte_none(pte))
2086397874dfSChristoph Lameter 			continue;
2087397874dfSChristoph Lameter 
2088397874dfSChristoph Lameter 		page = pte_page(pte);
2089397874dfSChristoph Lameter 		if (!page)
2090397874dfSChristoph Lameter 			continue;
2091397874dfSChristoph Lameter 
2092397874dfSChristoph Lameter 		gather_stats(page, md, pte_dirty(*ptep));
2093397874dfSChristoph Lameter 	}
2094397874dfSChristoph Lameter }
20957f709ed0SAndrew Morton #else
20967f709ed0SAndrew Morton static inline void check_huge_range(struct vm_area_struct *vma,
20977f709ed0SAndrew Morton 		unsigned long start, unsigned long end,
20987f709ed0SAndrew Morton 		struct numa_maps *md)
20997f709ed0SAndrew Morton {
21007f709ed0SAndrew Morton }
21017f709ed0SAndrew Morton #endif
2102397874dfSChristoph Lameter 
210353f2556bSLee Schermerhorn /*
210453f2556bSLee Schermerhorn  * Display pages allocated per node and memory policy via /proc.
210553f2556bSLee Schermerhorn  */
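/*
 * Illustrative output line (hypothetical VMA; only the counters that are
 * non-zero for the VMA actually appear):
 *
 *	00400000 default file=/bin/cat mapped=3 mapmax=2 N0=3
 */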
21061a75a6c8SChristoph Lameter int show_numa_map(struct seq_file *m, void *v)
21071a75a6c8SChristoph Lameter {
210899f89551SEric W. Biederman 	struct proc_maps_private *priv = m->private;
21091a75a6c8SChristoph Lameter 	struct vm_area_struct *vma = v;
21101a75a6c8SChristoph Lameter 	struct numa_maps *md;
2111397874dfSChristoph Lameter 	struct file *file = vma->vm_file;
2112397874dfSChristoph Lameter 	struct mm_struct *mm = vma->vm_mm;
2113480eccf9SLee Schermerhorn 	struct mempolicy *pol;
21141a75a6c8SChristoph Lameter 	int n;
21151a75a6c8SChristoph Lameter 	char buffer[50];
21161a75a6c8SChristoph Lameter 
2117397874dfSChristoph Lameter 	if (!mm)
21181a75a6c8SChristoph Lameter 		return 0;
21191a75a6c8SChristoph Lameter 
21201a75a6c8SChristoph Lameter 	md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
21211a75a6c8SChristoph Lameter 	if (!md)
21221a75a6c8SChristoph Lameter 		return 0;
21231a75a6c8SChristoph Lameter 
2124480eccf9SLee Schermerhorn 	pol = get_vma_policy(priv->task, vma, vma->vm_start);
2125480eccf9SLee Schermerhorn 	mpol_to_str(buffer, sizeof(buffer), pol);
212652cd3b07SLee Schermerhorn 	mpol_cond_put(pol);
21271a75a6c8SChristoph Lameter 
2128397874dfSChristoph Lameter 	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
2129397874dfSChristoph Lameter 
2130397874dfSChristoph Lameter 	if (file) {
2131397874dfSChristoph Lameter 		seq_printf(m, " file=");
2132c32c2f63SJan Blunck 		seq_path(m, &file->f_path, "\n\t= ");
2133397874dfSChristoph Lameter 	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
2134397874dfSChristoph Lameter 		seq_printf(m, " heap");
2135397874dfSChristoph Lameter 	} else if (vma->vm_start <= mm->start_stack &&
2136397874dfSChristoph Lameter 			vma->vm_end >= mm->start_stack) {
2137397874dfSChristoph Lameter 		seq_printf(m, " stack");
2138397874dfSChristoph Lameter 	}
2139397874dfSChristoph Lameter 
2140397874dfSChristoph Lameter 	if (is_vm_hugetlb_page(vma)) {
2141397874dfSChristoph Lameter 		check_huge_range(vma, vma->vm_start, vma->vm_end, md);
2142397874dfSChristoph Lameter 		seq_printf(m, " huge");
2143397874dfSChristoph Lameter 	} else {
2144397874dfSChristoph Lameter 		check_pgd_range(vma, vma->vm_start, vma->vm_end,
214556bbd65dSChristoph Lameter 			&node_states[N_HIGH_MEMORY], MPOL_MF_STATS, md);
2146397874dfSChristoph Lameter 	}
2147397874dfSChristoph Lameter 
2148397874dfSChristoph Lameter 	if (!md->pages)
2149397874dfSChristoph Lameter 		goto out;
21501a75a6c8SChristoph Lameter 
21511a75a6c8SChristoph Lameter 	if (md->anon)
21521a75a6c8SChristoph Lameter 		seq_printf(m, " anon=%lu", md->anon);
21531a75a6c8SChristoph Lameter 
2154397874dfSChristoph Lameter 	if (md->dirty)
2155397874dfSChristoph Lameter 		seq_printf(m, " dirty=%lu", md->dirty);
2156397874dfSChristoph Lameter 
2157397874dfSChristoph Lameter 	if (md->pages != md->anon && md->pages != md->dirty)
2158397874dfSChristoph Lameter 		seq_printf(m, " mapped=%lu", md->pages);
2159397874dfSChristoph Lameter 
2160397874dfSChristoph Lameter 	if (md->mapcount_max > 1)
2161397874dfSChristoph Lameter 		seq_printf(m, " mapmax=%lu", md->mapcount_max);
2162397874dfSChristoph Lameter 
2163397874dfSChristoph Lameter 	if (md->swapcache)
2164397874dfSChristoph Lameter 		seq_printf(m, " swapcache=%lu", md->swapcache);
2165397874dfSChristoph Lameter 
2166397874dfSChristoph Lameter 	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
2167397874dfSChristoph Lameter 		seq_printf(m, " active=%lu", md->active);
2168397874dfSChristoph Lameter 
2169397874dfSChristoph Lameter 	if (md->writeback)
2170397874dfSChristoph Lameter 		seq_printf(m, " writeback=%lu", md->writeback);
2171397874dfSChristoph Lameter 
217256bbd65dSChristoph Lameter 	for_each_node_state(n, N_HIGH_MEMORY)
21731a75a6c8SChristoph Lameter 		if (md->node[n])
21741a75a6c8SChristoph Lameter 			seq_printf(m, " N%d=%lu", n, md->node[n]);
2175397874dfSChristoph Lameter out:
21761a75a6c8SChristoph Lameter 	seq_putc(m, '\n');
21771a75a6c8SChristoph Lameter 	kfree(md);
21781a75a6c8SChristoph Lameter 
21791a75a6c8SChristoph Lameter 	if (m->count < m->size)
218099f89551SEric W. Biederman 		m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
21811a75a6c8SChristoph Lameter 	return 0;
21821a75a6c8SChristoph Lameter }
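
/*
 * Sketch of how this is consumed (not part of this file): show_numa_map()
 * is wired up as the ->show() callback of the seq_file operations behind
 * /proc/<pid>/numa_maps, roughly as in fs/proc/task_mmu.c:
 *
 *	static const struct seq_operations proc_pid_numa_maps_op = {
 *		.start	= m_start,
 *		.next	= m_next,
 *		.stop	= m_stop,
 *		.show	= show_numa_map,
 *	};
 */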
2183