xref: /openbmc/linux/mm/mempolicy.c (revision 708c1bbc9d0c3e57f40501794d9b0eed29d10fce)
/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a process counter
 *                is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case node -1 here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *		  in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non-interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem, kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
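
/*
 * For illustration only (not compiled into the kernel): a minimal
 * userspace sketch of how the policies above are typically selected,
 * assuming the mbind(2)/set_mempolicy(2) wrappers from libnuma's
 * <numaif.h>.  The nodemask is a plain bitmask of node ids; maxnode
 * tells the kernel how many bits of the mask to examine.
 *
 *	#include <numaif.h>
 *	#include <sys/mman.h>
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 1);
 *	void *buf = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 * Process-wide policy: interleave future allocations across nodes 0-1:
 *
 *	set_mempolicy(MPOL_INTERLEAVE, &mask, 8 * sizeof(mask));
 *
 * Per-VMA policy: restrict this one mapping to node 0:
 *
 *	mask = 1UL << 0;
 *	mbind(buf, 1 << 20, MPOL_BIND, &mask, 8 * sizeof(mask),
 *	      MPOL_MF_STRICT);
 */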

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
#define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2)		/* Gather statistics */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	/*
	 * If the read-side task has no lock to protect task->mempolicy,
	 * the write-side task will rebind task->mempolicy in two steps.
	 * The first step adds all the newly allowed nodes, and the second
	 * step removes all the now-disallowed nodes.  This way there is
	 * never a transient state in which an allocation could find no
	 * node to allocate a page from.
	 * If the read side is protected by a lock, we rebind directly.
	 *
	 * step:
	 *	MPOL_REBIND_ONCE  - do the rebind work at once
	 *	MPOL_REBIND_STEP1 - add all the newly allowed nodes
	 *	MPOL_REBIND_STEP2 - remove all the disallowed nodes
	 */
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
			enum mpol_rebind_step step);
} mpol_ops[MPOL_MAX];

/* Check that the nodemask contains at least one populated zone */
static int is_valid_nodemask(const nodemask_t *nodemask)
{
	int nd, k;

	for_each_node_mask(nd, *nodemask) {
		struct zone *z;

		for (k = 0; k <= policy_zone; k++) {
			z = &NODE_DATA(nd)->node_zones[k];
			if (z->present_pages > 0)
				return 1;
		}
	}

	return 0;
}

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}
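
/*
 * A worked example of the relative-nodes remap above (illustrative, not
 * from the original source): with *orig = {0,2} and *rel = {4,5,6},
 * nodes_fold() wraps node numbers modulo nodes_weight(*rel) = 3,
 * leaving {0,2}, and nodes_onto() then maps bit i to the i-th set bit
 * of *rel, yielding *ret = {4,6}.
 */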

static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!nodes)
		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
	else if (nodes_empty(*nodes))
		return -EINVAL;			/* no allowed nodes */
	else
		pol->v.preferred_node = first_node(*nodes);
	return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!is_valid_nodemask(nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy.  mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.  But, we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy.  May also be called holding the mmap_semaphore for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
	if (pol == NULL)
		return 0;
	/* Check N_HIGH_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_HIGH_MEMORY]);

	VM_BUG_ON(!nodes);
	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
		nodes = NULL;	/* explicit local allocation */
	else {
		if (pol->flags & MPOL_F_RELATIVE_NODES)
			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
		else
			nodes_and(nsc->mask2, *nodes, nsc->mask1);

		if (mpol_store_user_nodemask(pol))
			pol->w.user_nodemask = *nodes;
		else
			pol->w.cpuset_mems_allowed =
						cpuset_current_mems_allowed;
	}

	if (nodes)
		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	else
		ret = mpol_ops[pol->mode].create(pol, NULL);
	return ret;
}
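
/*
 * Illustrative example (not from the original source): if the caller
 * passes nodes = {1,3} without MPOL_F_RELATIVE_NODES while the current
 * cpuset allows mems {0,1}, the policy is created over the intersection
 * {1}.  With MPOL_F_STATIC_NODES or MPOL_F_RELATIVE_NODES the original
 * user mask is also saved in w.user_nodemask for later rebinds.
 */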

/*
 * This function just creates a new policy, does some basic checks and
 * simple initialization.  You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : -1);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;	/* simply delete any existing policy */
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);
		}
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	return policy;
}
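
/*
 * Illustrative summary of the checks above (not from the original
 * source): mpol_new(MPOL_DEFAULT, 0, NULL) returns NULL (drop any
 * existing policy); mpol_new(MPOL_PREFERRED, 0, empty mask) succeeds
 * and later becomes a local-allocation policy; mpol_new(MPOL_PREFERRED,
 * MPOL_F_STATIC_NODES, empty mask) and mpol_new(MPOL_BIND, 0, empty
 * mask) both fail with -EINVAL.
 */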

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
				enum mpol_rebind_step step)
{
}

/*
 * step:
 *	MPOL_REBIND_ONCE  - do the rebind work at once
 *	MPOL_REBIND_STEP1 - add all the newly allowed nodes
 *	MPOL_REBIND_STEP2 - remove all the disallowed nodes
 */
static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
				 enum mpol_rebind_step step)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		/*
		 * if step == MPOL_REBIND_STEP1, we use
		 * ->w.cpuset_mems_allowed to cache the result
		 */
		if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
			nodes_remap(tmp, pol->v.nodes,
					pol->w.cpuset_mems_allowed, *nodes);
			pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
		} else if (step == MPOL_REBIND_STEP2) {
			tmp = pol->w.cpuset_mems_allowed;
			pol->w.cpuset_mems_allowed = *nodes;
		} else
			BUG();
	}

	if (nodes_empty(tmp))
		tmp = *nodes;

	if (step == MPOL_REBIND_STEP1)
		nodes_or(pol->v.nodes, pol->v.nodes, tmp);
	else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
		pol->v.nodes = tmp;
	else
		BUG();

	if (!node_isset(current->il_next, tmp)) {
		current->il_next = next_node(current->il_next, tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = first_node(tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = numa_node_id();
	}
}
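
/*
 * Illustrative example (not from the original source): a policy that
 * interleaves over v.nodes = {0,1} in a cpuset whose mems move from
 * {0,1} to {2,3}.  With MPOL_F_STATIC_NODES the user mask {0,1} is
 * intersected with the new mems, which is empty here, so the code
 * above falls back to tmp = {2,3}.  With neither flag set,
 * nodes_remap() shifts the relative positions, giving {2,3} directly.
 */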

static void mpol_rebind_preferred(struct mempolicy *pol,
				  const nodemask_t *nodes,
				  enum mpol_rebind_step step)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES) {
		int node = first_node(pol->w.user_nodemask);

		if (node_isset(node, *nodes)) {
			pol->v.preferred_node = node;
			pol->flags &= ~MPOL_F_LOCAL;
		} else
			pol->flags |= MPOL_F_LOCAL;
	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
		pol->v.preferred_node = first_node(tmp);
	} else if (!(pol->flags & MPOL_F_LOCAL)) {
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						   pol->w.cpuset_mems_allowed,
						   *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}
}

/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * If the read-side task has no lock to protect task->mempolicy, the
 * write-side task will rebind task->mempolicy in two steps.  The first
 * step adds all the newly allowed nodes, and the second step removes
 * all the now-disallowed nodes.  This way we avoid a transient state
 * in which no node is allowed for allocation.
 * If the read side is protected by a lock, we rebind directly.
 *
 * step:
 *	MPOL_REBIND_ONCE  - do the rebind work at once
 *	MPOL_REBIND_STEP1 - add all the newly allowed nodes
 *	MPOL_REBIND_STEP2 - remove all the disallowed nodes
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
				enum mpol_rebind_step step)
{
	if (!pol)
		return;
	if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;

	if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
		return;

	if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
		BUG();

	if (step == MPOL_REBIND_STEP1)
		pol->flags |= MPOL_F_REBINDING;
	else if (step == MPOL_REBIND_STEP2)
		pol->flags &= ~MPOL_F_REBINDING;
	else if (step >= MPOL_REBIND_NSTEP)
		BUG();

	mpol_ops[pol->mode].rebind(pol, newmask, step);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */

void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
			enum mpol_rebind_step step)
{
	mpol_rebind_policy(tsk->mempolicy, new, step);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
 */

void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
	up_write(&mm->mmap_sem);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_interleave,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,
	},
};

static void gather_stats(struct page *, void *, int pte_dirty);
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

/* Scan through the page range, checking whether each present page
   satisfies the given nodemask conditions. */
static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pte_t *orig_pte;
	pte_t *pte;
	spinlock_t *ptl;

	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		struct page *page;
		int nid;

		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * vm_normal_page() filters out zero pages, but there might
		 * still be PageReserved pages to skip, perhaps in a VDSO.
		 * And we cannot move PageKsm pages sensibly or safely yet.
		 */
		if (PageReserved(page) || PageKsm(page))
			continue;
		nid = page_to_nid(page);
		if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
			continue;

		if (flags & MPOL_MF_STATS)
			gather_stats(page, private, pte_dirty(*pte));
		else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
			migrate_page_add(page, private, flags);
		else
			break;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return addr != end;
}

static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		if (check_pte_range(vma, pmd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		if (check_pmd_range(vma, pud, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static inline int check_pgd_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		if (check_pud_range(vma, pgd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pgd++, addr = next, addr != end);
	return 0;
}

/*
 * Check if all pages in a range are on a set of nodes.
 * If pagelist != NULL then isolate pages from the LRU and
 * put them on the pagelist.
 */
static struct vm_area_struct *
check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		const nodemask_t *nodes, unsigned long flags, void *private)
{
	int err;
	struct vm_area_struct *first, *vma, *prev;

	first = find_vma(mm, start);
	if (!first)
		return ERR_PTR(-EFAULT);
	prev = NULL;
	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
			if (!vma->vm_next && vma->vm_end < end)
				return ERR_PTR(-EFAULT);
			if (prev && prev->vm_end < vma->vm_start)
				return ERR_PTR(-EFAULT);
		}
		if (!is_vm_hugetlb_page(vma) &&
		    ((flags & MPOL_MF_STRICT) ||
		     ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
				vma_migratable(vma)))) {
			unsigned long endvma = vma->vm_end;

			if (endvma > end)
				endvma = end;
			if (vma->vm_start > start)
				start = vma->vm_start;
			err = check_pgd_range(vma, start, endvma, nodes,
						flags, private);
			if (err) {
				first = ERR_PTR(err);
				break;
			}
		}
		prev = vma;
	}
	return first;
}

/* Apply policy to a single VMA */
static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
{
	int err = 0;
	struct mempolicy *old = vma->vm_policy;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	if (vma->vm_ops && vma->vm_ops->set_policy)
		err = vma->vm_ops->set_policy(vma, new);
	if (!err) {
		mpol_get(new);
		vma->vm_policy = new;
		mpol_put(old);
	}
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end, struct mempolicy *new_pol)
{
	struct vm_area_struct *next;
	struct vm_area_struct *prev;
	struct vm_area_struct *vma;
	int err = 0;
	pgoff_t pgoff;
	unsigned long vmstart;
	unsigned long vmend;

	vma = find_vma_prev(mm, start, &prev);
	if (!vma || vma->vm_start > start)
		return -EFAULT;

	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
		next = vma->vm_next;
		vmstart = max(start, vma->vm_start);
		vmend   = min(end, vma->vm_end);

		pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
				  vma->anon_vma, vma->vm_file, pgoff, new_pol);
		if (prev) {
			vma = prev;
			next = vma->vm_next;
			continue;
		}
		if (vma->vm_start != vmstart) {
			err = split_vma(vma->vm_mm, vma, vmstart, 1);
			if (err)
				goto out;
		}
		if (vma->vm_end != vmend) {
			err = split_vma(vma->vm_mm, vma, vmend, 0);
			if (err)
				goto out;
		}
		err = policy_vma(vma, new_pol);
		if (err)
			goto out;
	}

 out:
	return err;
}
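
/*
 * Illustrative example (not from the original source): applying a new
 * policy to [0x3000, 0x5000) within a single vma spanning
 * [0x1000, 0x9000) first tries vma_merge() with a neighbour carrying
 * the same policy; failing that, split_vma() cuts at 0x3000 and 0x5000
 * so that only the middle vma receives new_pol via policy_vma().
 */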

/*
 * Update task->flags PF_MEMPOLICY bit: set iff non-default
 * mempolicy.  Allows more rapid checking of this (combined perhaps
 * with other PF_* flag bits) on memory allocation hot code paths.
 *
 * If called from outside this file, the task 'p' should -only- be
 * a newly forked child not yet visible on the task list, because
 * manipulating the task flags of a visible task is not safe.
 *
 * The above limitation is why this routine has the funny name
 * mpol_fix_fork_child_flag().
 *
 * It is also safe to call this with a task pointer of current,
 * which the static wrapper mpol_set_task_struct_flag() does,
 * for use within this file.
 */

void mpol_fix_fork_child_flag(struct task_struct *p)
{
	if (p->mempolicy)
		p->flags |= PF_MEMPOLICY;
	else
		p->flags &= ~PF_MEMPOLICY;
}

static void mpol_set_task_struct_flag(void)
{
	mpol_fix_fork_child_flag(current);
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new, *old;
	struct mm_struct *mm = current->mm;
	NODEMASK_SCRATCH(scratch);
	int ret;

	if (!scratch)
		return -ENOMEM;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new)) {
		ret = PTR_ERR(new);
		goto out;
	}
	/*
	 * prevent changing our mempolicy while show_numa_maps()
	 * is using it.
	 * Note:  do_set_mempolicy() can be called at init time
	 * with no 'mm'.
	 */
	if (mm)
		down_write(&mm->mmap_sem);
	task_lock(current);
	ret = mpol_set_nodemask(new, nodes, scratch);
	if (ret) {
		task_unlock(current);
		if (mm)
			up_write(&mm->mmap_sem);
		mpol_put(new);
		goto out;
	}
	old = current->mempolicy;
	current->mempolicy = new;
	mpol_set_task_struct_flag();
	if (new && new->mode == MPOL_INTERLEAVE &&
	    nodes_weight(new->v.nodes))
		current->il_next = first_node(new->v.nodes);
	task_unlock(current);
	if (mm)
		up_write(&mm->mmap_sem);

	mpol_put(old);
	ret = 0;
out:
	NODEMASK_SCRATCH_FREE(scratch);
	return ret;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (p == &default_policy)
		return;

	switch (p->mode) {
	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		if (!(p->flags & MPOL_F_LOCAL))
			node_set(p->v.preferred_node, *nodes);
		/* else return empty node mask for local allocation */
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p;
	int err;

	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask  = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL.  We
		 * want to return MPOL_DEFAULT in this case.
		 */
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = current->il_next;
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	if (vma) {
		up_read(&current->mm->mmap_sem);
		vma = NULL;
	}

	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {
			*nmask = pol->w.user_nodemask;
		} else {
			task_lock(current);
			get_policy_nodemask(pol, nmask);
			task_unlock(current);
		}
	}

 out:
	mpol_cond_put(pol);
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}
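
/*
 * For illustration only (not compiled into the kernel): the userspace
 * view of the query above, assuming libnuma's <numaif.h> wrapper for
 * get_mempolicy(2):
 *
 *	int mode;
 *	unsigned long mask = 0;
 *
 * Query the calling thread's policy mode and nodemask:
 *
 *	get_mempolicy(&mode, &mask, 8 * sizeof(mask), NULL, 0);
 *
 * Query the node id of the page backing 'addr' (returned in 'mode'):
 *
 *	get_mempolicy(&mode, NULL, 0, addr, MPOL_F_ADDR | MPOL_F_NODE);
 */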

#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
		if (!isolate_lru_page(page)) {
			list_add_tail(&page->lru, pagelist);
			inc_zone_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));
		}
	}
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
	return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;

	nodes_clear(nmask);
	node_set(source, nmask);

	check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist))
		err = migrate_pages(&pagelist, new_node_page, dest, 0);

	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	int busy = 0;
	int err;
	nodemask_t tmp;

	err = migrate_prep();
	if (err)
		return err;

	down_read(&mm->mmap_sem);

	err = migrate_vmas(mm, from_nodes, to_nodes, flags);
	if (err)
		goto out;

	/*
	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
	 * bit in 'tmp', and return that <source, dest> pair for migration.
	 * The pair of nodemasks 'to' and 'from' define the map.
	 *
	 * If no pair of bits is found that way, fallback to picking some
	 * pair of 'source' and 'dest' bits that are not the same.  If the
	 * 'source' and 'dest' bits are the same, this represents a node
	 * that will be migrating to itself, so no pages need move.
	 *
	 * If no bits are left in 'tmp', or if all remaining bits left
	 * in 'tmp' correspond to the same bit in 'to', return false
	 * (nothing left to migrate).
	 *
	 * This lets us pick a pair of nodes to migrate between, such that
	 * if possible the dest node is not already occupied by some other
	 * source node, minimizing the risk of overloading the memory on a
	 * node that would happen if we migrated incoming memory to a node
	 * before migrating outgoing memory source that same node.
	 *
	 * A single scan of tmp is sufficient.  As we go, we remember the
	 * most recent <s, d> pair that moved (s != d).  If we find a pair
	 * that not only moved, but what's better, moved to an empty slot
	 * (d is not set in tmp), then we break out then, with that pair.
	 * Otherwise when we finish scanning tmp, we at least have the
	 * most recent <s, d> pair that moved.  If we get all the way through
	 * the scan of tmp without finding any node that moved, much less
	 * moved to an empty node, then there is nothing left worth migrating.
	 */
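	/*
	 * Illustrative example (not from the original source): with
	 * from = {0,1} and to = {1,2}, the scan maps 0->1 (dest 1 is
	 * still a pending source, so remember the pair and keep going)
	 * and then 1->2 (dest 2 is not in tmp, so break).  Pages are
	 * therefore moved off node 1 to node 2 before node 0's pages
	 * move to node 1, avoiding piling two nodes' memory onto node 1.
	 */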

	tmp = *from_nodes;
	while (!nodes_empty(tmp)) {
		int s, d;
		int source = -1;
		int dest = 0;

		for_each_node_mask(s, tmp) {
			d = node_remap(s, *from_nodes, *to_nodes);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
				break;
		}
		if (source == -1)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;
		if (err < 0)
			break;
	}
out:
	up_read(&mm->mmap_sem);
	if (err < 0)
		return err;
	return busy;

}

/*
 * Allocate a new page for page migration based on vma policy.
 * Start assuming that page is mapped by vma pointed to by @private.
 * Search forward from there, if not.  N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
 */
static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	struct vm_area_struct *vma = (struct vm_area_struct *)private;
	unsigned long uninitialized_var(address);

	while (vma) {
		address = page_address_in_vma(page, vma);
		if (address != -EFAULT)
			break;
		vma = vma->vm_next;
	}

	/*
	 * if !vma, alloc_page_vma() will use task or system default policy
	 */
	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
}
#else

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
}

int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	return -ENOSYS;
}

static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	return NULL;
}
#endif

1072b20a3503SChristoph Lameter 
1073dbcb0f19SAdrian Bunk static long do_mbind(unsigned long start, unsigned long len,
1074028fec41SDavid Rientjes 		     unsigned short mode, unsigned short mode_flags,
1075028fec41SDavid Rientjes 		     nodemask_t *nmask, unsigned long flags)
10766ce3c4c0SChristoph Lameter {
10776ce3c4c0SChristoph Lameter 	struct vm_area_struct *vma;
10786ce3c4c0SChristoph Lameter 	struct mm_struct *mm = current->mm;
10796ce3c4c0SChristoph Lameter 	struct mempolicy *new;
10806ce3c4c0SChristoph Lameter 	unsigned long end;
10816ce3c4c0SChristoph Lameter 	int err;
10826ce3c4c0SChristoph Lameter 	LIST_HEAD(pagelist);
10836ce3c4c0SChristoph Lameter 
1084a3b51e01SDavid Rientjes 	if (flags & ~(unsigned long)(MPOL_MF_STRICT |
10856ce3c4c0SChristoph Lameter 				     MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
10866ce3c4c0SChristoph Lameter 		return -EINVAL;
108774c00241SChristoph Lameter 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
10886ce3c4c0SChristoph Lameter 		return -EPERM;
10896ce3c4c0SChristoph Lameter 
10906ce3c4c0SChristoph Lameter 	if (start & ~PAGE_MASK)
10916ce3c4c0SChristoph Lameter 		return -EINVAL;
10926ce3c4c0SChristoph Lameter 
10936ce3c4c0SChristoph Lameter 	if (mode == MPOL_DEFAULT)
10946ce3c4c0SChristoph Lameter 		flags &= ~MPOL_MF_STRICT;
10956ce3c4c0SChristoph Lameter 
10966ce3c4c0SChristoph Lameter 	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
10976ce3c4c0SChristoph Lameter 	end = start + len;
10986ce3c4c0SChristoph Lameter 
10996ce3c4c0SChristoph Lameter 	if (end < start)
11006ce3c4c0SChristoph Lameter 		return -EINVAL;
11016ce3c4c0SChristoph Lameter 	if (end == start)
11026ce3c4c0SChristoph Lameter 		return 0;
11036ce3c4c0SChristoph Lameter 
1104028fec41SDavid Rientjes 	new = mpol_new(mode, mode_flags, nmask);
11056ce3c4c0SChristoph Lameter 	if (IS_ERR(new))
11066ce3c4c0SChristoph Lameter 		return PTR_ERR(new);
11076ce3c4c0SChristoph Lameter 
11086ce3c4c0SChristoph Lameter 	/*
11096ce3c4c0SChristoph Lameter 	 * If we are using the default policy, then operating
11106ce3c4c0SChristoph Lameter 	 * on discontiguous address spaces is okay after all.
11116ce3c4c0SChristoph Lameter 	 */
11126ce3c4c0SChristoph Lameter 	if (!new)
11136ce3c4c0SChristoph Lameter 		flags |= MPOL_MF_DISCONTIG_OK;
11146ce3c4c0SChristoph Lameter 
1115028fec41SDavid Rientjes 	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1116028fec41SDavid Rientjes 		 start, start + len, mode, mode_flags,
1117028fec41SDavid Rientjes 		 nmask ? nodes_addr(*nmask)[0] : -1);
11186ce3c4c0SChristoph Lameter 
11190aedadf9SChristoph Lameter 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
11200aedadf9SChristoph Lameter 
11210aedadf9SChristoph Lameter 		err = migrate_prep();
11220aedadf9SChristoph Lameter 		if (err)
1123b05ca738SKOSAKI Motohiro 			goto mpol_out;
11240aedadf9SChristoph Lameter 	}
11254bfc4495SKAMEZAWA Hiroyuki 	{
11264bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
11274bfc4495SKAMEZAWA Hiroyuki 		if (scratch) {
11286ce3c4c0SChristoph Lameter 			down_write(&mm->mmap_sem);
112958568d2aSMiao Xie 			task_lock(current);
11304bfc4495SKAMEZAWA Hiroyuki 			err = mpol_set_nodemask(new, nmask, scratch);
113158568d2aSMiao Xie 			task_unlock(current);
11324bfc4495SKAMEZAWA Hiroyuki 			if (err)
113358568d2aSMiao Xie 				up_write(&mm->mmap_sem);
11344bfc4495SKAMEZAWA Hiroyuki 		} else
11354bfc4495SKAMEZAWA Hiroyuki 			err = -ENOMEM;
11364bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
11374bfc4495SKAMEZAWA Hiroyuki 	}
1138b05ca738SKOSAKI Motohiro 	if (err)
1139b05ca738SKOSAKI Motohiro 		goto mpol_out;
1140b05ca738SKOSAKI Motohiro 
11416ce3c4c0SChristoph Lameter 	vma = check_range(mm, start, end, nmask,
11426ce3c4c0SChristoph Lameter 			  flags | MPOL_MF_INVERT, &pagelist);
11436ce3c4c0SChristoph Lameter 
11446ce3c4c0SChristoph Lameter 	err = PTR_ERR(vma);
11456ce3c4c0SChristoph Lameter 	if (!IS_ERR(vma)) {
11466ce3c4c0SChristoph Lameter 		int nr_failed = 0;
11476ce3c4c0SChristoph Lameter 
11489d8cebd4SKOSAKI Motohiro 		err = mbind_range(mm, start, end, new);
11497e2ab150SChristoph Lameter 
11506ce3c4c0SChristoph Lameter 		if (!list_empty(&pagelist))
115195a402c3SChristoph Lameter 			nr_failed = migrate_pages(&pagelist, new_vma_page,
115262b61f61SHugh Dickins 						(unsigned long)vma, 0);
11536ce3c4c0SChristoph Lameter 
11546ce3c4c0SChristoph Lameter 		if (!err && nr_failed && (flags & MPOL_MF_STRICT))
11556ce3c4c0SChristoph Lameter 			err = -EIO;
1156ab8a3e14SKOSAKI Motohiro 	} else
1157ab8a3e14SKOSAKI Motohiro 		putback_lru_pages(&pagelist);
1158b20a3503SChristoph Lameter 
11596ce3c4c0SChristoph Lameter 	up_write(&mm->mmap_sem);
1160b05ca738SKOSAKI Motohiro  mpol_out:
1161f0be3d32SLee Schermerhorn 	mpol_put(new);
11626ce3c4c0SChristoph Lameter 	return err;
11636ce3c4c0SChristoph Lameter }
11646ce3c4c0SChristoph Lameter 
116539743889SChristoph Lameter /*
11668bccd85fSChristoph Lameter  * User space interface with variable sized bitmaps for nodelists.
11678bccd85fSChristoph Lameter  */
11688bccd85fSChristoph Lameter 
11698bccd85fSChristoph Lameter /* Copy a node mask from user space. */
117039743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
11718bccd85fSChristoph Lameter 		     unsigned long maxnode)
11728bccd85fSChristoph Lameter {
11738bccd85fSChristoph Lameter 	unsigned long k;
11748bccd85fSChristoph Lameter 	unsigned long nlongs;
11758bccd85fSChristoph Lameter 	unsigned long endmask;
11768bccd85fSChristoph Lameter 
11778bccd85fSChristoph Lameter 	--maxnode;
11788bccd85fSChristoph Lameter 	nodes_clear(*nodes);
11798bccd85fSChristoph Lameter 	if (maxnode == 0 || !nmask)
11808bccd85fSChristoph Lameter 		return 0;
1181a9c930baSAndi Kleen 	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1182636f13c1SChris Wright 		return -EINVAL;
11838bccd85fSChristoph Lameter 
11848bccd85fSChristoph Lameter 	nlongs = BITS_TO_LONGS(maxnode);
11858bccd85fSChristoph Lameter 	if ((maxnode % BITS_PER_LONG) == 0)
11868bccd85fSChristoph Lameter 		endmask = ~0UL;
11878bccd85fSChristoph Lameter 	else
11888bccd85fSChristoph Lameter 		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
11898bccd85fSChristoph Lameter 
11908bccd85fSChristoph Lameter 	/* When the user specifies more nodes than supported, just check
11918bccd85fSChristoph Lameter 	   that the unsupported part is all zero. */
11928bccd85fSChristoph Lameter 	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
11938bccd85fSChristoph Lameter 		if (nlongs > PAGE_SIZE/sizeof(long))
11948bccd85fSChristoph Lameter 			return -EINVAL;
11958bccd85fSChristoph Lameter 		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
11968bccd85fSChristoph Lameter 			unsigned long t;
11978bccd85fSChristoph Lameter 			if (get_user(t, nmask + k))
11988bccd85fSChristoph Lameter 				return -EFAULT;
11998bccd85fSChristoph Lameter 			if (k == nlongs - 1) {
12008bccd85fSChristoph Lameter 				if (t & endmask)
12018bccd85fSChristoph Lameter 					return -EINVAL;
12028bccd85fSChristoph Lameter 			} else if (t)
12038bccd85fSChristoph Lameter 				return -EINVAL;
12048bccd85fSChristoph Lameter 		}
12058bccd85fSChristoph Lameter 		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
12068bccd85fSChristoph Lameter 		endmask = ~0UL;
12078bccd85fSChristoph Lameter 	}
12088bccd85fSChristoph Lameter 
12098bccd85fSChristoph Lameter 	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
12108bccd85fSChristoph Lameter 		return -EFAULT;
12118bccd85fSChristoph Lameter 	nodes_addr(*nodes)[nlongs-1] &= endmask;
12128bccd85fSChristoph Lameter 	return 0;
12138bccd85fSChristoph Lameter }
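
/*
 * Illustrative sketch (editor's addition): on a 64-bit kernel, a
 * caller requesting nodes 0 and 2 might pass
 *
 *	unsigned long nmask[1] = { 0x5 };
 *	unsigned long maxnode = 4;
 *
 * After the --maxnode above, maxnode counts bits 0..2; get_nodes()
 * copies one long and masks the tail with endmask = 0x7.  Bits beyond
 * MAX_NUMNODES are only checked when the user mask is wider than the
 * kernel's.
 */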
12148bccd85fSChristoph Lameter 
12158bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */
12168bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
12178bccd85fSChristoph Lameter 			      nodemask_t *nodes)
12188bccd85fSChristoph Lameter {
12198bccd85fSChristoph Lameter 	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
12208bccd85fSChristoph Lameter 	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
12218bccd85fSChristoph Lameter 
12228bccd85fSChristoph Lameter 	if (copy > nbytes) {
12238bccd85fSChristoph Lameter 		if (copy > PAGE_SIZE)
12248bccd85fSChristoph Lameter 			return -EINVAL;
12258bccd85fSChristoph Lameter 		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
12268bccd85fSChristoph Lameter 			return -EFAULT;
12278bccd85fSChristoph Lameter 		copy = nbytes;
12288bccd85fSChristoph Lameter 	}
12298bccd85fSChristoph Lameter 	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
12308bccd85fSChristoph Lameter }
12318bccd85fSChristoph Lameter 
1232938bb9f5SHeiko Carstens SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1233938bb9f5SHeiko Carstens 		unsigned long, mode, unsigned long __user *, nmask,
1234938bb9f5SHeiko Carstens 		unsigned long, maxnode, unsigned, flags)
12358bccd85fSChristoph Lameter {
12368bccd85fSChristoph Lameter 	nodemask_t nodes;
12378bccd85fSChristoph Lameter 	int err;
1238028fec41SDavid Rientjes 	unsigned short mode_flags;
12398bccd85fSChristoph Lameter 
1240028fec41SDavid Rientjes 	mode_flags = mode & MPOL_MODE_FLAGS;
1241028fec41SDavid Rientjes 	mode &= ~MPOL_MODE_FLAGS;
1242a3b51e01SDavid Rientjes 	if (mode >= MPOL_MAX)
1243a3b51e01SDavid Rientjes 		return -EINVAL;
12444c50bc01SDavid Rientjes 	if ((mode_flags & MPOL_F_STATIC_NODES) &&
12454c50bc01SDavid Rientjes 	    (mode_flags & MPOL_F_RELATIVE_NODES))
12464c50bc01SDavid Rientjes 		return -EINVAL;
12478bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
12488bccd85fSChristoph Lameter 	if (err)
12498bccd85fSChristoph Lameter 		return err;
1250028fec41SDavid Rientjes 	return do_mbind(start, len, mode, mode_flags, &nodes, flags);
12518bccd85fSChristoph Lameter }
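
/*
 * Userspace usage sketch (editor's addition; relies on the mbind(2)
 * wrapper from libnuma's <numaif.h>, and 'len' is an assumed mapping
 * size): restrict an anonymous mapping to node 0, failing strictly
 * if any page cannot comply.
 *
 *	#include <numaif.h>
 *	#include <sys/mman.h>
 *
 *	unsigned long nodemask = 1UL << 0;
 *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (mbind(buf, len, MPOL_BIND, &nodemask,
 *		  sizeof(nodemask) * 8, MPOL_MF_STRICT) < 0)
 *		perror("mbind");
 */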
12528bccd85fSChristoph Lameter 
12538bccd85fSChristoph Lameter /* Set the process memory policy */
1254938bb9f5SHeiko Carstens SYSCALL_DEFINE3(set_mempolicy, int, mode, unsigned long __user *, nmask,
1255938bb9f5SHeiko Carstens 		unsigned long, maxnode)
12568bccd85fSChristoph Lameter {
12578bccd85fSChristoph Lameter 	int err;
12588bccd85fSChristoph Lameter 	nodemask_t nodes;
1259028fec41SDavid Rientjes 	unsigned short flags;
12608bccd85fSChristoph Lameter 
1261028fec41SDavid Rientjes 	flags = mode & MPOL_MODE_FLAGS;
1262028fec41SDavid Rientjes 	mode &= ~MPOL_MODE_FLAGS;
1263028fec41SDavid Rientjes 	if ((unsigned int)mode >= MPOL_MAX)
12648bccd85fSChristoph Lameter 		return -EINVAL;
12654c50bc01SDavid Rientjes 	if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
12664c50bc01SDavid Rientjes 		return -EINVAL;
12678bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
12688bccd85fSChristoph Lameter 	if (err)
12698bccd85fSChristoph Lameter 		return err;
1270028fec41SDavid Rientjes 	return do_set_mempolicy(mode, flags, &nodes);
12718bccd85fSChristoph Lameter }
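
/*
 * Userspace usage sketch (editor's addition, via libnuma's
 * <numaif.h>): interleave the calling task's allocations across
 * nodes 0 and 1.  Mode flags such as MPOL_F_STATIC_NODES may be OR'd
 * into 'mode'; the handler above strips them before validating.
 *
 *	unsigned long nodemask = (1UL << 0) | (1UL << 1);
 *	if (set_mempolicy(MPOL_INTERLEAVE, &nodemask,
 *			  sizeof(nodemask) * 8) < 0)
 *		perror("set_mempolicy");
 */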
12728bccd85fSChristoph Lameter 
1273938bb9f5SHeiko Carstens SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1274938bb9f5SHeiko Carstens 		const unsigned long __user *, old_nodes,
1275938bb9f5SHeiko Carstens 		const unsigned long __user *, new_nodes)
127639743889SChristoph Lameter {
1277c69e8d9cSDavid Howells 	const struct cred *cred = current_cred(), *tcred;
127839743889SChristoph Lameter 	struct mm_struct *mm;
127939743889SChristoph Lameter 	struct task_struct *task;
128039743889SChristoph Lameter 	nodemask_t old;
128139743889SChristoph Lameter 	nodemask_t new;
128239743889SChristoph Lameter 	nodemask_t task_nodes;
128339743889SChristoph Lameter 	int err;
128439743889SChristoph Lameter 
128539743889SChristoph Lameter 	err = get_nodes(&old, old_nodes, maxnode);
128639743889SChristoph Lameter 	if (err)
128739743889SChristoph Lameter 		return err;
128839743889SChristoph Lameter 
128939743889SChristoph Lameter 	err = get_nodes(&new, new_nodes, maxnode);
129039743889SChristoph Lameter 	if (err)
129139743889SChristoph Lameter 		return err;
129239743889SChristoph Lameter 
129339743889SChristoph Lameter 	/* Find the mm_struct */
129439743889SChristoph Lameter 	read_lock(&tasklist_lock);
1295228ebcbeSPavel Emelyanov 	task = pid ? find_task_by_vpid(pid) : current;
129639743889SChristoph Lameter 	if (!task) {
129739743889SChristoph Lameter 		read_unlock(&tasklist_lock);
129839743889SChristoph Lameter 		return -ESRCH;
129939743889SChristoph Lameter 	}
130039743889SChristoph Lameter 	mm = get_task_mm(task);
130139743889SChristoph Lameter 	read_unlock(&tasklist_lock);
130239743889SChristoph Lameter 
130339743889SChristoph Lameter 	if (!mm)
130439743889SChristoph Lameter 		return -EINVAL;
130539743889SChristoph Lameter 
130639743889SChristoph Lameter 	/*
130739743889SChristoph Lameter 	 * Check if this process has the right to modify the specified
130839743889SChristoph Lameter 	 * process. The right exists if the process has administrative
13097f927fccSAlexey Dobriyan 	 * capabilities, superuser privileges or the same
131039743889SChristoph Lameter 	 * userid as the target process.
131139743889SChristoph Lameter 	 */
1312c69e8d9cSDavid Howells 	rcu_read_lock();
1313c69e8d9cSDavid Howells 	tcred = __task_cred(task);
1314b6dff3ecSDavid Howells 	if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
1315b6dff3ecSDavid Howells 	    cred->uid  != tcred->suid && cred->uid  != tcred->uid &&
131674c00241SChristoph Lameter 	    !capable(CAP_SYS_NICE)) {
1317c69e8d9cSDavid Howells 		rcu_read_unlock();
131839743889SChristoph Lameter 		err = -EPERM;
131939743889SChristoph Lameter 		goto out;
132039743889SChristoph Lameter 	}
1321c69e8d9cSDavid Howells 	rcu_read_unlock();
132239743889SChristoph Lameter 
132339743889SChristoph Lameter 	task_nodes = cpuset_mems_allowed(task);
132439743889SChristoph Lameter 	/* Is the user allowed to access the target nodes? */
132574c00241SChristoph Lameter 	if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_NICE)) {
132639743889SChristoph Lameter 		err = -EPERM;
132739743889SChristoph Lameter 		goto out;
132839743889SChristoph Lameter 	}
132939743889SChristoph Lameter 
133037b07e41SLee Schermerhorn 	if (!nodes_subset(new, node_states[N_HIGH_MEMORY])) {
13313b42d28bSChristoph Lameter 		err = -EINVAL;
13323b42d28bSChristoph Lameter 		goto out;
13333b42d28bSChristoph Lameter 	}
13343b42d28bSChristoph Lameter 
133586c3a764SDavid Quigley 	err = security_task_movememory(task);
133686c3a764SDavid Quigley 	if (err)
133786c3a764SDavid Quigley 		goto out;
133886c3a764SDavid Quigley 
1339511030bcSChristoph Lameter 	err = do_migrate_pages(mm, &old, &new,
134074c00241SChristoph Lameter 		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
134139743889SChristoph Lameter out:
134239743889SChristoph Lameter 	mmput(mm);
134339743889SChristoph Lameter 	return err;
134439743889SChristoph Lameter }
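
/*
 * Userspace usage sketch (editor's addition, via libnuma's
 * <numaif.h>): move the pages of process 'pid' from node 0 to
 * node 1.  On success the return value is the number of pages that
 * could not be moved (the 'busy' count from do_migrate_pages()).
 *
 *	unsigned long old = 1UL << 0, new = 1UL << 1;
 *	long left = migrate_pages(pid, sizeof(old) * 8, &old, &new);
 *	if (left < 0)
 *		perror("migrate_pages");
 */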
134539743889SChristoph Lameter 
134639743889SChristoph Lameter 
13478bccd85fSChristoph Lameter /* Retrieve NUMA policy */
1348938bb9f5SHeiko Carstens SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1349938bb9f5SHeiko Carstens 		unsigned long __user *, nmask, unsigned long, maxnode,
1350938bb9f5SHeiko Carstens 		unsigned long, addr, unsigned long, flags)
13518bccd85fSChristoph Lameter {
1352dbcb0f19SAdrian Bunk 	int err;
1353dbcb0f19SAdrian Bunk 	int uninitialized_var(pval);
13548bccd85fSChristoph Lameter 	nodemask_t nodes;
13558bccd85fSChristoph Lameter 
13568bccd85fSChristoph Lameter 	if (nmask != NULL && maxnode < MAX_NUMNODES)
13578bccd85fSChristoph Lameter 		return -EINVAL;
13588bccd85fSChristoph Lameter 
13598bccd85fSChristoph Lameter 	err = do_get_mempolicy(&pval, &nodes, addr, flags);
13608bccd85fSChristoph Lameter 
13618bccd85fSChristoph Lameter 	if (err)
13628bccd85fSChristoph Lameter 		return err;
13638bccd85fSChristoph Lameter 
13648bccd85fSChristoph Lameter 	if (policy && put_user(pval, policy))
13658bccd85fSChristoph Lameter 		return -EFAULT;
13668bccd85fSChristoph Lameter 
13678bccd85fSChristoph Lameter 	if (nmask)
13688bccd85fSChristoph Lameter 		err = copy_nodes_to_user(nmask, maxnode, &nodes);
13698bccd85fSChristoph Lameter 
13708bccd85fSChristoph Lameter 	return err;
13718bccd85fSChristoph Lameter }
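
/*
 * Userspace usage sketch (editor's addition, via libnuma's
 * <numaif.h>): query the policy governing 'addr' in our own address
 * space.  Note the check above: when a nodemask is supplied, maxnode
 * must cover MAX_NUMNODES, so a single long suffices only on kernels
 * configured with MAX_NUMNODES <= 64.
 *
 *	int mode;
 *	unsigned long nodemask;
 *	if (get_mempolicy(&mode, &nodemask, sizeof(nodemask) * 8,
 *			  addr, MPOL_F_ADDR) < 0)
 *		perror("get_mempolicy");
 */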
13728bccd85fSChristoph Lameter 
13731da177e4SLinus Torvalds #ifdef CONFIG_COMPAT
13741da177e4SLinus Torvalds 
13751da177e4SLinus Torvalds asmlinkage long compat_sys_get_mempolicy(int __user *policy,
13761da177e4SLinus Torvalds 				     compat_ulong_t __user *nmask,
13771da177e4SLinus Torvalds 				     compat_ulong_t maxnode,
13781da177e4SLinus Torvalds 				     compat_ulong_t addr, compat_ulong_t flags)
13791da177e4SLinus Torvalds {
13801da177e4SLinus Torvalds 	long err;
13811da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
13821da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
13831da177e4SLinus Torvalds 	DECLARE_BITMAP(bm, MAX_NUMNODES);
13841da177e4SLinus Torvalds 
13851da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
13861da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
13871da177e4SLinus Torvalds 
13881da177e4SLinus Torvalds 	if (nmask)
13891da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
13901da177e4SLinus Torvalds 
13911da177e4SLinus Torvalds 	err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
13921da177e4SLinus Torvalds 
13931da177e4SLinus Torvalds 	if (!err && nmask) {
13941da177e4SLinus Torvalds 		err = copy_from_user(bm, nm, alloc_size);
13951da177e4SLinus Torvalds 		/* ensure entire bitmap is zeroed */
13961da177e4SLinus Torvalds 		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
13971da177e4SLinus Torvalds 		err |= compat_put_bitmap(nmask, bm, nr_bits);
13981da177e4SLinus Torvalds 	}
13991da177e4SLinus Torvalds 
14001da177e4SLinus Torvalds 	return err;
14011da177e4SLinus Torvalds }
14021da177e4SLinus Torvalds 
14031da177e4SLinus Torvalds asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
14041da177e4SLinus Torvalds 				     compat_ulong_t maxnode)
14051da177e4SLinus Torvalds {
14061da177e4SLinus Torvalds 	long err = 0;
14071da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
14081da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
14091da177e4SLinus Torvalds 	DECLARE_BITMAP(bm, MAX_NUMNODES);
14101da177e4SLinus Torvalds 
14111da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
14121da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
14131da177e4SLinus Torvalds 
14141da177e4SLinus Torvalds 	if (nmask) {
14151da177e4SLinus Torvalds 		err = compat_get_bitmap(bm, nmask, nr_bits);
14161da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
14171da177e4SLinus Torvalds 		err |= copy_to_user(nm, bm, alloc_size);
14181da177e4SLinus Torvalds 	}
14191da177e4SLinus Torvalds 
14201da177e4SLinus Torvalds 	if (err)
14211da177e4SLinus Torvalds 		return -EFAULT;
14221da177e4SLinus Torvalds 
14231da177e4SLinus Torvalds 	return sys_set_mempolicy(mode, nm, nr_bits+1);
14241da177e4SLinus Torvalds }
14251da177e4SLinus Torvalds 
14261da177e4SLinus Torvalds asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
14271da177e4SLinus Torvalds 			     compat_ulong_t mode, compat_ulong_t __user *nmask,
14281da177e4SLinus Torvalds 			     compat_ulong_t maxnode, compat_ulong_t flags)
14291da177e4SLinus Torvalds {
14301da177e4SLinus Torvalds 	long err = 0;
14311da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
14321da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
1433dfcd3c0dSAndi Kleen 	nodemask_t bm;
14341da177e4SLinus Torvalds 
14351da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
14361da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
14371da177e4SLinus Torvalds 
14381da177e4SLinus Torvalds 	if (nmask) {
1439dfcd3c0dSAndi Kleen 		err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
14401da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
1441dfcd3c0dSAndi Kleen 		err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
14421da177e4SLinus Torvalds 	}
14431da177e4SLinus Torvalds 
14441da177e4SLinus Torvalds 	if (err)
14451da177e4SLinus Torvalds 		return -EFAULT;
14461da177e4SLinus Torvalds 
14471da177e4SLinus Torvalds 	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
14481da177e4SLinus Torvalds }
14491da177e4SLinus Torvalds 
14501da177e4SLinus Torvalds #endif
14511da177e4SLinus Torvalds 
1452480eccf9SLee Schermerhorn /*
1453480eccf9SLee Schermerhorn  * get_vma_policy(@task, @vma, @addr)
1454480eccf9SLee Schermerhorn  * @task - task for fallback if vma policy == default
1455480eccf9SLee Schermerhorn  * @vma   - virtual memory area whose policy is sought
1456480eccf9SLee Schermerhorn  * @addr  - address in @vma for shared policy lookup
1457480eccf9SLee Schermerhorn  *
1458480eccf9SLee Schermerhorn  * Returns effective policy for a VMA at specified address.
1459480eccf9SLee Schermerhorn  * Falls back to @task or system default policy, as necessary.
146052cd3b07SLee Schermerhorn  * The current or another task's mempolicy and non-shared vma policies
146152cd3b07SLee Schermerhorn  * are protected by the task's mmap_sem, which must be held for read by
146252cd3b07SLee Schermerhorn  * the caller.
146352cd3b07SLee Schermerhorn  * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
146452cd3b07SLee Schermerhorn  * count--added by the get_policy() vm_op, as appropriate--to protect against
146552cd3b07SLee Schermerhorn  * freeing by another task.  It is the caller's responsibility to free the
146652cd3b07SLee Schermerhorn  * extra reference for shared policies.
1467480eccf9SLee Schermerhorn  */
146848fce342SChristoph Lameter static struct mempolicy *get_vma_policy(struct task_struct *task,
146948fce342SChristoph Lameter 		struct vm_area_struct *vma, unsigned long addr)
14701da177e4SLinus Torvalds {
14716e21c8f1SChristoph Lameter 	struct mempolicy *pol = task->mempolicy;
14721da177e4SLinus Torvalds 
14731da177e4SLinus Torvalds 	if (vma) {
1474480eccf9SLee Schermerhorn 		if (vma->vm_ops && vma->vm_ops->get_policy) {
1475ae4d8c16SLee Schermerhorn 			struct mempolicy *vpol = vma->vm_ops->get_policy(vma,
1476ae4d8c16SLee Schermerhorn 									addr);
1477ae4d8c16SLee Schermerhorn 			if (vpol)
1478ae4d8c16SLee Schermerhorn 				pol = vpol;
1479bea904d5SLee Schermerhorn 		} else if (vma->vm_policy)
14801da177e4SLinus Torvalds 			pol = vma->vm_policy;
14811da177e4SLinus Torvalds 	}
14821da177e4SLinus Torvalds 	if (!pol)
14831da177e4SLinus Torvalds 		pol = &default_policy;
14841da177e4SLinus Torvalds 	return pol;
14851da177e4SLinus Torvalds }
14861da177e4SLinus Torvalds 
148752cd3b07SLee Schermerhorn /*
148852cd3b07SLee Schermerhorn  * Return a nodemask representing a mempolicy for filtering nodes for
148952cd3b07SLee Schermerhorn  * page allocation
149052cd3b07SLee Schermerhorn  */
149152cd3b07SLee Schermerhorn static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
149219770b32SMel Gorman {
149319770b32SMel Gorman 	/* Lower zones don't get a nodemask applied for MPOL_BIND */
149445c4745aSLee Schermerhorn 	if (unlikely(policy->mode == MPOL_BIND) &&
149519770b32SMel Gorman 			gfp_zone(gfp) >= policy_zone &&
149619770b32SMel Gorman 			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
149719770b32SMel Gorman 		return &policy->v.nodes;
149819770b32SMel Gorman 
149919770b32SMel Gorman 	return NULL;
150019770b32SMel Gorman }
150119770b32SMel Gorman 
150252cd3b07SLee Schermerhorn /* Return a zonelist indicated by gfp for node representing a mempolicy */
150352cd3b07SLee Schermerhorn static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy)
15041da177e4SLinus Torvalds {
1505fc36b8d3SLee Schermerhorn 	int nd = numa_node_id();
15061da177e4SLinus Torvalds 
150745c4745aSLee Schermerhorn 	switch (policy->mode) {
15081da177e4SLinus Torvalds 	case MPOL_PREFERRED:
1509fc36b8d3SLee Schermerhorn 		if (!(policy->flags & MPOL_F_LOCAL))
15101da177e4SLinus Torvalds 			nd = policy->v.preferred_node;
15111da177e4SLinus Torvalds 		break;
15121da177e4SLinus Torvalds 	case MPOL_BIND:
151319770b32SMel Gorman 		/*
151452cd3b07SLee Schermerhorn 		 * Normally, MPOL_BIND allocations are node-local within the
151552cd3b07SLee Schermerhorn 		 * allowed nodemask.  However, if __GFP_THISNODE is set and the
15166eb27e1fSBob Liu 		 * current node isn't part of the mask, we use the zonelist for
151752cd3b07SLee Schermerhorn 		 * the first node in the mask instead.
151819770b32SMel Gorman 		 */
151919770b32SMel Gorman 		if (unlikely(gfp & __GFP_THISNODE) &&
152019770b32SMel Gorman 				unlikely(!node_isset(nd, policy->v.nodes)))
152119770b32SMel Gorman 			nd = first_node(policy->v.nodes);
152219770b32SMel Gorman 		break;
15231da177e4SLinus Torvalds 	default:
15241da177e4SLinus Torvalds 		BUG();
15251da177e4SLinus Torvalds 	}
15260e88460dSMel Gorman 	return node_zonelist(nd, gfp);
15271da177e4SLinus Torvalds }
15281da177e4SLinus Torvalds 
15291da177e4SLinus Torvalds /* Do dynamic interleaving for a process */
15301da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy)
15311da177e4SLinus Torvalds {
15321da177e4SLinus Torvalds 	unsigned nid, next;
15331da177e4SLinus Torvalds 	struct task_struct *me = current;
15341da177e4SLinus Torvalds 
15351da177e4SLinus Torvalds 	nid = me->il_next;
1536dfcd3c0dSAndi Kleen 	next = next_node(nid, policy->v.nodes);
15371da177e4SLinus Torvalds 	if (next >= MAX_NUMNODES)
1538dfcd3c0dSAndi Kleen 		next = first_node(policy->v.nodes);
1539f5b087b5SDavid Rientjes 	if (next < MAX_NUMNODES)
15401da177e4SLinus Torvalds 		me->il_next = next;
15411da177e4SLinus Torvalds 	return nid;
15421da177e4SLinus Torvalds }
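
/*
 * Worked example (illustrative): with policy->v.nodes = {0, 2, 5}
 * and me->il_next = 2, this returns 2 and advances il_next to 5; the
 * following call returns 5 and, since next_node() runs past
 * MAX_NUMNODES, wraps il_next back to 0.  The result is the
 * round-robin sequence 2, 5, 0, 2, ...
 */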
15431da177e4SLinus Torvalds 
1544dc85da15SChristoph Lameter /*
1545dc85da15SChristoph Lameter  * Depending on the memory policy provide a node from which to allocate the
1546dc85da15SChristoph Lameter  * next slab entry.
154752cd3b07SLee Schermerhorn  * @policy must be protected from freeing by the caller.  If @policy is
154852cd3b07SLee Schermerhorn  * the current task's mempolicy, this protection is implicit, as only the
154952cd3b07SLee Schermerhorn  * task can change its policy.  The system default policy requires no
155052cd3b07SLee Schermerhorn  * such protection.
1551dc85da15SChristoph Lameter  */
1552dc85da15SChristoph Lameter unsigned slab_node(struct mempolicy *policy)
1553dc85da15SChristoph Lameter {
1554fc36b8d3SLee Schermerhorn 	if (!policy || policy->flags & MPOL_F_LOCAL)
1555bea904d5SLee Schermerhorn 		return numa_node_id();
1556765c4507SChristoph Lameter 
1557bea904d5SLee Schermerhorn 	switch (policy->mode) {
1558bea904d5SLee Schermerhorn 	case MPOL_PREFERRED:
1559fc36b8d3SLee Schermerhorn 		/*
1560fc36b8d3SLee Schermerhorn 		 * handled MPOL_F_LOCAL above
1561fc36b8d3SLee Schermerhorn 		 */
1562bea904d5SLee Schermerhorn 		return policy->v.preferred_node;
1563bea904d5SLee Schermerhorn 
1564dc85da15SChristoph Lameter 	case MPOL_INTERLEAVE:
1565dc85da15SChristoph Lameter 		return interleave_nodes(policy);
1566dc85da15SChristoph Lameter 
1567dd1a239fSMel Gorman 	case MPOL_BIND: {
1568dc85da15SChristoph Lameter 		/*
1569dc85da15SChristoph Lameter 		 * Follow bind policy behavior and start allocation at the
1570dc85da15SChristoph Lameter 		 * first node.
1571dc85da15SChristoph Lameter 		 */
157219770b32SMel Gorman 		struct zonelist *zonelist;
157319770b32SMel Gorman 		struct zone *zone;
157419770b32SMel Gorman 		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
157519770b32SMel Gorman 		zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
157619770b32SMel Gorman 		(void)first_zones_zonelist(zonelist, highest_zoneidx,
157719770b32SMel Gorman 							&policy->v.nodes,
157819770b32SMel Gorman 							&zone);
157919770b32SMel Gorman 		return zone->node;
1580dd1a239fSMel Gorman 	}
1581dc85da15SChristoph Lameter 
1582dc85da15SChristoph Lameter 	default:
1583bea904d5SLee Schermerhorn 		BUG();
1584dc85da15SChristoph Lameter 	}
1585dc85da15SChristoph Lameter }
1586dc85da15SChristoph Lameter 
15871da177e4SLinus Torvalds /* Do static interleaving for a VMA with known offset. */
15881da177e4SLinus Torvalds static unsigned offset_il_node(struct mempolicy *pol,
15891da177e4SLinus Torvalds 		struct vm_area_struct *vma, unsigned long off)
15901da177e4SLinus Torvalds {
1591dfcd3c0dSAndi Kleen 	unsigned nnodes = nodes_weight(pol->v.nodes);
1592f5b087b5SDavid Rientjes 	unsigned target;
15931da177e4SLinus Torvalds 	int c;
15941da177e4SLinus Torvalds 	int nid = -1;
15951da177e4SLinus Torvalds 
1596f5b087b5SDavid Rientjes 	if (!nnodes)
1597f5b087b5SDavid Rientjes 		return numa_node_id();
1598f5b087b5SDavid Rientjes 	target = (unsigned int)off % nnodes;
15991da177e4SLinus Torvalds 	c = 0;
16001da177e4SLinus Torvalds 	do {
1601dfcd3c0dSAndi Kleen 		nid = next_node(nid, pol->v.nodes);
16021da177e4SLinus Torvalds 		c++;
16031da177e4SLinus Torvalds 	} while (c <= target);
16041da177e4SLinus Torvalds 	return nid;
16051da177e4SLinus Torvalds }
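
/*
 * Worked example (illustrative): for pol->v.nodes = {1, 3, 4} and
 * off = 7, nnodes = 3 and target = 7 % 3 = 1, so the do/while walks
 * two steps from nid = -1 (to 1, then 3) and returns node 3.
 */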
16061da177e4SLinus Torvalds 
16075da7ca86SChristoph Lameter /* Determine a node number for interleave */
16085da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol,
16095da7ca86SChristoph Lameter 		 struct vm_area_struct *vma, unsigned long addr, int shift)
16105da7ca86SChristoph Lameter {
16115da7ca86SChristoph Lameter 	if (vma) {
16125da7ca86SChristoph Lameter 		unsigned long off;
16135da7ca86SChristoph Lameter 
16143b98b087SNishanth Aravamudan 		/*
16153b98b087SNishanth Aravamudan 		 * for small pages, there is no difference between
16163b98b087SNishanth Aravamudan 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
16173b98b087SNishanth Aravamudan 		 * for huge pages, since vm_pgoff is in units of small
16183b98b087SNishanth Aravamudan 		 * pages, we need to shift off the always 0 bits to get
16193b98b087SNishanth Aravamudan 		 * a useful offset.
16203b98b087SNishanth Aravamudan 		 */
16213b98b087SNishanth Aravamudan 		BUG_ON(shift < PAGE_SHIFT);
16223b98b087SNishanth Aravamudan 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
16235da7ca86SChristoph Lameter 		off += (addr - vma->vm_start) >> shift;
16245da7ca86SChristoph Lameter 		return offset_il_node(pol, vma, off);
16255da7ca86SChristoph Lameter 	} else
16265da7ca86SChristoph Lameter 		return interleave_nodes(pol);
16275da7ca86SChristoph Lameter }
16285da7ca86SChristoph Lameter 
162900ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS
1630480eccf9SLee Schermerhorn /*
1631480eccf9SLee Schermerhorn  * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
1632480eccf9SLee Schermerhorn  * @vma = virtual memory area whose policy is sought
1633480eccf9SLee Schermerhorn  * @addr = address in @vma for shared policy lookup and interleave policy
1634480eccf9SLee Schermerhorn  * @gfp_flags = for requested zone
163519770b32SMel Gorman  * @mpol = pointer to mempolicy pointer for reference counted mempolicy
163619770b32SMel Gorman  * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
1637480eccf9SLee Schermerhorn  *
163852cd3b07SLee Schermerhorn  * Returns a zonelist suitable for a huge page allocation and a pointer
163952cd3b07SLee Schermerhorn  * to the struct mempolicy for conditional unref after allocation.
164052cd3b07SLee Schermerhorn  * If the effective policy is 'bind', returns a pointer to the mempolicy's
164152cd3b07SLee Schermerhorn  * @nodemask for filtering the zonelist.
1642480eccf9SLee Schermerhorn  */
1643396faf03SMel Gorman struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
164419770b32SMel Gorman 				gfp_t gfp_flags, struct mempolicy **mpol,
164519770b32SMel Gorman 				nodemask_t **nodemask)
16465da7ca86SChristoph Lameter {
1647480eccf9SLee Schermerhorn 	struct zonelist *zl;
16485da7ca86SChristoph Lameter 
164952cd3b07SLee Schermerhorn 	*mpol = get_vma_policy(current, vma, addr);
165019770b32SMel Gorman 	*nodemask = NULL;	/* assume !MPOL_BIND */
16515da7ca86SChristoph Lameter 
165252cd3b07SLee Schermerhorn 	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
165352cd3b07SLee Schermerhorn 		zl = node_zonelist(interleave_nid(*mpol, vma, addr,
1654a5516438SAndi Kleen 				huge_page_shift(hstate_vma(vma))), gfp_flags);
165552cd3b07SLee Schermerhorn 	} else {
165652cd3b07SLee Schermerhorn 		zl = policy_zonelist(gfp_flags, *mpol);
165752cd3b07SLee Schermerhorn 		if ((*mpol)->mode == MPOL_BIND)
165852cd3b07SLee Schermerhorn 			*nodemask = &(*mpol)->v.nodes;
1659480eccf9SLee Schermerhorn 	}
1660480eccf9SLee Schermerhorn 	return zl;
16615da7ca86SChristoph Lameter }
166206808b08SLee Schermerhorn 
166306808b08SLee Schermerhorn /*
166406808b08SLee Schermerhorn  * init_nodemask_of_mempolicy
166506808b08SLee Schermerhorn  *
166606808b08SLee Schermerhorn  * If the current task's mempolicy is "default" [NULL], return 'false'
166706808b08SLee Schermerhorn  * to indicate default policy.  Otherwise, extract the policy nodemask
166806808b08SLee Schermerhorn  * for 'bind' or 'interleave' policy into the argument nodemask, or
166906808b08SLee Schermerhorn  * initialize the argument nodemask to contain the single node for
167006808b08SLee Schermerhorn  * 'preferred' or 'local' policy and return 'true' to indicate presence
167106808b08SLee Schermerhorn  * of non-default mempolicy.
167206808b08SLee Schermerhorn  *
167306808b08SLee Schermerhorn  * We don't bother with reference counting the mempolicy [mpol_get/put]
167406808b08SLee Schermerhorn  * because the current task is examining its own mempolicy and a task's
167506808b08SLee Schermerhorn  * mempolicy is only ever changed by the task itself.
167606808b08SLee Schermerhorn  *
167706808b08SLee Schermerhorn  * N.B., it is the caller's responsibility to free a returned nodemask.
167806808b08SLee Schermerhorn  */
167906808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask)
168006808b08SLee Schermerhorn {
168106808b08SLee Schermerhorn 	struct mempolicy *mempolicy;
168206808b08SLee Schermerhorn 	int nid;
168306808b08SLee Schermerhorn 
168406808b08SLee Schermerhorn 	if (!(mask && current->mempolicy))
168506808b08SLee Schermerhorn 		return false;
168606808b08SLee Schermerhorn 
168706808b08SLee Schermerhorn 	mempolicy = current->mempolicy;
168806808b08SLee Schermerhorn 	switch (mempolicy->mode) {
168906808b08SLee Schermerhorn 	case MPOL_PREFERRED:
169006808b08SLee Schermerhorn 		if (mempolicy->flags & MPOL_F_LOCAL)
169106808b08SLee Schermerhorn 			nid = numa_node_id();
169206808b08SLee Schermerhorn 		else
169306808b08SLee Schermerhorn 			nid = mempolicy->v.preferred_node;
169406808b08SLee Schermerhorn 		init_nodemask_of_node(mask, nid);
169506808b08SLee Schermerhorn 		break;
169606808b08SLee Schermerhorn 
169706808b08SLee Schermerhorn 	case MPOL_BIND:
169806808b08SLee Schermerhorn 		/* Fall through */
169906808b08SLee Schermerhorn 	case MPOL_INTERLEAVE:
170006808b08SLee Schermerhorn 		*mask = mempolicy->v.nodes;
170106808b08SLee Schermerhorn 		break;
170206808b08SLee Schermerhorn 
170306808b08SLee Schermerhorn 	default:
170406808b08SLee Schermerhorn 		BUG();
170506808b08SLee Schermerhorn 	}
170606808b08SLee Schermerhorn 
170706808b08SLee Schermerhorn 	return true;
170806808b08SLee Schermerhorn }
170900ac59adSChen, Kenneth W #endif
17105da7ca86SChristoph Lameter 
17111da177e4SLinus Torvalds /* Allocate a page in interleave policy.
17121da177e4SLinus Torvalds    This has its own path because it needs to do special accounting. */
1713662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1714662f3a0bSAndi Kleen 					unsigned nid)
17151da177e4SLinus Torvalds {
17161da177e4SLinus Torvalds 	struct zonelist *zl;
17171da177e4SLinus Torvalds 	struct page *page;
17181da177e4SLinus Torvalds 
17190e88460dSMel Gorman 	zl = node_zonelist(nid, gfp);
17201da177e4SLinus Torvalds 	page = __alloc_pages(gfp, order, zl);
1721dd1a239fSMel Gorman 	if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
1722ca889e6cSChristoph Lameter 		inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
17231da177e4SLinus Torvalds 	return page;
17241da177e4SLinus Torvalds }
17251da177e4SLinus Torvalds 
17261da177e4SLinus Torvalds /**
17271da177e4SLinus Torvalds  * 	alloc_page_vma	- Allocate a page for a VMA.
17281da177e4SLinus Torvalds  *
17291da177e4SLinus Torvalds  * 	@gfp:
17301da177e4SLinus Torvalds  *      %GFP_USER    user allocation.
17311da177e4SLinus Torvalds  *      %GFP_KERNEL  kernel allocations,
17321da177e4SLinus Torvalds  *      %GFP_HIGHMEM highmem/user allocations,
17331da177e4SLinus Torvalds  *      %GFP_FS      allocation should not call back into a file system.
17341da177e4SLinus Torvalds  *      %GFP_ATOMIC  don't sleep.
17351da177e4SLinus Torvalds  *
17361da177e4SLinus Torvalds  * 	@vma:  Pointer to VMA or NULL if not available.
17371da177e4SLinus Torvalds  *	@addr: Virtual Address of the allocation. Must be inside the VMA.
17381da177e4SLinus Torvalds  *
17391da177e4SLinus Torvalds  * 	This function allocates a page from the kernel page pool and applies
17401da177e4SLinus Torvalds  *	a NUMA policy associated with the VMA or the current process.
17411da177e4SLinus Torvalds  *	When VMA is not NULL, the caller must hold down_read on the mmap_sem
17421da177e4SLinus Torvalds  *	of the mm_struct of the VMA to prevent it from going away. Should be
17431da177e4SLinus Torvalds  *	used for all allocations for pages that will be mapped into
17441da177e4SLinus Torvalds  * 	user space. Returns NULL when no page can be allocated.
17451da177e4SLinus Torvalds  *
17461da177e4SLinus Torvalds  *	Should be called with the mmap_sem of the vma held.
17471da177e4SLinus Torvalds  */
17481da177e4SLinus Torvalds struct page *
1749dd0fc66fSAl Viro alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
17501da177e4SLinus Torvalds {
17516e21c8f1SChristoph Lameter 	struct mempolicy *pol = get_vma_policy(current, vma, addr);
1752480eccf9SLee Schermerhorn 	struct zonelist *zl;
17531da177e4SLinus Torvalds 
175445c4745aSLee Schermerhorn 	if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
17551da177e4SLinus Torvalds 		unsigned nid;
17565da7ca86SChristoph Lameter 
17575da7ca86SChristoph Lameter 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
175852cd3b07SLee Schermerhorn 		mpol_cond_put(pol);
17591da177e4SLinus Torvalds 		return alloc_page_interleave(gfp, 0, nid);
17601da177e4SLinus Torvalds 	}
176152cd3b07SLee Schermerhorn 	zl = policy_zonelist(gfp, pol);
176252cd3b07SLee Schermerhorn 	if (unlikely(mpol_needs_cond_ref(pol))) {
1763480eccf9SLee Schermerhorn 		/*
176452cd3b07SLee Schermerhorn 		 * slow path: ref counted shared policy
1765480eccf9SLee Schermerhorn 		 */
176619770b32SMel Gorman 		struct page *page =  __alloc_pages_nodemask(gfp, 0,
176752cd3b07SLee Schermerhorn 						zl, policy_nodemask(gfp, pol));
1768f0be3d32SLee Schermerhorn 		__mpol_put(pol);
1769480eccf9SLee Schermerhorn 		return page;
1770480eccf9SLee Schermerhorn 	}
1771480eccf9SLee Schermerhorn 	/*
1772480eccf9SLee Schermerhorn 	 * fast path:  default or task policy
1773480eccf9SLee Schermerhorn 	 */
177452cd3b07SLee Schermerhorn 	return __alloc_pages_nodemask(gfp, 0, zl, policy_nodemask(gfp, pol));
17751da177e4SLinus Torvalds }
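
/*
 * In-kernel usage sketch (editor's addition): a typical fault-path
 * caller, with the vma's mmap_sem already held for read, might do:
 *
 *	page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
 *	if (!page)
 *		return VM_FAULT_OOM;
 */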
17761da177e4SLinus Torvalds 
17771da177e4SLinus Torvalds /**
17781da177e4SLinus Torvalds  * 	alloc_pages_current - Allocate pages.
17791da177e4SLinus Torvalds  *
17801da177e4SLinus Torvalds  *	@gfp:
17811da177e4SLinus Torvalds  *		%GFP_USER   user allocation,
17821da177e4SLinus Torvalds  *      	%GFP_KERNEL kernel allocation,
17831da177e4SLinus Torvalds  *      	%GFP_HIGHMEM highmem allocation,
17841da177e4SLinus Torvalds  *      	%GFP_FS     don't call back into a file system.
17851da177e4SLinus Torvalds  *      	%GFP_ATOMIC don't sleep.
17861da177e4SLinus Torvalds  *	@order: Power of two of allocation size in pages. 0 is a single page.
17871da177e4SLinus Torvalds  *
17881da177e4SLinus Torvalds  *	Allocate a page from the kernel page pool.  When not in
17891da177e4SLinus Torvalds  *	interrupt context, apply the current process's NUMA policy.
17901da177e4SLinus Torvalds  *	Returns NULL when no page can be allocated.
17911da177e4SLinus Torvalds  *
1792cf2a473cSPaul Jackson  *	Don't call cpuset_update_task_memory_state() unless
17931da177e4SLinus Torvalds  *	1) it's ok to take cpuset_sem (can WAIT), and
17941da177e4SLinus Torvalds  *	2) allocating for current task (not interrupt).
17951da177e4SLinus Torvalds  */
1796dd0fc66fSAl Viro struct page *alloc_pages_current(gfp_t gfp, unsigned order)
17971da177e4SLinus Torvalds {
17981da177e4SLinus Torvalds 	struct mempolicy *pol = current->mempolicy;
17991da177e4SLinus Torvalds 
18009b819d20SChristoph Lameter 	if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
18011da177e4SLinus Torvalds 		pol = &default_policy;
180252cd3b07SLee Schermerhorn 
180352cd3b07SLee Schermerhorn 	/*
180452cd3b07SLee Schermerhorn 	 * No reference counting needed for current->mempolicy
180552cd3b07SLee Schermerhorn 	 * nor system default_policy
180652cd3b07SLee Schermerhorn 	 */
180745c4745aSLee Schermerhorn 	if (pol->mode == MPOL_INTERLEAVE)
18081da177e4SLinus Torvalds 		return alloc_page_interleave(gfp, order, interleave_nodes(pol));
180919770b32SMel Gorman 	return __alloc_pages_nodemask(gfp, order,
181052cd3b07SLee Schermerhorn 			policy_zonelist(gfp, pol), policy_nodemask(gfp, pol));
18111da177e4SLinus Torvalds }
18121da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_pages_current);
18131da177e4SLinus Torvalds 
18144225399aSPaul Jackson /*
1815846a16bfSLee Schermerhorn  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
18164225399aSPaul Jackson  * rebinds the mempolicy it's copying by calling mpol_rebind_policy()
18174225399aSPaul Jackson  * with the mems_allowed returned by cpuset_mems_allowed().  This
18184225399aSPaul Jackson  * keeps mempolicies cpuset relative after its cpuset moves.  See
18194225399aSPaul Jackson  * further kernel/cpuset.c update_nodemask().
1820*708c1bbcSMiao Xie  *
1821*708c1bbcSMiao Xie  * current's mempolicy may be rebound by another task (the task that changes
1822*708c1bbcSMiao Xie  * the cpuset's mems), so we needn't do rebind work for the current task.
18234225399aSPaul Jackson  */
18244225399aSPaul Jackson 
1825846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */
1826846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old)
18271da177e4SLinus Torvalds {
18281da177e4SLinus Torvalds 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
18291da177e4SLinus Torvalds 
18301da177e4SLinus Torvalds 	if (!new)
18311da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
1832*708c1bbcSMiao Xie 
1833*708c1bbcSMiao Xie 	/* task's mempolicy is protected by alloc_lock */
1834*708c1bbcSMiao Xie 	if (old == current->mempolicy) {
1835*708c1bbcSMiao Xie 		task_lock(current);
1836*708c1bbcSMiao Xie 		*new = *old;
1837*708c1bbcSMiao Xie 		task_unlock(current);
1838*708c1bbcSMiao Xie 	} else
1839*708c1bbcSMiao Xie 		*new = *old;
1840*708c1bbcSMiao Xie 
184199ee4ca7SPaul E. McKenney 	rcu_read_lock();
18424225399aSPaul Jackson 	if (current_cpuset_is_being_rebound()) {
18434225399aSPaul Jackson 		nodemask_t mems = cpuset_mems_allowed(current);
1844*708c1bbcSMiao Xie 		if (new->flags & MPOL_F_REBINDING)
1845*708c1bbcSMiao Xie 			mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
1846*708c1bbcSMiao Xie 		else
1847*708c1bbcSMiao Xie 			mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
18484225399aSPaul Jackson 	}
184999ee4ca7SPaul E. McKenney 	rcu_read_unlock();
18501da177e4SLinus Torvalds 	atomic_set(&new->refcnt, 1);
18511da177e4SLinus Torvalds 	return new;
18521da177e4SLinus Torvalds }
18531da177e4SLinus Torvalds 
185452cd3b07SLee Schermerhorn /*
185552cd3b07SLee Schermerhorn  * If *frompol needs [has] an extra ref, copy *frompol to *tompol,
185652cd3b07SLee Schermerhorn  * eliminate the MPOL_F_* flags that require a conditional ref and
185752cd3b07SLee Schermerhorn  * [NOTE!!!] drop the extra ref.  Not safe to reference *frompol directly
185852cd3b07SLee Schermerhorn  * after return.  Use the returned value.
185952cd3b07SLee Schermerhorn  *
186052cd3b07SLee Schermerhorn  * Allows use of a mempolicy for, e.g., multiple allocations with a single
186152cd3b07SLee Schermerhorn  * policy lookup, even if the policy needs/has extra ref on lookup.
186252cd3b07SLee Schermerhorn  * shmem_readahead needs this.
186352cd3b07SLee Schermerhorn  */
186452cd3b07SLee Schermerhorn struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
186552cd3b07SLee Schermerhorn 						struct mempolicy *frompol)
186652cd3b07SLee Schermerhorn {
186752cd3b07SLee Schermerhorn 	if (!mpol_needs_cond_ref(frompol))
186852cd3b07SLee Schermerhorn 		return frompol;
186952cd3b07SLee Schermerhorn 
187052cd3b07SLee Schermerhorn 	*tompol = *frompol;
187152cd3b07SLee Schermerhorn 	tompol->flags &= ~MPOL_F_SHARED;	/* copy doesn't need unref */
187252cd3b07SLee Schermerhorn 	__mpol_put(frompol);
187352cd3b07SLee Schermerhorn 	return tompol;
187452cd3b07SLee Schermerhorn }
187552cd3b07SLee Schermerhorn 
18761da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */
18771da177e4SLinus Torvalds int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
18781da177e4SLinus Torvalds {
18791da177e4SLinus Torvalds 	if (!a || !b)
18801da177e4SLinus Torvalds 		return 0;
188145c4745aSLee Schermerhorn 	if (a->mode != b->mode)
18821da177e4SLinus Torvalds 		return 0;
188319800502SBob Liu 	if (a->flags != b->flags)
1884f5b087b5SDavid Rientjes 		return 0;
188519800502SBob Liu 	if (mpol_store_user_nodemask(a))
188619800502SBob Liu 		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
188719800502SBob Liu 			return 0;
188819800502SBob Liu 
188945c4745aSLee Schermerhorn 	switch (a->mode) {
189019770b32SMel Gorman 	case MPOL_BIND:
189119770b32SMel Gorman 		/* Fall through */
18921da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
1893dfcd3c0dSAndi Kleen 		return nodes_equal(a->v.nodes, b->v.nodes);
18941da177e4SLinus Torvalds 	case MPOL_PREFERRED:
1895fc36b8d3SLee Schermerhorn 		return a->v.preferred_node == b->v.preferred_node &&
1896fc36b8d3SLee Schermerhorn 			a->flags == b->flags;
18971da177e4SLinus Torvalds 	default:
18981da177e4SLinus Torvalds 		BUG();
18991da177e4SLinus Torvalds 		return 0;
19001da177e4SLinus Torvalds 	}
19011da177e4SLinus Torvalds }
19021da177e4SLinus Torvalds 
19031da177e4SLinus Torvalds /*
19041da177e4SLinus Torvalds  * Shared memory backing store policy support.
19051da177e4SLinus Torvalds  *
19061da177e4SLinus Torvalds  * Remember policies even when nobody has shared memory mapped.
19071da177e4SLinus Torvalds  * The policies are kept in Red-Black tree linked from the inode.
19081da177e4SLinus Torvalds  * They are protected by the sp->lock spinlock, which should be held
19091da177e4SLinus Torvalds  * for any accesses to the tree.
19101da177e4SLinus Torvalds  */
19111da177e4SLinus Torvalds 
19121da177e4SLinus Torvalds /* lookup first element intersecting start-end */
19131da177e4SLinus Torvalds /* Caller holds sp->lock */
19141da177e4SLinus Torvalds static struct sp_node *
19151da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
19161da177e4SLinus Torvalds {
19171da177e4SLinus Torvalds 	struct rb_node *n = sp->root.rb_node;
19181da177e4SLinus Torvalds 
19191da177e4SLinus Torvalds 	while (n) {
19201da177e4SLinus Torvalds 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
19211da177e4SLinus Torvalds 
19221da177e4SLinus Torvalds 		if (start >= p->end)
19231da177e4SLinus Torvalds 			n = n->rb_right;
19241da177e4SLinus Torvalds 		else if (end <= p->start)
19251da177e4SLinus Torvalds 			n = n->rb_left;
19261da177e4SLinus Torvalds 		else
19271da177e4SLinus Torvalds 			break;
19281da177e4SLinus Torvalds 	}
19291da177e4SLinus Torvalds 	if (!n)
19301da177e4SLinus Torvalds 		return NULL;
19311da177e4SLinus Torvalds 	for (;;) {
19321da177e4SLinus Torvalds 		struct sp_node *w = NULL;
19331da177e4SLinus Torvalds 		struct rb_node *prev = rb_prev(n);
19341da177e4SLinus Torvalds 		if (!prev)
19351da177e4SLinus Torvalds 			break;
19361da177e4SLinus Torvalds 		w = rb_entry(prev, struct sp_node, nd);
19371da177e4SLinus Torvalds 		if (w->end <= start)
19381da177e4SLinus Torvalds 			break;
19391da177e4SLinus Torvalds 		n = prev;
19401da177e4SLinus Torvalds 	}
19411da177e4SLinus Torvalds 	return rb_entry(n, struct sp_node, nd);
19421da177e4SLinus Torvalds }
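
/*
 * Worked example (illustrative): with ranges [0, 5), [5, 8) and
 * [8, 12) in the tree, sp_lookup(sp, 6, 10) first lands on an
 * intersecting node, then the backward walk keeps stepping to the
 * predecessor until w->end <= start, returning [5, 8) -- the first
 * node that intersects [6, 10).
 */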
19431da177e4SLinus Torvalds 
19441da177e4SLinus Torvalds /* Insert a new shared policy into the list. */
19451da177e4SLinus Torvalds /* Caller holds sp->lock */
19461da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new)
19471da177e4SLinus Torvalds {
19481da177e4SLinus Torvalds 	struct rb_node **p = &sp->root.rb_node;
19491da177e4SLinus Torvalds 	struct rb_node *parent = NULL;
19501da177e4SLinus Torvalds 	struct sp_node *nd;
19511da177e4SLinus Torvalds 
19521da177e4SLinus Torvalds 	while (*p) {
19531da177e4SLinus Torvalds 		parent = *p;
19541da177e4SLinus Torvalds 		nd = rb_entry(parent, struct sp_node, nd);
19551da177e4SLinus Torvalds 		if (new->start < nd->start)
19561da177e4SLinus Torvalds 			p = &(*p)->rb_left;
19571da177e4SLinus Torvalds 		else if (new->end > nd->end)
19581da177e4SLinus Torvalds 			p = &(*p)->rb_right;
19591da177e4SLinus Torvalds 		else
19601da177e4SLinus Torvalds 			BUG();
19611da177e4SLinus Torvalds 	}
19621da177e4SLinus Torvalds 	rb_link_node(&new->nd, parent, p);
19631da177e4SLinus Torvalds 	rb_insert_color(&new->nd, &sp->root);
1964140d5a49SPaul Mundt 	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
196545c4745aSLee Schermerhorn 		 new->policy ? new->policy->mode : 0);
19661da177e4SLinus Torvalds }
19671da177e4SLinus Torvalds 
19681da177e4SLinus Torvalds /* Find shared policy intersecting idx */
19691da177e4SLinus Torvalds struct mempolicy *
19701da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
19711da177e4SLinus Torvalds {
19721da177e4SLinus Torvalds 	struct mempolicy *pol = NULL;
19731da177e4SLinus Torvalds 	struct sp_node *sn;
19741da177e4SLinus Torvalds 
19751da177e4SLinus Torvalds 	if (!sp->root.rb_node)
19761da177e4SLinus Torvalds 		return NULL;
19771da177e4SLinus Torvalds 	spin_lock(&sp->lock);
19781da177e4SLinus Torvalds 	sn = sp_lookup(sp, idx, idx+1);
19791da177e4SLinus Torvalds 	if (sn) {
19801da177e4SLinus Torvalds 		mpol_get(sn->policy);
19811da177e4SLinus Torvalds 		pol = sn->policy;
19821da177e4SLinus Torvalds 	}
19831da177e4SLinus Torvalds 	spin_unlock(&sp->lock);
19841da177e4SLinus Torvalds 	return pol;
19851da177e4SLinus Torvalds }
19861da177e4SLinus Torvalds 
19871da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n)
19881da177e4SLinus Torvalds {
1989140d5a49SPaul Mundt 	pr_debug("deleting %lx-%lx\n", n->start, n->end);
19901da177e4SLinus Torvalds 	rb_erase(&n->nd, &sp->root);
1991f0be3d32SLee Schermerhorn 	mpol_put(n->policy);
19921da177e4SLinus Torvalds 	kmem_cache_free(sn_cache, n);
19931da177e4SLinus Torvalds }
19941da177e4SLinus Torvalds 
1995dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
1996dbcb0f19SAdrian Bunk 				struct mempolicy *pol)
19971da177e4SLinus Torvalds {
19981da177e4SLinus Torvalds 	struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
19991da177e4SLinus Torvalds 
20001da177e4SLinus Torvalds 	if (!n)
20011da177e4SLinus Torvalds 		return NULL;
20021da177e4SLinus Torvalds 	n->start = start;
20031da177e4SLinus Torvalds 	n->end = end;
20041da177e4SLinus Torvalds 	mpol_get(pol);
2005aab0b102SLee Schermerhorn 	pol->flags |= MPOL_F_SHARED;	/* for unref */
20061da177e4SLinus Torvalds 	n->policy = pol;
20071da177e4SLinus Torvalds 	return n;
20081da177e4SLinus Torvalds }
20091da177e4SLinus Torvalds 
20101da177e4SLinus Torvalds /* Replace a policy range. */
20111da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
20121da177e4SLinus Torvalds 				 unsigned long end, struct sp_node *new)
20131da177e4SLinus Torvalds {
20141da177e4SLinus Torvalds 	struct sp_node *n, *new2 = NULL;
20151da177e4SLinus Torvalds 
20161da177e4SLinus Torvalds restart:
20171da177e4SLinus Torvalds 	spin_lock(&sp->lock);
20181da177e4SLinus Torvalds 	n = sp_lookup(sp, start, end);
20191da177e4SLinus Torvalds 	/* Take care of old policies in the same range. */
20201da177e4SLinus Torvalds 	while (n && n->start < end) {
20211da177e4SLinus Torvalds 		struct rb_node *next = rb_next(&n->nd);
20221da177e4SLinus Torvalds 		if (n->start >= start) {
20231da177e4SLinus Torvalds 			if (n->end <= end)
20241da177e4SLinus Torvalds 				sp_delete(sp, n);
20251da177e4SLinus Torvalds 			else
20261da177e4SLinus Torvalds 				n->start = end;
20271da177e4SLinus Torvalds 		} else {
20281da177e4SLinus Torvalds 			/* Old policy spanning whole new range. */
20291da177e4SLinus Torvalds 			if (n->end > end) {
20301da177e4SLinus Torvalds 				if (!new2) {
20311da177e4SLinus Torvalds 					spin_unlock(&sp->lock);
20321da177e4SLinus Torvalds 					new2 = sp_alloc(end, n->end, n->policy);
20331da177e4SLinus Torvalds 					if (!new2)
20341da177e4SLinus Torvalds 						return -ENOMEM;
20351da177e4SLinus Torvalds 					goto restart;
20361da177e4SLinus Torvalds 				}
20371da177e4SLinus Torvalds 				n->end = start;
20381da177e4SLinus Torvalds 				sp_insert(sp, new2);
20391da177e4SLinus Torvalds 				new2 = NULL;
20401da177e4SLinus Torvalds 				break;
20411da177e4SLinus Torvalds 			} else
20421da177e4SLinus Torvalds 				n->end = start;
20431da177e4SLinus Torvalds 		}
20441da177e4SLinus Torvalds 		if (!next)
20451da177e4SLinus Torvalds 			break;
20461da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
20471da177e4SLinus Torvalds 	}
20481da177e4SLinus Torvalds 	if (new)
20491da177e4SLinus Torvalds 		sp_insert(sp, new);
20501da177e4SLinus Torvalds 	spin_unlock(&sp->lock);
20511da177e4SLinus Torvalds 	if (new2) {
2052f0be3d32SLee Schermerhorn 		mpol_put(new2->policy);
20531da177e4SLinus Torvalds 		kmem_cache_free(sn_cache, new2);
20541da177e4SLinus Torvalds 	}
20551da177e4SLinus Torvalds 	return 0;
20561da177e4SLinus Torvalds }
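
/*
 * Worked example (illustrative): suppose one sp_node covers [0, 10)
 * and a new policy arrives for [3, 6).  The old node spans past the
 * new range, so new2 is allocated for [6, 10) with the old policy,
 * the old node is trimmed down to [0, 3), and the tree ends up as
 * [0, 3) old, [3, 6) new, [6, 10) old.
 */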
20571da177e4SLinus Torvalds 
205871fe804bSLee Schermerhorn /**
205971fe804bSLee Schermerhorn  * mpol_shared_policy_init - initialize shared policy for inode
206071fe804bSLee Schermerhorn  * @sp: pointer to inode shared policy
206171fe804bSLee Schermerhorn  * @mpol:  struct mempolicy to install
206271fe804bSLee Schermerhorn  *
206371fe804bSLee Schermerhorn  * Install non-NULL @mpol in inode's shared policy rb-tree.
206471fe804bSLee Schermerhorn  * On entry, the current task has a reference on a non-NULL @mpol.
206571fe804bSLee Schermerhorn  * This must be released on exit.
20664bfc4495SKAMEZAWA Hiroyuki  * This is called at get_inode() calls and we can use GFP_KERNEL.
206771fe804bSLee Schermerhorn  */
206871fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
20697339ff83SRobin Holt {
207058568d2aSMiao Xie 	int ret;
207158568d2aSMiao Xie 
207271fe804bSLee Schermerhorn 	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
207371fe804bSLee Schermerhorn 	spin_lock_init(&sp->lock);
20747339ff83SRobin Holt 
207571fe804bSLee Schermerhorn 	if (mpol) {
20767339ff83SRobin Holt 		struct vm_area_struct pvma;
207771fe804bSLee Schermerhorn 		struct mempolicy *new;
20784bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
20797339ff83SRobin Holt 
20804bfc4495SKAMEZAWA Hiroyuki 		if (!scratch)
20814bfc4495SKAMEZAWA Hiroyuki 			goto put_mpol;
208271fe804bSLee Schermerhorn 		/* contextualize the tmpfs mount point mempolicy */
208371fe804bSLee Schermerhorn 		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
208415d77835SLee Schermerhorn 		if (IS_ERR(new))
208515d77835SLee Schermerhorn 			goto free_scratch; /* no valid nodemask intersection */
208658568d2aSMiao Xie 
208758568d2aSMiao Xie 		task_lock(current);
20884bfc4495SKAMEZAWA Hiroyuki 		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
208958568d2aSMiao Xie 		task_unlock(current);
209015d77835SLee Schermerhorn 		if (ret)
209115d77835SLee Schermerhorn 			goto put_new;
209271fe804bSLee Schermerhorn 
209371fe804bSLee Schermerhorn 		/* Create pseudo-vma that contains just the policy */
20947339ff83SRobin Holt 		memset(&pvma, 0, sizeof(struct vm_area_struct));
209571fe804bSLee Schermerhorn 		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
209671fe804bSLee Schermerhorn 		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
209715d77835SLee Schermerhorn 
209815d77835SLee Schermerhorn put_new:
209971fe804bSLee Schermerhorn 		mpol_put(new);			/* drop initial ref */
210015d77835SLee Schermerhorn free_scratch:
21014bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
210215d77835SLee Schermerhorn put_mpol:
210358568d2aSMiao Xie 		mpol_put(mpol);	/* drop our ref on sb mpol */
21047339ff83SRobin Holt 	}
21037339ff83SRobin Holt }
21047339ff83SRobin Holt 
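/*
 * Illustrative caller sketch (hedged; modeled on the tmpfs code in
 * mm/shmem.c, where shmem_get_sbmpol() returns the superblock's mpol
 * with an extra reference held -- the names here are for illustration
 * only):
 */
#if 0	/* example only, not compiled */
static void example_shmem_init_inode_policy(struct shmem_inode_info *info,
					    struct shmem_sb_info *sbinfo)
{
	/* mpol_shared_policy_init() consumes the reference */
	mpol_shared_policy_init(&info->policy, shmem_get_sbmpol(sbinfo));
}
#endif
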
21051da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info,
21061da177e4SLinus Torvalds 			struct vm_area_struct *vma, struct mempolicy *npol)
21071da177e4SLinus Torvalds {
21081da177e4SLinus Torvalds 	int err;
21091da177e4SLinus Torvalds 	struct sp_node *new = NULL;
21101da177e4SLinus Torvalds 	unsigned long sz = vma_pages(vma);
21111da177e4SLinus Torvalds 
2112028fec41SDavid Rientjes 	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
21131da177e4SLinus Torvalds 		 vma->vm_pgoff,
211445c4745aSLee Schermerhorn 		 sz, npol ? npol->mode : -1,
2115028fec41SDavid Rientjes 		 npol ? npol->flags : -1,
2116dfcd3c0dSAndi Kleen 		 npol ? nodes_addr(npol->v.nodes)[0] : -1);
21171da177e4SLinus Torvalds 
21181da177e4SLinus Torvalds 	if (npol) {
21191da177e4SLinus Torvalds 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
21201da177e4SLinus Torvalds 		if (!new)
21211da177e4SLinus Torvalds 			return -ENOMEM;
21221da177e4SLinus Torvalds 	}
21231da177e4SLinus Torvalds 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
21241da177e4SLinus Torvalds 	if (err && new)
21251da177e4SLinus Torvalds 		kmem_cache_free(sn_cache, new);
21261da177e4SLinus Torvalds 	return err;
21271da177e4SLinus Torvalds }
21281da177e4SLinus Torvalds 
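/*
 * Illustrative caller sketch (hedged; modeled on shmem_set_policy() in
 * mm/shmem.c): the sp_node installed above is keyed by file offset and
 * covers pages [vm_pgoff, vm_pgoff + vma_pages(vma)).
 */
#if 0	/* example only, not compiled */
static int example_shmem_set_policy(struct vm_area_struct *vma,
				    struct mempolicy *new)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;

	return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, new);
}
#endif
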
21291da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */
21301da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p)
21311da177e4SLinus Torvalds {
21321da177e4SLinus Torvalds 	struct sp_node *n;
21331da177e4SLinus Torvalds 	struct rb_node *next;
21341da177e4SLinus Torvalds 
21351da177e4SLinus Torvalds 	if (!p->root.rb_node)
21361da177e4SLinus Torvalds 		return;
21371da177e4SLinus Torvalds 	spin_lock(&p->lock);
21381da177e4SLinus Torvalds 	next = rb_first(&p->root);
21391da177e4SLinus Torvalds 	while (next) {
21401da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
21411da177e4SLinus Torvalds 		next = rb_next(&n->nd);
214290c5029eSAndi Kleen 		rb_erase(&n->nd, &p->root);
2143f0be3d32SLee Schermerhorn 		mpol_put(n->policy);
21441da177e4SLinus Torvalds 		kmem_cache_free(sn_cache, n);
21451da177e4SLinus Torvalds 	}
21461da177e4SLinus Torvalds 	spin_unlock(&p->lock);
21471da177e4SLinus Torvalds }
21481da177e4SLinus Torvalds 
21491da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */
21501da177e4SLinus Torvalds void __init numa_policy_init(void)
21511da177e4SLinus Torvalds {
2152b71636e2SPaul Mundt 	nodemask_t interleave_nodes;
2153b71636e2SPaul Mundt 	unsigned long largest = 0;
2154b71636e2SPaul Mundt 	int nid, prefer = 0;
2155b71636e2SPaul Mundt 
21561da177e4SLinus Torvalds 	policy_cache = kmem_cache_create("numa_policy",
21571da177e4SLinus Torvalds 					 sizeof(struct mempolicy),
215820c2df83SPaul Mundt 					 0, SLAB_PANIC, NULL);
21591da177e4SLinus Torvalds 
21601da177e4SLinus Torvalds 	sn_cache = kmem_cache_create("shared_policy_node",
21611da177e4SLinus Torvalds 				     sizeof(struct sp_node),
216220c2df83SPaul Mundt 				     0, SLAB_PANIC, NULL);
21631da177e4SLinus Torvalds 
2164b71636e2SPaul Mundt 	/*
2165b71636e2SPaul Mundt 	 * Set interleaving policy for system init. Interleaving is only
2166b71636e2SPaul Mundt 	 * enabled across suitably sized nodes (default is >= 16MB), falling
2167b71636e2SPaul Mundt 	 * back to the largest node if they are all smaller.
2168b71636e2SPaul Mundt 	 */
2169b71636e2SPaul Mundt 	nodes_clear(interleave_nodes);
217056bbd65dSChristoph Lameter 	for_each_node_state(nid, N_HIGH_MEMORY) {
2171b71636e2SPaul Mundt 		unsigned long total_pages = node_present_pages(nid);
21721da177e4SLinus Torvalds 
2173b71636e2SPaul Mundt 		/* Preserve the largest node */
2174b71636e2SPaul Mundt 		if (largest < total_pages) {
2175b71636e2SPaul Mundt 			largest = total_pages;
2176b71636e2SPaul Mundt 			prefer = nid;
2177b71636e2SPaul Mundt 		}
2178b71636e2SPaul Mundt 
2179b71636e2SPaul Mundt 		/* Interleave this node? */
2180b71636e2SPaul Mundt 		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2181b71636e2SPaul Mundt 			node_set(nid, interleave_nodes);
2182b71636e2SPaul Mundt 	}
2183b71636e2SPaul Mundt 
2184b71636e2SPaul Mundt 	/* All too small, use the largest */
2185b71636e2SPaul Mundt 	if (unlikely(nodes_empty(interleave_nodes)))
2186b71636e2SPaul Mundt 		node_set(prefer, interleave_nodes);
2187b71636e2SPaul Mundt 
2188028fec41SDavid Rientjes 	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
21891da177e4SLinus Torvalds 		printk(KERN_WARNING "numa_policy_init: interleaving failed\n");
21901da177e4SLinus Torvalds }
21911da177e4SLinus Torvalds 
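/*
 * Worked example for the size check above, assuming 4KB pages: a node
 * with 4096 present pages has 4096 << 12 = 16MB of memory and is
 * interleaved; if every node were smaller than that, only the single
 * largest node would be used.
 */
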
21928bccd85fSChristoph Lameter /* Reset policy of current process to default */
21931da177e4SLinus Torvalds void numa_default_policy(void)
21941da177e4SLinus Torvalds {
2195028fec41SDavid Rientjes 	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
21961da177e4SLinus Torvalds }
219768860ec1SPaul Jackson 
21984225399aSPaul Jackson /*
2199095f1fc4SLee Schermerhorn  * Parse and format mempolicy from/to strings
2200095f1fc4SLee Schermerhorn  */
2201095f1fc4SLee Schermerhorn 
2202095f1fc4SLee Schermerhorn /*
2203fc36b8d3SLee Schermerhorn  * "local" is a pseudo-policy: MPOL_PREFERRED with the MPOL_F_LOCAL flag
22043f226aa1SLee Schermerhorn  * Used only for mpol_parse_str() and mpol_to_str()
22051a75a6c8SChristoph Lameter  */
2206345ace9cSLee Schermerhorn #define MPOL_LOCAL MPOL_MAX
2207345ace9cSLee Schermerhorn static const char * const policy_modes[] =
2208345ace9cSLee Schermerhorn {
2209345ace9cSLee Schermerhorn 	[MPOL_DEFAULT]    = "default",
2210345ace9cSLee Schermerhorn 	[MPOL_PREFERRED]  = "prefer",
2211345ace9cSLee Schermerhorn 	[MPOL_BIND]       = "bind",
2212345ace9cSLee Schermerhorn 	[MPOL_INTERLEAVE] = "interleave",
2213345ace9cSLee Schermerhorn 	[MPOL_LOCAL]      = "local"
2214345ace9cSLee Schermerhorn };
22151a75a6c8SChristoph Lameter 
2216095f1fc4SLee Schermerhorn 
2217095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS
2218095f1fc4SLee Schermerhorn /**
2219095f1fc4SLee Schermerhorn  * mpol_parse_str - parse string to mempolicy
2220095f1fc4SLee Schermerhorn  * @str:  string containing mempolicy to parse
222171fe804bSLee Schermerhorn  * @mpol:  pointer to struct mempolicy pointer, returned on success.
222271fe804bSLee Schermerhorn  * @no_context:  flag whether to "contextualize" the mempolicy
2223095f1fc4SLee Schermerhorn  *
2224095f1fc4SLee Schermerhorn  * Format of input:
2225095f1fc4SLee Schermerhorn  *	<mode>[=<flags>][:<nodelist>]
2226095f1fc4SLee Schermerhorn  *
222771fe804bSLee Schermerhorn  * If @no_context is true, save the input nodemask in w.user_nodemask in
222871fe804bSLee Schermerhorn  * the returned mempolicy.  This will be used to "clone" the mempolicy in
222971fe804bSLee Schermerhorn  * a specific context [cpuset] at a later time.  Used to parse tmpfs mpol
223071fe804bSLee Schermerhorn  * mount option.  Note that if 'static' or 'relative' mode flags were
223171fe804bSLee Schermerhorn  * specified, the input nodemask will already have been saved.  Saving
223271fe804bSLee Schermerhorn  * it again is redundant, but safe.
223371fe804bSLee Schermerhorn  *
223471fe804bSLee Schermerhorn  * On success, returns 0, else 1
2235095f1fc4SLee Schermerhorn  */
223671fe804bSLee Schermerhorn int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
2237095f1fc4SLee Schermerhorn {
223871fe804bSLee Schermerhorn 	struct mempolicy *new = NULL;
2239b4652e84SLee Schermerhorn 	unsigned short mode;
224071fe804bSLee Schermerhorn 	unsigned short uninitialized_var(mode_flags);
224171fe804bSLee Schermerhorn 	nodemask_t nodes;
2242095f1fc4SLee Schermerhorn 	char *nodelist = strchr(str, ':');
2243095f1fc4SLee Schermerhorn 	char *flags = strchr(str, '=');
2244095f1fc4SLee Schermerhorn 	int err = 1;
2245095f1fc4SLee Schermerhorn 
2246095f1fc4SLee Schermerhorn 	if (nodelist) {
2247095f1fc4SLee Schermerhorn 		/* NUL-terminate mode or flags string */
2248095f1fc4SLee Schermerhorn 		*nodelist++ = '\0';
224971fe804bSLee Schermerhorn 		if (nodelist_parse(nodelist, nodes))
2250095f1fc4SLee Schermerhorn 			goto out;
225171fe804bSLee Schermerhorn 		if (!nodes_subset(nodes, node_states[N_HIGH_MEMORY]))
2252095f1fc4SLee Schermerhorn 			goto out;
225371fe804bSLee Schermerhorn 	} else
225471fe804bSLee Schermerhorn 		nodes_clear(nodes);
225571fe804bSLee Schermerhorn 
2256095f1fc4SLee Schermerhorn 	if (flags)
2257095f1fc4SLee Schermerhorn 		*flags++ = '\0';	/* terminate mode string */
2258095f1fc4SLee Schermerhorn 
2259b4652e84SLee Schermerhorn 	for (mode = 0; mode <= MPOL_LOCAL; mode++) {
2260345ace9cSLee Schermerhorn 		if (!strcmp(str, policy_modes[mode])) {
2261095f1fc4SLee Schermerhorn 			break;
2262095f1fc4SLee Schermerhorn 		}
2263095f1fc4SLee Schermerhorn 	}
2264b4652e84SLee Schermerhorn 	if (mode > MPOL_LOCAL)
2265095f1fc4SLee Schermerhorn 		goto out;
2266095f1fc4SLee Schermerhorn 
226771fe804bSLee Schermerhorn 	switch (mode) {
2268095f1fc4SLee Schermerhorn 	case MPOL_PREFERRED:
226971fe804bSLee Schermerhorn 		/*
227071fe804bSLee Schermerhorn 		 * Insist on a nodelist of one node only
227171fe804bSLee Schermerhorn 		 */
2272095f1fc4SLee Schermerhorn 		if (nodelist) {
2273095f1fc4SLee Schermerhorn 			char *rest = nodelist;
2274095f1fc4SLee Schermerhorn 			while (isdigit(*rest))
2275095f1fc4SLee Schermerhorn 				rest++;
2276926f2ae0SKOSAKI Motohiro 			if (*rest)
2277926f2ae0SKOSAKI Motohiro 				goto out;
2278095f1fc4SLee Schermerhorn 		}
2279095f1fc4SLee Schermerhorn 		break;
2280095f1fc4SLee Schermerhorn 	case MPOL_INTERLEAVE:
2281095f1fc4SLee Schermerhorn 		/*
2282095f1fc4SLee Schermerhorn 		 * Default to online nodes with memory if no nodelist
2283095f1fc4SLee Schermerhorn 		 */
2284095f1fc4SLee Schermerhorn 		if (!nodelist)
228571fe804bSLee Schermerhorn 			nodes = node_states[N_HIGH_MEMORY];
22863f226aa1SLee Schermerhorn 		break;
228771fe804bSLee Schermerhorn 	case MPOL_LOCAL:
22883f226aa1SLee Schermerhorn 		/*
228971fe804bSLee Schermerhorn 		 * Don't allow a nodelist;  mpol_new() checks flags
22903f226aa1SLee Schermerhorn 		 */
229171fe804bSLee Schermerhorn 		if (nodelist)
22923f226aa1SLee Schermerhorn 			goto out;
229371fe804bSLee Schermerhorn 		mode = MPOL_PREFERRED;
22943f226aa1SLee Schermerhorn 		break;
2295413b43deSRavikiran G Thirumalai 	case MPOL_DEFAULT:
2296413b43deSRavikiran G Thirumalai 		/*
2297413b43deSRavikiran G Thirumalai 		 * Insist on an empty nodelist
2298413b43deSRavikiran G Thirumalai 		 */
2299413b43deSRavikiran G Thirumalai 		if (!nodelist)
2300413b43deSRavikiran G Thirumalai 			err = 0;
2301413b43deSRavikiran G Thirumalai 		goto out;
2302d69b2e63SKOSAKI Motohiro 	case MPOL_BIND:
230371fe804bSLee Schermerhorn 		/*
2304d69b2e63SKOSAKI Motohiro 		 * Insist on a nodelist
230571fe804bSLee Schermerhorn 		 */
2306d69b2e63SKOSAKI Motohiro 		if (!nodelist)
2307d69b2e63SKOSAKI Motohiro 			goto out;
2308095f1fc4SLee Schermerhorn 	}
2309095f1fc4SLee Schermerhorn 
231071fe804bSLee Schermerhorn 	mode_flags = 0;
2311095f1fc4SLee Schermerhorn 	if (flags) {
2312095f1fc4SLee Schermerhorn 		/*
2313095f1fc4SLee Schermerhorn 		 * Currently, we only support two mutually exclusive
2314095f1fc4SLee Schermerhorn 		 * mode flags.
2315095f1fc4SLee Schermerhorn 		 */
2316095f1fc4SLee Schermerhorn 		if (!strcmp(flags, "static"))
231771fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_STATIC_NODES;
2318095f1fc4SLee Schermerhorn 		else if (!strcmp(flags, "relative"))
231971fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_RELATIVE_NODES;
2320095f1fc4SLee Schermerhorn 		else
2321926f2ae0SKOSAKI Motohiro 			goto out;
2322095f1fc4SLee Schermerhorn 	}
232371fe804bSLee Schermerhorn 
232471fe804bSLee Schermerhorn 	new = mpol_new(mode, mode_flags, &nodes);
232571fe804bSLee Schermerhorn 	if (IS_ERR(new))
2326926f2ae0SKOSAKI Motohiro 		goto out;
2327926f2ae0SKOSAKI Motohiro 
2328e17f74afSLee Schermerhorn 	if (no_context) {
2329e17f74afSLee Schermerhorn 		/* save for contextualization */
2330e17f74afSLee Schermerhorn 		new->w.user_nodemask = nodes;
2331e17f74afSLee Schermerhorn 	} else {
233258568d2aSMiao Xie 		int ret;
23334bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
23344bfc4495SKAMEZAWA Hiroyuki 		if (scratch) {
233558568d2aSMiao Xie 			task_lock(current);
23364bfc4495SKAMEZAWA Hiroyuki 			ret = mpol_set_nodemask(new, &nodes, scratch);
233758568d2aSMiao Xie 			task_unlock(current);
23384bfc4495SKAMEZAWA Hiroyuki 		} else
23394bfc4495SKAMEZAWA Hiroyuki 			ret = -ENOMEM;
23404bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
23414bfc4495SKAMEZAWA Hiroyuki 		if (ret) {
23424bfc4495SKAMEZAWA Hiroyuki 			mpol_put(new);
2343926f2ae0SKOSAKI Motohiro 			goto out;
2344926f2ae0SKOSAKI Motohiro 		}
2345926f2ae0SKOSAKI Motohiro 	}
2346926f2ae0SKOSAKI Motohiro 	err = 0;
234771fe804bSLee Schermerhorn 
2348095f1fc4SLee Schermerhorn out:
2349095f1fc4SLee Schermerhorn 	/* Restore string for error message */
2350095f1fc4SLee Schermerhorn 	if (nodelist)
2351095f1fc4SLee Schermerhorn 		*--nodelist = ':';
2352095f1fc4SLee Schermerhorn 	if (flags)
2353095f1fc4SLee Schermerhorn 		*--flags = '=';
235471fe804bSLee Schermerhorn 	if (!err)
235571fe804bSLee Schermerhorn 		*mpol = new;
2356095f1fc4SLee Schermerhorn 	return err;
2357095f1fc4SLee Schermerhorn }
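
/*
 * Example strings accepted above, per the documented format
 * <mode>[=<flags>][:<nodelist>] (illustrative):
 *
 *	"interleave:0-3"	MPOL_INTERLEAVE over nodes 0-3
 *	"prefer:1"		MPOL_PREFERRED, exactly one node
 *	"bind=static:0,2"	MPOL_BIND with MPOL_F_STATIC_NODES
 *	"local"			MPOL_PREFERRED with MPOL_F_LOCAL
 *	"default"		MPOL_DEFAULT, nodelist must be absent
 *
 * Such strings normally arrive via the tmpfs "mpol=" mount option;
 * a minimal (hedged) userspace illustration:
 */
#if 0	/* userspace example, not part of this file */
#include <sys/mount.h>

int example_mount_interleaved_tmpfs(void)
{
	/* the value after "mpol=" is handed to mpol_parse_str() */
	return mount("tmpfs", "/mnt/tmp", "tmpfs", 0, "mpol=interleave:0-3");
}
#endif
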
2358095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */
2359095f1fc4SLee Schermerhorn 
236071fe804bSLee Schermerhorn /**
236171fe804bSLee Schermerhorn  * mpol_to_str - format a mempolicy structure for printing
236271fe804bSLee Schermerhorn  * @buffer:  to contain formatted mempolicy string
236371fe804bSLee Schermerhorn  * @maxlen:  length of @buffer
236471fe804bSLee Schermerhorn  * @pol:  pointer to mempolicy to be formatted
236571fe804bSLee Schermerhorn  * @no_context:  "context free" mempolicy - use nodemask in w.user_nodemask
236671fe804bSLee Schermerhorn  *
23671a75a6c8SChristoph Lameter  * Convert a mempolicy into a string.
23681a75a6c8SChristoph Lameter  * Returns the number of characters in buffer (if positive)
23691a75a6c8SChristoph Lameter  * or an error (negative)
23701a75a6c8SChristoph Lameter  */
237171fe804bSLee Schermerhorn int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
23721a75a6c8SChristoph Lameter {
23731a75a6c8SChristoph Lameter 	char *p = buffer;
23741a75a6c8SChristoph Lameter 	int l;
23751a75a6c8SChristoph Lameter 	nodemask_t nodes;
2376bea904d5SLee Schermerhorn 	unsigned short mode;
2377f5b087b5SDavid Rientjes 	unsigned short flags = pol ? pol->flags : 0;
23781a75a6c8SChristoph Lameter 
23792291990aSLee Schermerhorn 	/*
23802291990aSLee Schermerhorn 	 * Sanity check:  room for longest mode, flag and some nodes
23812291990aSLee Schermerhorn 	 */
23822291990aSLee Schermerhorn 	VM_BUG_ON(maxlen < strlen("interleave") + strlen("relative") + 16);
23832291990aSLee Schermerhorn 
2384bea904d5SLee Schermerhorn 	if (!pol || pol == &default_policy)
2385bea904d5SLee Schermerhorn 		mode = MPOL_DEFAULT;
2386bea904d5SLee Schermerhorn 	else
2387bea904d5SLee Schermerhorn 		mode = pol->mode;
2388bea904d5SLee Schermerhorn 
23891a75a6c8SChristoph Lameter 	switch (mode) {
23901a75a6c8SChristoph Lameter 	case MPOL_DEFAULT:
23911a75a6c8SChristoph Lameter 		nodes_clear(nodes);
23921a75a6c8SChristoph Lameter 		break;
23931a75a6c8SChristoph Lameter 
23941a75a6c8SChristoph Lameter 	case MPOL_PREFERRED:
23951a75a6c8SChristoph Lameter 		nodes_clear(nodes);
2396fc36b8d3SLee Schermerhorn 		if (flags & MPOL_F_LOCAL)
239753f2556bSLee Schermerhorn 			mode = MPOL_LOCAL;	/* pseudo-policy */
239853f2556bSLee Schermerhorn 		else
2399fc36b8d3SLee Schermerhorn 			node_set(pol->v.preferred_node, nodes);
24001a75a6c8SChristoph Lameter 		break;
24011a75a6c8SChristoph Lameter 
24021a75a6c8SChristoph Lameter 	case MPOL_BIND:
240319770b32SMel Gorman 		/* Fall through */
24041a75a6c8SChristoph Lameter 	case MPOL_INTERLEAVE:
240571fe804bSLee Schermerhorn 		if (no_context)
240671fe804bSLee Schermerhorn 			nodes = pol->w.user_nodemask;
240771fe804bSLee Schermerhorn 		else
24081a75a6c8SChristoph Lameter 			nodes = pol->v.nodes;
24091a75a6c8SChristoph Lameter 		break;
24101a75a6c8SChristoph Lameter 
24111a75a6c8SChristoph Lameter 	default:
24121a75a6c8SChristoph Lameter 		BUG();
24131a75a6c8SChristoph Lameter 	}
24141a75a6c8SChristoph Lameter 
2415345ace9cSLee Schermerhorn 	l = strlen(policy_modes[mode]);
24161a75a6c8SChristoph Lameter 	if (buffer + maxlen < p + l + 1)
24171a75a6c8SChristoph Lameter 		return -ENOSPC;
24181a75a6c8SChristoph Lameter 
2419345ace9cSLee Schermerhorn 	strcpy(p, policy_modes[mode]);
24201a75a6c8SChristoph Lameter 	p += l;
24211a75a6c8SChristoph Lameter 
2422fc36b8d3SLee Schermerhorn 	if (flags & MPOL_MODE_FLAGS) {
2423f5b087b5SDavid Rientjes 		if (buffer + maxlen < p + 2)
2424f5b087b5SDavid Rientjes 			return -ENOSPC;
2425f5b087b5SDavid Rientjes 		*p++ = '=';
2426f5b087b5SDavid Rientjes 
24272291990aSLee Schermerhorn 		/*
24282291990aSLee Schermerhorn 		 * Currently, the only defined flags are mutually exclusive
24292291990aSLee Schermerhorn 		 */
2430f5b087b5SDavid Rientjes 		if (flags & MPOL_F_STATIC_NODES)
24312291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "static");
24322291990aSLee Schermerhorn 		else if (flags & MPOL_F_RELATIVE_NODES)
24332291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "relative");
2434f5b087b5SDavid Rientjes 	}
2435f5b087b5SDavid Rientjes 
24361a75a6c8SChristoph Lameter 	if (!nodes_empty(nodes)) {
24371a75a6c8SChristoph Lameter 		if (buffer + maxlen < p + 2)
24381a75a6c8SChristoph Lameter 			return -ENOSPC;
2439095f1fc4SLee Schermerhorn 		*p++ = ':';
24401a75a6c8SChristoph Lameter 		p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
24411a75a6c8SChristoph Lameter 	}
24421a75a6c8SChristoph Lameter 	return p - buffer;
24431a75a6c8SChristoph Lameter }
24441a75a6c8SChristoph Lameter 
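/*
 * Example conversions performed above, the inverse of mpol_parse_str()
 * (illustrative):
 *
 *	MPOL_INTERLEAVE, nodes 0-3, MPOL_F_STATIC_NODES -> "interleave=static:0-3"
 *	MPOL_PREFERRED with MPOL_F_LOCAL                -> "local"
 *	NULL or &default_policy                         -> "default"
 *
 * These strings appear as the policy field of /proc/<pid>/numa_maps.
 */
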
24451a75a6c8SChristoph Lameter struct numa_maps {
24461a75a6c8SChristoph Lameter 	unsigned long pages;
24471a75a6c8SChristoph Lameter 	unsigned long anon;
2448397874dfSChristoph Lameter 	unsigned long active;
2449397874dfSChristoph Lameter 	unsigned long writeback;
24501a75a6c8SChristoph Lameter 	unsigned long mapcount_max;
2451397874dfSChristoph Lameter 	unsigned long dirty;
2452397874dfSChristoph Lameter 	unsigned long swapcache;
24531a75a6c8SChristoph Lameter 	unsigned long node[MAX_NUMNODES];
24541a75a6c8SChristoph Lameter };
24551a75a6c8SChristoph Lameter 
2456397874dfSChristoph Lameter static void gather_stats(struct page *page, void *private, int pte_dirty)
24571a75a6c8SChristoph Lameter {
24581a75a6c8SChristoph Lameter 	struct numa_maps *md = private;
24591a75a6c8SChristoph Lameter 	int count = page_mapcount(page);
24601a75a6c8SChristoph Lameter 
24611a75a6c8SChristoph Lameter 	md->pages++;
2462397874dfSChristoph Lameter 	if (pte_dirty || PageDirty(page))
2463397874dfSChristoph Lameter 		md->dirty++;
2464397874dfSChristoph Lameter 
2465397874dfSChristoph Lameter 	if (PageSwapCache(page))
2466397874dfSChristoph Lameter 		md->swapcache++;
2467397874dfSChristoph Lameter 
2468894bc310SLee Schermerhorn 	if (PageActive(page) || PageUnevictable(page))
2469397874dfSChristoph Lameter 		md->active++;
2470397874dfSChristoph Lameter 
2471397874dfSChristoph Lameter 	if (PageWriteback(page))
2472397874dfSChristoph Lameter 		md->writeback++;
24731a75a6c8SChristoph Lameter 
24741a75a6c8SChristoph Lameter 	if (PageAnon(page))
24751a75a6c8SChristoph Lameter 		md->anon++;
24761a75a6c8SChristoph Lameter 
2477397874dfSChristoph Lameter 	if (count > md->mapcount_max)
2478397874dfSChristoph Lameter 		md->mapcount_max = count;
2479397874dfSChristoph Lameter 
24801a75a6c8SChristoph Lameter 	md->node[page_to_nid(page)]++;
24811a75a6c8SChristoph Lameter }
24821a75a6c8SChristoph Lameter 
24837f709ed0SAndrew Morton #ifdef CONFIG_HUGETLB_PAGE
2484397874dfSChristoph Lameter static void check_huge_range(struct vm_area_struct *vma,
2485397874dfSChristoph Lameter 		unsigned long start, unsigned long end,
2486397874dfSChristoph Lameter 		struct numa_maps *md)
2487397874dfSChristoph Lameter {
2488397874dfSChristoph Lameter 	unsigned long addr;
2489397874dfSChristoph Lameter 	struct page *page;
2490a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
2491a5516438SAndi Kleen 	unsigned long sz = huge_page_size(h);
2492397874dfSChristoph Lameter 
2493a5516438SAndi Kleen 	for (addr = start; addr < end; addr += sz) {
2494a5516438SAndi Kleen 		pte_t *ptep = huge_pte_offset(vma->vm_mm,
2495a5516438SAndi Kleen 						addr & huge_page_mask(h));
2496397874dfSChristoph Lameter 		pte_t pte;
2497397874dfSChristoph Lameter 
2498397874dfSChristoph Lameter 		if (!ptep)
2499397874dfSChristoph Lameter 			continue;
2500397874dfSChristoph Lameter 
2501397874dfSChristoph Lameter 		pte = *ptep;
2502397874dfSChristoph Lameter 		if (pte_none(pte))
2503397874dfSChristoph Lameter 			continue;
2504397874dfSChristoph Lameter 
2505397874dfSChristoph Lameter 		page = pte_page(pte);
2506397874dfSChristoph Lameter 		if (!page)
2507397874dfSChristoph Lameter 			continue;
2508397874dfSChristoph Lameter 
2509397874dfSChristoph Lameter 		gather_stats(page, md, pte_dirty(*ptep));
2510397874dfSChristoph Lameter 	}
2511397874dfSChristoph Lameter }
25127f709ed0SAndrew Morton #else
25137f709ed0SAndrew Morton static inline void check_huge_range(struct vm_area_struct *vma,
25147f709ed0SAndrew Morton 		unsigned long start, unsigned long end,
25157f709ed0SAndrew Morton 		struct numa_maps *md)
25167f709ed0SAndrew Morton {
25177f709ed0SAndrew Morton }
25187f709ed0SAndrew Morton #endif
2519397874dfSChristoph Lameter 
252053f2556bSLee Schermerhorn /*
252153f2556bSLee Schermerhorn  * Display pages allocated per node and memory policy via /proc.
252253f2556bSLee Schermerhorn  */
25231a75a6c8SChristoph Lameter int show_numa_map(struct seq_file *m, void *v)
25241a75a6c8SChristoph Lameter {
252599f89551SEric W. Biederman 	struct proc_maps_private *priv = m->private;
25261a75a6c8SChristoph Lameter 	struct vm_area_struct *vma = v;
25271a75a6c8SChristoph Lameter 	struct numa_maps *md;
2528397874dfSChristoph Lameter 	struct file *file = vma->vm_file;
2529397874dfSChristoph Lameter 	struct mm_struct *mm = vma->vm_mm;
2530480eccf9SLee Schermerhorn 	struct mempolicy *pol;
25311a75a6c8SChristoph Lameter 	int n;
25321a75a6c8SChristoph Lameter 	char buffer[50];
25331a75a6c8SChristoph Lameter 
2534397874dfSChristoph Lameter 	if (!mm)
25351a75a6c8SChristoph Lameter 		return 0;
25361a75a6c8SChristoph Lameter 
25371a75a6c8SChristoph Lameter 	md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
25381a75a6c8SChristoph Lameter 	if (!md)
25391a75a6c8SChristoph Lameter 		return 0;
25401a75a6c8SChristoph Lameter 
2541480eccf9SLee Schermerhorn 	pol = get_vma_policy(priv->task, vma, vma->vm_start);
254271fe804bSLee Schermerhorn 	mpol_to_str(buffer, sizeof(buffer), pol, 0);
254352cd3b07SLee Schermerhorn 	mpol_cond_put(pol);
25441a75a6c8SChristoph Lameter 
2545397874dfSChristoph Lameter 	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
2546397874dfSChristoph Lameter 
2547397874dfSChristoph Lameter 	if (file) {
2548397874dfSChristoph Lameter 		seq_printf(m, " file=");
2549c32c2f63SJan Blunck 		seq_path(m, &file->f_path, "\n\t= ");
2550397874dfSChristoph Lameter 	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
2551397874dfSChristoph Lameter 		seq_printf(m, " heap");
2552397874dfSChristoph Lameter 	} else if (vma->vm_start <= mm->start_stack &&
2553397874dfSChristoph Lameter 			vma->vm_end >= mm->start_stack) {
2554397874dfSChristoph Lameter 		seq_printf(m, " stack");
2555397874dfSChristoph Lameter 	}
2556397874dfSChristoph Lameter 
2557397874dfSChristoph Lameter 	if (is_vm_hugetlb_page(vma)) {
2558397874dfSChristoph Lameter 		check_huge_range(vma, vma->vm_start, vma->vm_end, md);
2559397874dfSChristoph Lameter 		seq_printf(m, " huge");
2560397874dfSChristoph Lameter 	} else {
2561397874dfSChristoph Lameter 		check_pgd_range(vma, vma->vm_start, vma->vm_end,
256256bbd65dSChristoph Lameter 			&node_states[N_HIGH_MEMORY], MPOL_MF_STATS, md);
2563397874dfSChristoph Lameter 	}
2564397874dfSChristoph Lameter 
2565397874dfSChristoph Lameter 	if (!md->pages)
2566397874dfSChristoph Lameter 		goto out;
25671a75a6c8SChristoph Lameter 
25681a75a6c8SChristoph Lameter 	if (md->anon)
25691a75a6c8SChristoph Lameter 		seq_printf(m," anon=%lu",md->anon);
25701a75a6c8SChristoph Lameter 
2571397874dfSChristoph Lameter 	if (md->dirty)
2572397874dfSChristoph Lameter 		seq_printf(m," dirty=%lu",md->dirty);
2573397874dfSChristoph Lameter 
2574397874dfSChristoph Lameter 	if (md->pages != md->anon && md->pages != md->dirty)
2575397874dfSChristoph Lameter 		seq_printf(m, " mapped=%lu", md->pages);
2576397874dfSChristoph Lameter 
2577397874dfSChristoph Lameter 	if (md->mapcount_max > 1)
2578397874dfSChristoph Lameter 		seq_printf(m, " mapmax=%lu", md->mapcount_max);
2579397874dfSChristoph Lameter 
2580397874dfSChristoph Lameter 	if (md->swapcache)
2581397874dfSChristoph Lameter 		seq_printf(m," swapcache=%lu", md->swapcache);
2582397874dfSChristoph Lameter 
2583397874dfSChristoph Lameter 	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
2584397874dfSChristoph Lameter 		seq_printf(m," active=%lu", md->active);
2585397874dfSChristoph Lameter 
2586397874dfSChristoph Lameter 	if (md->writeback)
2587397874dfSChristoph Lameter 		seq_printf(m," writeback=%lu", md->writeback);
2588397874dfSChristoph Lameter 
258956bbd65dSChristoph Lameter 	for_each_node_state(n, N_HIGH_MEMORY)
25901a75a6c8SChristoph Lameter 		if (md->node[n])
25911a75a6c8SChristoph Lameter 			seq_printf(m, " N%d=%lu", n, md->node[n]);
2592397874dfSChristoph Lameter out:
25931a75a6c8SChristoph Lameter 	seq_putc(m, '\n');
25941a75a6c8SChristoph Lameter 	kfree(md);
25951a75a6c8SChristoph Lameter 
25961a75a6c8SChristoph Lameter 	if (m->count < m->size)
259799f89551SEric W. Biederman 		m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
25981a75a6c8SChristoph Lameter 	return 0;
25991a75a6c8SChristoph Lameter }
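
/*
 * Sample line emitted above (illustrative values):
 *
 *	2aaaaac000 interleave=static:0-3 anon=16 dirty=16 N0=4 N1=4 N2=4 N3=4
 *
 * A minimal userspace reader (hedged example, not part of this file):
 */
#if 0
#include <stdio.h>

int example_dump_numa_maps(void)
{
	char line[256];
	FILE *f = fopen("/proc/self/numa_maps", "r");

	if (!f)
		return -1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* "<vm_start> <policy> ..." */
	fclose(f);
	return 0;
}
#endif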
2600