/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a process counter
 *                is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case node -1 here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *		  in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem, kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
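
/*
 * Illustrative userspace sketch (not part of this file): the policies
 * described above are requested via the set_mempolicy(2) and mbind(2)
 * system calls, using the MPOL_* constants from <numaif.h>.  The
 * mapping at addr/length is assumed to already exist; error handling
 * is elided.
 *
 *	unsigned long nodes = (1UL << 0) | (1UL << 1);
 *	unsigned long node0 = 1UL << 0;
 *
 *	interleave this process' future allocations over nodes 0 and 1:
 *		set_mempolicy(MPOL_INTERLEAVE, &nodes, sizeof(nodes) * 8);
 *
 *	bind an existing mapping to node 0 only, with no fallback:
 *		mbind(addr, length, MPOL_BIND, &node0, sizeof(node0) * 8, 0);
 */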

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
   could replace all the switch()es with a mempolicy_ops structure.
*/

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
#define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2)		/* Gather statistics */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.policy = MPOL_DEFAULT,
};

static void mpol_rebind_policy(struct mempolicy *pol,
                               const nodemask_t *newmask);

/* Do sanity checking on a policy */
static int mpol_check_policy(int mode, nodemask_t *nodes)
{
	int was_empty, is_empty;

	if (!nodes)
		return 0;

	/*
	 * "Contextualize" the in-coming nodemask for cpusets:
	 * Remember whether the in-coming nodemask was empty.  If not,
	 * restrict the nodes to the allowed nodes in the cpuset.
	 * This is guaranteed to be a subset of nodes with memory.
	 */
	cpuset_update_task_memory_state();
	is_empty = was_empty = nodes_empty(*nodes);
	if (!was_empty) {
		nodes_and(*nodes, *nodes, cpuset_current_mems_allowed);
		is_empty = nodes_empty(*nodes);	/* after "contextualization" */
	}

	switch (mode) {
	case MPOL_DEFAULT:
		/*
		 * require caller to specify an empty nodemask
		 * before "contextualization"
		 */
		if (!was_empty)
			return -EINVAL;
		break;
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
		/*
		 * require at least 1 valid node after "contextualization"
		 */
		if (is_empty)
			return -EINVAL;
		break;
	case MPOL_PREFERRED:
		/*
		 * Did caller specify invalid nodes?
		 * Don't silently accept this as "local allocation".
		 */
		if (!was_empty && is_empty)
			return -EINVAL;
		break;
	}
	return 0;
}
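
/*
 * Worked example (illustrative): with cpuset_current_mems_allowed =
 * {0,1}, an incoming nodemask of {1,2} is contextualized to {1}, so
 * MPOL_BIND succeeds; an incoming {2,3} contextualizes to the empty
 * set, so MPOL_BIND and MPOL_INTERLEAVE fail with -EINVAL, and
 * MPOL_PREFERRED rejects it too rather than silently falling back
 * to local allocation.
 */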

/* Check that the nodemask contains at least one populated zone */
static int is_valid_nodemask(nodemask_t *nodemask)
{
	int nd, k;

	/* Check that there is something useful in this mask */
	k = policy_zone;

	for_each_node_mask(nd, *nodemask) {
		struct zone *z;

		for (k = 0; k <= policy_zone; k++) {
			z = &NODE_DATA(nd)->node_zones[k];
			if (z->present_pages > 0)
				return 1;
		}
	}

	return 0;
}

/* Create a new policy */
static struct mempolicy *mpol_new(int mode, nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d nodes[0] %lx\n",
		 mode, nodes ? nodes_addr(*nodes)[0] : -1);

	if (mode == MPOL_DEFAULT)
		return NULL;
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	switch (mode) {
	case MPOL_INTERLEAVE:
		policy->v.nodes = *nodes;
		if (nodes_weight(policy->v.nodes) == 0) {
			kmem_cache_free(policy_cache, policy);
			return ERR_PTR(-EINVAL);
		}
		break;
	case MPOL_PREFERRED:
		policy->v.preferred_node = first_node(*nodes);
		if (policy->v.preferred_node >= MAX_NUMNODES)
			policy->v.preferred_node = -1;
		break;
	case MPOL_BIND:
		if (!is_valid_nodemask(nodes)) {
			kmem_cache_free(policy_cache, policy);
			return ERR_PTR(-EINVAL);
		}
		policy->v.nodes = *nodes;
		break;
	}
	policy->policy = mode;
	policy->cpuset_mems_allowed = cpuset_mems_allowed(current);
	return policy;
}

static void gather_stats(struct page *, void *, int pte_dirty);
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

/* Scan through pages, checking whether they satisfy the given conditions. */
static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pte_t *orig_pte;
	pte_t *pte;
	spinlock_t *ptl;

	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		struct page *page;
		int nid;

		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * The check for PageReserved here is important to avoid
		 * handling zero pages and other pages that may have been
		 * marked special by the system.
		 *
		 * If PageReserved were not checked here then e.g.
		 * the location of the zero page could have an influence
		 * on MPOL_MF_STRICT, zero pages would be counted for
		 * the per node stats, and there would be useless attempts
		 * to put zero pages on the migration list.
		 */
		if (PageReserved(page))
			continue;
		nid = page_to_nid(page);
		if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
			continue;

		if (flags & MPOL_MF_STATS)
			gather_stats(page, private, pte_dirty(*pte));
		else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
			migrate_page_add(page, private, flags);
		else
			break;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return addr != end;
}

static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		if (check_pte_range(vma, pmd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		if (check_pmd_range(vma, pud, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static inline int check_pgd_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		if (check_pud_range(vma, pgd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pgd++, addr = next, addr != end);
	return 0;
}

/*
 * Check if all pages in a range are on a set of nodes.
 * If pagelist != NULL then isolate pages from the LRU and
 * put them on the pagelist.
 */
static struct vm_area_struct *
check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		const nodemask_t *nodes, unsigned long flags, void *private)
{
	int err;
	struct vm_area_struct *first, *vma, *prev;

	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {

		err = migrate_prep();
		if (err)
			return ERR_PTR(err);
	}

	first = find_vma(mm, start);
	if (!first)
		return ERR_PTR(-EFAULT);
	prev = NULL;
	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
			if (!vma->vm_next && vma->vm_end < end)
				return ERR_PTR(-EFAULT);
			if (prev && prev->vm_end < vma->vm_start)
				return ERR_PTR(-EFAULT);
		}
		if (!is_vm_hugetlb_page(vma) &&
		    ((flags & MPOL_MF_STRICT) ||
		     ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
				vma_migratable(vma)))) {
			unsigned long endvma = vma->vm_end;

			if (endvma > end)
				endvma = end;
			if (vma->vm_start > start)
				start = vma->vm_start;
			err = check_pgd_range(vma, start, endvma, nodes,
						flags, private);
			if (err) {
				first = ERR_PTR(err);
				break;
			}
		}
		prev = vma;
	}
	return first;
}

/* Apply policy to a single VMA */
static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
{
	int err = 0;
	struct mempolicy *old = vma->vm_policy;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	if (vma->vm_ops && vma->vm_ops->set_policy)
		err = vma->vm_ops->set_policy(vma, new);
	if (!err) {
		mpol_get(new);
		vma->vm_policy = new;
		mpol_free(old);
	}
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, struct mempolicy *new)
{
	struct vm_area_struct *next;
	int err;

	err = 0;
	for (; vma && vma->vm_start < end; vma = next) {
		next = vma->vm_next;
		if (vma->vm_start < start)
			err = split_vma(vma->vm_mm, vma, start, 1);
		if (!err && vma->vm_end > end)
			err = split_vma(vma->vm_mm, vma, end, 0);
		if (!err)
			err = policy_vma(vma, new);
		if (err)
			break;
	}
	return err;
}
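
/*
 * Worked example (illustrative): applying a new policy to a range
 * [start, end) that lies strictly inside one VMA splits that VMA
 * twice; only the middle piece receives the new policy:
 *
 *	vm_start      start            end          vm_end
 *	    | old pol   |  new policy   |  old pol    |
 */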

/*
 * Update task->flags PF_MEMPOLICY bit: set iff non-default
 * mempolicy.  Allows more rapid checking of this (combined perhaps
 * with other PF_* flag bits) on memory allocation hot code paths.
 *
 * If called from outside this file, the task 'p' should -only- be
 * a newly forked child not yet visible on the task list, because
 * manipulating the task flags of a visible task is not safe.
 *
 * The above limitation is why this routine has the funny name
 * mpol_fix_fork_child_flag().
 *
 * It is also safe to call this with a task pointer of current,
 * which the static wrapper mpol_set_task_struct_flag() does,
 * for use within this file.
 */

void mpol_fix_fork_child_flag(struct task_struct *p)
{
	if (p->mempolicy)
		p->flags |= PF_MEMPOLICY;
	else
		p->flags &= ~PF_MEMPOLICY;
}

static void mpol_set_task_struct_flag(void)
{
	mpol_fix_fork_child_flag(current);
}

/* Set the process memory policy */
static long do_set_mempolicy(int mode, nodemask_t *nodes)
{
	struct mempolicy *new;

	if (mpol_check_policy(mode, nodes))
		return -EINVAL;
	new = mpol_new(mode, nodes);
	if (IS_ERR(new))
		return PTR_ERR(new);
	mpol_free(current->mempolicy);
	current->mempolicy = new;
	mpol_set_task_struct_flag();
	if (new && new->policy == MPOL_INTERLEAVE)
		current->il_next = first_node(new->v.nodes);
	return 0;
}

/* Fill a zone bitmap for a policy */
static void get_zonemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	switch (p->policy) {
	case MPOL_DEFAULT:
		break;
	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		/* or use current node instead of memory_map? */
		if (p->v.preferred_node < 0)
			*nodes = node_states[N_HIGH_MEMORY];
		else
			node_set(p->v.preferred_node, *nodes);
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p;
	int err;

	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	cpuset_update_task_memory_state();
	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		*nmask  = cpuset_current_mems_allowed;
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->policy == MPOL_INTERLEAVE) {
			*policy = current->il_next;
		} else {
			err = -EINVAL;
			goto out;
		}
	} else
		*policy = pol->policy;

	if (vma) {
		up_read(&current->mm->mmap_sem);
		vma = NULL;
	}

	err = 0;
	if (nmask)
		get_zonemask(pol, nmask);

 out:
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}

#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1)
		isolate_lru_page(page, pagelist);
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
	return alloc_pages_node(node, GFP_HIGHUSER_MOVABLE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;

	nodes_clear(nmask);
	node_set(source, nmask);

	check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist))
		err = migrate_pages(&pagelist, new_node_page, dest);

	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	LIST_HEAD(pagelist);
	int busy = 0;
	int err = 0;
	nodemask_t tmp;

	down_read(&mm->mmap_sem);

	err = migrate_vmas(mm, from_nodes, to_nodes, flags);
	if (err)
		goto out;

/*
 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
 * bit in 'tmp', and return that <source, dest> pair for migration.
 * The pair of nodemasks 'to' and 'from' define the map.
 *
 * If no pair of bits is found that way, fallback to picking some
 * pair of 'source' and 'dest' bits that are not the same.  If the
 * 'source' and 'dest' bits are the same, this represents a node
 * that will be migrating to itself, so no pages need move.
 *
 * If no bits are left in 'tmp', or if all remaining bits left
 * in 'tmp' correspond to the same bit in 'to', return false
 * (nothing left to migrate).
 *
 * This lets us pick a pair of nodes to migrate between, such that
 * if possible the dest node is not already occupied by some other
 * source node, minimizing the risk of overloading the memory on a
 * node that would happen if we migrated incoming memory to a node
 * before migrating outgoing memory from that same node.
 *
 * A single scan of tmp is sufficient.  As we go, we remember the
 * most recent <s, d> pair that moved (s != d).  If we find a pair
 * that not only moved, but what's better, moved to an empty slot
 * (d is not set in tmp), then we break out then, with that pair.
 * Otherwise when we finish scanning tmp, we at least have the
 * most recent <s, d> pair that moved.  If we get all the way through
 * the scan of tmp without finding any node that moved, much less
 * moved to an empty node, then there is nothing left worth migrating.
 */
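
/*
 * Worked example (illustrative): from = {1,2}, to = {2,3}, so
 * node_remap() maps 1->2 and 2->3.  The first scan skips <1,2>
 * because dest 2 is still a source in tmp, and picks <2,3> since 3
 * is an empty slot.  After migrating 2->3 and clearing 2 from tmp,
 * the next scan picks <1,2>: node 2 is drained before node 1's
 * pages land on it.
 */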

	tmp = *from_nodes;
	while (!nodes_empty(tmp)) {
		int s, d;
		int source = -1;
		int dest = 0;

		for_each_node_mask(s, tmp) {
			d = node_remap(s, *from_nodes, *to_nodes);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
				break;
		}
		if (source == -1)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;
		if (err < 0)
			break;
	}
out:
	up_read(&mm->mmap_sem);
	if (err < 0)
		return err;
	return busy;
}

/*
 * Allocate a new page for page migration based on vma policy.
 * Start assuming that page is mapped by vma pointed to by @private.
 * Search forward from there, if not.  N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
 */
static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	struct vm_area_struct *vma = (struct vm_area_struct *)private;
	unsigned long uninitialized_var(address);

	while (vma) {
		address = page_address_in_vma(page, vma);
		if (address != -EFAULT)
			break;
		vma = vma->vm_next;
	}

	/*
	 * if !vma, alloc_page_vma() will use task or system default policy
	 */
	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
}
#else

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
}

int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	return -ENOSYS;
}

static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	return NULL;
}
#endif

static long do_mbind(unsigned long start, unsigned long len,
		     unsigned long mode, nodemask_t *nmask,
		     unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	struct mempolicy *new;
	unsigned long end;
	int err;
	LIST_HEAD(pagelist);

	if ((flags & ~(unsigned long)(MPOL_MF_STRICT |
				      MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
	    || mode > MPOL_MAX)
		return -EINVAL;
	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	if (start & ~PAGE_MASK)
		return -EINVAL;

	if (mode == MPOL_DEFAULT)
		flags &= ~MPOL_MF_STRICT;

	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
	end = start + len;

	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;

	if (mpol_check_policy(mode, nmask))
		return -EINVAL;

	new = mpol_new(mode, nmask);
	if (IS_ERR(new))
		return PTR_ERR(new);

	/*
	 * If we are using the default policy then operation
	 * on discontinuous address spaces is okay after all
	 */
	if (!new)
		flags |= MPOL_MF_DISCONTIG_OK;

	pr_debug("mbind %lx-%lx mode:%ld nodes:%lx\n", start, start + len,
		 mode, nmask ? nodes_addr(*nmask)[0] : -1);

	down_write(&mm->mmap_sem);
	vma = check_range(mm, start, end, nmask,
			  flags | MPOL_MF_INVERT, &pagelist);

	err = PTR_ERR(vma);
	if (!IS_ERR(vma)) {
		int nr_failed = 0;

		err = mbind_range(vma, start, end, new);

		if (!list_empty(&pagelist))
			nr_failed = migrate_pages(&pagelist, new_vma_page,
						(unsigned long)vma);

		if (!err && nr_failed && (flags & MPOL_MF_STRICT))
			err = -EIO;
	}

	up_write(&mm->mmap_sem);
	mpol_free(new);
	return err;
}

/*
 * User space interface with variable sized bitmaps for nodelists.
 */

/* Copy a node mask from user space. */
static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
		     unsigned long maxnode)
{
	unsigned long k;
	unsigned long nlongs;
	unsigned long endmask;

	--maxnode;
	nodes_clear(*nodes);
	if (maxnode == 0 || !nmask)
		return 0;
	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
		return -EINVAL;

	nlongs = BITS_TO_LONGS(maxnode);
	if ((maxnode % BITS_PER_LONG) == 0)
		endmask = ~0UL;
	else
		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;

	/* If the user specified more nodes than supported, just check
	   that the unsupported part is all zero. */
	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
		if (nlongs > PAGE_SIZE/sizeof(long))
			return -EINVAL;
		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
			unsigned long t;
			if (get_user(t, nmask + k))
				return -EFAULT;
			if (k == nlongs - 1) {
				if (t & endmask)
					return -EINVAL;
			} else if (t)
				return -EINVAL;
		}
		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
		endmask = ~0UL;
	}

	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
		return -EFAULT;
	nodes_addr(*nodes)[nlongs-1] &= endmask;
	return 0;
}
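
/*
 * Worked example (illustrative), on a 64-bit kernel: maxnode = 65
 * decrements to 64 bits, giving nlongs = 1 and endmask = ~0UL;
 * maxnode = 17 decrements to 16 bits, giving nlongs = 1 and an
 * endmask keeping bits 0-15, so higher bits in the copied word are
 * masked off.  Nonzero bits beyond MAX_NUMNODES, by contrast, make
 * the call fail with -EINVAL.
 */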

/* Copy a kernel node mask to user space */
static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
			      nodemask_t *nodes)
{
	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);

	if (copy > nbytes) {
		if (copy > PAGE_SIZE)
			return -EINVAL;
		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
			return -EFAULT;
		copy = nbytes;
	}
	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
}

asmlinkage long sys_mbind(unsigned long start, unsigned long len,
			unsigned long mode,
			unsigned long __user *nmask, unsigned long maxnode,
			unsigned flags)
{
	nodemask_t nodes;
	int err;

	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_mbind(start, len, mode, &nodes, flags);
}

/* Set the process memory policy */
asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
		unsigned long maxnode)
{
	int err;
	nodemask_t nodes;

	if (mode < 0 || mode > MPOL_MAX)
		return -EINVAL;
	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_set_mempolicy(mode, &nodes);
}

asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
		const unsigned long __user *old_nodes,
		const unsigned long __user *new_nodes)
{
	struct mm_struct *mm;
	struct task_struct *task;
	nodemask_t old;
	nodemask_t new;
	nodemask_t task_nodes;
	int err;

	err = get_nodes(&old, old_nodes, maxnode);
	if (err)
		return err;

	err = get_nodes(&new, new_nodes, maxnode);
	if (err)
		return err;

	/* Find the mm_struct */
	read_lock(&tasklist_lock);
	task = pid ? find_task_by_vpid(pid) : current;
	if (!task) {
		read_unlock(&tasklist_lock);
		return -ESRCH;
	}
	mm = get_task_mm(task);
	read_unlock(&tasklist_lock);

	if (!mm)
		return -EINVAL;

	/*
	 * Check if this process has the right to modify the specified
	 * process. The right exists if the process has administrative
	 * capabilities, superuser privileges or the same
	 * userid as the target process.
	 */
	if ((current->euid != task->suid) && (current->euid != task->uid) &&
	    (current->uid != task->suid) && (current->uid != task->uid) &&
	    !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out;
	}

	task_nodes = cpuset_mems_allowed(task);
	/* Is the user allowed to access the target nodes? */
	if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out;
	}

	if (!nodes_subset(new, node_states[N_HIGH_MEMORY])) {
		err = -EINVAL;
		goto out;
	}

	err = security_task_movememory(task);
	if (err)
		goto out;

	err = do_migrate_pages(mm, &old, &new,
		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
out:
	mmput(mm);
	return err;
}
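
/*
 * Illustrative userspace sketch (not part of this file): moving all
 * of a target task's pages from node 0 to node 1 via the
 * migrate_pages(2) system call, error handling elided:
 *
 *	unsigned long old = 1UL << 0, new = 1UL << 1;
 *
 *	migrate_pages(pid, sizeof(old) * 8, &old, &new);
 */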

/* Retrieve NUMA policy */
asmlinkage long sys_get_mempolicy(int __user *policy,
				unsigned long __user *nmask,
				unsigned long maxnode,
				unsigned long addr, unsigned long flags)
{
	int err;
	int uninitialized_var(pval);
	nodemask_t nodes;

	if (nmask != NULL && maxnode < MAX_NUMNODES)
		return -EINVAL;

	err = do_get_mempolicy(&pval, &nodes, addr, flags);

	if (err)
		return err;

	if (policy && put_user(pval, policy))
		return -EFAULT;

	if (nmask)
		err = copy_nodes_to_user(nmask, maxnode, &nodes);

	return err;
}
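
/*
 * Illustrative userspace sketch (not part of this file): querying the
 * policy in effect at a given address with get_mempolicy(2).  Per the
 * check above, the nodes[] buffer must cover at least MAX_NUMNODES
 * bits; error handling elided:
 *
 *	int mode;
 *	unsigned long nodes[16];
 *
 *	get_mempolicy(&mode, nodes, sizeof(nodes) * 8, addr, MPOL_F_ADDR);
 */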

#ifdef CONFIG_COMPAT

asmlinkage long compat_sys_get_mempolicy(int __user *policy,
				     compat_ulong_t __user *nmask,
				     compat_ulong_t maxnode,
				     compat_ulong_t addr, compat_ulong_t flags)
{
	long err;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask)
		nm = compat_alloc_user_space(alloc_size);

	err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);

	if (!err && nmask) {
		err = copy_from_user(bm, nm, alloc_size);
		/* ensure entire bitmap is zeroed */
		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
		err |= compat_put_bitmap(nmask, bm, nr_bits);
	}

	return err;
}

asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
				     compat_ulong_t maxnode)
{
	long err = 0;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		err = compat_get_bitmap(bm, nmask, nr_bits);
		nm = compat_alloc_user_space(alloc_size);
		err |= copy_to_user(nm, bm, alloc_size);
	}

	if (err)
		return -EFAULT;

	return sys_set_mempolicy(mode, nm, nr_bits+1);
}

asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
			     compat_ulong_t mode, compat_ulong_t __user *nmask,
			     compat_ulong_t maxnode, compat_ulong_t flags)
{
	long err = 0;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	nodemask_t bm;

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
		nm = compat_alloc_user_space(alloc_size);
		err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
	}

	if (err)
		return -EFAULT;

	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
}

#endif
11031da177e4SLinus Torvalds 
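/*
 * Worked example for the compat conversions above (numbers are
 * hypothetical): a 32-bit task on a 64-bit kernel passing maxnode = 65
 * gets nr_bits = 64 and alloc_size = 8.  compat_get_bitmap() folds the
 * two 32-bit user words into one native unsigned long, the result is
 * staged back in user space via compat_alloc_user_space(), and the
 * native syscall is then invoked with nr_bits+1 so it sees the same
 * effective mask width.
 */
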
1104480eccf9SLee Schermerhorn /*
1105480eccf9SLee Schermerhorn  * get_vma_policy(@task, @vma, @addr)
1106480eccf9SLee Schermerhorn  * @task - task for fallback if vma policy == default
1107480eccf9SLee Schermerhorn  * @vma   - virtual memory area whose policy is sought
1108480eccf9SLee Schermerhorn  * @addr  - address in @vma for shared policy lookup
1109480eccf9SLee Schermerhorn  *
1110480eccf9SLee Schermerhorn  * Returns effective policy for a VMA at specified address.
1111480eccf9SLee Schermerhorn  * Falls back to @task or system default policy, as necessary.
1112480eccf9SLee Schermerhorn  * Returned policy has an extra reference count if it is shared, a vma
1113480eccf9SLee Schermerhorn  * policy, or some other task's policy [show_numa_maps() can pass
1114480eccf9SLee Schermerhorn  * @task != current].  It is the caller's responsibility to
1115480eccf9SLee Schermerhorn  * free the reference in these cases.
1116480eccf9SLee Schermerhorn  */
111748fce342SChristoph Lameter static struct mempolicy * get_vma_policy(struct task_struct *task,
111848fce342SChristoph Lameter 		struct vm_area_struct *vma, unsigned long addr)
11191da177e4SLinus Torvalds {
11206e21c8f1SChristoph Lameter 	struct mempolicy *pol = task->mempolicy;
1121480eccf9SLee Schermerhorn 	int shared_pol = 0;
11221da177e4SLinus Torvalds 
11231da177e4SLinus Torvalds 	if (vma) {
1124480eccf9SLee Schermerhorn 		if (vma->vm_ops && vma->vm_ops->get_policy) {
11251da177e4SLinus Torvalds 			pol = vma->vm_ops->get_policy(vma, addr);
1126480eccf9SLee Schermerhorn 			shared_pol = 1;	/* if pol non-NULL, add ref below */
1127480eccf9SLee Schermerhorn 		} else if (vma->vm_policy &&
11281da177e4SLinus Torvalds 				vma->vm_policy->policy != MPOL_DEFAULT)
11291da177e4SLinus Torvalds 			pol = vma->vm_policy;
11301da177e4SLinus Torvalds 	}
11311da177e4SLinus Torvalds 	if (!pol)
11321da177e4SLinus Torvalds 		pol = &default_policy;
1133480eccf9SLee Schermerhorn 	else if (!shared_pol && pol != current->mempolicy)
1134480eccf9SLee Schermerhorn 		mpol_get(pol);	/* vma or other task's policy */
11351da177e4SLinus Torvalds 	return pol;
11361da177e4SLinus Torvalds }
11371da177e4SLinus Torvalds 
1138*19770b32SMel Gorman /* Return a nodemask representing a mempolicy */
1139*19770b32SMel Gorman static nodemask_t *nodemask_policy(gfp_t gfp, struct mempolicy *policy)
1140*19770b32SMel Gorman {
1141*19770b32SMel Gorman 	/* Lower zones don't get a nodemask applied for MPOL_BIND */
1142*19770b32SMel Gorman 	if (unlikely(policy->policy == MPOL_BIND) &&
1143*19770b32SMel Gorman 			gfp_zone(gfp) >= policy_zone &&
1144*19770b32SMel Gorman 			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1145*19770b32SMel Gorman 		return &policy->v.nodes;
1146*19770b32SMel Gorman 
1147*19770b32SMel Gorman 	return NULL;
1148*19770b32SMel Gorman }
1149*19770b32SMel Gorman 
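/*
 * Illustrative example of the check above: with MPOL_BIND over nodes
 * 1-2 on a machine where policy_zone == ZONE_NORMAL, a GFP_KERNEL
 * request satisfies gfp_zone(gfp) >= policy_zone and gets
 * &policy->v.nodes back to filter the zonelist, while a GFP_DMA
 * request targets a lower zone and gets NULL, i.e. no nodemask.
 */
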
11501da177e4SLinus Torvalds /* Return a zonelist representing a mempolicy */
1151dd0fc66fSAl Viro static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy)
11521da177e4SLinus Torvalds {
11531da177e4SLinus Torvalds 	int nd;
11541da177e4SLinus Torvalds 
11551da177e4SLinus Torvalds 	switch (policy->policy) {
11561da177e4SLinus Torvalds 	case MPOL_PREFERRED:
11571da177e4SLinus Torvalds 		nd = policy->v.preferred_node;
11581da177e4SLinus Torvalds 		if (nd < 0)
11591da177e4SLinus Torvalds 			nd = numa_node_id();
11601da177e4SLinus Torvalds 		break;
11611da177e4SLinus Torvalds 	case MPOL_BIND:
1162*19770b32SMel Gorman 		/*
1163*19770b32SMel Gorman 		 * Normally, MPOL_BIND allocations are node-local within the
1164*19770b32SMel Gorman 		 * allowed nodemask. However, if __GFP_THISNODE is set and the
1165*19770b32SMel Gorman 		 * current node isn't part of the mask, we use the zonelist for
1166*19770b32SMel Gorman 		 * the first node in the mask instead.
1167*19770b32SMel Gorman 		 */
1168*19770b32SMel Gorman 		nd = numa_node_id();
1169*19770b32SMel Gorman 		if (unlikely(gfp & __GFP_THISNODE) &&
1170*19770b32SMel Gorman 				unlikely(!node_isset(nd, policy->v.nodes)))
1171*19770b32SMel Gorman 			nd = first_node(policy->v.nodes);
1172*19770b32SMel Gorman 		break;
11731da177e4SLinus Torvalds 	case MPOL_INTERLEAVE: /* should not happen */
11741da177e4SLinus Torvalds 	case MPOL_DEFAULT:
11751da177e4SLinus Torvalds 		nd = numa_node_id();
11761da177e4SLinus Torvalds 		break;
11771da177e4SLinus Torvalds 	default:
11781da177e4SLinus Torvalds 		nd = 0;
11791da177e4SLinus Torvalds 		BUG();
11801da177e4SLinus Torvalds 	}
11810e88460dSMel Gorman 	return node_zonelist(nd, gfp);
11821da177e4SLinus Torvalds }
11831da177e4SLinus Torvalds 
11841da177e4SLinus Torvalds /* Do dynamic interleaving for a process */
11851da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy)
11861da177e4SLinus Torvalds {
11871da177e4SLinus Torvalds 	unsigned nid, next;
11881da177e4SLinus Torvalds 	struct task_struct *me = current;
11891da177e4SLinus Torvalds 
11901da177e4SLinus Torvalds 	nid = me->il_next;
1191dfcd3c0dSAndi Kleen 	next = next_node(nid, policy->v.nodes);
11921da177e4SLinus Torvalds 	if (next >= MAX_NUMNODES)
1193dfcd3c0dSAndi Kleen 		next = first_node(policy->v.nodes);
11941da177e4SLinus Torvalds 	me->il_next = next;
11951da177e4SLinus Torvalds 	return nid;
11961da177e4SLinus Torvalds }
11971da177e4SLinus Torvalds 
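/*
 * Worked example (hypothetical node set): with pol->v.nodes = {0,2,5}
 * and il_next == 2, the call returns 2 and advances il_next to 5; the
 * following call returns 5, and since next_node() then runs past
 * MAX_NUMNODES, il_next wraps back to first_node(), i.e. 0.
 */
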
1198dc85da15SChristoph Lameter /*
1199dc85da15SChristoph Lameter  * Depending on the memory policy provide a node from which to allocate the
1200dc85da15SChristoph Lameter  * next slab entry.
1201dc85da15SChristoph Lameter  */
1202dc85da15SChristoph Lameter unsigned slab_node(struct mempolicy *policy)
1203dc85da15SChristoph Lameter {
1204765c4507SChristoph Lameter 	int pol = policy ? policy->policy : MPOL_DEFAULT;
1205765c4507SChristoph Lameter 
1206765c4507SChristoph Lameter 	switch (pol) {
1207dc85da15SChristoph Lameter 	case MPOL_INTERLEAVE:
1208dc85da15SChristoph Lameter 		return interleave_nodes(policy);
1209dc85da15SChristoph Lameter 
1210dd1a239fSMel Gorman 	case MPOL_BIND: {
1211dc85da15SChristoph Lameter 		/*
1212dc85da15SChristoph Lameter 		 * Follow bind policy behavior and start allocation at the
1213dc85da15SChristoph Lameter 		 * first node.
1214dc85da15SChristoph Lameter 		 */
1215*19770b32SMel Gorman 		struct zonelist *zonelist;
1216*19770b32SMel Gorman 		struct zone *zone;
1217*19770b32SMel Gorman 		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1218*19770b32SMel Gorman 		zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
1219*19770b32SMel Gorman 		(void)first_zones_zonelist(zonelist, highest_zoneidx,
1220*19770b32SMel Gorman 							&policy->v.nodes,
1221*19770b32SMel Gorman 							&zone);
1222*19770b32SMel Gorman 		return zone->node;
1223dd1a239fSMel Gorman 	}
1224dc85da15SChristoph Lameter 
1225dc85da15SChristoph Lameter 	case MPOL_PREFERRED:
1226dc85da15SChristoph Lameter 		if (policy->v.preferred_node >= 0)
1227dc85da15SChristoph Lameter 			return policy->v.preferred_node;
1228dc85da15SChristoph Lameter 		/* Fall through */
1229dc85da15SChristoph Lameter 
1230dc85da15SChristoph Lameter 	default:
1231dc85da15SChristoph Lameter 		return numa_node_id();
1232dc85da15SChristoph Lameter 	}
1233dc85da15SChristoph Lameter }
1234dc85da15SChristoph Lameter 
12351da177e4SLinus Torvalds /* Do static interleaving for a VMA with known offset. */
12361da177e4SLinus Torvalds static unsigned offset_il_node(struct mempolicy *pol,
12371da177e4SLinus Torvalds 		struct vm_area_struct *vma, unsigned long off)
12381da177e4SLinus Torvalds {
1239dfcd3c0dSAndi Kleen 	unsigned nnodes = nodes_weight(pol->v.nodes);
12401da177e4SLinus Torvalds 	unsigned target = (unsigned)off % nnodes;
12411da177e4SLinus Torvalds 	int c;
12421da177e4SLinus Torvalds 	int nid = -1;
12431da177e4SLinus Torvalds 
12441da177e4SLinus Torvalds 	c = 0;
12451da177e4SLinus Torvalds 	do {
1246dfcd3c0dSAndi Kleen 		nid = next_node(nid, pol->v.nodes);
12471da177e4SLinus Torvalds 		c++;
12481da177e4SLinus Torvalds 	} while (c <= target);
12491da177e4SLinus Torvalds 	return nid;
12501da177e4SLinus Torvalds }
12511da177e4SLinus Torvalds 
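/*
 * Worked example (hypothetical values): with pol->v.nodes = {1,3,4}
 * (nnodes = 3) and off = 7, target = 7 % 3 = 1, so the loop steps
 * next_node() twice from -1 and returns node 3, the second node in the
 * mask.  A given offset always maps to the same node, which is what
 * makes VMA interleaving static rather than counter based.
 */
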
12525da7ca86SChristoph Lameter /* Determine a node number for interleave */
12535da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol,
12545da7ca86SChristoph Lameter 		 struct vm_area_struct *vma, unsigned long addr, int shift)
12555da7ca86SChristoph Lameter {
12565da7ca86SChristoph Lameter 	if (vma) {
12575da7ca86SChristoph Lameter 		unsigned long off;
12585da7ca86SChristoph Lameter 
12593b98b087SNishanth Aravamudan 		/*
12603b98b087SNishanth Aravamudan 		 * for small pages, there is no difference between
12613b98b087SNishanth Aravamudan 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
12623b98b087SNishanth Aravamudan 		 * for huge pages, since vm_pgoff is in units of small
12633b98b087SNishanth Aravamudan 		 * pages, we need to shift off the always 0 bits to get
12643b98b087SNishanth Aravamudan 		 * a useful offset.
12653b98b087SNishanth Aravamudan 		 */
12663b98b087SNishanth Aravamudan 		BUG_ON(shift < PAGE_SHIFT);
12673b98b087SNishanth Aravamudan 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
12685da7ca86SChristoph Lameter 		off += (addr - vma->vm_start) >> shift;
12695da7ca86SChristoph Lameter 		return offset_il_node(pol, vma, off);
12705da7ca86SChristoph Lameter 	} else
12715da7ca86SChristoph Lameter 		return interleave_nodes(pol);
12725da7ca86SChristoph Lameter }
12735da7ca86SChristoph Lameter 
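/*
 * Shift example for the huge page case (illustrative x86_64 numbers):
 * with HPAGE_SHIFT == 21 and PAGE_SHIFT == 12, vm_pgoff is shifted
 * right by 9 to drop the 512 always-zero small-page bits per huge
 * page, and (addr - vm_start) >> 21 counts whole huge pages into the
 * mapping, giving one interleave step per huge page.
 */
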
127400ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS
1275480eccf9SLee Schermerhorn /*
1276480eccf9SLee Schermerhorn  * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
1277480eccf9SLee Schermerhorn  * @vma = virtual memory area whose policy is sought
1278480eccf9SLee Schermerhorn  * @addr = address in @vma for shared policy lookup and interleave policy
1279480eccf9SLee Schermerhorn  * @gfp_flags = for requested zone
1280*19770b32SMel Gorman  * @mpol = pointer to mempolicy pointer for reference counted mempolicy
1281*19770b32SMel Gorman  * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
1282480eccf9SLee Schermerhorn  *
1283480eccf9SLee Schermerhorn  * Returns a zonelist suitable for a huge page allocation.
1284*19770b32SMel Gorman  * If the effective policy is 'BIND, returns pointer to local node's zonelist,
1285*19770b32SMel Gorman  * and a pointer to the mempolicy's @nodemask for filtering the zonelist.
1286480eccf9SLee Schermerhorn  * If it is also a policy for which get_vma_policy() returns an extra
1287*19770b32SMel Gorman  * reference, we must hold that reference until after the allocation.
1288480eccf9SLee Schermerhorn  * In that case, return policy via @mpol so hugetlb allocation can drop
1289480eccf9SLee Schermerhorn  * the reference. For non-'BIND referenced policies, we can/do drop the
1290480eccf9SLee Schermerhorn  * reference here, so the caller doesn't need to know about the special case
1291480eccf9SLee Schermerhorn  * for default and current task policy.
1292480eccf9SLee Schermerhorn  */
1293396faf03SMel Gorman struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
1294*19770b32SMel Gorman 				gfp_t gfp_flags, struct mempolicy **mpol,
1295*19770b32SMel Gorman 				nodemask_t **nodemask)
12965da7ca86SChristoph Lameter {
12975da7ca86SChristoph Lameter 	struct mempolicy *pol = get_vma_policy(current, vma, addr);
1298480eccf9SLee Schermerhorn 	struct zonelist *zl;
12995da7ca86SChristoph Lameter 
1300480eccf9SLee Schermerhorn 	*mpol = NULL;		/* probably no unref needed */
1301*19770b32SMel Gorman 	*nodemask = NULL;	/* assume !MPOL_BIND */
1302*19770b32SMel Gorman 	if (pol->policy == MPOL_BIND) {
1303*19770b32SMel Gorman 		*nodemask = &pol->v.nodes;
1304*19770b32SMel Gorman 	} else if (pol->policy == MPOL_INTERLEAVE) {
13055da7ca86SChristoph Lameter 		unsigned nid;
13065da7ca86SChristoph Lameter 
13075da7ca86SChristoph Lameter 		nid = interleave_nid(pol, vma, addr, HPAGE_SHIFT);
130869682d85SLee Schermerhorn 		if (unlikely(pol != &default_policy &&
130969682d85SLee Schermerhorn 				pol != current->mempolicy))
1310480eccf9SLee Schermerhorn 			__mpol_free(pol);	/* finished with pol */
13110e88460dSMel Gorman 		return node_zonelist(nid, gfp_flags);
13125da7ca86SChristoph Lameter 	}
1313480eccf9SLee Schermerhorn 
1314480eccf9SLee Schermerhorn 	zl = zonelist_policy(GFP_HIGHUSER, pol);
1315480eccf9SLee Schermerhorn 	if (unlikely(pol != &default_policy && pol != current->mempolicy)) {
1316480eccf9SLee Schermerhorn 		if (pol->policy != MPOL_BIND)
1317480eccf9SLee Schermerhorn 			__mpol_free(pol);	/* finished with pol */
1318480eccf9SLee Schermerhorn 		else
1319480eccf9SLee Schermerhorn 			*mpol = pol;	/* unref needed after allocation */
1320480eccf9SLee Schermerhorn 	}
1321480eccf9SLee Schermerhorn 	return zl;
13225da7ca86SChristoph Lameter }
132300ac59adSChen, Kenneth W #endif
13245da7ca86SChristoph Lameter 
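/*
 * Usage sketch (simplified from a hugetlb allocation path; error
 * handling is omitted and the gfp mask name is an assumption):
 *
 *	struct mempolicy *mpol;
 *	nodemask_t *nodemask;
 *	struct zonelist *zl = huge_zonelist(vma, addr, htlb_alloc_mask,
 *					    &mpol, &nodemask);
 *	...allocate the huge page from zl, filtered by nodemask...
 *	mpol_free(mpol);	(unrefs only if non-NULL, i.e. ref-counted BIND)
 */
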
13251da177e4SLinus Torvalds /* Allocate a page in interleaved policy.
13261da177e4SLinus Torvalds    Own path because it needs to do special accounting. */
1327662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1328662f3a0bSAndi Kleen 					unsigned nid)
13291da177e4SLinus Torvalds {
13301da177e4SLinus Torvalds 	struct zonelist *zl;
13311da177e4SLinus Torvalds 	struct page *page;
13321da177e4SLinus Torvalds 
13330e88460dSMel Gorman 	zl = node_zonelist(nid, gfp);
13341da177e4SLinus Torvalds 	page = __alloc_pages(gfp, order, zl);
1335dd1a239fSMel Gorman 	if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
1336ca889e6cSChristoph Lameter 		inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
13371da177e4SLinus Torvalds 	return page;
13381da177e4SLinus Torvalds }
13391da177e4SLinus Torvalds 
13401da177e4SLinus Torvalds /**
13411da177e4SLinus Torvalds  * 	alloc_page_vma	- Allocate a page for a VMA.
13421da177e4SLinus Torvalds  *
13431da177e4SLinus Torvalds  * 	@gfp:
13441da177e4SLinus Torvalds  *      %GFP_USER    user allocation.
13451da177e4SLinus Torvalds  *      %GFP_KERNEL  kernel allocations,
13461da177e4SLinus Torvalds  *      %GFP_HIGHMEM highmem/user allocations,
13471da177e4SLinus Torvalds  *      %GFP_FS      allocation should not call back into a file system.
13481da177e4SLinus Torvalds  *      %GFP_ATOMIC  don't sleep.
13491da177e4SLinus Torvalds  *
13501da177e4SLinus Torvalds  * 	@vma:  Pointer to VMA or NULL if not available.
13511da177e4SLinus Torvalds  *	@addr: Virtual Address of the allocation. Must be inside the VMA.
13521da177e4SLinus Torvalds  *
13531da177e4SLinus Torvalds  * 	This function allocates a page from the kernel page pool and applies
13541da177e4SLinus Torvalds  *	a NUMA policy associated with the VMA or the current process.
13551da177e4SLinus Torvalds  *	When VMA is not NULL caller must hold down_read on the mmap_sem of the
13561da177e4SLinus Torvalds  *	mm_struct of the VMA to prevent it from going away. Should be used for
13571da177e4SLinus Torvalds  *	all allocations for pages that will be mapped into
13581da177e4SLinus Torvalds  * 	user space. Returns NULL when no page can be allocated.
13591da177e4SLinus Torvalds  *
13601da177e4SLinus Torvalds  *	Should be called with the mmap_sem of the vma held.
13611da177e4SLinus Torvalds  */
13621da177e4SLinus Torvalds struct page *
1363dd0fc66fSAl Viro alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
13641da177e4SLinus Torvalds {
13656e21c8f1SChristoph Lameter 	struct mempolicy *pol = get_vma_policy(current, vma, addr);
1366480eccf9SLee Schermerhorn 	struct zonelist *zl;
13671da177e4SLinus Torvalds 
1368cf2a473cSPaul Jackson 	cpuset_update_task_memory_state();
13691da177e4SLinus Torvalds 
13701da177e4SLinus Torvalds 	if (unlikely(pol->policy == MPOL_INTERLEAVE)) {
13711da177e4SLinus Torvalds 		unsigned nid;
13725da7ca86SChristoph Lameter 
13735da7ca86SChristoph Lameter 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
137469682d85SLee Schermerhorn 		if (unlikely(pol != &default_policy &&
137569682d85SLee Schermerhorn 				pol != current->mempolicy))
137669682d85SLee Schermerhorn 			__mpol_free(pol);	/* finished with pol */
13771da177e4SLinus Torvalds 		return alloc_page_interleave(gfp, 0, nid);
13781da177e4SLinus Torvalds 	}
1379480eccf9SLee Schermerhorn 	zl = zonelist_policy(gfp, pol);
1380480eccf9SLee Schermerhorn 	if (pol != &default_policy && pol != current->mempolicy) {
1381480eccf9SLee Schermerhorn 		/*
1382480eccf9SLee Schermerhorn 		 * slow path: ref counted policy -- shared or vma
1383480eccf9SLee Schermerhorn 		 */
1384*19770b32SMel Gorman 		struct page *page =  __alloc_pages_nodemask(gfp, 0,
1385*19770b32SMel Gorman 						zl, nodemask_policy(gfp, pol));
1386480eccf9SLee Schermerhorn 		__mpol_free(pol);
1387480eccf9SLee Schermerhorn 		return page;
1388480eccf9SLee Schermerhorn 	}
1389480eccf9SLee Schermerhorn 	/*
1390480eccf9SLee Schermerhorn 	 * fast path:  default or task policy
1391480eccf9SLee Schermerhorn 	 */
1392*19770b32SMel Gorman 	return __alloc_pages_nodemask(gfp, 0, zl, nodemask_policy(gfp, pol));
13931da177e4SLinus Torvalds }
13941da177e4SLinus Torvalds 
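/*
 * Usage sketch (illustrative, mirroring an anonymous fault path; the
 * caller is assumed to hold down_read(&mm->mmap_sem)):
 *
 *	struct page *page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, addr);
 *	if (!page)
 *		return VM_FAULT_OOM;
 */
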
13951da177e4SLinus Torvalds /**
13961da177e4SLinus Torvalds  * 	alloc_pages_current - Allocate pages.
13971da177e4SLinus Torvalds  *
13981da177e4SLinus Torvalds  *	@gfp:
13991da177e4SLinus Torvalds  *		%GFP_USER   user allocation,
14001da177e4SLinus Torvalds  *      	%GFP_KERNEL kernel allocation,
14011da177e4SLinus Torvalds  *      	%GFP_HIGHMEM highmem allocation,
14021da177e4SLinus Torvalds  *      	%GFP_FS     don't call back into a file system.
14031da177e4SLinus Torvalds  *      	%GFP_ATOMIC don't sleep.
14041da177e4SLinus Torvalds  *	@order: Power of two of allocation size in pages. 0 is a single page.
14051da177e4SLinus Torvalds  *
14061da177e4SLinus Torvalds  *	Allocate a page from the kernel page pool.  When not in
14071da177e4SLinus Torvalds  *	interrupt context, apply the current process' NUMA policy.
14081da177e4SLinus Torvalds  *	Returns NULL when no page can be allocated.
14091da177e4SLinus Torvalds  *
1410cf2a473cSPaul Jackson  *	Don't call cpuset_update_task_memory_state() unless
14111da177e4SLinus Torvalds  *	1) it's ok to take cpuset_sem (can WAIT), and
14121da177e4SLinus Torvalds  *	2) allocating for current task (not interrupt).
14131da177e4SLinus Torvalds  */
1414dd0fc66fSAl Viro struct page *alloc_pages_current(gfp_t gfp, unsigned order)
14151da177e4SLinus Torvalds {
14161da177e4SLinus Torvalds 	struct mempolicy *pol = current->mempolicy;
14171da177e4SLinus Torvalds 
14181da177e4SLinus Torvalds 	if ((gfp & __GFP_WAIT) && !in_interrupt())
1419cf2a473cSPaul Jackson 		cpuset_update_task_memory_state();
14209b819d20SChristoph Lameter 	if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
14211da177e4SLinus Torvalds 		pol = &default_policy;
14221da177e4SLinus Torvalds 	if (pol->policy == MPOL_INTERLEAVE)
14231da177e4SLinus Torvalds 		return alloc_page_interleave(gfp, order, interleave_nodes(pol));
1424*19770b32SMel Gorman 	return __alloc_pages_nodemask(gfp, order,
1425*19770b32SMel Gorman 			zonelist_policy(gfp, pol), nodemask_policy(gfp, pol));
14261da177e4SLinus Torvalds }
14271da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_pages_current);
14281da177e4SLinus Torvalds 
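/*
 * On NUMA kernels the generic alloc_pages() macro expands to
 * alloc_pages_current(), so an ordinary order-0 allocation such as
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 0);
 *
 * already honors the calling task's mempolicy.
 */
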
14294225399aSPaul Jackson /*
14304225399aSPaul Jackson  * If mpol_copy() sees current->cpuset == cpuset_being_rebound, then it
14314225399aSPaul Jackson  * rebinds the mempolicy it's copying by calling mpol_rebind_policy()
14324225399aSPaul Jackson  * with the mems_allowed returned by cpuset_mems_allowed().  This
14334225399aSPaul Jackson  * keeps mempolicies cpuset relative after its cpuset moves.  See
14344225399aSPaul Jackson  * further kernel/cpuset.c update_nodemask().
14354225399aSPaul Jackson  */
14364225399aSPaul Jackson 
14371da177e4SLinus Torvalds /* Slow path of a mempolicy copy */
14381da177e4SLinus Torvalds struct mempolicy *__mpol_copy(struct mempolicy *old)
14391da177e4SLinus Torvalds {
14401da177e4SLinus Torvalds 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
14411da177e4SLinus Torvalds 
14421da177e4SLinus Torvalds 	if (!new)
14431da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
14444225399aSPaul Jackson 	if (current_cpuset_is_being_rebound()) {
14454225399aSPaul Jackson 		nodemask_t mems = cpuset_mems_allowed(current);
14464225399aSPaul Jackson 		mpol_rebind_policy(old, &mems);
14474225399aSPaul Jackson 	}
14481da177e4SLinus Torvalds 	*new = *old;
14491da177e4SLinus Torvalds 	atomic_set(&new->refcnt, 1);
14501da177e4SLinus Torvalds 	return new;
14511da177e4SLinus Torvalds }
14521da177e4SLinus Torvalds 
14531da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */
14541da177e4SLinus Torvalds int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
14551da177e4SLinus Torvalds {
14561da177e4SLinus Torvalds 	if (!a || !b)
14571da177e4SLinus Torvalds 		return 0;
14581da177e4SLinus Torvalds 	if (a->policy != b->policy)
14591da177e4SLinus Torvalds 		return 0;
14601da177e4SLinus Torvalds 	switch (a->policy) {
14611da177e4SLinus Torvalds 	case MPOL_DEFAULT:
14621da177e4SLinus Torvalds 		return 1;
1463*19770b32SMel Gorman 	case MPOL_BIND:
1464*19770b32SMel Gorman 		/* Fall through */
14651da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
1466dfcd3c0dSAndi Kleen 		return nodes_equal(a->v.nodes, b->v.nodes);
14671da177e4SLinus Torvalds 	case MPOL_PREFERRED:
14681da177e4SLinus Torvalds 		return a->v.preferred_node == b->v.preferred_node;
14691da177e4SLinus Torvalds 	default:
14701da177e4SLinus Torvalds 		BUG();
14711da177e4SLinus Torvalds 		return 0;
14721da177e4SLinus Torvalds 	}
14731da177e4SLinus Torvalds }
14741da177e4SLinus Torvalds 
14751da177e4SLinus Torvalds /* Slow path of a mpol destructor. */
14761da177e4SLinus Torvalds void __mpol_free(struct mempolicy *p)
14771da177e4SLinus Torvalds {
14781da177e4SLinus Torvalds 	if (!atomic_dec_and_test(&p->refcnt))
14791da177e4SLinus Torvalds 		return;
14801da177e4SLinus Torvalds 	p->policy = MPOL_DEFAULT;
14811da177e4SLinus Torvalds 	kmem_cache_free(policy_cache, p);
14821da177e4SLinus Torvalds }
14831da177e4SLinus Torvalds 
14841da177e4SLinus Torvalds /*
14851da177e4SLinus Torvalds  * Shared memory backing store policy support.
14861da177e4SLinus Torvalds  *
14871da177e4SLinus Torvalds  * Remember policies even when nobody has shared memory mapped.
14881da177e4SLinus Torvalds  * The policies are kept in Red-Black tree linked from the inode.
14891da177e4SLinus Torvalds  * They are protected by the sp->lock spinlock, which should be held
14901da177e4SLinus Torvalds  * for any accesses to the tree.
14911da177e4SLinus Torvalds  */
14921da177e4SLinus Torvalds 
14931da177e4SLinus Torvalds /* lookup first element intersecting start-end */
14941da177e4SLinus Torvalds /* Caller holds sp->lock */
14951da177e4SLinus Torvalds static struct sp_node *
14961da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
14971da177e4SLinus Torvalds {
14981da177e4SLinus Torvalds 	struct rb_node *n = sp->root.rb_node;
14991da177e4SLinus Torvalds 
15001da177e4SLinus Torvalds 	while (n) {
15011da177e4SLinus Torvalds 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
15021da177e4SLinus Torvalds 
15031da177e4SLinus Torvalds 		if (start >= p->end)
15041da177e4SLinus Torvalds 			n = n->rb_right;
15051da177e4SLinus Torvalds 		else if (end <= p->start)
15061da177e4SLinus Torvalds 			n = n->rb_left;
15071da177e4SLinus Torvalds 		else
15081da177e4SLinus Torvalds 			break;
15091da177e4SLinus Torvalds 	}
15101da177e4SLinus Torvalds 	if (!n)
15111da177e4SLinus Torvalds 		return NULL;
15121da177e4SLinus Torvalds 	for (;;) {
15131da177e4SLinus Torvalds 		struct sp_node *w = NULL;
15141da177e4SLinus Torvalds 		struct rb_node *prev = rb_prev(n);
15151da177e4SLinus Torvalds 		if (!prev)
15161da177e4SLinus Torvalds 			break;
15171da177e4SLinus Torvalds 		w = rb_entry(prev, struct sp_node, nd);
15181da177e4SLinus Torvalds 		if (w->end <= start)
15191da177e4SLinus Torvalds 			break;
15201da177e4SLinus Torvalds 		n = prev;
15211da177e4SLinus Torvalds 	}
15221da177e4SLinus Torvalds 	return rb_entry(n, struct sp_node, nd);
15231da177e4SLinus Torvalds }
15241da177e4SLinus Torvalds 
15251da177e4SLinus Torvalds /* Insert a new shared policy into the list. */
15261da177e4SLinus Torvalds /* Caller holds sp->lock */
15271da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new)
15281da177e4SLinus Torvalds {
15291da177e4SLinus Torvalds 	struct rb_node **p = &sp->root.rb_node;
15301da177e4SLinus Torvalds 	struct rb_node *parent = NULL;
15311da177e4SLinus Torvalds 	struct sp_node *nd;
15321da177e4SLinus Torvalds 
15331da177e4SLinus Torvalds 	while (*p) {
15341da177e4SLinus Torvalds 		parent = *p;
15351da177e4SLinus Torvalds 		nd = rb_entry(parent, struct sp_node, nd);
15361da177e4SLinus Torvalds 		if (new->start < nd->start)
15371da177e4SLinus Torvalds 			p = &(*p)->rb_left;
15381da177e4SLinus Torvalds 		else if (new->end > nd->end)
15391da177e4SLinus Torvalds 			p = &(*p)->rb_right;
15401da177e4SLinus Torvalds 		else
15411da177e4SLinus Torvalds 			BUG();
15421da177e4SLinus Torvalds 	}
15431da177e4SLinus Torvalds 	rb_link_node(&new->nd, parent, p);
15441da177e4SLinus Torvalds 	rb_insert_color(&new->nd, &sp->root);
1545140d5a49SPaul Mundt 	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
15461da177e4SLinus Torvalds 		 new->policy ? new->policy->policy : 0);
15471da177e4SLinus Torvalds }
15481da177e4SLinus Torvalds 
15491da177e4SLinus Torvalds /* Find shared policy intersecting idx */
15501da177e4SLinus Torvalds struct mempolicy *
15511da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
15521da177e4SLinus Torvalds {
15531da177e4SLinus Torvalds 	struct mempolicy *pol = NULL;
15541da177e4SLinus Torvalds 	struct sp_node *sn;
15551da177e4SLinus Torvalds 
15561da177e4SLinus Torvalds 	if (!sp->root.rb_node)
15571da177e4SLinus Torvalds 		return NULL;
15581da177e4SLinus Torvalds 	spin_lock(&sp->lock);
15591da177e4SLinus Torvalds 	sn = sp_lookup(sp, idx, idx+1);
15601da177e4SLinus Torvalds 	if (sn) {
15611da177e4SLinus Torvalds 		mpol_get(sn->policy);
15621da177e4SLinus Torvalds 		pol = sn->policy;
15631da177e4SLinus Torvalds 	}
15641da177e4SLinus Torvalds 	spin_unlock(&sp->lock);
15651da177e4SLinus Torvalds 	return pol;
15661da177e4SLinus Torvalds }
15671da177e4SLinus Torvalds 
15681da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n)
15691da177e4SLinus Torvalds {
1570140d5a49SPaul Mundt 	pr_debug("deleting %lx-%lx\n", n->start, n->end);
15711da177e4SLinus Torvalds 	rb_erase(&n->nd, &sp->root);
15721da177e4SLinus Torvalds 	mpol_free(n->policy);
15731da177e4SLinus Torvalds 	kmem_cache_free(sn_cache, n);
15741da177e4SLinus Torvalds }
15751da177e4SLinus Torvalds 
1576dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
1577dbcb0f19SAdrian Bunk 				struct mempolicy *pol)
15781da177e4SLinus Torvalds {
15791da177e4SLinus Torvalds 	struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
15801da177e4SLinus Torvalds 
15811da177e4SLinus Torvalds 	if (!n)
15821da177e4SLinus Torvalds 		return NULL;
15831da177e4SLinus Torvalds 	n->start = start;
15841da177e4SLinus Torvalds 	n->end = end;
15851da177e4SLinus Torvalds 	mpol_get(pol);
15861da177e4SLinus Torvalds 	n->policy = pol;
15871da177e4SLinus Torvalds 	return n;
15881da177e4SLinus Torvalds }
15891da177e4SLinus Torvalds 
15901da177e4SLinus Torvalds /* Replace a policy range. */
15911da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
15921da177e4SLinus Torvalds 				 unsigned long end, struct sp_node *new)
15931da177e4SLinus Torvalds {
15941da177e4SLinus Torvalds 	struct sp_node *n, *new2 = NULL;
15951da177e4SLinus Torvalds 
15961da177e4SLinus Torvalds restart:
15971da177e4SLinus Torvalds 	spin_lock(&sp->lock);
15981da177e4SLinus Torvalds 	n = sp_lookup(sp, start, end);
15991da177e4SLinus Torvalds 	/* Take care of old policies in the same range. */
16001da177e4SLinus Torvalds 	while (n && n->start < end) {
16011da177e4SLinus Torvalds 		struct rb_node *next = rb_next(&n->nd);
16021da177e4SLinus Torvalds 		if (n->start >= start) {
16031da177e4SLinus Torvalds 			if (n->end <= end)
16041da177e4SLinus Torvalds 				sp_delete(sp, n);
16051da177e4SLinus Torvalds 			else
16061da177e4SLinus Torvalds 				n->start = end;
16071da177e4SLinus Torvalds 		} else {
16081da177e4SLinus Torvalds 			/* Old policy spanning whole new range. */
16091da177e4SLinus Torvalds 			if (n->end > end) {
16101da177e4SLinus Torvalds 				if (!new2) {
16111da177e4SLinus Torvalds 					spin_unlock(&sp->lock);
16121da177e4SLinus Torvalds 					new2 = sp_alloc(end, n->end, n->policy);
16131da177e4SLinus Torvalds 					if (!new2)
16141da177e4SLinus Torvalds 						return -ENOMEM;
16151da177e4SLinus Torvalds 					goto restart;
16161da177e4SLinus Torvalds 				}
16171da177e4SLinus Torvalds 				n->end = start;
16181da177e4SLinus Torvalds 				sp_insert(sp, new2);
16191da177e4SLinus Torvalds 				new2 = NULL;
16201da177e4SLinus Torvalds 				break;
16211da177e4SLinus Torvalds 			} else
16221da177e4SLinus Torvalds 				n->end = start;
16231da177e4SLinus Torvalds 		}
16241da177e4SLinus Torvalds 		if (!next)
16251da177e4SLinus Torvalds 			break;
16261da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
16271da177e4SLinus Torvalds 	}
16281da177e4SLinus Torvalds 	if (new)
16291da177e4SLinus Torvalds 		sp_insert(sp, new);
16301da177e4SLinus Torvalds 	spin_unlock(&sp->lock);
16311da177e4SLinus Torvalds 	if (new2) {
16321da177e4SLinus Torvalds 		mpol_free(new2->policy);
16331da177e4SLinus Torvalds 		kmem_cache_free(sn_cache, new2);
16341da177e4SLinus Torvalds 	}
16351da177e4SLinus Torvalds 	return 0;
16361da177e4SLinus Torvalds }
16371da177e4SLinus Torvalds 
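/*
 * Range-replacement example (hypothetical offsets): if the tree holds
 * one node A covering [0,10) and a new node B for [3,7) arrives, the
 * "old policy spanning whole new range" branch splits A: new2 becomes
 * a copy of A over [7,10), A is truncated to [0,3), and B fills [3,7),
 * leaving [0,3)=A, [3,7)=B, [7,10)=A.  Since new2 is allocated with
 * sp->lock dropped, the function restarts the lookup afterwards.
 */
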
16387339ff83SRobin Holt void mpol_shared_policy_init(struct shared_policy *info, int policy,
16397339ff83SRobin Holt 				nodemask_t *policy_nodes)
16407339ff83SRobin Holt {
16417339ff83SRobin Holt 	info->root = RB_ROOT;
16427339ff83SRobin Holt 	spin_lock_init(&info->lock);
16437339ff83SRobin Holt 
16447339ff83SRobin Holt 	if (policy != MPOL_DEFAULT) {
16457339ff83SRobin Holt 		struct mempolicy *newpol;
16467339ff83SRobin Holt 
16477339ff83SRobin Holt 		/* Falls back to MPOL_DEFAULT on any error */
16487339ff83SRobin Holt 		newpol = mpol_new(policy, policy_nodes);
16497339ff83SRobin Holt 		if (!IS_ERR(newpol)) {
16507339ff83SRobin Holt 			/* Create pseudo-vma that contains just the policy */
16517339ff83SRobin Holt 			struct vm_area_struct pvma;
16527339ff83SRobin Holt 
16537339ff83SRobin Holt 			memset(&pvma, 0, sizeof(struct vm_area_struct));
16547339ff83SRobin Holt 			/* Policy covers entire file */
16557339ff83SRobin Holt 			pvma.vm_end = TASK_SIZE;
16567339ff83SRobin Holt 			mpol_set_shared_policy(info, &pvma, newpol);
16577339ff83SRobin Holt 			mpol_free(newpol);
16587339ff83SRobin Holt 		}
16597339ff83SRobin Holt 	}
16607339ff83SRobin Holt }
16617339ff83SRobin Holt 
16621da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info,
16631da177e4SLinus Torvalds 			struct vm_area_struct *vma, struct mempolicy *npol)
16641da177e4SLinus Torvalds {
16651da177e4SLinus Torvalds 	int err;
16661da177e4SLinus Torvalds 	struct sp_node *new = NULL;
16671da177e4SLinus Torvalds 	unsigned long sz = vma_pages(vma);
16681da177e4SLinus Torvalds 
1669140d5a49SPaul Mundt 	pr_debug("set_shared_policy %lx sz %lu %d %lx\n",
16701da177e4SLinus Torvalds 		 vma->vm_pgoff,
16711da177e4SLinus Torvalds 		 sz, npol? npol->policy : -1,
1672dfcd3c0dSAndi Kleen 		 npol ? nodes_addr(npol->v.nodes)[0] : -1);
16731da177e4SLinus Torvalds 
16741da177e4SLinus Torvalds 	if (npol) {
16751da177e4SLinus Torvalds 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
16761da177e4SLinus Torvalds 		if (!new)
16771da177e4SLinus Torvalds 			return -ENOMEM;
16781da177e4SLinus Torvalds 	}
16791da177e4SLinus Torvalds 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
16801da177e4SLinus Torvalds 	if (err && new)
16811da177e4SLinus Torvalds 		kmem_cache_free(sn_cache, new);
16821da177e4SLinus Torvalds 	return err;
16831da177e4SLinus Torvalds }
16841da177e4SLinus Torvalds 
16851da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */
16861da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p)
16871da177e4SLinus Torvalds {
16881da177e4SLinus Torvalds 	struct sp_node *n;
16891da177e4SLinus Torvalds 	struct rb_node *next;
16901da177e4SLinus Torvalds 
16911da177e4SLinus Torvalds 	if (!p->root.rb_node)
16921da177e4SLinus Torvalds 		return;
16931da177e4SLinus Torvalds 	spin_lock(&p->lock);
16941da177e4SLinus Torvalds 	next = rb_first(&p->root);
16951da177e4SLinus Torvalds 	while (next) {
16961da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
16971da177e4SLinus Torvalds 		next = rb_next(&n->nd);
169890c5029eSAndi Kleen 		rb_erase(&n->nd, &p->root);
16991da177e4SLinus Torvalds 		mpol_free(n->policy);
17001da177e4SLinus Torvalds 		kmem_cache_free(sn_cache, n);
17011da177e4SLinus Torvalds 	}
17021da177e4SLinus Torvalds 	spin_unlock(&p->lock);
17031da177e4SLinus Torvalds }
17041da177e4SLinus Torvalds 
17051da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */
17061da177e4SLinus Torvalds void __init numa_policy_init(void)
17071da177e4SLinus Torvalds {
1708b71636e2SPaul Mundt 	nodemask_t interleave_nodes;
1709b71636e2SPaul Mundt 	unsigned long largest = 0;
1710b71636e2SPaul Mundt 	int nid, prefer = 0;
1711b71636e2SPaul Mundt 
17121da177e4SLinus Torvalds 	policy_cache = kmem_cache_create("numa_policy",
17131da177e4SLinus Torvalds 					 sizeof(struct mempolicy),
171420c2df83SPaul Mundt 					 0, SLAB_PANIC, NULL);
17151da177e4SLinus Torvalds 
17161da177e4SLinus Torvalds 	sn_cache = kmem_cache_create("shared_policy_node",
17171da177e4SLinus Torvalds 				     sizeof(struct sp_node),
171820c2df83SPaul Mundt 				     0, SLAB_PANIC, NULL);
17191da177e4SLinus Torvalds 
1720b71636e2SPaul Mundt 	/*
1721b71636e2SPaul Mundt 	 * Set interleaving policy for system init. Interleaving is only
1722b71636e2SPaul Mundt 	 * enabled across suitably sized nodes (default is >= 16MB), or
1723b71636e2SPaul Mundt 	 * fall back to the largest node if they're all smaller.
1724b71636e2SPaul Mundt 	 */
1725b71636e2SPaul Mundt 	nodes_clear(interleave_nodes);
172656bbd65dSChristoph Lameter 	for_each_node_state(nid, N_HIGH_MEMORY) {
1727b71636e2SPaul Mundt 		unsigned long total_pages = node_present_pages(nid);
17281da177e4SLinus Torvalds 
1729b71636e2SPaul Mundt 		/* Preserve the largest node */
1730b71636e2SPaul Mundt 		if (largest < total_pages) {
1731b71636e2SPaul Mundt 			largest = total_pages;
1732b71636e2SPaul Mundt 			prefer = nid;
1733b71636e2SPaul Mundt 		}
1734b71636e2SPaul Mundt 
1735b71636e2SPaul Mundt 		/* Interleave this node? */
1736b71636e2SPaul Mundt 		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
1737b71636e2SPaul Mundt 			node_set(nid, interleave_nodes);
1738b71636e2SPaul Mundt 	}
1739b71636e2SPaul Mundt 
1740b71636e2SPaul Mundt 	/* All too small, use the largest */
1741b71636e2SPaul Mundt 	if (unlikely(nodes_empty(interleave_nodes)))
1742b71636e2SPaul Mundt 		node_set(prefer, interleave_nodes);
1743b71636e2SPaul Mundt 
1744b71636e2SPaul Mundt 	if (do_set_mempolicy(MPOL_INTERLEAVE, &interleave_nodes))
17451da177e4SLinus Torvalds 		printk("numa_policy_init: interleaving failed\n");
17461da177e4SLinus Torvalds }
17471da177e4SLinus Torvalds 
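/*
 * Threshold example (illustrative): with 4KB pages, the test
 * (total_pages << PAGE_SHIFT) >= (16 << 20) requires at least 4096
 * present pages (16MB) before a node joins the boot-time interleave
 * set.  On a box with 8MB, 32MB and 64MB nodes, init interleaves over
 * the latter two; were all three under 16MB, the largest node alone
 * would make up the interleave set.
 */
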
17488bccd85fSChristoph Lameter /* Reset policy of current process to default */
17491da177e4SLinus Torvalds void numa_default_policy(void)
17501da177e4SLinus Torvalds {
17518bccd85fSChristoph Lameter 	do_set_mempolicy(MPOL_DEFAULT, NULL);
17521da177e4SLinus Torvalds }
175368860ec1SPaul Jackson 
175468860ec1SPaul Jackson /* Migrate a policy to a different set of nodes */
1755dbcb0f19SAdrian Bunk static void mpol_rebind_policy(struct mempolicy *pol,
1756dbcb0f19SAdrian Bunk 			       const nodemask_t *newmask)
175768860ec1SPaul Jackson {
175874cb2155SPaul Jackson 	nodemask_t *mpolmask;
175968860ec1SPaul Jackson 	nodemask_t tmp;
176068860ec1SPaul Jackson 
176168860ec1SPaul Jackson 	if (!pol)
176268860ec1SPaul Jackson 		return;
176374cb2155SPaul Jackson 	mpolmask = &pol->cpuset_mems_allowed;
176474cb2155SPaul Jackson 	if (nodes_equal(*mpolmask, *newmask))
176574cb2155SPaul Jackson 		return;
176668860ec1SPaul Jackson 
176768860ec1SPaul Jackson 	switch (pol->policy) {
176868860ec1SPaul Jackson 	case MPOL_DEFAULT:
176968860ec1SPaul Jackson 		break;
1770*19770b32SMel Gorman 	case MPOL_BIND:
1771*19770b32SMel Gorman 		/* Fall through */
177268860ec1SPaul Jackson 	case MPOL_INTERLEAVE:
177374cb2155SPaul Jackson 		nodes_remap(tmp, pol->v.nodes, *mpolmask, *newmask);
177468860ec1SPaul Jackson 		pol->v.nodes = tmp;
177574cb2155SPaul Jackson 		*mpolmask = *newmask;
177674cb2155SPaul Jackson 		current->il_next = node_remap(current->il_next,
177774cb2155SPaul Jackson 						*mpolmask, *newmask);
177868860ec1SPaul Jackson 		break;
177968860ec1SPaul Jackson 	case MPOL_PREFERRED:
178068860ec1SPaul Jackson 		pol->v.preferred_node = node_remap(pol->v.preferred_node,
178174cb2155SPaul Jackson 						*mpolmask, *newmask);
178274cb2155SPaul Jackson 		*mpolmask = *newmask;
178368860ec1SPaul Jackson 		break;
178468860ec1SPaul Jackson 	default:
178568860ec1SPaul Jackson 		BUG();
178668860ec1SPaul Jackson 		break;
178768860ec1SPaul Jackson 	}
178868860ec1SPaul Jackson }
178968860ec1SPaul Jackson 
179068860ec1SPaul Jackson /*
179174cb2155SPaul Jackson  * Wrapper for mpol_rebind_policy() that just requires task
179274cb2155SPaul Jackson  * pointer, and updates task mempolicy.
179368860ec1SPaul Jackson  */
179474cb2155SPaul Jackson 
179574cb2155SPaul Jackson void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
179668860ec1SPaul Jackson {
179774cb2155SPaul Jackson 	mpol_rebind_policy(tsk->mempolicy, new);
179868860ec1SPaul Jackson }
17991a75a6c8SChristoph Lameter 
18001a75a6c8SChristoph Lameter /*
18014225399aSPaul Jackson  * Rebind each vma in mm to new nodemask.
18024225399aSPaul Jackson  *
18034225399aSPaul Jackson  * Call holding a reference to mm.  Takes mm->mmap_sem during call.
18044225399aSPaul Jackson  */
18054225399aSPaul Jackson 
18064225399aSPaul Jackson void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
18074225399aSPaul Jackson {
18084225399aSPaul Jackson 	struct vm_area_struct *vma;
18094225399aSPaul Jackson 
18104225399aSPaul Jackson 	down_write(&mm->mmap_sem);
18114225399aSPaul Jackson 	for (vma = mm->mmap; vma; vma = vma->vm_next)
18124225399aSPaul Jackson 		mpol_rebind_policy(vma->vm_policy, new);
18134225399aSPaul Jackson 	up_write(&mm->mmap_sem);
18144225399aSPaul Jackson }
18154225399aSPaul Jackson 
18164225399aSPaul Jackson /*
18171a75a6c8SChristoph Lameter  * Display pages allocated per node and memory policy via /proc.
18181a75a6c8SChristoph Lameter  */
18191a75a6c8SChristoph Lameter 
182015ad7cdcSHelge Deller static const char * const policy_types[] =
182115ad7cdcSHelge Deller 	{ "default", "prefer", "bind", "interleave" };
18221a75a6c8SChristoph Lameter 
18231a75a6c8SChristoph Lameter /*
18241a75a6c8SChristoph Lameter  * Convert a mempolicy into a string.
18251a75a6c8SChristoph Lameter  * Returns the number of characters in buffer (if positive)
18261a75a6c8SChristoph Lameter  * or an error (negative)
18271a75a6c8SChristoph Lameter  */
18281a75a6c8SChristoph Lameter static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
18291a75a6c8SChristoph Lameter {
18301a75a6c8SChristoph Lameter 	char *p = buffer;
18311a75a6c8SChristoph Lameter 	int l;
18321a75a6c8SChristoph Lameter 	nodemask_t nodes;
18331a75a6c8SChristoph Lameter 	int mode = pol ? pol->policy : MPOL_DEFAULT;
18341a75a6c8SChristoph Lameter 
18351a75a6c8SChristoph Lameter 	switch (mode) {
18361a75a6c8SChristoph Lameter 	case MPOL_DEFAULT:
18371a75a6c8SChristoph Lameter 		nodes_clear(nodes);
18381a75a6c8SChristoph Lameter 		break;
18391a75a6c8SChristoph Lameter 
18401a75a6c8SChristoph Lameter 	case MPOL_PREFERRED:
18411a75a6c8SChristoph Lameter 		nodes_clear(nodes);
18421a75a6c8SChristoph Lameter 		node_set(pol->v.preferred_node, nodes);
18431a75a6c8SChristoph Lameter 		break;
18441a75a6c8SChristoph Lameter 
18451a75a6c8SChristoph Lameter 	case MPOL_BIND:
1846*19770b32SMel Gorman 		/* Fall through */
18471a75a6c8SChristoph Lameter 	case MPOL_INTERLEAVE:
18481a75a6c8SChristoph Lameter 		nodes = pol->v.nodes;
18491a75a6c8SChristoph Lameter 		break;
18501a75a6c8SChristoph Lameter 
18511a75a6c8SChristoph Lameter 	default:
18521a75a6c8SChristoph Lameter 		BUG();
18531a75a6c8SChristoph Lameter 		return -EFAULT;
18541a75a6c8SChristoph Lameter 	}
18551a75a6c8SChristoph Lameter 
18561a75a6c8SChristoph Lameter 	l = strlen(policy_types[mode]);
18571a75a6c8SChristoph Lameter 	if (buffer + maxlen < p + l + 1)
18581a75a6c8SChristoph Lameter 		return -ENOSPC;
18591a75a6c8SChristoph Lameter 
18601a75a6c8SChristoph Lameter 	strcpy(p, policy_types[mode]);
18611a75a6c8SChristoph Lameter 	p += l;
18621a75a6c8SChristoph Lameter 
18631a75a6c8SChristoph Lameter 	if (!nodes_empty(nodes)) {
18641a75a6c8SChristoph Lameter 		if (buffer + maxlen < p + 2)
18651a75a6c8SChristoph Lameter 			return -ENOSPC;
18661a75a6c8SChristoph Lameter 		*p++ = '=';
18671a75a6c8SChristoph Lameter 		p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
18681a75a6c8SChristoph Lameter 	}
18691a75a6c8SChristoph Lameter 	return p - buffer;
18701a75a6c8SChristoph Lameter }
18711a75a6c8SChristoph Lameter 
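/*
 * Example outputs (node sets are illustrative): MPOL_DEFAULT prints
 * "default", MPOL_PREFERRED on node 1 prints "prefer=1", and
 * MPOL_INTERLEAVE over nodes 0-3 prints "interleave=0-3", since
 * nodelist_scnprintf() emits the mask in ranged node-list form.
 */
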
18721a75a6c8SChristoph Lameter struct numa_maps {
18731a75a6c8SChristoph Lameter 	unsigned long pages;
18741a75a6c8SChristoph Lameter 	unsigned long anon;
1875397874dfSChristoph Lameter 	unsigned long active;
1876397874dfSChristoph Lameter 	unsigned long writeback;
18771a75a6c8SChristoph Lameter 	unsigned long mapcount_max;
1878397874dfSChristoph Lameter 	unsigned long dirty;
1879397874dfSChristoph Lameter 	unsigned long swapcache;
18801a75a6c8SChristoph Lameter 	unsigned long node[MAX_NUMNODES];
18811a75a6c8SChristoph Lameter };
18821a75a6c8SChristoph Lameter 
1883397874dfSChristoph Lameter static void gather_stats(struct page *page, void *private, int pte_dirty)
18841a75a6c8SChristoph Lameter {
18851a75a6c8SChristoph Lameter 	struct numa_maps *md = private;
18861a75a6c8SChristoph Lameter 	int count = page_mapcount(page);
18871a75a6c8SChristoph Lameter 
18881a75a6c8SChristoph Lameter 	md->pages++;
1889397874dfSChristoph Lameter 	if (pte_dirty || PageDirty(page))
1890397874dfSChristoph Lameter 		md->dirty++;
1891397874dfSChristoph Lameter 
1892397874dfSChristoph Lameter 	if (PageSwapCache(page))
1893397874dfSChristoph Lameter 		md->swapcache++;
1894397874dfSChristoph Lameter 
1895397874dfSChristoph Lameter 	if (PageActive(page))
1896397874dfSChristoph Lameter 		md->active++;
1897397874dfSChristoph Lameter 
1898397874dfSChristoph Lameter 	if (PageWriteback(page))
1899397874dfSChristoph Lameter 		md->writeback++;
19001a75a6c8SChristoph Lameter 
19011a75a6c8SChristoph Lameter 	if (PageAnon(page))
19021a75a6c8SChristoph Lameter 		md->anon++;
19031a75a6c8SChristoph Lameter 
1904397874dfSChristoph Lameter 	if (count > md->mapcount_max)
1905397874dfSChristoph Lameter 		md->mapcount_max = count;
1906397874dfSChristoph Lameter 
19071a75a6c8SChristoph Lameter 	md->node[page_to_nid(page)]++;
19081a75a6c8SChristoph Lameter }
19091a75a6c8SChristoph Lameter 
19107f709ed0SAndrew Morton #ifdef CONFIG_HUGETLB_PAGE
1911397874dfSChristoph Lameter static void check_huge_range(struct vm_area_struct *vma,
1912397874dfSChristoph Lameter 		unsigned long start, unsigned long end,
1913397874dfSChristoph Lameter 		struct numa_maps *md)
1914397874dfSChristoph Lameter {
1915397874dfSChristoph Lameter 	unsigned long addr;
1916397874dfSChristoph Lameter 	struct page *page;
1917397874dfSChristoph Lameter 
1918397874dfSChristoph Lameter 	for (addr = start; addr < end; addr += HPAGE_SIZE) {
1919397874dfSChristoph Lameter 		pte_t *ptep = huge_pte_offset(vma->vm_mm, addr & HPAGE_MASK);
1920397874dfSChristoph Lameter 		pte_t pte;
1921397874dfSChristoph Lameter 
1922397874dfSChristoph Lameter 		if (!ptep)
1923397874dfSChristoph Lameter 			continue;
1924397874dfSChristoph Lameter 
1925397874dfSChristoph Lameter 		pte = *ptep;
1926397874dfSChristoph Lameter 		if (pte_none(pte))
1927397874dfSChristoph Lameter 			continue;
1928397874dfSChristoph Lameter 
1929397874dfSChristoph Lameter 		page = pte_page(pte);
1930397874dfSChristoph Lameter 		if (!page)
1931397874dfSChristoph Lameter 			continue;
1932397874dfSChristoph Lameter 
1933397874dfSChristoph Lameter 		gather_stats(page, md, pte_dirty(*ptep));
1934397874dfSChristoph Lameter 	}
1935397874dfSChristoph Lameter }
19367f709ed0SAndrew Morton #else
19377f709ed0SAndrew Morton static inline void check_huge_range(struct vm_area_struct *vma,
19387f709ed0SAndrew Morton 		unsigned long start, unsigned long end,
19397f709ed0SAndrew Morton 		struct numa_maps *md)
19407f709ed0SAndrew Morton {
19417f709ed0SAndrew Morton }
19427f709ed0SAndrew Morton #endif
1943397874dfSChristoph Lameter 
19441a75a6c8SChristoph Lameter int show_numa_map(struct seq_file *m, void *v)
19451a75a6c8SChristoph Lameter {
194699f89551SEric W. Biederman 	struct proc_maps_private *priv = m->private;
19471a75a6c8SChristoph Lameter 	struct vm_area_struct *vma = v;
19481a75a6c8SChristoph Lameter 	struct numa_maps *md;
1949397874dfSChristoph Lameter 	struct file *file = vma->vm_file;
1950397874dfSChristoph Lameter 	struct mm_struct *mm = vma->vm_mm;
1951480eccf9SLee Schermerhorn 	struct mempolicy *pol;
19521a75a6c8SChristoph Lameter 	int n;
19531a75a6c8SChristoph Lameter 	char buffer[50];
19541a75a6c8SChristoph Lameter 
1955397874dfSChristoph Lameter 	if (!mm)
19561a75a6c8SChristoph Lameter 		return 0;
19571a75a6c8SChristoph Lameter 
19581a75a6c8SChristoph Lameter 	md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
19591a75a6c8SChristoph Lameter 	if (!md)
19601a75a6c8SChristoph Lameter 		return 0;
19611a75a6c8SChristoph Lameter 
1962480eccf9SLee Schermerhorn 	pol = get_vma_policy(priv->task, vma, vma->vm_start);
1963480eccf9SLee Schermerhorn 	mpol_to_str(buffer, sizeof(buffer), pol);
1964480eccf9SLee Schermerhorn 	/*
1965480eccf9SLee Schermerhorn 	 * unref shared or other task's mempolicy
1966480eccf9SLee Schermerhorn 	 */
1967480eccf9SLee Schermerhorn 	if (pol != &default_policy && pol != current->mempolicy)
1968480eccf9SLee Schermerhorn 		__mpol_free(pol);
19691a75a6c8SChristoph Lameter 
1970397874dfSChristoph Lameter 	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
1971397874dfSChristoph Lameter 
1972397874dfSChristoph Lameter 	if (file) {
1973397874dfSChristoph Lameter 		seq_printf(m, " file=");
1974c32c2f63SJan Blunck 		seq_path(m, &file->f_path, "\n\t= ");
1975397874dfSChristoph Lameter 	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
1976397874dfSChristoph Lameter 		seq_printf(m, " heap");
1977397874dfSChristoph Lameter 	} else if (vma->vm_start <= mm->start_stack &&
1978397874dfSChristoph Lameter 			vma->vm_end >= mm->start_stack) {
1979397874dfSChristoph Lameter 		seq_printf(m, " stack");
1980397874dfSChristoph Lameter 	}
1981397874dfSChristoph Lameter 
1982397874dfSChristoph Lameter 	if (is_vm_hugetlb_page(vma)) {
1983397874dfSChristoph Lameter 		check_huge_range(vma, vma->vm_start, vma->vm_end, md);
1984397874dfSChristoph Lameter 		seq_printf(m, " huge");
1985397874dfSChristoph Lameter 	} else {
1986397874dfSChristoph Lameter 		check_pgd_range(vma, vma->vm_start, vma->vm_end,
198756bbd65dSChristoph Lameter 			&node_states[N_HIGH_MEMORY], MPOL_MF_STATS, md);
1988397874dfSChristoph Lameter 	}
1989397874dfSChristoph Lameter 
1990397874dfSChristoph Lameter 	if (!md->pages)
1991397874dfSChristoph Lameter 		goto out;
19921a75a6c8SChristoph Lameter 
19931a75a6c8SChristoph Lameter 	if (md->anon)
19941a75a6c8SChristoph Lameter 		seq_printf(m," anon=%lu",md->anon);
19951a75a6c8SChristoph Lameter 
1996397874dfSChristoph Lameter 	if (md->dirty)
1997397874dfSChristoph Lameter 		seq_printf(m," dirty=%lu",md->dirty);
1998397874dfSChristoph Lameter 
1999397874dfSChristoph Lameter 	if (md->pages != md->anon && md->pages != md->dirty)
2000397874dfSChristoph Lameter 		seq_printf(m, " mapped=%lu", md->pages);
2001397874dfSChristoph Lameter 
2002397874dfSChristoph Lameter 	if (md->mapcount_max > 1)
2003397874dfSChristoph Lameter 		seq_printf(m, " mapmax=%lu", md->mapcount_max);
2004397874dfSChristoph Lameter 
2005397874dfSChristoph Lameter 	if (md->swapcache)
2006397874dfSChristoph Lameter 		seq_printf(m," swapcache=%lu", md->swapcache);
2007397874dfSChristoph Lameter 
2008397874dfSChristoph Lameter 	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
2009397874dfSChristoph Lameter 		seq_printf(m," active=%lu", md->active);
2010397874dfSChristoph Lameter 
2011397874dfSChristoph Lameter 	if (md->writeback)
2012397874dfSChristoph Lameter 		seq_printf(m," writeback=%lu", md->writeback);
2013397874dfSChristoph Lameter 
201456bbd65dSChristoph Lameter 	for_each_node_state(n, N_HIGH_MEMORY)
20151a75a6c8SChristoph Lameter 		if (md->node[n])
20161a75a6c8SChristoph Lameter 			seq_printf(m, " N%d=%lu", n, md->node[n]);
2017397874dfSChristoph Lameter out:
20181a75a6c8SChristoph Lameter 	seq_putc(m, '\n');
20191a75a6c8SChristoph Lameter 	kfree(md);
20201a75a6c8SChristoph Lameter 
20211a75a6c8SChristoph Lameter 	if (m->count < m->size)
202299f89551SEric W. Biederman 		m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
20231a75a6c8SChristoph Lameter 	return 0;
20241a75a6c8SChristoph Lameter }
2025