/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a per-process
 *                counter is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case node -1 here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non-interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
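
/*
 * Usage sketch: from userspace these policies are selected through the
 * set_mempolicy(2) and mbind(2) system calls.  A hypothetical caller
 * interleaving its future allocations across nodes 0 and 1 might do:
 *
 *	#include <numaif.h>
 *
 *	unsigned long nodes = (1UL << 0) | (1UL << 1);
 *
 *	if (set_mempolicy(MPOL_INTERLEAVE, &nodes, 8 * sizeof(nodes)))
 *		perror("set_mempolicy");
 *
 * and could restrict an existing mapping at addr/length (caller-supplied)
 * to those same nodes with:
 *
 *	if (mbind(addr, length, MPOL_BIND, &nodes, 8 * sizeof(nodes),
 *		  MPOL_MF_STRICT))
 *		perror("mbind");
 *
 * See the entry points sys_set_mempolicy() and sys_mbind() below.
 */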

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
   could replace all the switch()es with a mempolicy_ops structure.
*/

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
#define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2)		/* Gather statistics */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.policy = MPOL_DEFAULT,
};

static void mpol_rebind_policy(struct mempolicy *pol,
                               const nodemask_t *newmask);

/* Do sanity checking on a policy */
static int mpol_check_policy(unsigned short mode, nodemask_t *nodes)
{
	int was_empty, is_empty;

	if (!nodes)
		return 0;

	/*
	 * "Contextualize" the incoming nodemask for cpusets:
	 * Remember whether the incoming nodemask was empty.  If not,
	 * restrict the nodes to the allowed nodes in the cpuset.
	 * This is guaranteed to be a subset of nodes with memory.
	 */
	cpuset_update_task_memory_state();
	is_empty = was_empty = nodes_empty(*nodes);
	if (!was_empty) {
		nodes_and(*nodes, *nodes, cpuset_current_mems_allowed);
		is_empty = nodes_empty(*nodes);	/* after "contextualization" */
	}

	switch (mode) {
	case MPOL_DEFAULT:
		/*
		 * require caller to specify an empty nodemask
		 * before "contextualization"
		 */
		if (!was_empty)
			return -EINVAL;
		break;
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
		/*
		 * require at least 1 valid node after "contextualization"
		 */
		if (is_empty)
			return -EINVAL;
		break;
	case MPOL_PREFERRED:
		/*
		 * Did caller specify invalid nodes?
		 * Don't silently accept this as "local allocation".
		 */
		if (!was_empty && is_empty)
			return -EINVAL;
		break;
	default:
		BUG();
	}
	return 0;
}
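
/*
 * Worked example: if the current cpuset allows nodes {0,1} and the caller
 * passes MPOL_BIND with nodes {1,2}, the nodes_and() above reduces the
 * mask to {1}, which is non-empty, so the policy is accepted on node 1
 * only.  Had the caller passed {2,3}, the result would be empty after
 * "contextualization" and mpol_check_policy() would return -EINVAL rather
 * than silently falling back.
 */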

/* Check that the nodemask contains at least one populated zone */
static int is_valid_nodemask(nodemask_t *nodemask)
{
	int nd, k;

	/* Check that there is something useful in this mask */
	k = policy_zone;

	for_each_node_mask(nd, *nodemask) {
		struct zone *z;

		for (k = 0; k <= policy_zone; k++) {
			z = &NODE_DATA(nd)->node_zones[k];
			if (z->present_pages > 0)
				return 1;
		}
	}

	return 0;
}

/* Create a new policy */
static struct mempolicy *mpol_new(unsigned short mode, nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d nodes[0] %lx\n",
		 mode, nodes ? nodes_addr(*nodes)[0] : -1);

	if (mode == MPOL_DEFAULT)
		return NULL;
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	switch (mode) {
	case MPOL_INTERLEAVE:
		policy->v.nodes = *nodes;
		if (nodes_weight(policy->v.nodes) == 0) {
			kmem_cache_free(policy_cache, policy);
			return ERR_PTR(-EINVAL);
		}
		break;
	case MPOL_PREFERRED:
		policy->v.preferred_node = first_node(*nodes);
		if (policy->v.preferred_node >= MAX_NUMNODES)
			policy->v.preferred_node = -1;
		break;
	case MPOL_BIND:
		if (!is_valid_nodemask(nodes)) {
			kmem_cache_free(policy_cache, policy);
			return ERR_PTR(-EINVAL);
		}
		policy->v.nodes = *nodes;
		break;
	default:
		BUG();
	}
	policy->policy = mode;
	policy->cpuset_mems_allowed = cpuset_mems_allowed(current);
	return policy;
}
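
/*
 * Usage sketch (hypothetical caller): a fresh interleave policy over
 * nodes 0 and 1 would be built by filling a nodemask and handing it to
 * mpol_new():
 *
 *	nodemask_t nodes;
 *	struct mempolicy *pol;
 *
 *	nodes_clear(nodes);
 *	node_set(0, nodes);
 *	node_set(1, nodes);
 *	pol = mpol_new(MPOL_INTERLEAVE, &nodes);
 *	if (IS_ERR(pol))
 *		return PTR_ERR(pol);
 *
 * Note that MPOL_DEFAULT deliberately returns NULL (no policy object),
 * so callers must treat NULL as "use the default policy", not as error.
 */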

static void gather_stats(struct page *, void *, int pte_dirty);
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

/* Scan through pages checking if pages follow certain conditions. */
static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pte_t *orig_pte;
	pte_t *pte;
	spinlock_t *ptl;

	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		struct page *page;
		int nid;

		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * The check for PageReserved here is important to avoid
		 * handling zero pages and other pages that may have been
		 * marked special by the system.
		 *
		 * If the PageReserved would not be checked here then f.e.
		 * the location of the zero page could have an influence
		 * on MPOL_MF_STRICT, zero pages would be counted for
		 * the per node stats, and there would be useless attempts
		 * to put zero pages on the migration list.
		 */
		if (PageReserved(page))
			continue;
		nid = page_to_nid(page);
		if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
			continue;

		if (flags & MPOL_MF_STATS)
			gather_stats(page, private, pte_dirty(*pte));
		else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
			migrate_page_add(page, private, flags);
		else
			break;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return addr != end;
}

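/*
 * Note on the MPOL_MF_INVERT test above: without the flag, a page is
 * skipped when its node is *not* in *nodes, so the scan acts only on
 * pages already on the requested nodes.  With MPOL_MF_INVERT the
 * comparison flips and the scan acts on pages that are *off* the
 * requested nodes -- do_mbind() below passes MPOL_MF_INVERT for exactly
 * this reason, since those are the misplaced pages it must count or
 * migrate.
 */
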
static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		if (check_pte_range(vma, pmd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		if (check_pmd_range(vma, pud, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static inline int check_pgd_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		if (check_pud_range(vma, pgd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pgd++, addr = next, addr != end);
	return 0;
}

/*
 * Check if all pages in a range are on a set of nodes.
 * If pagelist != NULL then isolate pages from the LRU and
 * put them on the pagelist.
 */
static struct vm_area_struct *
check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		const nodemask_t *nodes, unsigned long flags, void *private)
{
	int err;
	struct vm_area_struct *first, *vma, *prev;

	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {

		err = migrate_prep();
		if (err)
			return ERR_PTR(err);
	}

	first = find_vma(mm, start);
	if (!first)
		return ERR_PTR(-EFAULT);
	prev = NULL;
	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
			if (!vma->vm_next && vma->vm_end < end)
				return ERR_PTR(-EFAULT);
			if (prev && prev->vm_end < vma->vm_start)
				return ERR_PTR(-EFAULT);
		}
		if (!is_vm_hugetlb_page(vma) &&
		    ((flags & MPOL_MF_STRICT) ||
		     ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
				vma_migratable(vma)))) {
			unsigned long endvma = vma->vm_end;

			if (endvma > end)
				endvma = end;
			if (vma->vm_start > start)
				start = vma->vm_start;
			err = check_pgd_range(vma, start, endvma, nodes,
						flags, private);
			if (err) {
				first = ERR_PTR(err);
				break;
			}
		}
		prev = vma;
	}
	return first;
}

/* Apply policy to a single VMA */
static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
{
	int err = 0;
	struct mempolicy *old = vma->vm_policy;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	if (vma->vm_ops && vma->vm_ops->set_policy)
		err = vma->vm_ops->set_policy(vma, new);
	if (!err) {
		mpol_get(new);
		vma->vm_policy = new;
		mpol_free(old);
	}
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, struct mempolicy *new)
{
	struct vm_area_struct *next;
	int err;

	err = 0;
	for (; vma && vma->vm_start < end; vma = next) {
		next = vma->vm_next;
		if (vma->vm_start < start)
			err = split_vma(vma->vm_mm, vma, start, 1);
		if (!err && vma->vm_end > end)
			err = split_vma(vma->vm_mm, vma, end, 0);
		if (!err)
			err = policy_vma(vma, new);
		if (err)
			break;
	}
	return err;
}
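
/*
 * Worked example (hypothetical addresses): suppose one VMA covers
 * [0x10000, 0x40000) and mbind_range() is asked to apply a policy to
 * [0x20000, 0x30000).  The first split_vma() call cuts the VMA at
 * 0x20000, the second cuts it again at 0x30000, and policy_vma() then
 * installs the new policy on the middle piece only, leaving the
 * surrounding ranges with their old policy.
 */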

/*
 * Update task->flags PF_MEMPOLICY bit: set iff non-default
 * mempolicy.  Allows more rapid checking of this (combined perhaps
 * with other PF_* flag bits) on memory allocation hot code paths.
 *
 * If called from outside this file, the task 'p' should -only- be
 * a newly forked child not yet visible on the task list, because
 * manipulating the task flags of a visible task is not safe.
 *
 * The above limitation is why this routine has the funny name
 * mpol_fix_fork_child_flag().
 *
 * It is also safe to call this with a task pointer of current,
 * which the static wrapper mpol_set_task_struct_flag() does,
 * for use within this file.
 */

void mpol_fix_fork_child_flag(struct task_struct *p)
{
	if (p->mempolicy)
		p->flags |= PF_MEMPOLICY;
	else
		p->flags &= ~PF_MEMPOLICY;
}

static void mpol_set_task_struct_flag(void)
{
	mpol_fix_fork_child_flag(current);
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, nodemask_t *nodes)
{
	struct mempolicy *new;

	if (mpol_check_policy(mode, nodes))
		return -EINVAL;
	new = mpol_new(mode, nodes);
	if (IS_ERR(new))
		return PTR_ERR(new);
	mpol_free(current->mempolicy);
	current->mempolicy = new;
	mpol_set_task_struct_flag();
	if (new && new->policy == MPOL_INTERLEAVE)
		current->il_next = first_node(new->v.nodes);
	return 0;
}

/* Fill a zone bitmap for a policy */
static void get_zonemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	switch (p->policy) {
	case MPOL_DEFAULT:
		break;
	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		/* or use current node instead of memory_map? */
		if (p->v.preferred_node < 0)
			*nodes = node_states[N_HIGH_MEMORY];
		else
			node_set(p->v.preferred_node, *nodes);
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p;
	int err;

	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	cpuset_update_task_memory_state();
	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		*nmask  = cpuset_current_mems_allowed;
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->policy == MPOL_INTERLEAVE) {
			*policy = current->il_next;
		} else {
			err = -EINVAL;
			goto out;
		}
	} else
		*policy = pol->policy;

	if (vma) {
		up_read(&current->mm->mmap_sem);
		vma = NULL;
	}

	err = 0;
	if (nmask)
		get_zonemask(pol, nmask);

 out:
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}

#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1)
		isolate_lru_page(page, pagelist);
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
	return alloc_pages_node(node, GFP_HIGHUSER_MOVABLE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;

	nodes_clear(nmask);
	node_set(source, nmask);

	check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist))
		err = migrate_pages(&pagelist, new_node_page, dest);

	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	LIST_HEAD(pagelist);
	int busy = 0;
	int err = 0;
	nodemask_t tmp;

	down_read(&mm->mmap_sem);

	err = migrate_vmas(mm, from_nodes, to_nodes, flags);
	if (err)
		goto out;

/*
 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
 * bit in 'tmp', and return that <source, dest> pair for migration.
 * The pair of nodemasks 'to' and 'from' define the map.
 *
 * If no pair of bits is found that way, fallback to picking some
 * pair of 'source' and 'dest' bits that are not the same.  If the
 * 'source' and 'dest' bits are the same, this represents a node
 * that will be migrating to itself, so no pages need move.
 *
 * If no bits are left in 'tmp', or if all remaining bits left
 * in 'tmp' correspond to the same bit in 'to', return false
 * (nothing left to migrate).
 *
 * This lets us pick a pair of nodes to migrate between, such that
 * if possible the dest node is not already occupied by some other
 * source node, minimizing the risk of overloading the memory on a
 * node that would happen if we migrated incoming memory to a node
 * before migrating outgoing memory from that same node.
 *
 * A single scan of tmp is sufficient.  As we go, we remember the
 * most recent <s, d> pair that moved (s != d).  If we find a pair
 * that not only moved, but what's better, moved to an empty slot
 * (d is not set in tmp), then we break out then, with that pair.
 * Otherwise when we finish scanning tmp, we at least have the
 * most recent <s, d> pair that moved.  If we get all the way through
 * the scan of tmp without finding any node that moved, much less
 * moved to an empty node, then there is nothing left worth migrating.
 */

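/*
 * Worked example: with from_nodes = {0,1} and to_nodes = {1,2},
 * node_remap() maps 0->1 and 1->2.  The first scan prefers <1,2> because
 * dest node 2 is not in tmp (an "empty slot"), so node 1's pages move to
 * node 2 first; tmp becomes {0}.  The next pass then migrates node 0's
 * pages to the now-vacated node 1, preserving the relative layout while
 * never piling two source nodes onto one dest.
 */
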
	tmp = *from_nodes;
	while (!nodes_empty(tmp)) {
		int s, d;
		int source = -1;
		int dest = 0;

		for_each_node_mask(s, tmp) {
			d = node_remap(s, *from_nodes, *to_nodes);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
				break;
		}
		if (source == -1)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;
		if (err < 0)
			break;
	}
out:
	up_read(&mm->mmap_sem);
	if (err < 0)
		return err;
	return busy;

}

/*
 * Allocate a new page for page migration based on vma policy.
 * Start assuming that page is mapped by vma pointed to by @private.
 * Search forward from there, if not.  N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
 */
static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	struct vm_area_struct *vma = (struct vm_area_struct *)private;
	unsigned long uninitialized_var(address);

	while (vma) {
		address = page_address_in_vma(page, vma);
		if (address != -EFAULT)
			break;
		vma = vma->vm_next;
	}

	/*
	 * if !vma, alloc_page_vma() will use task or system default policy
	 */
	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
}
#else

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
}

int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	return -ENOSYS;
}

static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	return NULL;
}
#endif

static long do_mbind(unsigned long start, unsigned long len,
		     unsigned short mode, nodemask_t *nmask,
		     unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	struct mempolicy *new;
	unsigned long end;
	int err;
	LIST_HEAD(pagelist);

	if (flags & ~(unsigned long)(MPOL_MF_STRICT |
				     MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
		return -EINVAL;
	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	if (start & ~PAGE_MASK)
		return -EINVAL;

	if (mode == MPOL_DEFAULT)
		flags &= ~MPOL_MF_STRICT;

	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
	end = start + len;

	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;

	if (mpol_check_policy(mode, nmask))
		return -EINVAL;

	new = mpol_new(mode, nmask);
	if (IS_ERR(new))
		return PTR_ERR(new);

	/*
	 * If we are using the default policy then operation
	 * on discontinuous address spaces is okay after all
	 */
	if (!new)
		flags |= MPOL_MF_DISCONTIG_OK;

	pr_debug("mbind %lx-%lx mode:%d nodes:%lx\n", start, start + len,
		 mode, nmask ? nodes_addr(*nmask)[0] : -1);

	down_write(&mm->mmap_sem);
	vma = check_range(mm, start, end, nmask,
			  flags | MPOL_MF_INVERT, &pagelist);

	err = PTR_ERR(vma);
	if (!IS_ERR(vma)) {
		int nr_failed = 0;

		err = mbind_range(vma, start, end, new);

		if (!list_empty(&pagelist))
			nr_failed = migrate_pages(&pagelist, new_vma_page,
						(unsigned long)vma);

		if (!err && nr_failed && (flags & MPOL_MF_STRICT))
			err = -EIO;
	}

	up_write(&mm->mmap_sem);
	mpol_free(new);
	return err;
}

/*
 * User space interface with variable sized bitmaps for nodelists.
 */

/* Copy a node mask from user space. */
static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
		     unsigned long maxnode)
{
	unsigned long k;
	unsigned long nlongs;
	unsigned long endmask;

	--maxnode;
	nodes_clear(*nodes);
	if (maxnode == 0 || !nmask)
		return 0;
	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
		return -EINVAL;

	nlongs = BITS_TO_LONGS(maxnode);
	if ((maxnode % BITS_PER_LONG) == 0)
		endmask = ~0UL;
	else
		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;

	/* When the user specified more nodes than supported, just check
	   if the non-supported part is all zero. */
	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
		if (nlongs > PAGE_SIZE/sizeof(long))
			return -EINVAL;
		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
			unsigned long t;
			if (get_user(t, nmask + k))
				return -EFAULT;
			if (k == nlongs - 1) {
				if (t & endmask)
					return -EINVAL;
			} else if (t)
				return -EINVAL;
		}
		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
		endmask = ~0UL;
	}

	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
		return -EFAULT;
	nodes_addr(*nodes)[nlongs-1] &= endmask;
	return 0;
}
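
/*
 * Worked example (assuming 64-bit longs): a caller passing maxnode = 3
 * means "bits 0..2 may be set".  After --maxnode we have 2, so
 * nlongs = BITS_TO_LONGS(2) = 1 and endmask = (1UL << 2) - 1 = 0x3.
 * One long is copied from user space and masked with 0x3, keeping only
 * nodes 0 and 1.  Note the off-by-one flavor of this interface: to allow
 * node bit N, user space must pass maxnode >= N + 2.
 */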

/* Copy a kernel node mask to user space */
static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
			      nodemask_t *nodes)
{
	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);

	if (copy > nbytes) {
		if (copy > PAGE_SIZE)
			return -EINVAL;
		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
			return -EFAULT;
		copy = nbytes;
	}
	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
}

asmlinkage long sys_mbind(unsigned long start, unsigned long len,
			unsigned long mode,
			unsigned long __user *nmask, unsigned long maxnode,
			unsigned flags)
{
	nodemask_t nodes;
	int err;

	if (mode >= MPOL_MAX)
		return -EINVAL;
	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_mbind(start, len, mode, &nodes, flags);
}

/* Set the process memory policy */
asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
		unsigned long maxnode)
{
	int err;
	nodemask_t nodes;

	if (mode < 0 || mode >= MPOL_MAX)
		return -EINVAL;
	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_set_mempolicy(mode, &nodes);
}

asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
		const unsigned long __user *old_nodes,
		const unsigned long __user *new_nodes)
{
	struct mm_struct *mm;
	struct task_struct *task;
	nodemask_t old;
	nodemask_t new;
	nodemask_t task_nodes;
	int err;

	err = get_nodes(&old, old_nodes, maxnode);
	if (err)
		return err;

	err = get_nodes(&new, new_nodes, maxnode);
	if (err)
		return err;

	/* Find the mm_struct */
	read_lock(&tasklist_lock);
	task = pid ? find_task_by_vpid(pid) : current;
	if (!task) {
		read_unlock(&tasklist_lock);
		return -ESRCH;
	}
	mm = get_task_mm(task);
	read_unlock(&tasklist_lock);

	if (!mm)
		return -EINVAL;

	/*
	 * Check if this process has the right to modify the specified
	 * process. The right exists if the process has administrative
	 * capabilities, superuser privileges or the same
	 * userid as the target process.
	 */
	if ((current->euid != task->suid) && (current->euid != task->uid) &&
	    (current->uid != task->suid) && (current->uid != task->uid) &&
	    !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out;
	}

	task_nodes = cpuset_mems_allowed(task);
	/* Is the user allowed to access the target nodes? */
	if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out;
	}

	if (!nodes_subset(new, node_states[N_HIGH_MEMORY])) {
		err = -EINVAL;
		goto out;
	}

	err = security_task_movememory(task);
	if (err)
		goto out;

	err = do_migrate_pages(mm, &old, &new,
		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
out:
	mmput(mm);
	return err;
}


/* Retrieve NUMA policy */
asmlinkage long sys_get_mempolicy(int __user *policy,
				unsigned long __user *nmask,
				unsigned long maxnode,
				unsigned long addr, unsigned long flags)
{
	int err;
	int uninitialized_var(pval);
	nodemask_t nodes;

	if (nmask != NULL && maxnode < MAX_NUMNODES)
		return -EINVAL;

	err = do_get_mempolicy(&pval, &nodes, addr, flags);

	if (err)
		return err;

	if (policy && put_user(pval, policy))
		return -EFAULT;

	if (nmask)
		err = copy_nodes_to_user(nmask, maxnode, &nodes);

	return err;
}

10301da177e4SLinus Torvalds #ifdef CONFIG_COMPAT
10311da177e4SLinus Torvalds 
10321da177e4SLinus Torvalds asmlinkage long compat_sys_get_mempolicy(int __user *policy,
10331da177e4SLinus Torvalds 				     compat_ulong_t __user *nmask,
10341da177e4SLinus Torvalds 				     compat_ulong_t maxnode,
10351da177e4SLinus Torvalds 				     compat_ulong_t addr, compat_ulong_t flags)
10361da177e4SLinus Torvalds {
10371da177e4SLinus Torvalds 	long err;
10381da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
10391da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
10401da177e4SLinus Torvalds 	DECLARE_BITMAP(bm, MAX_NUMNODES);
10411da177e4SLinus Torvalds 
10421da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
10431da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
10441da177e4SLinus Torvalds 
10451da177e4SLinus Torvalds 	if (nmask)
10461da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
10471da177e4SLinus Torvalds 
10481da177e4SLinus Torvalds 	err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
10491da177e4SLinus Torvalds 
10501da177e4SLinus Torvalds 	if (!err && nmask) {
10511da177e4SLinus Torvalds 		err = copy_from_user(bm, nm, alloc_size);
10521da177e4SLinus Torvalds 		/* ensure entire bitmap is zeroed */
10531da177e4SLinus Torvalds 		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
10541da177e4SLinus Torvalds 		err |= compat_put_bitmap(nmask, bm, nr_bits);
10551da177e4SLinus Torvalds 	}
10561da177e4SLinus Torvalds 
10571da177e4SLinus Torvalds 	return err;
10581da177e4SLinus Torvalds }
10591da177e4SLinus Torvalds 
10601da177e4SLinus Torvalds asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
10611da177e4SLinus Torvalds 				     compat_ulong_t maxnode)
10621da177e4SLinus Torvalds {
10631da177e4SLinus Torvalds 	long err = 0;
10641da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
10651da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
10661da177e4SLinus Torvalds 	DECLARE_BITMAP(bm, MAX_NUMNODES);
10671da177e4SLinus Torvalds 
10681da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
10691da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
10701da177e4SLinus Torvalds 
10711da177e4SLinus Torvalds 	if (nmask) {
10721da177e4SLinus Torvalds 		err = compat_get_bitmap(bm, nmask, nr_bits);
10731da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
10741da177e4SLinus Torvalds 		err |= copy_to_user(nm, bm, alloc_size);
10751da177e4SLinus Torvalds 	}
10761da177e4SLinus Torvalds 
10771da177e4SLinus Torvalds 	if (err)
10781da177e4SLinus Torvalds 		return -EFAULT;
10791da177e4SLinus Torvalds 
10801da177e4SLinus Torvalds 	return sys_set_mempolicy(mode, nm, nr_bits+1);
10811da177e4SLinus Torvalds }
10821da177e4SLinus Torvalds 
10831da177e4SLinus Torvalds asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
10841da177e4SLinus Torvalds 			     compat_ulong_t mode, compat_ulong_t __user *nmask,
10851da177e4SLinus Torvalds 			     compat_ulong_t maxnode, compat_ulong_t flags)
10861da177e4SLinus Torvalds {
10871da177e4SLinus Torvalds 	long err = 0;
10881da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
10891da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
1090dfcd3c0dSAndi Kleen 	nodemask_t bm;
10911da177e4SLinus Torvalds 
10921da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
10931da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
10941da177e4SLinus Torvalds 
10951da177e4SLinus Torvalds 	if (nmask) {
1096dfcd3c0dSAndi Kleen 		err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
10971da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
1098dfcd3c0dSAndi Kleen 		err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
10991da177e4SLinus Torvalds 	}
11001da177e4SLinus Torvalds 
11011da177e4SLinus Torvalds 	if (err)
11021da177e4SLinus Torvalds 		return -EFAULT;
11031da177e4SLinus Torvalds 
11041da177e4SLinus Torvalds 	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
11051da177e4SLinus Torvalds }
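
/*
 * Worked example for the compat bitmap round-trip above (illustrative,
 * assuming a 64-bit kernel with MAX_NUMNODES >= 64): a 32-bit task
 * passing maxnode = 65 yields nr_bits = 64, i.e. two compat_ulong_t
 * words.  User words 0x00000001 and 0x00000002 are packed by
 * compat_get_bitmap() into the single kernel long 0x0000000200000001
 * (bit 0 of the first compat word stays bit 0 of the nodemask), which
 * is copied to the userspace scratch area and handed to sys_mbind().
 */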
11061da177e4SLinus Torvalds 
11071da177e4SLinus Torvalds #endif
11081da177e4SLinus Torvalds 
1109480eccf9SLee Schermerhorn /*
1110480eccf9SLee Schermerhorn  * get_vma_policy(@task, @vma, @addr)
1111480eccf9SLee Schermerhorn  * @task - task for fallback if vma policy == default
1112480eccf9SLee Schermerhorn  * @vma   - virtual memory area whose policy is sought
1113480eccf9SLee Schermerhorn  * @addr  - address in @vma for shared policy lookup
1114480eccf9SLee Schermerhorn  *
1115480eccf9SLee Schermerhorn  * Returns effective policy for a VMA at specified address.
1116480eccf9SLee Schermerhorn  * Falls back to @task or system default policy, as necessary.
1117480eccf9SLee Schermerhorn  * The returned policy has an extra reference count if it is shared,
1118480eccf9SLee Schermerhorn  * a vma policy, or some other task's policy [show_numa_maps() can
1119480eccf9SLee Schermerhorn  * pass @task != current].  It is the caller's responsibility to
1120480eccf9SLee Schermerhorn  * free the reference in these cases.
1121480eccf9SLee Schermerhorn  */
112248fce342SChristoph Lameter static struct mempolicy * get_vma_policy(struct task_struct *task,
112348fce342SChristoph Lameter 		struct vm_area_struct *vma, unsigned long addr)
11241da177e4SLinus Torvalds {
11256e21c8f1SChristoph Lameter 	struct mempolicy *pol = task->mempolicy;
1126480eccf9SLee Schermerhorn 	int shared_pol = 0;
11271da177e4SLinus Torvalds 
11281da177e4SLinus Torvalds 	if (vma) {
1129480eccf9SLee Schermerhorn 		if (vma->vm_ops && vma->vm_ops->get_policy) {
11301da177e4SLinus Torvalds 			pol = vma->vm_ops->get_policy(vma, addr);
1131480eccf9SLee Schermerhorn 			shared_pol = 1;	/* if pol non-NULL, add ref below */
1132480eccf9SLee Schermerhorn 		} else if (vma->vm_policy &&
11331da177e4SLinus Torvalds 				vma->vm_policy->policy != MPOL_DEFAULT)
11341da177e4SLinus Torvalds 			pol = vma->vm_policy;
11351da177e4SLinus Torvalds 	}
11361da177e4SLinus Torvalds 	if (!pol)
11371da177e4SLinus Torvalds 		pol = &default_policy;
1138480eccf9SLee Schermerhorn 	else if (!shared_pol && pol != current->mempolicy)
1139480eccf9SLee Schermerhorn 		mpol_get(pol);	/* vma or other task's policy */
11401da177e4SLinus Torvalds 	return pol;
11411da177e4SLinus Torvalds }
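
/*
 * Typical caller pattern (an illustrative sketch, mirroring
 * show_numa_map() below): callers that may see a shared or foreign
 * policy must pair the lookup with an unref:
 *
 *	struct mempolicy *pol = get_vma_policy(task, vma, addr);
 *
 *	// ... use pol for the allocation decision ...
 *
 *	if (pol != &default_policy && pol != current->mempolicy)
 *		__mpol_free(pol);	// drop the extra reference
 */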
11421da177e4SLinus Torvalds 
114319770b32SMel Gorman /* Return a nodemask representing a mempolicy */
114419770b32SMel Gorman static nodemask_t *nodemask_policy(gfp_t gfp, struct mempolicy *policy)
114519770b32SMel Gorman {
114619770b32SMel Gorman 	/* Lower zones don't get a nodemask applied for MPOL_BIND */
114719770b32SMel Gorman 	if (unlikely(policy->policy == MPOL_BIND) &&
114819770b32SMel Gorman 			gfp_zone(gfp) >= policy_zone &&
114919770b32SMel Gorman 			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
115019770b32SMel Gorman 		return &policy->v.nodes;
115119770b32SMel Gorman 
115219770b32SMel Gorman 	return NULL;
115319770b32SMel Gorman }
115419770b32SMel Gorman 
11551da177e4SLinus Torvalds /* Return a zonelist representing a mempolicy */
1156dd0fc66fSAl Viro static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy)
11571da177e4SLinus Torvalds {
11581da177e4SLinus Torvalds 	int nd;
11591da177e4SLinus Torvalds 
11601da177e4SLinus Torvalds 	switch (policy->policy) {
11611da177e4SLinus Torvalds 	case MPOL_PREFERRED:
11621da177e4SLinus Torvalds 		nd = policy->v.preferred_node;
11631da177e4SLinus Torvalds 		if (nd < 0)
11641da177e4SLinus Torvalds 			nd = numa_node_id();
11651da177e4SLinus Torvalds 		break;
11661da177e4SLinus Torvalds 	case MPOL_BIND:
116719770b32SMel Gorman 		/*
116819770b32SMel Gorman 		 * Normally, MPOL_BIND allocations are node-local within
116919770b32SMel Gorman 		 * the allowed nodemask. However, if __GFP_THISNODE is set
117019770b32SMel Gorman 		 * and the current node is not part of the mask, we use
117119770b32SMel Gorman 		 * the zonelist for the first node in the mask instead.
117219770b32SMel Gorman 		 */
117319770b32SMel Gorman 		nd = numa_node_id();
117419770b32SMel Gorman 		if (unlikely(gfp & __GFP_THISNODE) &&
117519770b32SMel Gorman 				unlikely(!node_isset(nd, policy->v.nodes)))
117619770b32SMel Gorman 			nd = first_node(policy->v.nodes);
117719770b32SMel Gorman 		break;
11781da177e4SLinus Torvalds 	case MPOL_INTERLEAVE: /* should not happen */
11791da177e4SLinus Torvalds 	case MPOL_DEFAULT:
11801da177e4SLinus Torvalds 		nd = numa_node_id();
11811da177e4SLinus Torvalds 		break;
11821da177e4SLinus Torvalds 	default:
11831da177e4SLinus Torvalds 		nd = 0;
11841da177e4SLinus Torvalds 		BUG();
11851da177e4SLinus Torvalds 	}
11860e88460dSMel Gorman 	return node_zonelist(nd, gfp);
11871da177e4SLinus Torvalds }
11881da177e4SLinus Torvalds 
11891da177e4SLinus Torvalds /* Do dynamic interleaving for a process */
11901da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy)
11911da177e4SLinus Torvalds {
11921da177e4SLinus Torvalds 	unsigned nid, next;
11931da177e4SLinus Torvalds 	struct task_struct *me = current;
11941da177e4SLinus Torvalds 
11951da177e4SLinus Torvalds 	nid = me->il_next;
1196dfcd3c0dSAndi Kleen 	next = next_node(nid, policy->v.nodes);
11971da177e4SLinus Torvalds 	if (next >= MAX_NUMNODES)
1198dfcd3c0dSAndi Kleen 		next = first_node(policy->v.nodes);
11991da177e4SLinus Torvalds 	me->il_next = next;
12001da177e4SLinus Torvalds 	return nid;
12011da177e4SLinus Torvalds }
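
/*
 * Worked example (not from the original source): with
 * policy->v.nodes = {0,1,3} and me->il_next == 1, this returns 1 and
 * advances il_next to 3; the following call returns 3 and, because
 * next_node(3, ...) runs past the mask, wraps il_next back to node 0.
 */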
12021da177e4SLinus Torvalds 
1203dc85da15SChristoph Lameter /*
1204dc85da15SChristoph Lameter  * Depending on the memory policy, provide a node from which to allocate
1205dc85da15SChristoph Lameter  * the next slab entry.
1206dc85da15SChristoph Lameter  */
1207dc85da15SChristoph Lameter unsigned slab_node(struct mempolicy *policy)
1208dc85da15SChristoph Lameter {
1209*a3b51e01SDavid Rientjes 	unsigned short pol = policy ? policy->policy : MPOL_DEFAULT;
1210765c4507SChristoph Lameter 
1211765c4507SChristoph Lameter 	switch (pol) {
1212dc85da15SChristoph Lameter 	case MPOL_INTERLEAVE:
1213dc85da15SChristoph Lameter 		return interleave_nodes(policy);
1214dc85da15SChristoph Lameter 
1215dd1a239fSMel Gorman 	case MPOL_BIND: {
1216dc85da15SChristoph Lameter 		/*
1217dc85da15SChristoph Lameter 		 * Follow bind policy behavior and start allocation at the
1218dc85da15SChristoph Lameter 		 * first node.
1219dc85da15SChristoph Lameter 		 */
122019770b32SMel Gorman 		struct zonelist *zonelist;
122119770b32SMel Gorman 		struct zone *zone;
122219770b32SMel Gorman 		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
122319770b32SMel Gorman 		zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
122419770b32SMel Gorman 		(void)first_zones_zonelist(zonelist, highest_zoneidx,
122519770b32SMel Gorman 							&policy->v.nodes,
122619770b32SMel Gorman 							&zone);
122719770b32SMel Gorman 		return zone->node;
1228dd1a239fSMel Gorman 	}
1229dc85da15SChristoph Lameter 
1230dc85da15SChristoph Lameter 	case MPOL_PREFERRED:
1231dc85da15SChristoph Lameter 		if (policy->v.preferred_node >= 0)
1232dc85da15SChristoph Lameter 			return policy->v.preferred_node;
1233dc85da15SChristoph Lameter 		/* Fall through */
1234dc85da15SChristoph Lameter 
1235dc85da15SChristoph Lameter 	default:
1236dc85da15SChristoph Lameter 		return numa_node_id();
1237dc85da15SChristoph Lameter 	}
1238dc85da15SChristoph Lameter }
1239dc85da15SChristoph Lameter 
12401da177e4SLinus Torvalds /* Do static interleaving for a VMA with known offset. */
12411da177e4SLinus Torvalds static unsigned offset_il_node(struct mempolicy *pol,
12421da177e4SLinus Torvalds 		struct vm_area_struct *vma, unsigned long off)
12431da177e4SLinus Torvalds {
1244dfcd3c0dSAndi Kleen 	unsigned nnodes = nodes_weight(pol->v.nodes);
12451da177e4SLinus Torvalds 	unsigned target = (unsigned)off % nnodes;
12461da177e4SLinus Torvalds 	int c;
12471da177e4SLinus Torvalds 	int nid = -1;
12481da177e4SLinus Torvalds 
12491da177e4SLinus Torvalds 	c = 0;
12501da177e4SLinus Torvalds 	do {
1251dfcd3c0dSAndi Kleen 		nid = next_node(nid, pol->v.nodes);
12521da177e4SLinus Torvalds 		c++;
12531da177e4SLinus Torvalds 	} while (c <= target);
12541da177e4SLinus Torvalds 	return nid;
12551da177e4SLinus Torvalds }
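
/*
 * Worked example (not from the original source): with
 * pol->v.nodes = {0,2,5} and off == 7, nnodes == 3 so target == 1;
 * the loop steps nid through 0 and then 2 and returns node 2, the
 * second node of the mask.
 */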
12561da177e4SLinus Torvalds 
12575da7ca86SChristoph Lameter /* Determine a node number for interleave */
12585da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol,
12595da7ca86SChristoph Lameter 		 struct vm_area_struct *vma, unsigned long addr, int shift)
12605da7ca86SChristoph Lameter {
12615da7ca86SChristoph Lameter 	if (vma) {
12625da7ca86SChristoph Lameter 		unsigned long off;
12635da7ca86SChristoph Lameter 
12643b98b087SNishanth Aravamudan 		/*
12653b98b087SNishanth Aravamudan 		 * for small pages, there is no difference between
12663b98b087SNishanth Aravamudan 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
12673b98b087SNishanth Aravamudan 		 * for huge pages, since vm_pgoff is in units of small
12683b98b087SNishanth Aravamudan 		 * pages, we need to shift off the always 0 bits to get
12693b98b087SNishanth Aravamudan 		 * a useful offset.
12703b98b087SNishanth Aravamudan 		 */
12713b98b087SNishanth Aravamudan 		BUG_ON(shift < PAGE_SHIFT);
12723b98b087SNishanth Aravamudan 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
12735da7ca86SChristoph Lameter 		off += (addr - vma->vm_start) >> shift;
12745da7ca86SChristoph Lameter 		return offset_il_node(pol, vma, off);
12755da7ca86SChristoph Lameter 	} else
12765da7ca86SChristoph Lameter 		return interleave_nodes(pol);
12775da7ca86SChristoph Lameter }
12785da7ca86SChristoph Lameter 
127900ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS
1280480eccf9SLee Schermerhorn /*
1281480eccf9SLee Schermerhorn  * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
1282480eccf9SLee Schermerhorn  * @vma = virtual memory area whose policy is sought
1283480eccf9SLee Schermerhorn  * @addr = address in @vma for shared policy lookup and interleave policy
1284480eccf9SLee Schermerhorn  * @gfp_flags = for requested zone
128519770b32SMel Gorman  * @mpol = pointer to mempolicy pointer for reference counted mempolicy
128619770b32SMel Gorman  * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
1287480eccf9SLee Schermerhorn  *
1288480eccf9SLee Schermerhorn  * Returns a zonelist suitable for a huge page allocation.
128919770b32SMel Gorman  * If the effective policy is 'BIND', returns a pointer to the local node's zonelist,
129019770b32SMel Gorman  * and a pointer to the mempolicy's @nodemask for filtering the zonelist.
1291480eccf9SLee Schermerhorn  * If it is also a policy for which get_vma_policy() returns an extra
129219770b32SMel Gorman  * reference, we must hold that reference until after the allocation.
1293480eccf9SLee Schermerhorn  * In that case, return policy via @mpol so hugetlb allocation can drop
1294480eccf9SLee Schermerhorn  * the reference. For non-'BIND' referenced policies, we can/do drop the
1295480eccf9SLee Schermerhorn  * reference here, so the caller doesn't need to know about the special case
1296480eccf9SLee Schermerhorn  * for default and current task policy.
1297480eccf9SLee Schermerhorn  */
1298396faf03SMel Gorman struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
129919770b32SMel Gorman 				gfp_t gfp_flags, struct mempolicy **mpol,
130019770b32SMel Gorman 				nodemask_t **nodemask)
13015da7ca86SChristoph Lameter {
13025da7ca86SChristoph Lameter 	struct mempolicy *pol = get_vma_policy(current, vma, addr);
1303480eccf9SLee Schermerhorn 	struct zonelist *zl;
13045da7ca86SChristoph Lameter 
1305480eccf9SLee Schermerhorn 	*mpol = NULL;		/* probably no unref needed */
130619770b32SMel Gorman 	*nodemask = NULL;	/* assume !MPOL_BIND */
130719770b32SMel Gorman 	if (pol->policy == MPOL_BIND) {
130819770b32SMel Gorman 		*nodemask = &pol->v.nodes;
130919770b32SMel Gorman 	} else if (pol->policy == MPOL_INTERLEAVE) {
13105da7ca86SChristoph Lameter 		unsigned nid;
13115da7ca86SChristoph Lameter 
13125da7ca86SChristoph Lameter 		nid = interleave_nid(pol, vma, addr, HPAGE_SHIFT);
131369682d85SLee Schermerhorn 		if (unlikely(pol != &default_policy &&
131469682d85SLee Schermerhorn 				pol != current->mempolicy))
1315480eccf9SLee Schermerhorn 			__mpol_free(pol);	/* finished with pol */
13160e88460dSMel Gorman 		return node_zonelist(nid, gfp_flags);
13175da7ca86SChristoph Lameter 	}
1318480eccf9SLee Schermerhorn 
1319480eccf9SLee Schermerhorn 	zl = zonelist_policy(GFP_HIGHUSER, pol);
1320480eccf9SLee Schermerhorn 	if (unlikely(pol != &default_policy && pol != current->mempolicy)) {
1321480eccf9SLee Schermerhorn 		if (pol->policy != MPOL_BIND)
1322480eccf9SLee Schermerhorn 			__mpol_free(pol);	/* finished with pol */
1323480eccf9SLee Schermerhorn 		else
1324480eccf9SLee Schermerhorn 			*mpol = pol;	/* unref needed after allocation */
1325480eccf9SLee Schermerhorn 	}
1326480eccf9SLee Schermerhorn 	return zl;
13275da7ca86SChristoph Lameter }
132800ac59adSChen, Kenneth W #endif
13295da7ca86SChristoph Lameter 
13301da177e4SLinus Torvalds /* Allocate a page in interleaved policy.
13311da177e4SLinus Torvalds    Own path because it needs to do special accounting. */
1332662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1333662f3a0bSAndi Kleen 					unsigned nid)
13341da177e4SLinus Torvalds {
13351da177e4SLinus Torvalds 	struct zonelist *zl;
13361da177e4SLinus Torvalds 	struct page *page;
13371da177e4SLinus Torvalds 
13380e88460dSMel Gorman 	zl = node_zonelist(nid, gfp);
13391da177e4SLinus Torvalds 	page = __alloc_pages(gfp, order, zl);
1340dd1a239fSMel Gorman 	if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
1341ca889e6cSChristoph Lameter 		inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
13421da177e4SLinus Torvalds 	return page;
13431da177e4SLinus Torvalds }
13441da177e4SLinus Torvalds 
13451da177e4SLinus Torvalds /**
13461da177e4SLinus Torvalds  * 	alloc_page_vma	- Allocate a page for a VMA.
13471da177e4SLinus Torvalds  *
13481da177e4SLinus Torvalds  * 	@gfp:
13491da177e4SLinus Torvalds  *      %GFP_USER    user allocation.
13501da177e4SLinus Torvalds  *      %GFP_KERNEL  kernel allocations,
13511da177e4SLinus Torvalds  *      %GFP_HIGHMEM highmem/user allocations,
13521da177e4SLinus Torvalds  *      %GFP_FS      allocation should not call back into a file system.
13531da177e4SLinus Torvalds  *      %GFP_ATOMIC  don't sleep.
13541da177e4SLinus Torvalds  *
13551da177e4SLinus Torvalds  * 	@vma:  Pointer to VMA or NULL if not available.
13561da177e4SLinus Torvalds  *	@addr: Virtual Address of the allocation. Must be inside the VMA.
13571da177e4SLinus Torvalds  *
13581da177e4SLinus Torvalds  * 	This function allocates a page from the kernel page pool and applies
13591da177e4SLinus Torvalds  *	a NUMA policy associated with the VMA or the current process.
13601da177e4SLinus Torvalds  *	When VMA is not NULL caller must hold down_read on the mmap_sem of the
13611da177e4SLinus Torvalds  *	mm_struct of the VMA to prevent it from going away. Should be used for
13621da177e4SLinus Torvalds  *	all allocations for pages that will be mapped into
13631da177e4SLinus Torvalds  * 	user space. Returns NULL when no page can be allocated.
13641da177e4SLinus Torvalds  *
13651da177e4SLinus Torvalds  *	Should be called with the mmap_sem of the vma's mm held.
13661da177e4SLinus Torvalds  */
13671da177e4SLinus Torvalds struct page *
1368dd0fc66fSAl Viro alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
13691da177e4SLinus Torvalds {
13706e21c8f1SChristoph Lameter 	struct mempolicy *pol = get_vma_policy(current, vma, addr);
1371480eccf9SLee Schermerhorn 	struct zonelist *zl;
13721da177e4SLinus Torvalds 
1373cf2a473cSPaul Jackson 	cpuset_update_task_memory_state();
13741da177e4SLinus Torvalds 
13751da177e4SLinus Torvalds 	if (unlikely(pol->policy == MPOL_INTERLEAVE)) {
13761da177e4SLinus Torvalds 		unsigned nid;
13775da7ca86SChristoph Lameter 
13785da7ca86SChristoph Lameter 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
137969682d85SLee Schermerhorn 		if (unlikely(pol != &default_policy &&
138069682d85SLee Schermerhorn 				pol != current->mempolicy))
138169682d85SLee Schermerhorn 			__mpol_free(pol);	/* finished with pol */
13821da177e4SLinus Torvalds 		return alloc_page_interleave(gfp, 0, nid);
13831da177e4SLinus Torvalds 	}
1384480eccf9SLee Schermerhorn 	zl = zonelist_policy(gfp, pol);
1385480eccf9SLee Schermerhorn 	if (pol != &default_policy && pol != current->mempolicy) {
1386480eccf9SLee Schermerhorn 		/*
1387480eccf9SLee Schermerhorn 		 * slow path: ref counted policy -- shared or vma
1388480eccf9SLee Schermerhorn 		 */
138919770b32SMel Gorman 		struct page *page = __alloc_pages_nodemask(gfp, 0,
139019770b32SMel Gorman 						zl, nodemask_policy(gfp, pol));
1391480eccf9SLee Schermerhorn 		__mpol_free(pol);
1392480eccf9SLee Schermerhorn 		return page;
1393480eccf9SLee Schermerhorn 	}
1394480eccf9SLee Schermerhorn 	/*
1395480eccf9SLee Schermerhorn 	 * fast path:  default or task policy
1396480eccf9SLee Schermerhorn 	 */
139719770b32SMel Gorman 	return __alloc_pages_nodemask(gfp, 0, zl, nodemask_policy(gfp, pol));
13981da177e4SLinus Torvalds }
13991da177e4SLinus Torvalds 
14001da177e4SLinus Torvalds /**
14011da177e4SLinus Torvalds  * 	alloc_pages_current - Allocate pages.
14021da177e4SLinus Torvalds  *
14031da177e4SLinus Torvalds  *	@gfp:
14041da177e4SLinus Torvalds  *		%GFP_USER   user allocation,
14051da177e4SLinus Torvalds  *      	%GFP_KERNEL kernel allocation,
14061da177e4SLinus Torvalds  *      	%GFP_HIGHMEM highmem allocation,
14071da177e4SLinus Torvalds  *      	%GFP_FS     don't call back into a file system.
14081da177e4SLinus Torvalds  *      	%GFP_ATOMIC don't sleep.
14091da177e4SLinus Torvalds  *	@order: Power of two of allocation size in pages. 0 is a single page.
14101da177e4SLinus Torvalds  *
14111da177e4SLinus Torvalds  *	Allocate pages from the kernel page pool.  When not in
14121da177e4SLinus Torvalds  *	interrupt context, apply the current process' NUMA policy.
14131da177e4SLinus Torvalds  *	Returns NULL when no page can be allocated.
14141da177e4SLinus Torvalds  *
1415cf2a473cSPaul Jackson  *	Don't call cpuset_update_task_memory_state() unless
14161da177e4SLinus Torvalds  *	1) it's ok to take cpuset_sem (can WAIT), and
14171da177e4SLinus Torvalds  *	2) allocating for current task (not interrupt).
14181da177e4SLinus Torvalds  */
1419dd0fc66fSAl Viro struct page *alloc_pages_current(gfp_t gfp, unsigned order)
14201da177e4SLinus Torvalds {
14211da177e4SLinus Torvalds 	struct mempolicy *pol = current->mempolicy;
14221da177e4SLinus Torvalds 
14231da177e4SLinus Torvalds 	if ((gfp & __GFP_WAIT) && !in_interrupt())
1424cf2a473cSPaul Jackson 		cpuset_update_task_memory_state();
14259b819d20SChristoph Lameter 	if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
14261da177e4SLinus Torvalds 		pol = &default_policy;
14271da177e4SLinus Torvalds 	if (pol->policy == MPOL_INTERLEAVE)
14281da177e4SLinus Torvalds 		return alloc_page_interleave(gfp, order, interleave_nodes(pol));
142919770b32SMel Gorman 	return __alloc_pages_nodemask(gfp, order,
143019770b32SMel Gorman 			zonelist_policy(gfp, pol), nodemask_policy(gfp, pol));
14311da177e4SLinus Torvalds }
14321da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_pages_current);
14331da177e4SLinus Torvalds 
14344225399aSPaul Jackson /*
14354225399aSPaul Jackson  * If mpol_copy() sees current->cpuset == cpuset_being_rebound, then it
14364225399aSPaul Jackson  * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
14374225399aSPaul Jackson  * with the mems_allowed returned by cpuset_mems_allowed().  This
14384225399aSPaul Jackson  * keeps mempolicies cpuset relative after its cpuset moves.  See
14394225399aSPaul Jackson  * further kernel/cpuset.c update_nodemask().
14404225399aSPaul Jackson  */
14414225399aSPaul Jackson 
14421da177e4SLinus Torvalds /* Slow path of a mempolicy copy */
14431da177e4SLinus Torvalds struct mempolicy *__mpol_copy(struct mempolicy *old)
14441da177e4SLinus Torvalds {
14451da177e4SLinus Torvalds 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
14461da177e4SLinus Torvalds 
14471da177e4SLinus Torvalds 	if (!new)
14481da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
14494225399aSPaul Jackson 	if (current_cpuset_is_being_rebound()) {
14504225399aSPaul Jackson 		nodemask_t mems = cpuset_mems_allowed(current);
14514225399aSPaul Jackson 		mpol_rebind_policy(old, &mems);
14524225399aSPaul Jackson 	}
14531da177e4SLinus Torvalds 	*new = *old;
14541da177e4SLinus Torvalds 	atomic_set(&new->refcnt, 1);
14551da177e4SLinus Torvalds 	return new;
14561da177e4SLinus Torvalds }
14571da177e4SLinus Torvalds 
14581da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */
14591da177e4SLinus Torvalds int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
14601da177e4SLinus Torvalds {
14611da177e4SLinus Torvalds 	if (!a || !b)
14621da177e4SLinus Torvalds 		return 0;
14631da177e4SLinus Torvalds 	if (a->policy != b->policy)
14641da177e4SLinus Torvalds 		return 0;
14651da177e4SLinus Torvalds 	switch (a->policy) {
14661da177e4SLinus Torvalds 	case MPOL_DEFAULT:
14671da177e4SLinus Torvalds 		return 1;
146819770b32SMel Gorman 	case MPOL_BIND:
146919770b32SMel Gorman 		/* Fall through */
14701da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
1471dfcd3c0dSAndi Kleen 		return nodes_equal(a->v.nodes, b->v.nodes);
14721da177e4SLinus Torvalds 	case MPOL_PREFERRED:
14731da177e4SLinus Torvalds 		return a->v.preferred_node == b->v.preferred_node;
14741da177e4SLinus Torvalds 	default:
14751da177e4SLinus Torvalds 		BUG();
14761da177e4SLinus Torvalds 		return 0;
14771da177e4SLinus Torvalds 	}
14781da177e4SLinus Torvalds }
14791da177e4SLinus Torvalds 
14801da177e4SLinus Torvalds /* Slow path of a mpol destructor. */
14811da177e4SLinus Torvalds void __mpol_free(struct mempolicy *p)
14821da177e4SLinus Torvalds {
14831da177e4SLinus Torvalds 	if (!atomic_dec_and_test(&p->refcnt))
14841da177e4SLinus Torvalds 		return;
14851da177e4SLinus Torvalds 	p->policy = MPOL_DEFAULT;
14861da177e4SLinus Torvalds 	kmem_cache_free(policy_cache, p);
14871da177e4SLinus Torvalds }
14881da177e4SLinus Torvalds 
14891da177e4SLinus Torvalds /*
14901da177e4SLinus Torvalds  * Shared memory backing store policy support.
14911da177e4SLinus Torvalds  *
14921da177e4SLinus Torvalds  * Remember policies even when nobody has shared memory mapped.
14931da177e4SLinus Torvalds  * The policies are kept in Red-Black tree linked from the inode.
14941da177e4SLinus Torvalds  * They are protected by the sp->lock spinlock, which should be held
14951da177e4SLinus Torvalds  * for any accesses to the tree.
14961da177e4SLinus Torvalds  */
14971da177e4SLinus Torvalds 
14981da177e4SLinus Torvalds /* lookup first element intersecting start-end */
14991da177e4SLinus Torvalds /* Caller holds sp->lock */
15001da177e4SLinus Torvalds static struct sp_node *
15011da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
15021da177e4SLinus Torvalds {
15031da177e4SLinus Torvalds 	struct rb_node *n = sp->root.rb_node;
15041da177e4SLinus Torvalds 
15051da177e4SLinus Torvalds 	while (n) {
15061da177e4SLinus Torvalds 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
15071da177e4SLinus Torvalds 
15081da177e4SLinus Torvalds 		if (start >= p->end)
15091da177e4SLinus Torvalds 			n = n->rb_right;
15101da177e4SLinus Torvalds 		else if (end <= p->start)
15111da177e4SLinus Torvalds 			n = n->rb_left;
15121da177e4SLinus Torvalds 		else
15131da177e4SLinus Torvalds 			break;
15141da177e4SLinus Torvalds 	}
15151da177e4SLinus Torvalds 	if (!n)
15161da177e4SLinus Torvalds 		return NULL;
15171da177e4SLinus Torvalds 	for (;;) {
15181da177e4SLinus Torvalds 		struct sp_node *w = NULL;
15191da177e4SLinus Torvalds 		struct rb_node *prev = rb_prev(n);
15201da177e4SLinus Torvalds 		if (!prev)
15211da177e4SLinus Torvalds 			break;
15221da177e4SLinus Torvalds 		w = rb_entry(prev, struct sp_node, nd);
15231da177e4SLinus Torvalds 		if (w->end <= start)
15241da177e4SLinus Torvalds 			break;
15251da177e4SLinus Torvalds 		n = prev;
15261da177e4SLinus Torvalds 	}
15271da177e4SLinus Torvalds 	return rb_entry(n, struct sp_node, nd);
15281da177e4SLinus Torvalds }
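
/*
 * Worked example (not from the original source): with disjoint ranges
 * [1,3) and [3,6) in the tree, a lookup for [2,5) may first descend to
 * [3,6); the backward walk then notices that [1,3) also intersects
 * (its end 3 > start 2) and returns it, so the caller always gets the
 * lowest intersecting range.
 */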
15291da177e4SLinus Torvalds 
15301da177e4SLinus Torvalds /* Insert a new shared policy into the list. */
15311da177e4SLinus Torvalds /* Caller holds sp->lock */
15321da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new)
15331da177e4SLinus Torvalds {
15341da177e4SLinus Torvalds 	struct rb_node **p = &sp->root.rb_node;
15351da177e4SLinus Torvalds 	struct rb_node *parent = NULL;
15361da177e4SLinus Torvalds 	struct sp_node *nd;
15371da177e4SLinus Torvalds 
15381da177e4SLinus Torvalds 	while (*p) {
15391da177e4SLinus Torvalds 		parent = *p;
15401da177e4SLinus Torvalds 		nd = rb_entry(parent, struct sp_node, nd);
15411da177e4SLinus Torvalds 		if (new->start < nd->start)
15421da177e4SLinus Torvalds 			p = &(*p)->rb_left;
15431da177e4SLinus Torvalds 		else if (new->end > nd->end)
15441da177e4SLinus Torvalds 			p = &(*p)->rb_right;
15451da177e4SLinus Torvalds 		else
15461da177e4SLinus Torvalds 			BUG();
15471da177e4SLinus Torvalds 	}
15481da177e4SLinus Torvalds 	rb_link_node(&new->nd, parent, p);
15491da177e4SLinus Torvalds 	rb_insert_color(&new->nd, &sp->root);
1550140d5a49SPaul Mundt 	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
15511da177e4SLinus Torvalds 		 new->policy ? new->policy->policy : 0);
15521da177e4SLinus Torvalds }
15531da177e4SLinus Torvalds 
15541da177e4SLinus Torvalds /* Find shared policy intersecting idx */
15551da177e4SLinus Torvalds struct mempolicy *
15561da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
15571da177e4SLinus Torvalds {
15581da177e4SLinus Torvalds 	struct mempolicy *pol = NULL;
15591da177e4SLinus Torvalds 	struct sp_node *sn;
15601da177e4SLinus Torvalds 
15611da177e4SLinus Torvalds 	if (!sp->root.rb_node)
15621da177e4SLinus Torvalds 		return NULL;
15631da177e4SLinus Torvalds 	spin_lock(&sp->lock);
15641da177e4SLinus Torvalds 	sn = sp_lookup(sp, idx, idx+1);
15651da177e4SLinus Torvalds 	if (sn) {
15661da177e4SLinus Torvalds 		mpol_get(sn->policy);
15671da177e4SLinus Torvalds 		pol = sn->policy;
15681da177e4SLinus Torvalds 	}
15691da177e4SLinus Torvalds 	spin_unlock(&sp->lock);
15701da177e4SLinus Torvalds 	return pol;
15711da177e4SLinus Torvalds }
15721da177e4SLinus Torvalds 
15731da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n)
15741da177e4SLinus Torvalds {
1575140d5a49SPaul Mundt 	pr_debug("deleting %lx-%lx\n", n->start, n->end);
15761da177e4SLinus Torvalds 	rb_erase(&n->nd, &sp->root);
15771da177e4SLinus Torvalds 	mpol_free(n->policy);
15781da177e4SLinus Torvalds 	kmem_cache_free(sn_cache, n);
15791da177e4SLinus Torvalds }
15801da177e4SLinus Torvalds 
1581dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
1582dbcb0f19SAdrian Bunk 				struct mempolicy *pol)
15831da177e4SLinus Torvalds {
15841da177e4SLinus Torvalds 	struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
15851da177e4SLinus Torvalds 
15861da177e4SLinus Torvalds 	if (!n)
15871da177e4SLinus Torvalds 		return NULL;
15881da177e4SLinus Torvalds 	n->start = start;
15891da177e4SLinus Torvalds 	n->end = end;
15901da177e4SLinus Torvalds 	mpol_get(pol);
15911da177e4SLinus Torvalds 	n->policy = pol;
15921da177e4SLinus Torvalds 	return n;
15931da177e4SLinus Torvalds }
15941da177e4SLinus Torvalds 
15951da177e4SLinus Torvalds /* Replace a policy range. */
15961da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
15971da177e4SLinus Torvalds 				 unsigned long end, struct sp_node *new)
15981da177e4SLinus Torvalds {
15991da177e4SLinus Torvalds 	struct sp_node *n, *new2 = NULL;
16001da177e4SLinus Torvalds 
16011da177e4SLinus Torvalds restart:
16021da177e4SLinus Torvalds 	spin_lock(&sp->lock);
16031da177e4SLinus Torvalds 	n = sp_lookup(sp, start, end);
16041da177e4SLinus Torvalds 	/* Take care of old policies in the same range. */
16051da177e4SLinus Torvalds 	while (n && n->start < end) {
16061da177e4SLinus Torvalds 		struct rb_node *next = rb_next(&n->nd);
16071da177e4SLinus Torvalds 		if (n->start >= start) {
16081da177e4SLinus Torvalds 			if (n->end <= end)
16091da177e4SLinus Torvalds 				sp_delete(sp, n);
16101da177e4SLinus Torvalds 			else
16111da177e4SLinus Torvalds 				n->start = end;
16121da177e4SLinus Torvalds 		} else {
16131da177e4SLinus Torvalds 			/* Old policy spanning whole new range. */
16141da177e4SLinus Torvalds 			if (n->end > end) {
16151da177e4SLinus Torvalds 				if (!new2) {
16161da177e4SLinus Torvalds 					spin_unlock(&sp->lock);
16171da177e4SLinus Torvalds 					new2 = sp_alloc(end, n->end, n->policy);
16181da177e4SLinus Torvalds 					if (!new2)
16191da177e4SLinus Torvalds 						return -ENOMEM;
16201da177e4SLinus Torvalds 					goto restart;
16211da177e4SLinus Torvalds 				}
16221da177e4SLinus Torvalds 				n->end = start;
16231da177e4SLinus Torvalds 				sp_insert(sp, new2);
16241da177e4SLinus Torvalds 				new2 = NULL;
16251da177e4SLinus Torvalds 				break;
16261da177e4SLinus Torvalds 			} else
16271da177e4SLinus Torvalds 				n->end = start;
16281da177e4SLinus Torvalds 		}
16291da177e4SLinus Torvalds 		if (!next)
16301da177e4SLinus Torvalds 			break;
16311da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
16321da177e4SLinus Torvalds 	}
16331da177e4SLinus Torvalds 	if (new)
16341da177e4SLinus Torvalds 		sp_insert(sp, new);
16351da177e4SLinus Torvalds 	spin_unlock(&sp->lock);
16361da177e4SLinus Torvalds 	if (new2) {
16371da177e4SLinus Torvalds 		mpol_free(new2->policy);
16381da177e4SLinus Torvalds 		kmem_cache_free(sn_cache, new2);
16391da177e4SLinus Torvalds 	}
16401da177e4SLinus Torvalds 	return 0;
16411da177e4SLinus Torvalds }
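
/*
 * Worked example (not from the original source): if the tree holds a
 * single range [0,10) with policy A and the caller installs [4,6) with
 * policy B, the span [6,10) carrying A is carved off into new2 (the
 * lock is dropped for that allocation and the scan restarted), the old
 * node is truncated to [0,4), and the resulting tree holds
 * [0,4) A, [4,6) B, [6,10) A.
 */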
16421da177e4SLinus Torvalds 
1643*a3b51e01SDavid Rientjes void mpol_shared_policy_init(struct shared_policy *info, unsigned short policy,
16447339ff83SRobin Holt 				nodemask_t *policy_nodes)
16457339ff83SRobin Holt {
16467339ff83SRobin Holt 	info->root = RB_ROOT;
16477339ff83SRobin Holt 	spin_lock_init(&info->lock);
16487339ff83SRobin Holt 
16497339ff83SRobin Holt 	if (policy != MPOL_DEFAULT) {
16507339ff83SRobin Holt 		struct mempolicy *newpol;
16517339ff83SRobin Holt 
16527339ff83SRobin Holt 		/* Falls back to MPOL_DEFAULT on any error */
16537339ff83SRobin Holt 		newpol = mpol_new(policy, policy_nodes);
16547339ff83SRobin Holt 		if (!IS_ERR(newpol)) {
16557339ff83SRobin Holt 			/* Create pseudo-vma that contains just the policy */
16567339ff83SRobin Holt 			struct vm_area_struct pvma;
16577339ff83SRobin Holt 
16587339ff83SRobin Holt 			memset(&pvma, 0, sizeof(struct vm_area_struct));
16597339ff83SRobin Holt 			/* Policy covers entire file */
16607339ff83SRobin Holt 			pvma.vm_end = TASK_SIZE;
16617339ff83SRobin Holt 			mpol_set_shared_policy(info, &pvma, newpol);
16627339ff83SRobin Holt 			mpol_free(newpol);
16637339ff83SRobin Holt 		}
16647339ff83SRobin Holt 	}
16657339ff83SRobin Holt }
16667339ff83SRobin Holt 
16671da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info,
16681da177e4SLinus Torvalds 			struct vm_area_struct *vma, struct mempolicy *npol)
16691da177e4SLinus Torvalds {
16701da177e4SLinus Torvalds 	int err;
16711da177e4SLinus Torvalds 	struct sp_node *new = NULL;
16721da177e4SLinus Torvalds 	unsigned long sz = vma_pages(vma);
16731da177e4SLinus Torvalds 
1674140d5a49SPaul Mundt 	pr_debug("set_shared_policy %lx sz %lu %d %lx\n",
16751da177e4SLinus Torvalds 		 vma->vm_pgoff,
16761da177e4SLinus Torvalds 		 sz, npol ? npol->policy : -1,
1677dfcd3c0dSAndi Kleen 		 npol ? nodes_addr(npol->v.nodes)[0] : -1);
16781da177e4SLinus Torvalds 
16791da177e4SLinus Torvalds 	if (npol) {
16801da177e4SLinus Torvalds 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
16811da177e4SLinus Torvalds 		if (!new)
16821da177e4SLinus Torvalds 			return -ENOMEM;
16831da177e4SLinus Torvalds 	}
16841da177e4SLinus Torvalds 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
16851da177e4SLinus Torvalds 	if (err && new)
16861da177e4SLinus Torvalds 		kmem_cache_free(sn_cache, new);
16871da177e4SLinus Torvalds 	return err;
16881da177e4SLinus Torvalds }
16891da177e4SLinus Torvalds 
16901da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */
16911da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p)
16921da177e4SLinus Torvalds {
16931da177e4SLinus Torvalds 	struct sp_node *n;
16941da177e4SLinus Torvalds 	struct rb_node *next;
16951da177e4SLinus Torvalds 
16961da177e4SLinus Torvalds 	if (!p->root.rb_node)
16971da177e4SLinus Torvalds 		return;
16981da177e4SLinus Torvalds 	spin_lock(&p->lock);
16991da177e4SLinus Torvalds 	next = rb_first(&p->root);
17001da177e4SLinus Torvalds 	while (next) {
17011da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
17021da177e4SLinus Torvalds 		next = rb_next(&n->nd);
170390c5029eSAndi Kleen 		rb_erase(&n->nd, &p->root);
17041da177e4SLinus Torvalds 		mpol_free(n->policy);
17051da177e4SLinus Torvalds 		kmem_cache_free(sn_cache, n);
17061da177e4SLinus Torvalds 	}
17071da177e4SLinus Torvalds 	spin_unlock(&p->lock);
17081da177e4SLinus Torvalds }
17091da177e4SLinus Torvalds 
17101da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */
17111da177e4SLinus Torvalds void __init numa_policy_init(void)
17121da177e4SLinus Torvalds {
1713b71636e2SPaul Mundt 	nodemask_t interleave_nodes;
1714b71636e2SPaul Mundt 	unsigned long largest = 0;
1715b71636e2SPaul Mundt 	int nid, prefer = 0;
1716b71636e2SPaul Mundt 
17171da177e4SLinus Torvalds 	policy_cache = kmem_cache_create("numa_policy",
17181da177e4SLinus Torvalds 					 sizeof(struct mempolicy),
171920c2df83SPaul Mundt 					 0, SLAB_PANIC, NULL);
17201da177e4SLinus Torvalds 
17211da177e4SLinus Torvalds 	sn_cache = kmem_cache_create("shared_policy_node",
17221da177e4SLinus Torvalds 				     sizeof(struct sp_node),
172320c2df83SPaul Mundt 				     0, SLAB_PANIC, NULL);
17241da177e4SLinus Torvalds 
1725b71636e2SPaul Mundt 	/*
1726b71636e2SPaul Mundt 	 * Set interleaving policy for system init. Interleaving is only
1727b71636e2SPaul Mundt 	 * enabled across suitably sized nodes (>= 16MB, i.e. 4096 pages of
1728b71636e2SPaul Mundt 	 * 4 KiB by default); if all nodes are smaller, fall back to the largest.
1729b71636e2SPaul Mundt 	 */
1730b71636e2SPaul Mundt 	nodes_clear(interleave_nodes);
173156bbd65dSChristoph Lameter 	for_each_node_state(nid, N_HIGH_MEMORY) {
1732b71636e2SPaul Mundt 		unsigned long total_pages = node_present_pages(nid);
17331da177e4SLinus Torvalds 
1734b71636e2SPaul Mundt 		/* Preserve the largest node */
1735b71636e2SPaul Mundt 		if (largest < total_pages) {
1736b71636e2SPaul Mundt 			largest = total_pages;
1737b71636e2SPaul Mundt 			prefer = nid;
1738b71636e2SPaul Mundt 		}
1739b71636e2SPaul Mundt 
1740b71636e2SPaul Mundt 		/* Interleave this node? */
1741b71636e2SPaul Mundt 		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
1742b71636e2SPaul Mundt 			node_set(nid, interleave_nodes);
1743b71636e2SPaul Mundt 	}
1744b71636e2SPaul Mundt 
1745b71636e2SPaul Mundt 	/* All too small, use the largest */
1746b71636e2SPaul Mundt 	if (unlikely(nodes_empty(interleave_nodes)))
1747b71636e2SPaul Mundt 		node_set(prefer, interleave_nodes);
1748b71636e2SPaul Mundt 
1749b71636e2SPaul Mundt 	if (do_set_mempolicy(MPOL_INTERLEAVE, &interleave_nodes))
17501da177e4SLinus Torvalds 		printk("numa_policy_init: interleaving failed\n");
17511da177e4SLinus Torvalds }
17521da177e4SLinus Torvalds 
17538bccd85fSChristoph Lameter /* Reset policy of current process to default */
17541da177e4SLinus Torvalds void numa_default_policy(void)
17551da177e4SLinus Torvalds {
17568bccd85fSChristoph Lameter 	do_set_mempolicy(MPOL_DEFAULT, NULL);
17571da177e4SLinus Torvalds }
175868860ec1SPaul Jackson 
175968860ec1SPaul Jackson /* Migrate a policy to a different set of nodes */
1760dbcb0f19SAdrian Bunk static void mpol_rebind_policy(struct mempolicy *pol,
1761dbcb0f19SAdrian Bunk 			       const nodemask_t *newmask)
176268860ec1SPaul Jackson {
176374cb2155SPaul Jackson 	nodemask_t *mpolmask;
176468860ec1SPaul Jackson 	nodemask_t tmp;
176568860ec1SPaul Jackson 
176668860ec1SPaul Jackson 	if (!pol)
176768860ec1SPaul Jackson 		return;
176874cb2155SPaul Jackson 	mpolmask = &pol->cpuset_mems_allowed;
176974cb2155SPaul Jackson 	if (nodes_equal(*mpolmask, *newmask))
177074cb2155SPaul Jackson 		return;
177168860ec1SPaul Jackson 
177268860ec1SPaul Jackson 	switch (pol->policy) {
177368860ec1SPaul Jackson 	case MPOL_DEFAULT:
177468860ec1SPaul Jackson 		break;
177519770b32SMel Gorman 	case MPOL_BIND:
177619770b32SMel Gorman 		/* Fall through */
177768860ec1SPaul Jackson 	case MPOL_INTERLEAVE:
177874cb2155SPaul Jackson 		nodes_remap(tmp, pol->v.nodes, *mpolmask, *newmask);
177968860ec1SPaul Jackson 		pol->v.nodes = tmp;
178074cb2155SPaul Jackson 		*mpolmask = *newmask;
178174cb2155SPaul Jackson 		current->il_next = node_remap(current->il_next,
178274cb2155SPaul Jackson 						*mpolmask, *newmask);
178368860ec1SPaul Jackson 		break;
178468860ec1SPaul Jackson 	case MPOL_PREFERRED:
178568860ec1SPaul Jackson 		pol->v.preferred_node = node_remap(pol->v.preferred_node,
178674cb2155SPaul Jackson 						*mpolmask, *newmask);
178774cb2155SPaul Jackson 		*mpolmask = *newmask;
178868860ec1SPaul Jackson 		break;
178968860ec1SPaul Jackson 	default:
179068860ec1SPaul Jackson 		BUG();
179168860ec1SPaul Jackson 		break;
179268860ec1SPaul Jackson 	}
179368860ec1SPaul Jackson }
179468860ec1SPaul Jackson 
179568860ec1SPaul Jackson /*
179674cb2155SPaul Jackson  * Wrapper for mpol_rebind_policy() that just requires task
179774cb2155SPaul Jackson  * pointer, and updates task mempolicy.
179868860ec1SPaul Jackson  */
179974cb2155SPaul Jackson 
180074cb2155SPaul Jackson void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
180168860ec1SPaul Jackson {
180274cb2155SPaul Jackson 	mpol_rebind_policy(tsk->mempolicy, new);
180368860ec1SPaul Jackson }
18041a75a6c8SChristoph Lameter 
18051a75a6c8SChristoph Lameter /*
18064225399aSPaul Jackson  * Rebind each vma in mm to new nodemask.
18074225399aSPaul Jackson  *
18084225399aSPaul Jackson  * Call holding a reference to mm.  Takes mm->mmap_sem during call.
18094225399aSPaul Jackson  */
18104225399aSPaul Jackson 
18114225399aSPaul Jackson void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
18124225399aSPaul Jackson {
18134225399aSPaul Jackson 	struct vm_area_struct *vma;
18144225399aSPaul Jackson 
18154225399aSPaul Jackson 	down_write(&mm->mmap_sem);
18164225399aSPaul Jackson 	for (vma = mm->mmap; vma; vma = vma->vm_next)
18174225399aSPaul Jackson 		mpol_rebind_policy(vma->vm_policy, new);
18184225399aSPaul Jackson 	up_write(&mm->mmap_sem);
18194225399aSPaul Jackson }
18204225399aSPaul Jackson 
18214225399aSPaul Jackson /*
18221a75a6c8SChristoph Lameter  * Display pages allocated per node and memory policy via /proc.
18231a75a6c8SChristoph Lameter  */
18241a75a6c8SChristoph Lameter 
182515ad7cdcSHelge Deller static const char * const policy_types[] =
182615ad7cdcSHelge Deller 	{ "default", "prefer", "bind", "interleave" };
18271a75a6c8SChristoph Lameter 
18281a75a6c8SChristoph Lameter /*
18291a75a6c8SChristoph Lameter  * Convert a mempolicy into a string.
18301a75a6c8SChristoph Lameter  * Returns the number of characters in buffer (if positive)
18311a75a6c8SChristoph Lameter  * or an error (negative)
18321a75a6c8SChristoph Lameter  */
18331a75a6c8SChristoph Lameter static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
18341a75a6c8SChristoph Lameter {
18351a75a6c8SChristoph Lameter 	char *p = buffer;
18361a75a6c8SChristoph Lameter 	int l;
18371a75a6c8SChristoph Lameter 	nodemask_t nodes;
1838*a3b51e01SDavid Rientjes 	unsigned short mode = pol ? pol->policy : MPOL_DEFAULT;
18391a75a6c8SChristoph Lameter 
18401a75a6c8SChristoph Lameter 	switch (mode) {
18411a75a6c8SChristoph Lameter 	case MPOL_DEFAULT:
18421a75a6c8SChristoph Lameter 		nodes_clear(nodes);
18431a75a6c8SChristoph Lameter 		break;
18441a75a6c8SChristoph Lameter 
18451a75a6c8SChristoph Lameter 	case MPOL_PREFERRED:
18461a75a6c8SChristoph Lameter 		nodes_clear(nodes);
18471a75a6c8SChristoph Lameter 		node_set(pol->v.preferred_node, nodes);
18481a75a6c8SChristoph Lameter 		break;
18491a75a6c8SChristoph Lameter 
18501a75a6c8SChristoph Lameter 	case MPOL_BIND:
185119770b32SMel Gorman 		/* Fall through */
18521a75a6c8SChristoph Lameter 	case MPOL_INTERLEAVE:
18531a75a6c8SChristoph Lameter 		nodes = pol->v.nodes;
18541a75a6c8SChristoph Lameter 		break;
18551a75a6c8SChristoph Lameter 
18561a75a6c8SChristoph Lameter 	default:
18571a75a6c8SChristoph Lameter 		BUG();
18581a75a6c8SChristoph Lameter 		return -EFAULT;
18591a75a6c8SChristoph Lameter 	}
18601a75a6c8SChristoph Lameter 
18611a75a6c8SChristoph Lameter 	l = strlen(policy_types[mode]);
18621a75a6c8SChristoph Lameter 	if (buffer + maxlen < p + l + 1)
18631a75a6c8SChristoph Lameter 		return -ENOSPC;
18641a75a6c8SChristoph Lameter 
18651a75a6c8SChristoph Lameter 	strcpy(p, policy_types[mode]);
18661a75a6c8SChristoph Lameter 	p += l;
18671a75a6c8SChristoph Lameter 
18681a75a6c8SChristoph Lameter 	if (!nodes_empty(nodes)) {
18691a75a6c8SChristoph Lameter 		if (buffer + maxlen < p + 2)
18701a75a6c8SChristoph Lameter 			return -ENOSPC;
18711a75a6c8SChristoph Lameter 		*p++ = '=';
18721a75a6c8SChristoph Lameter 		p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
18731a75a6c8SChristoph Lameter 	}
18741a75a6c8SChristoph Lameter 	return p - buffer;
18751a75a6c8SChristoph Lameter }
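
/*
 * Example output (illustrative): an interleave policy over nodes 0-3
 * is rendered as "interleave=0-3", a preferred policy for node 1 as
 * "prefer=1", and the default policy as plain "default" with no
 * nodelist appended.
 */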
18761a75a6c8SChristoph Lameter 
18771a75a6c8SChristoph Lameter struct numa_maps {
18781a75a6c8SChristoph Lameter 	unsigned long pages;
18791a75a6c8SChristoph Lameter 	unsigned long anon;
1880397874dfSChristoph Lameter 	unsigned long active;
1881397874dfSChristoph Lameter 	unsigned long writeback;
18821a75a6c8SChristoph Lameter 	unsigned long mapcount_max;
1883397874dfSChristoph Lameter 	unsigned long dirty;
1884397874dfSChristoph Lameter 	unsigned long swapcache;
18851a75a6c8SChristoph Lameter 	unsigned long node[MAX_NUMNODES];
18861a75a6c8SChristoph Lameter };
18871a75a6c8SChristoph Lameter 
1888397874dfSChristoph Lameter static void gather_stats(struct page *page, void *private, int pte_dirty)
18891a75a6c8SChristoph Lameter {
18901a75a6c8SChristoph Lameter 	struct numa_maps *md = private;
18911a75a6c8SChristoph Lameter 	int count = page_mapcount(page);
18921a75a6c8SChristoph Lameter 
18931a75a6c8SChristoph Lameter 	md->pages++;
1894397874dfSChristoph Lameter 	if (pte_dirty || PageDirty(page))
1895397874dfSChristoph Lameter 		md->dirty++;
1896397874dfSChristoph Lameter 
1897397874dfSChristoph Lameter 	if (PageSwapCache(page))
1898397874dfSChristoph Lameter 		md->swapcache++;
1899397874dfSChristoph Lameter 
1900397874dfSChristoph Lameter 	if (PageActive(page))
1901397874dfSChristoph Lameter 		md->active++;
1902397874dfSChristoph Lameter 
1903397874dfSChristoph Lameter 	if (PageWriteback(page))
1904397874dfSChristoph Lameter 		md->writeback++;
19051a75a6c8SChristoph Lameter 
19061a75a6c8SChristoph Lameter 	if (PageAnon(page))
19071a75a6c8SChristoph Lameter 		md->anon++;
19081a75a6c8SChristoph Lameter 
1909397874dfSChristoph Lameter 	if (count > md->mapcount_max)
1910397874dfSChristoph Lameter 		md->mapcount_max = count;
1911397874dfSChristoph Lameter 
19121a75a6c8SChristoph Lameter 	md->node[page_to_nid(page)]++;
19131a75a6c8SChristoph Lameter }
19141a75a6c8SChristoph Lameter 
19157f709ed0SAndrew Morton #ifdef CONFIG_HUGETLB_PAGE
1916397874dfSChristoph Lameter static void check_huge_range(struct vm_area_struct *vma,
1917397874dfSChristoph Lameter 		unsigned long start, unsigned long end,
1918397874dfSChristoph Lameter 		struct numa_maps *md)
1919397874dfSChristoph Lameter {
1920397874dfSChristoph Lameter 	unsigned long addr;
1921397874dfSChristoph Lameter 	struct page *page;
1922397874dfSChristoph Lameter 
1923397874dfSChristoph Lameter 	for (addr = start; addr < end; addr += HPAGE_SIZE) {
1924397874dfSChristoph Lameter 		pte_t *ptep = huge_pte_offset(vma->vm_mm, addr & HPAGE_MASK);
1925397874dfSChristoph Lameter 		pte_t pte;
1926397874dfSChristoph Lameter 
1927397874dfSChristoph Lameter 		if (!ptep)
1928397874dfSChristoph Lameter 			continue;
1929397874dfSChristoph Lameter 
1930397874dfSChristoph Lameter 		pte = *ptep;
1931397874dfSChristoph Lameter 		if (pte_none(pte))
1932397874dfSChristoph Lameter 			continue;
1933397874dfSChristoph Lameter 
1934397874dfSChristoph Lameter 		page = pte_page(pte);
1935397874dfSChristoph Lameter 		if (!page)
1936397874dfSChristoph Lameter 			continue;
1937397874dfSChristoph Lameter 
1938397874dfSChristoph Lameter 		gather_stats(page, md, pte_dirty(*ptep));
1939397874dfSChristoph Lameter 	}
1940397874dfSChristoph Lameter }
19417f709ed0SAndrew Morton #else
19427f709ed0SAndrew Morton static inline void check_huge_range(struct vm_area_struct *vma,
19437f709ed0SAndrew Morton 		unsigned long start, unsigned long end,
19447f709ed0SAndrew Morton 		struct numa_maps *md)
19457f709ed0SAndrew Morton {
19467f709ed0SAndrew Morton }
19477f709ed0SAndrew Morton #endif
1948397874dfSChristoph Lameter 
19491a75a6c8SChristoph Lameter int show_numa_map(struct seq_file *m, void *v)
19501a75a6c8SChristoph Lameter {
195199f89551SEric W. Biederman 	struct proc_maps_private *priv = m->private;
19521a75a6c8SChristoph Lameter 	struct vm_area_struct *vma = v;
19531a75a6c8SChristoph Lameter 	struct numa_maps *md;
1954397874dfSChristoph Lameter 	struct file *file = vma->vm_file;
1955397874dfSChristoph Lameter 	struct mm_struct *mm = vma->vm_mm;
1956480eccf9SLee Schermerhorn 	struct mempolicy *pol;
19571a75a6c8SChristoph Lameter 	int n;
19581a75a6c8SChristoph Lameter 	char buffer[50];
19591a75a6c8SChristoph Lameter 
1960397874dfSChristoph Lameter 	if (!mm)
19611a75a6c8SChristoph Lameter 		return 0;
19621a75a6c8SChristoph Lameter 
19631a75a6c8SChristoph Lameter 	md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
19641a75a6c8SChristoph Lameter 	if (!md)
19651a75a6c8SChristoph Lameter 		return 0;
19661a75a6c8SChristoph Lameter 
1967480eccf9SLee Schermerhorn 	pol = get_vma_policy(priv->task, vma, vma->vm_start);
1968480eccf9SLee Schermerhorn 	mpol_to_str(buffer, sizeof(buffer), pol);
1969480eccf9SLee Schermerhorn 	/*
1970480eccf9SLee Schermerhorn 	 * unref shared or other task's mempolicy
1971480eccf9SLee Schermerhorn 	 */
1972480eccf9SLee Schermerhorn 	if (pol != &default_policy && pol != current->mempolicy)
1973480eccf9SLee Schermerhorn 		__mpol_free(pol);
19741a75a6c8SChristoph Lameter 
1975397874dfSChristoph Lameter 	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
1976397874dfSChristoph Lameter 
1977397874dfSChristoph Lameter 	if (file) {
1978397874dfSChristoph Lameter 		seq_printf(m, " file=");
1979c32c2f63SJan Blunck 		seq_path(m, &file->f_path, "\n\t= ");
1980397874dfSChristoph Lameter 	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
1981397874dfSChristoph Lameter 		seq_printf(m, " heap");
1982397874dfSChristoph Lameter 	} else if (vma->vm_start <= mm->start_stack &&
1983397874dfSChristoph Lameter 			vma->vm_end >= mm->start_stack) {
1984397874dfSChristoph Lameter 		seq_printf(m, " stack");
1985397874dfSChristoph Lameter 	}
1986397874dfSChristoph Lameter 
1987397874dfSChristoph Lameter 	if (is_vm_hugetlb_page(vma)) {
1988397874dfSChristoph Lameter 		check_huge_range(vma, vma->vm_start, vma->vm_end, md);
1989397874dfSChristoph Lameter 		seq_printf(m, " huge");
1990397874dfSChristoph Lameter 	} else {
1991397874dfSChristoph Lameter 		check_pgd_range(vma, vma->vm_start, vma->vm_end,
199256bbd65dSChristoph Lameter 			&node_states[N_HIGH_MEMORY], MPOL_MF_STATS, md);
1993397874dfSChristoph Lameter 	}
1994397874dfSChristoph Lameter 
1995397874dfSChristoph Lameter 	if (!md->pages)
1996397874dfSChristoph Lameter 		goto out;
19971a75a6c8SChristoph Lameter 
19981a75a6c8SChristoph Lameter 	if (md->anon)
19991a75a6c8SChristoph Lameter 		seq_printf(m, " anon=%lu", md->anon);
20001a75a6c8SChristoph Lameter 
2001397874dfSChristoph Lameter 	if (md->dirty)
2002397874dfSChristoph Lameter 		seq_printf(m, " dirty=%lu", md->dirty);
2003397874dfSChristoph Lameter 
2004397874dfSChristoph Lameter 	if (md->pages != md->anon && md->pages != md->dirty)
2005397874dfSChristoph Lameter 		seq_printf(m, " mapped=%lu", md->pages);
2006397874dfSChristoph Lameter 
2007397874dfSChristoph Lameter 	if (md->mapcount_max > 1)
2008397874dfSChristoph Lameter 		seq_printf(m, " mapmax=%lu", md->mapcount_max);
2009397874dfSChristoph Lameter 
2010397874dfSChristoph Lameter 	if (md->swapcache)
2011397874dfSChristoph Lameter 		seq_printf(m, " swapcache=%lu", md->swapcache);
2012397874dfSChristoph Lameter 
2013397874dfSChristoph Lameter 	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
2014397874dfSChristoph Lameter 		seq_printf(m, " active=%lu", md->active);
2015397874dfSChristoph Lameter 
2016397874dfSChristoph Lameter 	if (md->writeback)
2017397874dfSChristoph Lameter 		seq_printf(m, " writeback=%lu", md->writeback);
2018397874dfSChristoph Lameter 
201956bbd65dSChristoph Lameter 	for_each_node_state(n, N_HIGH_MEMORY)
20201a75a6c8SChristoph Lameter 		if (md->node[n])
20211a75a6c8SChristoph Lameter 			seq_printf(m, " N%d=%lu", n, md->node[n]);
2022397874dfSChristoph Lameter out:
20231a75a6c8SChristoph Lameter 	seq_putc(m, '\n');
20241a75a6c8SChristoph Lameter 	kfree(md);
20251a75a6c8SChristoph Lameter 
20261a75a6c8SChristoph Lameter 	if (m->count < m->size)
202799f89551SEric W. Biederman 		m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
20281a75a6c8SChristoph Lameter 	return 0;
20291a75a6c8SChristoph Lameter }
2030