/* xref: /openbmc/linux/mm/mempolicy.c (revision c32c2f63a9d6c953aaf168c0b2551da9734f76d2) */
/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a process counter
 *                is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case node -1 here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *		  in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
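
/*
 * Illustrative userspace sketch (not part of this file): how the four
 * policies above are requested through the syscall interface.  Assumes
 * <numaif.h> from libnuma; "buf" and "length" are placeholder names and
 * error handling is omitted.
 *
 *	#include <numaif.h>
 *
 *	unsigned long both = (1UL << 0) | (1UL << 1);	// nodes 0 and 1
 *	unsigned long node0 = 1UL << 0;
 *
 *	// process policy: interleave across nodes 0 and 1
 *	set_mempolicy(MPOL_INTERLEAVE, &both, 8 * sizeof(both));
 *
 *	// VMA policy: restrict one mapping to node 0 only
 *	mbind(buf, length, MPOL_BIND, &node0, 8 * sizeof(node0), 0);
 *
 *	// back to default (local) allocation; needs an empty nodemask
 *	set_mempolicy(MPOL_DEFAULT, NULL, 0);
 */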

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
   could replace all the switch()es with a mempolicy_ops structure.
*/

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
#define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2)		/* Gather statistics */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.policy = MPOL_DEFAULT,
};

static void mpol_rebind_policy(struct mempolicy *pol,
                               const nodemask_t *newmask);

/* Do sanity checking on a policy */
static int mpol_check_policy(int mode, nodemask_t *nodes)
{
	int was_empty, is_empty;

	if (!nodes)
		return 0;

	/*
	 * "Contextualize" the in-coming nodemask for cpusets:
	 * Remember whether the in-coming nodemask was empty. If not,
	 * restrict the nodes to the allowed nodes in the cpuset.
	 * This is guaranteed to be a subset of nodes with memory.
	 */
	cpuset_update_task_memory_state();
	is_empty = was_empty = nodes_empty(*nodes);
	if (!was_empty) {
		nodes_and(*nodes, *nodes, cpuset_current_mems_allowed);
		is_empty = nodes_empty(*nodes);	/* after "contextualization" */
	}

	switch (mode) {
	case MPOL_DEFAULT:
		/*
		 * require caller to specify an empty nodemask
		 * before "contextualization"
		 */
		if (!was_empty)
			return -EINVAL;
		break;
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
		/*
		 * require at least 1 valid node after "contextualization"
		 */
		if (is_empty)
			return -EINVAL;
		break;
	case MPOL_PREFERRED:
		/*
		 * Did caller specify invalid nodes?
		 * Don't silently accept this as "local allocation".
		 */
		if (!was_empty && is_empty)
			return -EINVAL;
		break;
	}
	return 0;
}
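
/*
 * Worked example of the "contextualization" above (hypothetical masks):
 * with cpuset_current_mems_allowed = {0,1}, an incoming MPOL_BIND mask of
 * {1,2} is narrowed to {1} and accepted, while {2,3} narrows to the empty
 * set and fails with -EINVAL.  MPOL_PREFERRED with {2,3} is rejected the
 * same way instead of being silently treated as local allocation.
 */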

/* Generate a custom zonelist for the BIND policy. */
static struct zonelist *bind_zonelist(nodemask_t *nodes)
{
	struct zonelist *zl;
	int num, max, nd;
	enum zone_type k;

	max = 1 + MAX_NR_ZONES * nodes_weight(*nodes);
	max++;			/* space for zlcache_ptr (see mmzone.h) */
	zl = kmalloc(sizeof(struct zone *) * max, GFP_KERNEL);
	if (!zl)
		return ERR_PTR(-ENOMEM);
	zl->zlcache_ptr = NULL;
	num = 0;
	/* First put in the highest zones from all nodes, then all the next
	   lower zones etc. Avoid empty zones because the memory allocator
	   doesn't like them. If you implement node hot removal you
	   have to fix that. */
	k = MAX_NR_ZONES - 1;
	while (1) {
		for_each_node_mask(nd, *nodes) {
			struct zone *z = &NODE_DATA(nd)->node_zones[k];
			if (z->present_pages > 0)
				zl->zones[num++] = z;
		}
		if (k == 0)
			break;
		k--;
	}
	if (num == 0) {
		kfree(zl);
		return ERR_PTR(-EINVAL);
	}
	zl->zones[num] = NULL;
	return zl;
}
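
/*
 * Example of the resulting order (hypothetical two-node machine with
 * HIGHMEM): for nodes = {0,1} the list built above is zone-major, highest
 * zone first:
 *
 *	node0/HIGHMEM, node1/HIGHMEM, node0/NORMAL, node1/NORMAL,
 *	node0/DMA, node1/DMA, NULL
 *
 * with any empty zones skipped.
 */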

/* Create a new policy */
static struct mempolicy *mpol_new(int mode, nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d nodes[0] %lx\n",
		 mode, nodes ? nodes_addr(*nodes)[0] : -1);

	if (mode == MPOL_DEFAULT)
		return NULL;
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	switch (mode) {
	case MPOL_INTERLEAVE:
		policy->v.nodes = *nodes;
		if (nodes_weight(policy->v.nodes) == 0) {
			kmem_cache_free(policy_cache, policy);
			return ERR_PTR(-EINVAL);
		}
		break;
	case MPOL_PREFERRED:
		policy->v.preferred_node = first_node(*nodes);
		if (policy->v.preferred_node >= MAX_NUMNODES)
			policy->v.preferred_node = -1;
		break;
	case MPOL_BIND:
		policy->v.zonelist = bind_zonelist(nodes);
		if (IS_ERR(policy->v.zonelist)) {
			void *error_code = policy->v.zonelist;
			kmem_cache_free(policy_cache, policy);
			return error_code;
		}
		break;
	}
	policy->policy = mode;
	policy->cpuset_mems_allowed = cpuset_mems_allowed(current);
	return policy;
}

static void gather_stats(struct page *, void *, int pte_dirty);
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

/* Scan through pages, checking if they follow certain conditions. */
static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pte_t *orig_pte;
	pte_t *pte;
	spinlock_t *ptl;

	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		struct page *page;
		int nid;

		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * The check for PageReserved here is important to avoid
		 * handling zero pages and other pages that may have been
		 * marked special by the system.
		 *
		 * If the PageReserved would not be checked here then f.e.
		 * the location of the zero page could have an influence
		 * on MPOL_MF_STRICT, zero pages would be counted for
		 * the per node stats, and there would be useless attempts
		 * to put zero pages on the migration list.
		 */
		if (PageReserved(page))
			continue;
		nid = page_to_nid(page);
		if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
			continue;

		if (flags & MPOL_MF_STATS)
			gather_stats(page, private, pte_dirty(*pte));
		else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
			migrate_page_add(page, private, flags);
		else
			break;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return addr != end;
}

static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		if (check_pte_range(vma, pmd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		if (check_pmd_range(vma, pud, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static inline int check_pgd_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		if (check_pud_range(vma, pgd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pgd++, addr = next, addr != end);
	return 0;
}

/*
 * Check if all pages in a range are on a set of nodes.
 * If pagelist != NULL then isolate pages from the LRU and
 * put them on the pagelist.
 */
static struct vm_area_struct *
check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		const nodemask_t *nodes, unsigned long flags, void *private)
{
	int err;
	struct vm_area_struct *first, *vma, *prev;

	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {

		err = migrate_prep();
		if (err)
			return ERR_PTR(err);
	}

	first = find_vma(mm, start);
	if (!first)
		return ERR_PTR(-EFAULT);
	prev = NULL;
	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
			if (!vma->vm_next && vma->vm_end < end)
				return ERR_PTR(-EFAULT);
			if (prev && prev->vm_end < vma->vm_start)
				return ERR_PTR(-EFAULT);
		}
		if (!is_vm_hugetlb_page(vma) &&
		    ((flags & MPOL_MF_STRICT) ||
		     ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
				vma_migratable(vma)))) {
			unsigned long endvma = vma->vm_end;

			if (endvma > end)
				endvma = end;
			if (vma->vm_start > start)
				start = vma->vm_start;
			err = check_pgd_range(vma, start, endvma, nodes,
						flags, private);
			if (err) {
				first = ERR_PTR(err);
				break;
			}
		}
		prev = vma;
	}
	return first;
}

/* Apply policy to a single VMA */
static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
{
	int err = 0;
	struct mempolicy *old = vma->vm_policy;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	if (vma->vm_ops && vma->vm_ops->set_policy)
		err = vma->vm_ops->set_policy(vma, new);
	if (!err) {
		mpol_get(new);
		vma->vm_policy = new;
		mpol_free(old);
	}
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, struct mempolicy *new)
{
	struct vm_area_struct *next;
	int err;

	err = 0;
	for (; vma && vma->vm_start < end; vma = next) {
		next = vma->vm_next;
		if (vma->vm_start < start)
			err = split_vma(vma->vm_mm, vma, start, 1);
		if (!err && vma->vm_end > end)
			err = split_vma(vma->vm_mm, vma, end, 0);
		if (!err)
			err = policy_vma(vma, new);
		if (err)
			break;
	}
	return err;
}
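
/*
 * Example: for an mbind() range [start, end) that lies strictly inside a
 * single VMA, mbind_range() above splits twice and applies the policy only
 * to the middle piece:
 *
 *	before:  [vm_start ............................ vm_end)
 *	after:   [vm_start..start) [start....end) [end..vm_end)
 *	                            ^ new policy
 */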

/*
 * Update task->flags PF_MEMPOLICY bit: set iff non-default
 * mempolicy.  Allows more rapid checking of this (combined perhaps
 * with other PF_* flag bits) on memory allocation hot code paths.
 *
 * If called from outside this file, the task 'p' should -only- be
 * a newly forked child not yet visible on the task list, because
 * manipulating the task flags of a visible task is not safe.
 *
 * The above limitation is why this routine has the funny name
 * mpol_fix_fork_child_flag().
 *
 * It is also safe to call this with a task pointer of current,
 * which the static wrapper mpol_set_task_struct_flag() does,
 * for use within this file.
 */

void mpol_fix_fork_child_flag(struct task_struct *p)
{
	if (p->mempolicy)
		p->flags |= PF_MEMPOLICY;
	else
		p->flags &= ~PF_MEMPOLICY;
}

static void mpol_set_task_struct_flag(void)
{
	mpol_fix_fork_child_flag(current);
}

/* Set the process memory policy */
static long do_set_mempolicy(int mode, nodemask_t *nodes)
{
	struct mempolicy *new;

	if (mpol_check_policy(mode, nodes))
		return -EINVAL;
	new = mpol_new(mode, nodes);
	if (IS_ERR(new))
		return PTR_ERR(new);
	mpol_free(current->mempolicy);
	current->mempolicy = new;
	mpol_set_task_struct_flag();
	if (new && new->policy == MPOL_INTERLEAVE)
		current->il_next = first_node(new->v.nodes);
	return 0;
}
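
/*
 * Note on the interleave state set above: after do_set_mempolicy(
 * MPOL_INTERLEAVE, {0,2}) (hypothetical mask), current->il_next starts at
 * node 0 and each interleaved allocation advances it through the mask
 * (0 -> 2 -> 0 -> ...); this is the "process counter" mentioned in the
 * header comment.
 */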

/* Fill a zone bitmap for a policy */
static void get_zonemask(struct mempolicy *p, nodemask_t *nodes)
{
	int i;

	nodes_clear(*nodes);
	switch (p->policy) {
	case MPOL_BIND:
		for (i = 0; p->v.zonelist->zones[i]; i++)
			node_set(zone_to_nid(p->v.zonelist->zones[i]),
				*nodes);
		break;
	case MPOL_DEFAULT:
		break;
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		/* or use current node instead of memory_map? */
		if (p->v.preferred_node < 0)
			*nodes = node_states[N_HIGH_MEMORY];
		else
			node_set(p->v.preferred_node, *nodes);
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p;
	int err;

	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	cpuset_update_task_memory_state();
	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		*nmask  = cpuset_current_mems_allowed;
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->policy == MPOL_INTERLEAVE) {
			*policy = current->il_next;
		} else {
			err = -EINVAL;
			goto out;
		}
	} else
		*policy = pol->policy;

	if (vma) {
		up_read(&current->mm->mmap_sem);
		vma = NULL;
	}

	err = 0;
	if (nmask)
		get_zonemask(pol, nmask);

 out:
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}
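
/*
 * Illustrative userspace sketch (assumes <numaif.h> from libnuma): asking
 * which node backs a given page, which takes the MPOL_F_NODE|MPOL_F_ADDR
 * path above:
 *
 *	int node = -1;
 *	get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR);
 *	// on success "node" holds the node id of the page at addr
 */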

#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1)
		isolate_lru_page(page, pagelist);
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
	return alloc_pages_node(node, GFP_HIGHUSER_MOVABLE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;

	nodes_clear(nmask);
	node_set(source, nmask);

	check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist))
		err = migrate_pages(&pagelist, new_node_page, dest);

	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	LIST_HEAD(pagelist);
	int busy = 0;
	int err = 0;
	nodemask_t tmp;

	down_read(&mm->mmap_sem);

	err = migrate_vmas(mm, from_nodes, to_nodes, flags);
	if (err)
		goto out;

/*
 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
 * bit in 'tmp', and return that <source, dest> pair for migration.
 * The pair of nodemasks 'to' and 'from' define the map.
 *
 * If no pair of bits is found that way, fallback to picking some
 * pair of 'source' and 'dest' bits that are not the same.  If the
 * 'source' and 'dest' bits are the same, this represents a node
 * that will be migrating to itself, so no pages need move.
 *
 * If no bits are left in 'tmp', or if all remaining bits left
 * in 'tmp' correspond to the same bit in 'to', return false
 * (nothing left to migrate).
 *
 * This lets us pick a pair of nodes to migrate between, such that
 * if possible the dest node is not already occupied by some other
 * source node, minimizing the risk of overloading the memory on a
 * node that would happen if we migrated incoming memory to a node
 * before migrating outgoing memory from that same node.
 *
 * A single scan of tmp is sufficient.  As we go, we remember the
 * most recent <s, d> pair that moved (s != d).  If we find a pair
 * that not only moved, but what's better, moved to an empty slot
 * (d is not set in tmp), then we break out then, with that pair.
 * Otherwise when we finish scanning tmp, we at least have the
 * most recent <s, d> pair that moved.  If we get all the way through
 * the scan of tmp without finding any node that moved, much less
 * moved to an empty node, then there is nothing left worth migrating.
 */

	tmp = *from_nodes;
	while (!nodes_empty(tmp)) {
		int s,d;
		int source = -1;
		int dest = 0;

		for_each_node_mask(s, tmp) {
			d = node_remap(s, *from_nodes, *to_nodes);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
				break;
		}
		if (source == -1)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;
		if (err < 0)
			break;
	}
out:
	up_read(&mm->mmap_sem);
	if (err < 0)
		return err;
	return busy;

}
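
/*
 * Worked example of the pair-picking loop above (hypothetical masks):
 * from = {0,1}, to = {2,3} remaps 0->2 and 1->3; neither destination is a
 * pending source, so the first scan already migrates 0->2, then 1->3.
 * For from = {0,1}, to = {1,2}, the pair 0->1 is passed over at first
 * because destination 1 is still a pending source; 1->2 runs first and
 * 0->1 second, so node 1 is drained before node 0's pages land on it.
 */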

/*
 * Allocate a new page for page migration based on vma policy.
 * Start assuming that page is mapped by vma pointed to by @private.
 * Search forward from there, if not.  N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
 */
static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	struct vm_area_struct *vma = (struct vm_area_struct *)private;
	unsigned long uninitialized_var(address);

	while (vma) {
		address = page_address_in_vma(page, vma);
		if (address != -EFAULT)
			break;
		vma = vma->vm_next;
	}

	/*
	 * if !vma, alloc_page_vma() will use task or system default policy
	 */
	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
}
#else

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
}

int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	return -ENOSYS;
}

static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	return NULL;
}
#endif

static long do_mbind(unsigned long start, unsigned long len,
		     unsigned long mode, nodemask_t *nmask,
		     unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	struct mempolicy *new;
	unsigned long end;
	int err;
	LIST_HEAD(pagelist);

	if ((flags & ~(unsigned long)(MPOL_MF_STRICT |
				      MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
	    || mode > MPOL_MAX)
		return -EINVAL;
	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	if (start & ~PAGE_MASK)
		return -EINVAL;

	if (mode == MPOL_DEFAULT)
		flags &= ~MPOL_MF_STRICT;

	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
	end = start + len;

	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;

	if (mpol_check_policy(mode, nmask))
		return -EINVAL;

	new = mpol_new(mode, nmask);
	if (IS_ERR(new))
		return PTR_ERR(new);

	/*
	 * If we are using the default policy then operation
	 * on discontinuous address spaces is okay after all
	 */
	if (!new)
		flags |= MPOL_MF_DISCONTIG_OK;

	pr_debug("mbind %lx-%lx mode:%ld nodes:%lx\n",start,start+len,
		 mode, nmask ? nodes_addr(*nmask)[0] : -1);

	down_write(&mm->mmap_sem);
	vma = check_range(mm, start, end, nmask,
			  flags | MPOL_MF_INVERT, &pagelist);

	err = PTR_ERR(vma);
	if (!IS_ERR(vma)) {
		int nr_failed = 0;

		err = mbind_range(vma, start, end, new);

		if (!list_empty(&pagelist))
			nr_failed = migrate_pages(&pagelist, new_vma_page,
						(unsigned long)vma);

		if (!err && nr_failed && (flags & MPOL_MF_STRICT))
			err = -EIO;
	}

	up_write(&mm->mmap_sem);
	mpol_free(new);
	return err;
}
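
/*
 * Note on the MPOL_MF_INVERT use above: check_range() is asked for pages
 * that are NOT on the requested nodes, so with MPOL_MF_MOVE the pagelist
 * collects exactly the misplaced pages, which migrate_pages() then places
 * according to the new policy via new_vma_page().
 */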

/*
 * User space interface with variable sized bitmaps for nodelists.
 */

/* Copy a node mask from user space. */
static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
		     unsigned long maxnode)
{
	unsigned long k;
	unsigned long nlongs;
	unsigned long endmask;

	--maxnode;
	nodes_clear(*nodes);
	if (maxnode == 0 || !nmask)
		return 0;
	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
		return -EINVAL;

	nlongs = BITS_TO_LONGS(maxnode);
	if ((maxnode % BITS_PER_LONG) == 0)
		endmask = ~0UL;
	else
		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;

	/* When the user specified more nodes than supported just check
	   if the unsupported part is all zero. */
	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
		if (nlongs > PAGE_SIZE/sizeof(long))
			return -EINVAL;
		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
			unsigned long t;
			if (get_user(t, nmask + k))
				return -EFAULT;
			if (k == nlongs - 1) {
				if (t & endmask)
					return -EINVAL;
			} else if (t)
				return -EINVAL;
		}
		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
		endmask = ~0UL;
	}

	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
		return -EFAULT;
	nodes_addr(*nodes)[nlongs-1] &= endmask;
	return 0;
}
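
/*
 * Worked example for the endmask logic above: a caller passing maxnode = 5
 * covers bit positions 0..3 after the --maxnode: maxnode becomes 4,
 * nlongs = BITS_TO_LONGS(4) = 1 and endmask = (1UL << 4) - 1 = 0xf, so
 * only node bits 0-3 survive the final "&= endmask".
 */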

/* Copy a kernel node mask to user space */
static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
			      nodemask_t *nodes)
{
	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);

	if (copy > nbytes) {
		if (copy > PAGE_SIZE)
			return -EINVAL;
		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
			return -EFAULT;
		copy = nbytes;
	}
	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
}

asmlinkage long sys_mbind(unsigned long start, unsigned long len,
			unsigned long mode,
			unsigned long __user *nmask, unsigned long maxnode,
			unsigned flags)
{
	nodemask_t nodes;
	int err;

	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_mbind(start, len, mode, &nodes, flags);
}

/* Set the process memory policy */
asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
		unsigned long maxnode)
{
	int err;
	nodemask_t nodes;

	if (mode < 0 || mode > MPOL_MAX)
		return -EINVAL;
	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_set_mempolicy(mode, &nodes);
}

asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
		const unsigned long __user *old_nodes,
		const unsigned long __user *new_nodes)
{
	struct mm_struct *mm;
	struct task_struct *task;
	nodemask_t old;
	nodemask_t new;
	nodemask_t task_nodes;
	int err;

	err = get_nodes(&old, old_nodes, maxnode);
	if (err)
		return err;

	err = get_nodes(&new, new_nodes, maxnode);
	if (err)
		return err;

	/* Find the mm_struct */
	read_lock(&tasklist_lock);
	task = pid ? find_task_by_vpid(pid) : current;
	if (!task) {
		read_unlock(&tasklist_lock);
		return -ESRCH;
	}
	mm = get_task_mm(task);
	read_unlock(&tasklist_lock);

	if (!mm)
		return -EINVAL;

	/*
	 * Check if this process has the right to modify the specified
	 * process. The right exists if the process has administrative
	 * capabilities, superuser privileges or the same
	 * userid as the target process.
	 */
	if ((current->euid != task->suid) && (current->euid != task->uid) &&
	    (current->uid != task->suid) && (current->uid != task->uid) &&
	    !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out;
	}

	task_nodes = cpuset_mems_allowed(task);
	/* Is the user allowed to access the target nodes? */
	if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out;
	}

	if (!nodes_subset(new, node_states[N_HIGH_MEMORY])) {
		err = -EINVAL;
		goto out;
	}

	err = security_task_movememory(task);
	if (err)
		goto out;

	err = do_migrate_pages(mm, &old, &new,
		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
out:
	mmput(mm);
	return err;
}
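
/*
 * Illustrative userspace sketch (assumes <numaif.h> from libnuma): moving a
 * process's pages from node 0 to node 1, which enters the syscall above:
 *
 *	unsigned long from = 1UL << 0, to = 1UL << 1;
 *	long left = migrate_pages(pid, 8 * sizeof(from), &from, &to);
 *	// negative on error, otherwise the number of pages not moved
 */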


/* Retrieve NUMA policy */
asmlinkage long sys_get_mempolicy(int __user *policy,
				unsigned long __user *nmask,
				unsigned long maxnode,
				unsigned long addr, unsigned long flags)
{
	int err;
	int uninitialized_var(pval);
	nodemask_t nodes;

	if (nmask != NULL && maxnode < MAX_NUMNODES)
		return -EINVAL;

	err = do_get_mempolicy(&pval, &nodes, addr, flags);

	if (err)
		return err;

	if (policy && put_user(pval, policy))
		return -EFAULT;

	if (nmask)
		err = copy_nodes_to_user(nmask, maxnode, &nodes);

	return err;
}

#ifdef CONFIG_COMPAT

asmlinkage long compat_sys_get_mempolicy(int __user *policy,
				     compat_ulong_t __user *nmask,
				     compat_ulong_t maxnode,
				     compat_ulong_t addr, compat_ulong_t flags)
{
	long err;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask)
		nm = compat_alloc_user_space(alloc_size);

	err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);

	if (!err && nmask) {
		err = copy_from_user(bm, nm, alloc_size);
		/* ensure entire bitmap is zeroed */
		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
		err |= compat_put_bitmap(nmask, bm, nr_bits);
	}

	return err;
}

asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
				     compat_ulong_t maxnode)
{
	long err = 0;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		err = compat_get_bitmap(bm, nmask, nr_bits);
		nm = compat_alloc_user_space(alloc_size);
		err |= copy_to_user(nm, bm, alloc_size);
	}

	if (err)
		return -EFAULT;

	return sys_set_mempolicy(mode, nm, nr_bits+1);
}

asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
			     compat_ulong_t mode, compat_ulong_t __user *nmask,
			     compat_ulong_t maxnode, compat_ulong_t flags)
{
	long err = 0;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	nodemask_t bm;

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
		nm = compat_alloc_user_space(alloc_size);
		err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
11161da177e4SLinus Torvalds 	}
11171da177e4SLinus Torvalds 
11181da177e4SLinus Torvalds 	if (err)
11191da177e4SLinus Torvalds 		return -EFAULT;
11201da177e4SLinus Torvalds 
11211da177e4SLinus Torvalds 	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
11221da177e4SLinus Torvalds }
11231da177e4SLinus Torvalds 
11241da177e4SLinus Torvalds #endif
11251da177e4SLinus Torvalds 
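/*
 * Hedged user-space sketch (editorial example, not part of this file):
 * the native mbind(2) call that the compat wrappers above marshal
 * their bitmaps into -- interleaving one anonymous mapping across
 * nodes 0 and 1.  Error handling abbreviated; the maxnode value is an
 * assumption sized to cover a 64-bit mask.
 */
#if 0	/* illustrative only, never compiled */
#include <numaif.h>		/* mbind(), MPOL_INTERLEAVE */
#include <sys/mman.h>

void example(void)
{
	unsigned long mask = (1UL << 0) | (1UL << 1);	/* nodes 0,1 */
	size_t len = 8 * 4096;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p != MAP_FAILED)
		mbind(p, len, MPOL_INTERLEAVE, &mask,
		      8 * sizeof(mask) + 1, 0);
}
#endif
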
1126480eccf9SLee Schermerhorn /*
1127480eccf9SLee Schermerhorn  * get_vma_policy(@task, @vma, @addr)
1128480eccf9SLee Schermerhorn  * @task - task for fallback if vma policy == default
1129480eccf9SLee Schermerhorn  * @vma   - virtual memory area whose policy is sought
1130480eccf9SLee Schermerhorn  * @addr  - address in @vma for shared policy lookup
1131480eccf9SLee Schermerhorn  *
1132480eccf9SLee Schermerhorn  * Returns effective policy for a VMA at specified address.
1133480eccf9SLee Schermerhorn  * Falls back to @task or system default policy, as necessary.
1134480eccf9SLee Schermerhorn  * The returned policy carries an extra reference if it is a shared
1135480eccf9SLee Schermerhorn  * policy, a vma policy, or another task's policy [show_numa_maps()
1136480eccf9SLee Schermerhorn  * can pass @task != current].  In those cases it is the caller's
1137480eccf9SLee Schermerhorn  * responsibility to drop the reference.
1138480eccf9SLee Schermerhorn  */
113948fce342SChristoph Lameter static struct mempolicy * get_vma_policy(struct task_struct *task,
114048fce342SChristoph Lameter 		struct vm_area_struct *vma, unsigned long addr)
11411da177e4SLinus Torvalds {
11426e21c8f1SChristoph Lameter 	struct mempolicy *pol = task->mempolicy;
1143480eccf9SLee Schermerhorn 	int shared_pol = 0;
11441da177e4SLinus Torvalds 
11451da177e4SLinus Torvalds 	if (vma) {
1146480eccf9SLee Schermerhorn 		if (vma->vm_ops && vma->vm_ops->get_policy) {
11471da177e4SLinus Torvalds 			pol = vma->vm_ops->get_policy(vma, addr);
1148480eccf9SLee Schermerhorn 			shared_pol = 1;	/* if pol non-NULL, add ref below */
1149480eccf9SLee Schermerhorn 		} else if (vma->vm_policy &&
11501da177e4SLinus Torvalds 				vma->vm_policy->policy != MPOL_DEFAULT)
11511da177e4SLinus Torvalds 			pol = vma->vm_policy;
11521da177e4SLinus Torvalds 	}
11531da177e4SLinus Torvalds 	if (!pol)
11541da177e4SLinus Torvalds 		pol = &default_policy;
1155480eccf9SLee Schermerhorn 	else if (!shared_pol && pol != current->mempolicy)
1156480eccf9SLee Schermerhorn 		mpol_get(pol);	/* vma or other task's policy */
11571da177e4SLinus Torvalds 	return pol;
11581da177e4SLinus Torvalds }
11591da177e4SLinus Torvalds 
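/*
 * Hedged usage sketch (editorial): the unref discipline the comment
 * above demands of get_vma_policy() callers; show_numa_map() at the
 * bottom of this file follows the same pattern.
 */
#if 0	/* illustrative only, never compiled */
	struct mempolicy *pol = get_vma_policy(current, vma, addr);

	/* ... use pol for the allocation decision ... */

	if (pol != &default_policy && pol != current->mempolicy)
		__mpol_free(pol);	/* drop the extra reference */
#endif
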
11601da177e4SLinus Torvalds /* Return a zonelist representing a mempolicy */
1161dd0fc66fSAl Viro static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy)
11621da177e4SLinus Torvalds {
11631da177e4SLinus Torvalds 	int nd;
11641da177e4SLinus Torvalds 
11651da177e4SLinus Torvalds 	switch (policy->policy) {
11661da177e4SLinus Torvalds 	case MPOL_PREFERRED:
11671da177e4SLinus Torvalds 		nd = policy->v.preferred_node;
11681da177e4SLinus Torvalds 		if (nd < 0)
11691da177e4SLinus Torvalds 			nd = numa_node_id();
11701da177e4SLinus Torvalds 		break;
11711da177e4SLinus Torvalds 	case MPOL_BIND:
11721da177e4SLinus Torvalds 		/* Lower zones don't get a policy applied */
11731da177e4SLinus Torvalds 		/* Careful: current->mems_allowed might have moved */
117419655d34SChristoph Lameter 		if (gfp_zone(gfp) >= policy_zone)
11751da177e4SLinus Torvalds 			if (cpuset_zonelist_valid_mems_allowed(policy->v.zonelist))
11761da177e4SLinus Torvalds 				return policy->v.zonelist;
11771da177e4SLinus Torvalds 		/*FALL THROUGH*/
11781da177e4SLinus Torvalds 	case MPOL_INTERLEAVE: /* should not happen */
11791da177e4SLinus Torvalds 	case MPOL_DEFAULT:
11801da177e4SLinus Torvalds 		nd = numa_node_id();
11811da177e4SLinus Torvalds 		break;
11821da177e4SLinus Torvalds 	default:
11831da177e4SLinus Torvalds 		nd = 0;
11841da177e4SLinus Torvalds 		BUG();
11851da177e4SLinus Torvalds 	}
1186af4ca457SAl Viro 	return NODE_DATA(nd)->node_zonelists + gfp_zone(gfp);
11871da177e4SLinus Torvalds }
11881da177e4SLinus Torvalds 
11891da177e4SLinus Torvalds /* Do dynamic interleaving for a process */
11901da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy)
11911da177e4SLinus Torvalds {
11921da177e4SLinus Torvalds 	unsigned nid, next;
11931da177e4SLinus Torvalds 	struct task_struct *me = current;
11941da177e4SLinus Torvalds 
11951da177e4SLinus Torvalds 	nid = me->il_next;
1196dfcd3c0dSAndi Kleen 	next = next_node(nid, policy->v.nodes);
11971da177e4SLinus Torvalds 	if (next >= MAX_NUMNODES)
1198dfcd3c0dSAndi Kleen 		next = first_node(policy->v.nodes);
11991da177e4SLinus Torvalds 	me->il_next = next;
12001da177e4SLinus Torvalds 	return nid;
12011da177e4SLinus Torvalds }
12021da177e4SLinus Torvalds 
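/*
 * Hedged stand-alone sketch (editorial) of the round-robin above:
 * with allowed nodes {0,2,5} and il_next starting at 0, successive
 * calls return 0, 2, 5, 0, 2, ...  MAXN stands in for MAX_NUMNODES;
 * a non-empty mask of at most 64 nodes is assumed.
 */
#if 0	/* illustrative only, never compiled */
#define MAXN 8
static unsigned il_next;		/* stands in for me->il_next */

static unsigned demo_interleave(unsigned long mask)
{
	unsigned nid = il_next, next = nid + 1;

	while (next < MAXN && !(mask & (1UL << next)))
		next++;				/* next_node() */
	if (next >= MAXN)			/* wrap: first_node() */
		for (next = 0; !(mask & (1UL << next)); next++)
			;
	il_next = next;
	return nid;
}
#endif
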
1203dc85da15SChristoph Lameter /*
1204dc85da15SChristoph Lameter  * Depending on the memory policy provide a node from which to allocate the
1205dc85da15SChristoph Lameter  * next slab entry.
1206dc85da15SChristoph Lameter  */
1207dc85da15SChristoph Lameter unsigned slab_node(struct mempolicy *policy)
1208dc85da15SChristoph Lameter {
1209765c4507SChristoph Lameter 	int pol = policy ? policy->policy : MPOL_DEFAULT;
1210765c4507SChristoph Lameter 
1211765c4507SChristoph Lameter 	switch (pol) {
1212dc85da15SChristoph Lameter 	case MPOL_INTERLEAVE:
1213dc85da15SChristoph Lameter 		return interleave_nodes(policy);
1214dc85da15SChristoph Lameter 
1215dc85da15SChristoph Lameter 	case MPOL_BIND:
1216dc85da15SChristoph Lameter 		/*
1217dc85da15SChristoph Lameter 		 * Follow bind policy behavior and start allocation at the
1218dc85da15SChristoph Lameter 		 * first node.
1219dc85da15SChristoph Lameter 		 */
122089fa3024SChristoph Lameter 		return zone_to_nid(policy->v.zonelist->zones[0]);
1221dc85da15SChristoph Lameter 
1222dc85da15SChristoph Lameter 	case MPOL_PREFERRED:
1223dc85da15SChristoph Lameter 		if (policy->v.preferred_node >= 0)
1224dc85da15SChristoph Lameter 			return policy->v.preferred_node;
1225dc85da15SChristoph Lameter 		/* Fall through */
1226dc85da15SChristoph Lameter 
1227dc85da15SChristoph Lameter 	default:
1228dc85da15SChristoph Lameter 		return numa_node_id();
1229dc85da15SChristoph Lameter 	}
1230dc85da15SChristoph Lameter }
1231dc85da15SChristoph Lameter 
12321da177e4SLinus Torvalds /* Do static interleaving for a VMA with known offset. */
12331da177e4SLinus Torvalds static unsigned offset_il_node(struct mempolicy *pol,
12341da177e4SLinus Torvalds 		struct vm_area_struct *vma, unsigned long off)
12351da177e4SLinus Torvalds {
1236dfcd3c0dSAndi Kleen 	unsigned nnodes = nodes_weight(pol->v.nodes);
12371da177e4SLinus Torvalds 	unsigned target = (unsigned)off % nnodes;
12381da177e4SLinus Torvalds 	int c;
12391da177e4SLinus Torvalds 	int nid = -1;
12401da177e4SLinus Torvalds 
12411da177e4SLinus Torvalds 	c = 0;
12421da177e4SLinus Torvalds 	do {
1243dfcd3c0dSAndi Kleen 		nid = next_node(nid, pol->v.nodes);
12441da177e4SLinus Torvalds 		c++;
12451da177e4SLinus Torvalds 	} while (c <= target);
12461da177e4SLinus Torvalds 	return nid;
12471da177e4SLinus Torvalds }
12481da177e4SLinus Torvalds 
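/*
 * Hedged stand-alone sketch (editorial) of the walk above: with
 * pol->v.nodes = {1,3,4} (nnodes = 3) and off = 7, target = 7 % 3 = 1
 * and the loop lands on the second set bit, node 3.  Uses the GCC
 * popcount builtin; a non-empty mask of at most 64 nodes is assumed.
 */
#if 0	/* illustrative only, never compiled */
static unsigned demo_offset_il(unsigned long mask, unsigned long off)
{
	unsigned nnodes = __builtin_popcountl(mask);
	unsigned target = (unsigned)off % nnodes;
	int nid = -1, c = 0;

	do {
		do
			nid++;			/* next_node() */
		while (!(mask & (1UL << nid)));
		c++;
	} while (c <= target);
	return nid;				/* {1,3,4}, off 7 -> 3 */
}
#endif
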
12495da7ca86SChristoph Lameter /* Determine a node number for interleave */
12505da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol,
12515da7ca86SChristoph Lameter 		 struct vm_area_struct *vma, unsigned long addr, int shift)
12525da7ca86SChristoph Lameter {
12535da7ca86SChristoph Lameter 	if (vma) {
12545da7ca86SChristoph Lameter 		unsigned long off;
12555da7ca86SChristoph Lameter 
12563b98b087SNishanth Aravamudan 		/*
12573b98b087SNishanth Aravamudan 		 * for small pages, there is no difference between
12583b98b087SNishanth Aravamudan 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
12593b98b087SNishanth Aravamudan 		 * for huge pages, since vm_pgoff is in units of small
12603b98b087SNishanth Aravamudan 		 * pages, we need to shift off the always 0 bits to get
12613b98b087SNishanth Aravamudan 		 * a useful offset.
12623b98b087SNishanth Aravamudan 		 */
12633b98b087SNishanth Aravamudan 		BUG_ON(shift < PAGE_SHIFT);
12643b98b087SNishanth Aravamudan 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
12655da7ca86SChristoph Lameter 		off += (addr - vma->vm_start) >> shift;
12665da7ca86SChristoph Lameter 		return offset_il_node(pol, vma, off);
12675da7ca86SChristoph Lameter 	} else
12685da7ca86SChristoph Lameter 		return interleave_nodes(pol);
12695da7ca86SChristoph Lameter }
12705da7ca86SChristoph Lameter 
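/*
 * Hedged worked example (editorial): on x86_64 with 2MB huge pages
 * (HPAGE_SHIFT = 21, PAGE_SHIFT = 12), a mapping with vm_pgoff = 1024
 * faulting 4MB past vm_start gets off = (1024 >> 9) + (0x400000 >> 21)
 * = 2 + 2 = 4, i.e. the fifth huge page of the backing object.
 */
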
127100ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS
1272480eccf9SLee Schermerhorn /*
1273480eccf9SLee Schermerhorn  * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
1274480eccf9SLee Schermerhorn  * @vma = virtual memory area whose policy is sought
1275480eccf9SLee Schermerhorn  * @addr = address in @vma for shared policy lookup and interleave policy
1276480eccf9SLee Schermerhorn  * @gfp_flags = for requested zone
1277480eccf9SLee Schermerhorn  * @mpol = pointer to mempolicy pointer for reference counted 'BIND policy
1278480eccf9SLee Schermerhorn  *
1279480eccf9SLee Schermerhorn  * Returns a zonelist suitable for a huge page allocation.
1280480eccf9SLee Schermerhorn  * If the effective policy is 'BIND, returns pointer to policy's zonelist.
1281480eccf9SLee Schermerhorn  * If it is also a policy for which get_vma_policy() returns an extra
1282480eccf9SLee Schermerhorn  * reference, we must hold that reference until after allocation.
1283480eccf9SLee Schermerhorn  * In that case, return policy via @mpol so hugetlb allocation can drop
1284480eccf9SLee Schermerhorn  * the reference.  For non-'BIND referenced policies, we can/do drop the
1285480eccf9SLee Schermerhorn  * reference here, so the caller doesn't need to know about the special case
1286480eccf9SLee Schermerhorn  * for default and current task policy.
1287480eccf9SLee Schermerhorn  */
1288396faf03SMel Gorman struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
1289480eccf9SLee Schermerhorn 				gfp_t gfp_flags, struct mempolicy **mpol)
12905da7ca86SChristoph Lameter {
12915da7ca86SChristoph Lameter 	struct mempolicy *pol = get_vma_policy(current, vma, addr);
1292480eccf9SLee Schermerhorn 	struct zonelist *zl;
12935da7ca86SChristoph Lameter 
1294480eccf9SLee Schermerhorn 	*mpol = NULL;		/* probably no unref needed */
12955da7ca86SChristoph Lameter 	if (pol->policy == MPOL_INTERLEAVE) {
12965da7ca86SChristoph Lameter 		unsigned nid;
12975da7ca86SChristoph Lameter 
12985da7ca86SChristoph Lameter 		nid = interleave_nid(pol, vma, addr, HPAGE_SHIFT);
1299480eccf9SLee Schermerhorn 		__mpol_free(pol);		/* finished with pol */
1300396faf03SMel Gorman 		return NODE_DATA(nid)->node_zonelists + gfp_zone(gfp_flags);
13015da7ca86SChristoph Lameter 	}
1302480eccf9SLee Schermerhorn 
1303480eccf9SLee Schermerhorn 	zl = zonelist_policy(GFP_HIGHUSER, pol);
1304480eccf9SLee Schermerhorn 	if (unlikely(pol != &default_policy && pol != current->mempolicy)) {
1305480eccf9SLee Schermerhorn 		if (pol->policy != MPOL_BIND)
1306480eccf9SLee Schermerhorn 			__mpol_free(pol);	/* finished with pol */
1307480eccf9SLee Schermerhorn 		else
1308480eccf9SLee Schermerhorn 			*mpol = pol;	/* unref needed after allocation */
1309480eccf9SLee Schermerhorn 	}
1310480eccf9SLee Schermerhorn 	return zl;
13115da7ca86SChristoph Lameter }
131200ac59adSChen, Kenneth W #endif
13135da7ca86SChristoph Lameter 
13141da177e4SLinus Torvalds /* Allocate a page under an interleave policy.  Uses its own path
13151da177e4SLinus Torvalds    because it must account NUMA_INTERLEAVE_HIT separately. */
1316662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1317662f3a0bSAndi Kleen 					unsigned nid)
13181da177e4SLinus Torvalds {
13191da177e4SLinus Torvalds 	struct zonelist *zl;
13201da177e4SLinus Torvalds 	struct page *page;
13211da177e4SLinus Torvalds 
1322af4ca457SAl Viro 	zl = NODE_DATA(nid)->node_zonelists + gfp_zone(gfp);
13231da177e4SLinus Torvalds 	page = __alloc_pages(gfp, order, zl);
1324ca889e6cSChristoph Lameter 	if (page && page_zone(page) == zl->zones[0])
1325ca889e6cSChristoph Lameter 		inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
13261da177e4SLinus Torvalds 	return page;
13271da177e4SLinus Torvalds }
13281da177e4SLinus Torvalds 
13291da177e4SLinus Torvalds /**
13301da177e4SLinus Torvalds  * 	alloc_page_vma	- Allocate a page for a VMA.
13311da177e4SLinus Torvalds  *
13321da177e4SLinus Torvalds  * 	@gfp:
13331da177e4SLinus Torvalds  *      %GFP_USER    user allocation,
13341da177e4SLinus Torvalds  *      %GFP_KERNEL  kernel allocation,
13351da177e4SLinus Torvalds  *      %GFP_HIGHMEM highmem/user allocation,
13361da177e4SLinus Torvalds  *      %GFP_FS      allocation should not call back into a file system,
13371da177e4SLinus Torvalds  *      %GFP_ATOMIC  don't sleep.
13381da177e4SLinus Torvalds  *
13391da177e4SLinus Torvalds  * 	@vma:  Pointer to VMA or NULL if not available.
13401da177e4SLinus Torvalds  *	@addr: Virtual Address of the allocation. Must be inside the VMA.
13411da177e4SLinus Torvalds  *
13421da177e4SLinus Torvalds  * 	This function allocates a page from the kernel page pool and applies
13431da177e4SLinus Torvalds  *	a NUMA policy associated with the VMA or the current process.
13441da177e4SLinus Torvalds  *	When VMA is not NULL, the caller must hold down_read on the mmap_sem of the
13451da177e4SLinus Torvalds  *	mm_struct of the VMA to prevent it from going away. Should be used for
13461da177e4SLinus Torvalds  *	all allocations for pages that will be mapped into
13471da177e4SLinus Torvalds  * 	user space. Returns NULL when no page can be allocated.
13481da177e4SLinus Torvalds  *
13491da177e4SLinus Torvalds  *	Should be called with the mmap_sem of the vma held.
13501da177e4SLinus Torvalds  */
13511da177e4SLinus Torvalds struct page *
1352dd0fc66fSAl Viro alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
13531da177e4SLinus Torvalds {
13546e21c8f1SChristoph Lameter 	struct mempolicy *pol = get_vma_policy(current, vma, addr);
1355480eccf9SLee Schermerhorn 	struct zonelist *zl;
13561da177e4SLinus Torvalds 
1357cf2a473cSPaul Jackson 	cpuset_update_task_memory_state();
13581da177e4SLinus Torvalds 
13591da177e4SLinus Torvalds 	if (unlikely(pol->policy == MPOL_INTERLEAVE)) {
13601da177e4SLinus Torvalds 		unsigned nid;
13615da7ca86SChristoph Lameter 
13625da7ca86SChristoph Lameter 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
13631da177e4SLinus Torvalds 		return alloc_page_interleave(gfp, 0, nid);
13641da177e4SLinus Torvalds 	}
1365480eccf9SLee Schermerhorn 	zl = zonelist_policy(gfp, pol);
1366480eccf9SLee Schermerhorn 	if (pol != &default_policy && pol != current->mempolicy) {
1367480eccf9SLee Schermerhorn 		/*
1368480eccf9SLee Schermerhorn 		 * slow path: ref counted policy -- shared or vma
1369480eccf9SLee Schermerhorn 		 */
1370480eccf9SLee Schermerhorn 		struct page *page =  __alloc_pages(gfp, 0, zl);
1371480eccf9SLee Schermerhorn 		__mpol_free(pol);
1372480eccf9SLee Schermerhorn 		return page;
1373480eccf9SLee Schermerhorn 	}
1374480eccf9SLee Schermerhorn 	/*
1375480eccf9SLee Schermerhorn 	 * fast path:  default or task policy
1376480eccf9SLee Schermerhorn 	 */
1377480eccf9SLee Schermerhorn 	return __alloc_pages(gfp, 0, zl);
13781da177e4SLinus Torvalds }
13791da177e4SLinus Torvalds 
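/*
 * Hedged usage sketch (editorial): an anonymous-fault caller, with the
 * mmap_sem already held for read as the comment above requires.
 */
#if 0	/* illustrative only, never compiled */
	struct page *page = alloc_page_vma(GFP_HIGHUSER, vma, address);

	if (!page)
		return VM_FAULT_OOM;
#endif
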
13801da177e4SLinus Torvalds /**
13811da177e4SLinus Torvalds  * 	alloc_pages_current - Allocate pages.
13821da177e4SLinus Torvalds  *
13831da177e4SLinus Torvalds  *	@gfp:
13841da177e4SLinus Torvalds  *		%GFP_USER   user allocation,
13851da177e4SLinus Torvalds  *      	%GFP_KERNEL kernel allocation,
13861da177e4SLinus Torvalds  *      	%GFP_HIGHMEM highmem allocation,
13871da177e4SLinus Torvalds  *      	%GFP_FS     don't call back into a file system.
13881da177e4SLinus Torvalds  *      	%GFP_ATOMIC don't sleep.
13891da177e4SLinus Torvalds  *	@order: Power of two of allocation size in pages. 0 is a single page.
13901da177e4SLinus Torvalds  *
13911da177e4SLinus Torvalds  *	Allocate a page from the kernel page pool.  When not in
13921da177e4SLinus Torvalds  *	interrupt context, apply the current process' NUMA policy.
13931da177e4SLinus Torvalds  *	Returns NULL when no page can be allocated.
13941da177e4SLinus Torvalds  *
1395cf2a473cSPaul Jackson  *	Don't call cpuset_update_task_memory_state() unless
13961da177e4SLinus Torvalds  *	1) it's ok to take cpuset_sem (can WAIT), and
13971da177e4SLinus Torvalds  *	2) allocating for current task (not interrupt).
13981da177e4SLinus Torvalds  */
1399dd0fc66fSAl Viro struct page *alloc_pages_current(gfp_t gfp, unsigned order)
14001da177e4SLinus Torvalds {
14011da177e4SLinus Torvalds 	struct mempolicy *pol = current->mempolicy;
14021da177e4SLinus Torvalds 
14031da177e4SLinus Torvalds 	if ((gfp & __GFP_WAIT) && !in_interrupt())
1404cf2a473cSPaul Jackson 		cpuset_update_task_memory_state();
14059b819d20SChristoph Lameter 	if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
14061da177e4SLinus Torvalds 		pol = &default_policy;
14071da177e4SLinus Torvalds 	if (pol->policy == MPOL_INTERLEAVE)
14081da177e4SLinus Torvalds 		return alloc_page_interleave(gfp, order, interleave_nodes(pol));
14091da177e4SLinus Torvalds 	return __alloc_pages(gfp, order, zonelist_policy(gfp, pol));
14101da177e4SLinus Torvalds }
14111da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_pages_current);
14121da177e4SLinus Torvalds 
14134225399aSPaul Jackson /*
14144225399aSPaul Jackson  * If mpol_copy() sees current->cpuset == cpuset_being_rebound, then it
14154225399aSPaul Jackson  * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
14164225399aSPaul Jackson  * with the mems_allowed returned by cpuset_mems_allowed().  This
14174225399aSPaul Jackson  * keeps mempolicies cpuset-relative after the task's cpuset moves.
14184225399aSPaul Jackson  * See also update_nodemask() in kernel/cpuset.c.
14194225399aSPaul Jackson  */
14204225399aSPaul Jackson 
14211da177e4SLinus Torvalds /* Slow path of a mempolicy copy */
14221da177e4SLinus Torvalds struct mempolicy *__mpol_copy(struct mempolicy *old)
14231da177e4SLinus Torvalds {
14241da177e4SLinus Torvalds 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
14251da177e4SLinus Torvalds 
14261da177e4SLinus Torvalds 	if (!new)
14271da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
14284225399aSPaul Jackson 	if (current_cpuset_is_being_rebound()) {
14294225399aSPaul Jackson 		nodemask_t mems = cpuset_mems_allowed(current);
14304225399aSPaul Jackson 		mpol_rebind_policy(old, &mems);
14314225399aSPaul Jackson 	}
14321da177e4SLinus Torvalds 	*new = *old;
14331da177e4SLinus Torvalds 	atomic_set(&new->refcnt, 1);
14341da177e4SLinus Torvalds 	if (new->policy == MPOL_BIND) {
14351da177e4SLinus Torvalds 		int sz = ksize(old->v.zonelist);
1436e94b1766SChristoph Lameter 		new->v.zonelist = kmemdup(old->v.zonelist, sz, GFP_KERNEL);
14371da177e4SLinus Torvalds 		if (!new->v.zonelist) {
14381da177e4SLinus Torvalds 			kmem_cache_free(policy_cache, new);
14391da177e4SLinus Torvalds 			return ERR_PTR(-ENOMEM);
14401da177e4SLinus Torvalds 		}
14411da177e4SLinus Torvalds 	}
14421da177e4SLinus Torvalds 	return new;
14431da177e4SLinus Torvalds }
14441da177e4SLinus Torvalds 
14451da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */
14461da177e4SLinus Torvalds int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
14471da177e4SLinus Torvalds {
14481da177e4SLinus Torvalds 	if (!a || !b)
14491da177e4SLinus Torvalds 		return 0;
14501da177e4SLinus Torvalds 	if (a->policy != b->policy)
14511da177e4SLinus Torvalds 		return 0;
14521da177e4SLinus Torvalds 	switch (a->policy) {
14531da177e4SLinus Torvalds 	case MPOL_DEFAULT:
14541da177e4SLinus Torvalds 		return 1;
14551da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
1456dfcd3c0dSAndi Kleen 		return nodes_equal(a->v.nodes, b->v.nodes);
14571da177e4SLinus Torvalds 	case MPOL_PREFERRED:
14581da177e4SLinus Torvalds 		return a->v.preferred_node == b->v.preferred_node;
14591da177e4SLinus Torvalds 	case MPOL_BIND: {
14601da177e4SLinus Torvalds 		int i;
14611da177e4SLinus Torvalds 		for (i = 0; a->v.zonelist->zones[i]; i++)
14621da177e4SLinus Torvalds 			if (a->v.zonelist->zones[i] != b->v.zonelist->zones[i])
14631da177e4SLinus Torvalds 				return 0;
14641da177e4SLinus Torvalds 		return b->v.zonelist->zones[i] == NULL;
14651da177e4SLinus Torvalds 	}
14661da177e4SLinus Torvalds 	default:
14671da177e4SLinus Torvalds 		BUG();
14681da177e4SLinus Torvalds 		return 0;
14691da177e4SLinus Torvalds 	}
14701da177e4SLinus Torvalds }
14711da177e4SLinus Torvalds 
14721da177e4SLinus Torvalds /* Slow path of a mpol destructor. */
14731da177e4SLinus Torvalds void __mpol_free(struct mempolicy *p)
14741da177e4SLinus Torvalds {
14751da177e4SLinus Torvalds 	if (!atomic_dec_and_test(&p->refcnt))
14761da177e4SLinus Torvalds 		return;
14771da177e4SLinus Torvalds 	if (p->policy == MPOL_BIND)
14781da177e4SLinus Torvalds 		kfree(p->v.zonelist);
14791da177e4SLinus Torvalds 	p->policy = MPOL_DEFAULT;
14801da177e4SLinus Torvalds 	kmem_cache_free(policy_cache, p);
14811da177e4SLinus Torvalds }
14821da177e4SLinus Torvalds 
14831da177e4SLinus Torvalds /*
14841da177e4SLinus Torvalds  * Shared memory backing store policy support.
14851da177e4SLinus Torvalds  *
14861da177e4SLinus Torvalds  * Remember policies even when nobody has shared memory mapped.
14871da177e4SLinus Torvalds  * The policies are kept in Red-Black tree linked from the inode.
14881da177e4SLinus Torvalds  * They are protected by the sp->lock spinlock, which should be held
14891da177e4SLinus Torvalds  * for any accesses to the tree.
14901da177e4SLinus Torvalds  */
14911da177e4SLinus Torvalds 
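/*
 * Hedged worked example (editorial): after two mbind()-style updates
 * on a shmem object setting pages [0,4) to interleave and [4,8) to
 * bind, the tree holds two non-overlapping sp_nodes, {start=0, end=4}
 * and {start=4, end=8}, each holding a reference on its mempolicy.
 */
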
14921da177e4SLinus Torvalds /* lookup first element intersecting start-end */
14931da177e4SLinus Torvalds /* Caller holds sp->lock */
14941da177e4SLinus Torvalds static struct sp_node *
14951da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
14961da177e4SLinus Torvalds {
14971da177e4SLinus Torvalds 	struct rb_node *n = sp->root.rb_node;
14981da177e4SLinus Torvalds 
14991da177e4SLinus Torvalds 	while (n) {
15001da177e4SLinus Torvalds 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
15011da177e4SLinus Torvalds 
15021da177e4SLinus Torvalds 		if (start >= p->end)
15031da177e4SLinus Torvalds 			n = n->rb_right;
15041da177e4SLinus Torvalds 		else if (end <= p->start)
15051da177e4SLinus Torvalds 			n = n->rb_left;
15061da177e4SLinus Torvalds 		else
15071da177e4SLinus Torvalds 			break;
15081da177e4SLinus Torvalds 	}
15091da177e4SLinus Torvalds 	if (!n)
15101da177e4SLinus Torvalds 		return NULL;
15111da177e4SLinus Torvalds 	for (;;) {
15121da177e4SLinus Torvalds 		struct sp_node *w = NULL;
15131da177e4SLinus Torvalds 		struct rb_node *prev = rb_prev(n);
15141da177e4SLinus Torvalds 		if (!prev)
15151da177e4SLinus Torvalds 			break;
15161da177e4SLinus Torvalds 		w = rb_entry(prev, struct sp_node, nd);
15171da177e4SLinus Torvalds 		if (w->end <= start)
15181da177e4SLinus Torvalds 			break;
15191da177e4SLinus Torvalds 		n = prev;
15201da177e4SLinus Torvalds 	}
15211da177e4SLinus Torvalds 	return rb_entry(n, struct sp_node, nd);
15221da177e4SLinus Torvalds }
15231da177e4SLinus Torvalds 
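/*
 * Hedged worked example (editorial): with sp_nodes [0,2), [2,5) and
 * [6,9) in the tree, sp_lookup(sp, 1, 7) may land on [2,5) via the
 * binary search; the backward walk at the end of sp_lookup() then
 * notices that [0,2) also intersects and returns it, so callers always
 * see the lowest overlapping range first.
 */
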
15241da177e4SLinus Torvalds /* Insert a new shared policy into the list. */
15251da177e4SLinus Torvalds /* Caller holds sp->lock */
15261da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new)
15271da177e4SLinus Torvalds {
15281da177e4SLinus Torvalds 	struct rb_node **p = &sp->root.rb_node;
15291da177e4SLinus Torvalds 	struct rb_node *parent = NULL;
15301da177e4SLinus Torvalds 	struct sp_node *nd;
15311da177e4SLinus Torvalds 
15321da177e4SLinus Torvalds 	while (*p) {
15331da177e4SLinus Torvalds 		parent = *p;
15341da177e4SLinus Torvalds 		nd = rb_entry(parent, struct sp_node, nd);
15351da177e4SLinus Torvalds 		if (new->start < nd->start)
15361da177e4SLinus Torvalds 			p = &(*p)->rb_left;
15371da177e4SLinus Torvalds 		else if (new->end > nd->end)
15381da177e4SLinus Torvalds 			p = &(*p)->rb_right;
15391da177e4SLinus Torvalds 		else
15401da177e4SLinus Torvalds 			BUG();
15411da177e4SLinus Torvalds 	}
15421da177e4SLinus Torvalds 	rb_link_node(&new->nd, parent, p);
15431da177e4SLinus Torvalds 	rb_insert_color(&new->nd, &sp->root);
1544140d5a49SPaul Mundt 	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
15451da177e4SLinus Torvalds 		 new->policy ? new->policy->policy : 0);
15461da177e4SLinus Torvalds }
15471da177e4SLinus Torvalds 
15481da177e4SLinus Torvalds /* Find shared policy intersecting idx */
15491da177e4SLinus Torvalds struct mempolicy *
15501da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
15511da177e4SLinus Torvalds {
15521da177e4SLinus Torvalds 	struct mempolicy *pol = NULL;
15531da177e4SLinus Torvalds 	struct sp_node *sn;
15541da177e4SLinus Torvalds 
15551da177e4SLinus Torvalds 	if (!sp->root.rb_node)
15561da177e4SLinus Torvalds 		return NULL;
15571da177e4SLinus Torvalds 	spin_lock(&sp->lock);
15581da177e4SLinus Torvalds 	sn = sp_lookup(sp, idx, idx+1);
15591da177e4SLinus Torvalds 	if (sn) {
15601da177e4SLinus Torvalds 		mpol_get(sn->policy);
15611da177e4SLinus Torvalds 		pol = sn->policy;
15621da177e4SLinus Torvalds 	}
15631da177e4SLinus Torvalds 	spin_unlock(&sp->lock);
15641da177e4SLinus Torvalds 	return pol;
15651da177e4SLinus Torvalds }
15661da177e4SLinus Torvalds 
15671da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n)
15681da177e4SLinus Torvalds {
1569140d5a49SPaul Mundt 	pr_debug("deleting %lx-%lx\n", n->start, n->end);
15701da177e4SLinus Torvalds 	rb_erase(&n->nd, &sp->root);
15711da177e4SLinus Torvalds 	mpol_free(n->policy);
15721da177e4SLinus Torvalds 	kmem_cache_free(sn_cache, n);
15731da177e4SLinus Torvalds }
15741da177e4SLinus Torvalds 
1575dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
1576dbcb0f19SAdrian Bunk 				struct mempolicy *pol)
15771da177e4SLinus Torvalds {
15781da177e4SLinus Torvalds 	struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
15791da177e4SLinus Torvalds 
15801da177e4SLinus Torvalds 	if (!n)
15811da177e4SLinus Torvalds 		return NULL;
15821da177e4SLinus Torvalds 	n->start = start;
15831da177e4SLinus Torvalds 	n->end = end;
15841da177e4SLinus Torvalds 	mpol_get(pol);
15851da177e4SLinus Torvalds 	n->policy = pol;
15861da177e4SLinus Torvalds 	return n;
15871da177e4SLinus Torvalds }
15881da177e4SLinus Torvalds 
15891da177e4SLinus Torvalds /* Replace a policy range. */
15901da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
15911da177e4SLinus Torvalds 				 unsigned long end, struct sp_node *new)
15921da177e4SLinus Torvalds {
15931da177e4SLinus Torvalds 	struct sp_node *n, *new2 = NULL;
15941da177e4SLinus Torvalds 
15951da177e4SLinus Torvalds restart:
15961da177e4SLinus Torvalds 	spin_lock(&sp->lock);
15971da177e4SLinus Torvalds 	n = sp_lookup(sp, start, end);
15981da177e4SLinus Torvalds 	/* Take care of old policies in the same range. */
15991da177e4SLinus Torvalds 	while (n && n->start < end) {
16001da177e4SLinus Torvalds 		struct rb_node *next = rb_next(&n->nd);
16011da177e4SLinus Torvalds 		if (n->start >= start) {
16021da177e4SLinus Torvalds 			if (n->end <= end)
16031da177e4SLinus Torvalds 				sp_delete(sp, n);
16041da177e4SLinus Torvalds 			else
16051da177e4SLinus Torvalds 				n->start = end;
16061da177e4SLinus Torvalds 		} else {
16071da177e4SLinus Torvalds 			/* Old policy spanning whole new range. */
16081da177e4SLinus Torvalds 			if (n->end > end) {
16091da177e4SLinus Torvalds 				if (!new2) {
16101da177e4SLinus Torvalds 					spin_unlock(&sp->lock);
16111da177e4SLinus Torvalds 					new2 = sp_alloc(end, n->end, n->policy);
16121da177e4SLinus Torvalds 					if (!new2)
16131da177e4SLinus Torvalds 						return -ENOMEM;
16141da177e4SLinus Torvalds 					goto restart;
16151da177e4SLinus Torvalds 				}
16161da177e4SLinus Torvalds 				n->end = start;
16171da177e4SLinus Torvalds 				sp_insert(sp, new2);
16181da177e4SLinus Torvalds 				new2 = NULL;
16191da177e4SLinus Torvalds 				break;
16201da177e4SLinus Torvalds 			} else
16211da177e4SLinus Torvalds 				n->end = start;
16221da177e4SLinus Torvalds 		}
16231da177e4SLinus Torvalds 		if (!next)
16241da177e4SLinus Torvalds 			break;
16251da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
16261da177e4SLinus Torvalds 	}
16271da177e4SLinus Torvalds 	if (new)
16281da177e4SLinus Torvalds 		sp_insert(sp, new);
16291da177e4SLinus Torvalds 	spin_unlock(&sp->lock);
16301da177e4SLinus Torvalds 	if (new2) {
16311da177e4SLinus Torvalds 		mpol_free(new2->policy);
16321da177e4SLinus Torvalds 		kmem_cache_free(sn_cache, new2);
16331da177e4SLinus Torvalds 	}
16341da177e4SLinus Torvalds 	return 0;
16351da177e4SLinus Torvalds }
16361da177e4SLinus Torvalds 
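/*
 * Hedged worked example (editorial) of the splitting above: replacing
 * [2,4) while the tree holds a single node [0,8) truncates that node
 * to [0,2), inserts the preallocated new2 = [4,8) carrying the old
 * policy, then inserts the new [2,4) node.  new2 is allocated with the
 * lock dropped and the walk restarted because sp->lock is a spinlock.
 */
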
16377339ff83SRobin Holt void mpol_shared_policy_init(struct shared_policy *info, int policy,
16387339ff83SRobin Holt 				nodemask_t *policy_nodes)
16397339ff83SRobin Holt {
16407339ff83SRobin Holt 	info->root = RB_ROOT;
16417339ff83SRobin Holt 	spin_lock_init(&info->lock);
16427339ff83SRobin Holt 
16437339ff83SRobin Holt 	if (policy != MPOL_DEFAULT) {
16447339ff83SRobin Holt 		struct mempolicy *newpol;
16457339ff83SRobin Holt 
16467339ff83SRobin Holt 		/* Falls back to MPOL_DEFAULT on any error */
16477339ff83SRobin Holt 		newpol = mpol_new(policy, policy_nodes);
16487339ff83SRobin Holt 		if (!IS_ERR(newpol)) {
16497339ff83SRobin Holt 			/* Create pseudo-vma that contains just the policy */
16507339ff83SRobin Holt 			struct vm_area_struct pvma;
16517339ff83SRobin Holt 
16527339ff83SRobin Holt 			memset(&pvma, 0, sizeof(struct vm_area_struct));
16537339ff83SRobin Holt 			/* Policy covers entire file */
16547339ff83SRobin Holt 			pvma.vm_end = TASK_SIZE;
16557339ff83SRobin Holt 			mpol_set_shared_policy(info, &pvma, newpol);
16567339ff83SRobin Holt 			mpol_free(newpol);
16577339ff83SRobin Holt 		}
16587339ff83SRobin Holt 	}
16597339ff83SRobin Holt }
16607339ff83SRobin Holt 
16611da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info,
16621da177e4SLinus Torvalds 			struct vm_area_struct *vma, struct mempolicy *npol)
16631da177e4SLinus Torvalds {
16641da177e4SLinus Torvalds 	int err;
16651da177e4SLinus Torvalds 	struct sp_node *new = NULL;
16661da177e4SLinus Torvalds 	unsigned long sz = vma_pages(vma);
16671da177e4SLinus Torvalds 
1668140d5a49SPaul Mundt 	pr_debug("set_shared_policy %lx sz %lu %d %lx\n",
16691da177e4SLinus Torvalds 		 vma->vm_pgoff,
16701da177e4SLinus Torvalds 		 sz, npol? npol->policy : -1,
1671dfcd3c0dSAndi Kleen 		 npol ? nodes_addr(npol->v.nodes)[0] : -1);
16721da177e4SLinus Torvalds 
16731da177e4SLinus Torvalds 	if (npol) {
16741da177e4SLinus Torvalds 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
16751da177e4SLinus Torvalds 		if (!new)
16761da177e4SLinus Torvalds 			return -ENOMEM;
16771da177e4SLinus Torvalds 	}
16781da177e4SLinus Torvalds 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
16791da177e4SLinus Torvalds 	if (err && new)
16801da177e4SLinus Torvalds 		kmem_cache_free(sn_cache, new);
16811da177e4SLinus Torvalds 	return err;
16821da177e4SLinus Torvalds }
16831da177e4SLinus Torvalds 
16841da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */
16851da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p)
16861da177e4SLinus Torvalds {
16871da177e4SLinus Torvalds 	struct sp_node *n;
16881da177e4SLinus Torvalds 	struct rb_node *next;
16891da177e4SLinus Torvalds 
16901da177e4SLinus Torvalds 	if (!p->root.rb_node)
16911da177e4SLinus Torvalds 		return;
16921da177e4SLinus Torvalds 	spin_lock(&p->lock);
16931da177e4SLinus Torvalds 	next = rb_first(&p->root);
16941da177e4SLinus Torvalds 	while (next) {
16951da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
16961da177e4SLinus Torvalds 		next = rb_next(&n->nd);
169790c5029eSAndi Kleen 		rb_erase(&n->nd, &p->root);
16981da177e4SLinus Torvalds 		mpol_free(n->policy);
16991da177e4SLinus Torvalds 		kmem_cache_free(sn_cache, n);
17001da177e4SLinus Torvalds 	}
17011da177e4SLinus Torvalds 	spin_unlock(&p->lock);
17021da177e4SLinus Torvalds }
17031da177e4SLinus Torvalds 
17041da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */
17051da177e4SLinus Torvalds void __init numa_policy_init(void)
17061da177e4SLinus Torvalds {
1707b71636e2SPaul Mundt 	nodemask_t interleave_nodes;
1708b71636e2SPaul Mundt 	unsigned long largest = 0;
1709b71636e2SPaul Mundt 	int nid, prefer = 0;
1710b71636e2SPaul Mundt 
17111da177e4SLinus Torvalds 	policy_cache = kmem_cache_create("numa_policy",
17121da177e4SLinus Torvalds 					 sizeof(struct mempolicy),
171320c2df83SPaul Mundt 					 0, SLAB_PANIC, NULL);
17141da177e4SLinus Torvalds 
17151da177e4SLinus Torvalds 	sn_cache = kmem_cache_create("shared_policy_node",
17161da177e4SLinus Torvalds 				     sizeof(struct sp_node),
171720c2df83SPaul Mundt 				     0, SLAB_PANIC, NULL);
17181da177e4SLinus Torvalds 
1719b71636e2SPaul Mundt 	/*
1720b71636e2SPaul Mundt 	 * Set interleaving policy for system init. Interleaving is only
1721b71636e2SPaul Mundt 	 * enabled across suitably sized nodes (default is >= 16MB), or
1722b71636e2SPaul Mundt 	 * fall back to the largest node if they're all smaller.
1723b71636e2SPaul Mundt 	 */
1724b71636e2SPaul Mundt 	nodes_clear(interleave_nodes);
172556bbd65dSChristoph Lameter 	for_each_node_state(nid, N_HIGH_MEMORY) {
1726b71636e2SPaul Mundt 		unsigned long total_pages = node_present_pages(nid);
17271da177e4SLinus Torvalds 
1728b71636e2SPaul Mundt 		/* Preserve the largest node */
1729b71636e2SPaul Mundt 		if (largest < total_pages) {
1730b71636e2SPaul Mundt 			largest = total_pages;
1731b71636e2SPaul Mundt 			prefer = nid;
1732b71636e2SPaul Mundt 		}
1733b71636e2SPaul Mundt 
1734b71636e2SPaul Mundt 		/* Interleave this node? */
1735b71636e2SPaul Mundt 		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
1736b71636e2SPaul Mundt 			node_set(nid, interleave_nodes);
1737b71636e2SPaul Mundt 	}
1738b71636e2SPaul Mundt 
1739b71636e2SPaul Mundt 	/* All too small, use the largest */
1740b71636e2SPaul Mundt 	if (unlikely(nodes_empty(interleave_nodes)))
1741b71636e2SPaul Mundt 		node_set(prefer, interleave_nodes);
1742b71636e2SPaul Mundt 
1743b71636e2SPaul Mundt 	if (do_set_mempolicy(MPOL_INTERLEAVE, &interleave_nodes))
17441da177e4SLinus Torvalds 		printk(KERN_WARNING "numa_policy_init: interleaving failed\n");
17451da177e4SLinus Torvalds }
17461da177e4SLinus Torvalds 
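/*
 * Hedged worked example (editorial): on a machine with 1GB, 1GB and
 * 8MB nodes, the boot-time interleave set covers only the two 1GB
 * nodes; if every node were under 16MB, the policy degenerates to
 * interleaving over just the single largest node.
 */
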
17478bccd85fSChristoph Lameter /* Reset policy of current process to default */
17481da177e4SLinus Torvalds void numa_default_policy(void)
17491da177e4SLinus Torvalds {
17508bccd85fSChristoph Lameter 	do_set_mempolicy(MPOL_DEFAULT, NULL);
17511da177e4SLinus Torvalds }
175268860ec1SPaul Jackson 
175368860ec1SPaul Jackson /* Migrate a policy to a different set of nodes */
1754dbcb0f19SAdrian Bunk static void mpol_rebind_policy(struct mempolicy *pol,
1755dbcb0f19SAdrian Bunk 			       const nodemask_t *newmask)
175668860ec1SPaul Jackson {
175774cb2155SPaul Jackson 	nodemask_t *mpolmask;
175868860ec1SPaul Jackson 	nodemask_t tmp;
175968860ec1SPaul Jackson 
176068860ec1SPaul Jackson 	if (!pol)
176168860ec1SPaul Jackson 		return;
176274cb2155SPaul Jackson 	mpolmask = &pol->cpuset_mems_allowed;
176374cb2155SPaul Jackson 	if (nodes_equal(*mpolmask, *newmask))
176474cb2155SPaul Jackson 		return;
176568860ec1SPaul Jackson 
176668860ec1SPaul Jackson 	switch (pol->policy) {
176768860ec1SPaul Jackson 	case MPOL_DEFAULT:
176868860ec1SPaul Jackson 		break;
176968860ec1SPaul Jackson 	case MPOL_INTERLEAVE:
177074cb2155SPaul Jackson 		nodes_remap(tmp, pol->v.nodes, *mpolmask, *newmask);
177168860ec1SPaul Jackson 		pol->v.nodes = tmp;
177274cb2155SPaul Jackson 		*mpolmask = *newmask;
177374cb2155SPaul Jackson 		current->il_next = node_remap(current->il_next,
177474cb2155SPaul Jackson 						*mpolmask, *newmask);
177568860ec1SPaul Jackson 		break;
177668860ec1SPaul Jackson 	case MPOL_PREFERRED:
177768860ec1SPaul Jackson 		pol->v.preferred_node = node_remap(pol->v.preferred_node,
177874cb2155SPaul Jackson 						*mpolmask, *newmask);
177974cb2155SPaul Jackson 		*mpolmask = *newmask;
178068860ec1SPaul Jackson 		break;
178168860ec1SPaul Jackson 	case MPOL_BIND: {
178268860ec1SPaul Jackson 		nodemask_t nodes;
178368860ec1SPaul Jackson 		struct zone **z;
178468860ec1SPaul Jackson 		struct zonelist *zonelist;
178568860ec1SPaul Jackson 
178668860ec1SPaul Jackson 		nodes_clear(nodes);
178768860ec1SPaul Jackson 		for (z = pol->v.zonelist->zones; *z; z++)
178889fa3024SChristoph Lameter 			node_set(zone_to_nid(*z), nodes);
178974cb2155SPaul Jackson 		nodes_remap(tmp, nodes, *mpolmask, *newmask);
179068860ec1SPaul Jackson 		nodes = tmp;
179168860ec1SPaul Jackson 
179268860ec1SPaul Jackson 		zonelist = bind_zonelist(&nodes);
179368860ec1SPaul Jackson 
179468860ec1SPaul Jackson 		/* If no mem, bind_zonelist() returns an ERR_PTR and we keep
179568860ec1SPaul Jackson 		 * the old zonelist.  If that old zonelist has no remaining
179668860ec1SPaul Jackson 		 * mems_allowed nodes, then zonelist_policy() will "FALL
179768860ec1SPaul Jackson 		 * THROUGH" to MPOL_DEFAULT. */
179868860ec1SPaul Jackson 
17998af5e2ebSKAMEZAWA Hiroyuki 		if (!IS_ERR(zonelist)) {
180068860ec1SPaul Jackson 			/* Good - got mem - substitute new zonelist */
180168860ec1SPaul Jackson 			kfree(pol->v.zonelist);
180268860ec1SPaul Jackson 			pol->v.zonelist = zonelist;
180368860ec1SPaul Jackson 		}
180474cb2155SPaul Jackson 		*mpolmask = *newmask;
180568860ec1SPaul Jackson 		break;
180668860ec1SPaul Jackson 	}
180768860ec1SPaul Jackson 	default:
180868860ec1SPaul Jackson 		BUG();
180968860ec1SPaul Jackson 		break;
181068860ec1SPaul Jackson 	}
181168860ec1SPaul Jackson }
181268860ec1SPaul Jackson 
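/*
 * Hedged worked example (editorial) of the remap above: an
 * MPOL_INTERLEAVE policy over {0,1} whose cpuset moves from
 * mems_allowed {0,1} to {2,3} is nodes_remap()ed to {2,3}, and
 * il_next is translated the same way so the round-robin keeps its
 * relative position.
 */
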
181368860ec1SPaul Jackson /*
181474cb2155SPaul Jackson  * Wrapper for mpol_rebind_policy() that takes just a task
181574cb2155SPaul Jackson  * pointer and rebinds that task's mempolicy.
181668860ec1SPaul Jackson  */
181774cb2155SPaul Jackson 
181874cb2155SPaul Jackson void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
181968860ec1SPaul Jackson {
182074cb2155SPaul Jackson 	mpol_rebind_policy(tsk->mempolicy, new);
182168860ec1SPaul Jackson }
18221a75a6c8SChristoph Lameter 
18231a75a6c8SChristoph Lameter /*
18244225399aSPaul Jackson  * Rebind each vma in mm to new nodemask.
18254225399aSPaul Jackson  *
18264225399aSPaul Jackson  * Call holding a reference to mm.  Takes mm->mmap_sem during call.
18274225399aSPaul Jackson  */
18284225399aSPaul Jackson 
18294225399aSPaul Jackson void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
18304225399aSPaul Jackson {
18314225399aSPaul Jackson 	struct vm_area_struct *vma;
18324225399aSPaul Jackson 
18334225399aSPaul Jackson 	down_write(&mm->mmap_sem);
18344225399aSPaul Jackson 	for (vma = mm->mmap; vma; vma = vma->vm_next)
18354225399aSPaul Jackson 		mpol_rebind_policy(vma->vm_policy, new);
18364225399aSPaul Jackson 	up_write(&mm->mmap_sem);
18374225399aSPaul Jackson }
18384225399aSPaul Jackson 
18394225399aSPaul Jackson /*
18401a75a6c8SChristoph Lameter  * Display pages allocated per node and memory policy via /proc.
18411a75a6c8SChristoph Lameter  */
18421a75a6c8SChristoph Lameter 
184315ad7cdcSHelge Deller static const char * const policy_types[] =
184415ad7cdcSHelge Deller 	{ "default", "prefer", "bind", "interleave" };
18451a75a6c8SChristoph Lameter 
18461a75a6c8SChristoph Lameter /*
18471a75a6c8SChristoph Lameter  * Convert a mempolicy into a string.
18481a75a6c8SChristoph Lameter  * Returns the number of characters in buffer (if positive)
18491a75a6c8SChristoph Lameter  * or an error (negative)
18501a75a6c8SChristoph Lameter  */
18511a75a6c8SChristoph Lameter static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
18521a75a6c8SChristoph Lameter {
18531a75a6c8SChristoph Lameter 	char *p = buffer;
18541a75a6c8SChristoph Lameter 	int l;
18551a75a6c8SChristoph Lameter 	nodemask_t nodes;
18561a75a6c8SChristoph Lameter 	int mode = pol ? pol->policy : MPOL_DEFAULT;
18571a75a6c8SChristoph Lameter 
18581a75a6c8SChristoph Lameter 	switch (mode) {
18591a75a6c8SChristoph Lameter 	case MPOL_DEFAULT:
18601a75a6c8SChristoph Lameter 		nodes_clear(nodes);
18611a75a6c8SChristoph Lameter 		break;
18621a75a6c8SChristoph Lameter 
18631a75a6c8SChristoph Lameter 	case MPOL_PREFERRED:
18641a75a6c8SChristoph Lameter 		nodes_clear(nodes);
18651a75a6c8SChristoph Lameter 		node_set(pol->v.preferred_node, nodes);
18661a75a6c8SChristoph Lameter 		break;
18671a75a6c8SChristoph Lameter 
18681a75a6c8SChristoph Lameter 	case MPOL_BIND:
18691a75a6c8SChristoph Lameter 		get_zonemask(pol, &nodes);
18701a75a6c8SChristoph Lameter 		break;
18711a75a6c8SChristoph Lameter 
18721a75a6c8SChristoph Lameter 	case MPOL_INTERLEAVE:
18731a75a6c8SChristoph Lameter 		nodes = pol->v.nodes;
18741a75a6c8SChristoph Lameter 		break;
18751a75a6c8SChristoph Lameter 
18761a75a6c8SChristoph Lameter 	default:
18771a75a6c8SChristoph Lameter 		BUG();
18781a75a6c8SChristoph Lameter 		return -EFAULT;
18791a75a6c8SChristoph Lameter 	}
18801a75a6c8SChristoph Lameter 
18811a75a6c8SChristoph Lameter 	l = strlen(policy_types[mode]);
18821a75a6c8SChristoph Lameter 	if (buffer + maxlen < p + l + 1)
18831a75a6c8SChristoph Lameter 		return -ENOSPC;
18841a75a6c8SChristoph Lameter 
18851a75a6c8SChristoph Lameter 	strcpy(p, policy_types[mode]);
18861a75a6c8SChristoph Lameter 	p += l;
18871a75a6c8SChristoph Lameter 
18881a75a6c8SChristoph Lameter 	if (!nodes_empty(nodes)) {
18891a75a6c8SChristoph Lameter 		if (buffer + maxlen < p + 2)
18901a75a6c8SChristoph Lameter 			return -ENOSPC;
18911a75a6c8SChristoph Lameter 		*p++ = '=';
18921a75a6c8SChristoph Lameter 		p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
18931a75a6c8SChristoph Lameter 	}
18941a75a6c8SChristoph Lameter 	return p - buffer;
18951a75a6c8SChristoph Lameter }
18961a75a6c8SChristoph Lameter 
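/*
 * Hedged output examples (editorial) for mpol_to_str() above: an
 * interleave policy over nodes 0-3 renders as "interleave=0-3", a
 * preferred policy on node 1 as "prefer=1", and a NULL policy as
 * plain "default" (no '=' suffix, since its nodemask is empty).
 */
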
18971a75a6c8SChristoph Lameter struct numa_maps {
18981a75a6c8SChristoph Lameter 	unsigned long pages;
18991a75a6c8SChristoph Lameter 	unsigned long anon;
1900397874dfSChristoph Lameter 	unsigned long active;
1901397874dfSChristoph Lameter 	unsigned long writeback;
19021a75a6c8SChristoph Lameter 	unsigned long mapcount_max;
1903397874dfSChristoph Lameter 	unsigned long dirty;
1904397874dfSChristoph Lameter 	unsigned long swapcache;
19051a75a6c8SChristoph Lameter 	unsigned long node[MAX_NUMNODES];
19061a75a6c8SChristoph Lameter };
19071a75a6c8SChristoph Lameter 
1908397874dfSChristoph Lameter static void gather_stats(struct page *page, void *private, int pte_dirty)
19091a75a6c8SChristoph Lameter {
19101a75a6c8SChristoph Lameter 	struct numa_maps *md = private;
19111a75a6c8SChristoph Lameter 	int count = page_mapcount(page);
19121a75a6c8SChristoph Lameter 
19131a75a6c8SChristoph Lameter 	md->pages++;
1914397874dfSChristoph Lameter 	if (pte_dirty || PageDirty(page))
1915397874dfSChristoph Lameter 		md->dirty++;
1916397874dfSChristoph Lameter 
1917397874dfSChristoph Lameter 	if (PageSwapCache(page))
1918397874dfSChristoph Lameter 		md->swapcache++;
1919397874dfSChristoph Lameter 
1920397874dfSChristoph Lameter 	if (PageActive(page))
1921397874dfSChristoph Lameter 		md->active++;
1922397874dfSChristoph Lameter 
1923397874dfSChristoph Lameter 	if (PageWriteback(page))
1924397874dfSChristoph Lameter 		md->writeback++;
19251a75a6c8SChristoph Lameter 
19261a75a6c8SChristoph Lameter 	if (PageAnon(page))
19271a75a6c8SChristoph Lameter 		md->anon++;
19281a75a6c8SChristoph Lameter 
1929397874dfSChristoph Lameter 	if (count > md->mapcount_max)
1930397874dfSChristoph Lameter 		md->mapcount_max = count;
1931397874dfSChristoph Lameter 
19321a75a6c8SChristoph Lameter 	md->node[page_to_nid(page)]++;
19331a75a6c8SChristoph Lameter }
19341a75a6c8SChristoph Lameter 
19357f709ed0SAndrew Morton #ifdef CONFIG_HUGETLB_PAGE
1936397874dfSChristoph Lameter static void check_huge_range(struct vm_area_struct *vma,
1937397874dfSChristoph Lameter 		unsigned long start, unsigned long end,
1938397874dfSChristoph Lameter 		struct numa_maps *md)
1939397874dfSChristoph Lameter {
1940397874dfSChristoph Lameter 	unsigned long addr;
1941397874dfSChristoph Lameter 	struct page *page;
1942397874dfSChristoph Lameter 
1943397874dfSChristoph Lameter 	for (addr = start; addr < end; addr += HPAGE_SIZE) {
1944397874dfSChristoph Lameter 		pte_t *ptep = huge_pte_offset(vma->vm_mm, addr & HPAGE_MASK);
1945397874dfSChristoph Lameter 		pte_t pte;
1946397874dfSChristoph Lameter 
1947397874dfSChristoph Lameter 		if (!ptep)
1948397874dfSChristoph Lameter 			continue;
1949397874dfSChristoph Lameter 
1950397874dfSChristoph Lameter 		pte = *ptep;
1951397874dfSChristoph Lameter 		if (pte_none(pte))
1952397874dfSChristoph Lameter 			continue;
1953397874dfSChristoph Lameter 
1954397874dfSChristoph Lameter 		page = pte_page(pte);
1955397874dfSChristoph Lameter 		if (!page)
1956397874dfSChristoph Lameter 			continue;
1957397874dfSChristoph Lameter 
1958397874dfSChristoph Lameter 		gather_stats(page, md, pte_dirty(*ptep));
1959397874dfSChristoph Lameter 	}
1960397874dfSChristoph Lameter }
19617f709ed0SAndrew Morton #else
19627f709ed0SAndrew Morton static inline void check_huge_range(struct vm_area_struct *vma,
19637f709ed0SAndrew Morton 		unsigned long start, unsigned long end,
19647f709ed0SAndrew Morton 		struct numa_maps *md)
19657f709ed0SAndrew Morton {
19667f709ed0SAndrew Morton }
19677f709ed0SAndrew Morton #endif
1968397874dfSChristoph Lameter 
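/*
 * Hedged sample (editorial, values invented) of one line produced by
 * show_numa_map() below for /proc/<pid>/numa_maps:
 *
 *   7f60e0000000 interleave=0-1 anon=512 dirty=512 mapmax=3 N0=256 N1=256
 */
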
19691a75a6c8SChristoph Lameter int show_numa_map(struct seq_file *m, void *v)
19701a75a6c8SChristoph Lameter {
197199f89551SEric W. Biederman 	struct proc_maps_private *priv = m->private;
19721a75a6c8SChristoph Lameter 	struct vm_area_struct *vma = v;
19731a75a6c8SChristoph Lameter 	struct numa_maps *md;
1974397874dfSChristoph Lameter 	struct file *file = vma->vm_file;
1975397874dfSChristoph Lameter 	struct mm_struct *mm = vma->vm_mm;
1976480eccf9SLee Schermerhorn 	struct mempolicy *pol;
19771a75a6c8SChristoph Lameter 	int n;
19781a75a6c8SChristoph Lameter 	char buffer[50];
19791a75a6c8SChristoph Lameter 
1980397874dfSChristoph Lameter 	if (!mm)
19811a75a6c8SChristoph Lameter 		return 0;
19821a75a6c8SChristoph Lameter 
19831a75a6c8SChristoph Lameter 	md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
19841a75a6c8SChristoph Lameter 	if (!md)
19851a75a6c8SChristoph Lameter 		return 0;
19861a75a6c8SChristoph Lameter 
1987480eccf9SLee Schermerhorn 	pol = get_vma_policy(priv->task, vma, vma->vm_start);
1988480eccf9SLee Schermerhorn 	mpol_to_str(buffer, sizeof(buffer), pol);
1989480eccf9SLee Schermerhorn 	/*
1990480eccf9SLee Schermerhorn 	 * unref shared or other task's mempolicy
1991480eccf9SLee Schermerhorn 	 */
1992480eccf9SLee Schermerhorn 	if (pol != &default_policy && pol != current->mempolicy)
1993480eccf9SLee Schermerhorn 		__mpol_free(pol);
19941a75a6c8SChristoph Lameter 
1995397874dfSChristoph Lameter 	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
1996397874dfSChristoph Lameter 
1997397874dfSChristoph Lameter 	if (file) {
1998397874dfSChristoph Lameter 		seq_printf(m, " file=");
1999*c32c2f63SJan Blunck 		seq_path(m, &file->f_path, "\n\t= ");
2000397874dfSChristoph Lameter 	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
2001397874dfSChristoph Lameter 		seq_printf(m, " heap");
2002397874dfSChristoph Lameter 	} else if (vma->vm_start <= mm->start_stack &&
2003397874dfSChristoph Lameter 			vma->vm_end >= mm->start_stack) {
2004397874dfSChristoph Lameter 		seq_printf(m, " stack");
2005397874dfSChristoph Lameter 	}
2006397874dfSChristoph Lameter 
2007397874dfSChristoph Lameter 	if (is_vm_hugetlb_page(vma)) {
2008397874dfSChristoph Lameter 		check_huge_range(vma, vma->vm_start, vma->vm_end, md);
2009397874dfSChristoph Lameter 		seq_printf(m, " huge");
2010397874dfSChristoph Lameter 	} else {
2011397874dfSChristoph Lameter 		check_pgd_range(vma, vma->vm_start, vma->vm_end,
201256bbd65dSChristoph Lameter 			&node_states[N_HIGH_MEMORY], MPOL_MF_STATS, md);
2013397874dfSChristoph Lameter 	}
2014397874dfSChristoph Lameter 
2015397874dfSChristoph Lameter 	if (!md->pages)
2016397874dfSChristoph Lameter 		goto out;
20171a75a6c8SChristoph Lameter 
20181a75a6c8SChristoph Lameter 	if (md->anon)
20191a75a6c8SChristoph Lameter 		seq_printf(m," anon=%lu",md->anon);
20201a75a6c8SChristoph Lameter 
2021397874dfSChristoph Lameter 	if (md->dirty)
2022397874dfSChristoph Lameter 		seq_printf(m," dirty=%lu",md->dirty);
2023397874dfSChristoph Lameter 
2024397874dfSChristoph Lameter 	if (md->pages != md->anon && md->pages != md->dirty)
2025397874dfSChristoph Lameter 		seq_printf(m, " mapped=%lu", md->pages);
2026397874dfSChristoph Lameter 
2027397874dfSChristoph Lameter 	if (md->mapcount_max > 1)
2028397874dfSChristoph Lameter 		seq_printf(m, " mapmax=%lu", md->mapcount_max);
2029397874dfSChristoph Lameter 
2030397874dfSChristoph Lameter 	if (md->swapcache)
2031397874dfSChristoph Lameter 		seq_printf(m," swapcache=%lu", md->swapcache);
2032397874dfSChristoph Lameter 
2033397874dfSChristoph Lameter 	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
2034397874dfSChristoph Lameter 		seq_printf(m," active=%lu", md->active);
2035397874dfSChristoph Lameter 
2036397874dfSChristoph Lameter 	if (md->writeback)
2037397874dfSChristoph Lameter 		seq_printf(m," writeback=%lu", md->writeback);
2038397874dfSChristoph Lameter 
203956bbd65dSChristoph Lameter 	for_each_node_state(n, N_HIGH_MEMORY)
20401a75a6c8SChristoph Lameter 		if (md->node[n])
20411a75a6c8SChristoph Lameter 			seq_printf(m, " N%d=%lu", n, md->node[n]);
2042397874dfSChristoph Lameter out:
20431a75a6c8SChristoph Lameter 	seq_putc(m, '\n');
20441a75a6c8SChristoph Lameter 	kfree(md);
20451a75a6c8SChristoph Lameter 
20461a75a6c8SChristoph Lameter 	if (m->count < m->size)
204799f89551SEric W. Biederman 		m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
20481a75a6c8SChristoph Lameter 	return 0;
20491a75a6c8SChristoph Lameter }
2050