/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a process counter
 *                is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case node -1 here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non-interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem, kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */

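/*
 * Illustrative only, not part of this file: a minimal userspace sketch
 * of how these policies are typically selected, assuming libnuma's
 * <numaif.h> syscall wrappers are available; "buf" and "len" are
 * hypothetical and buf is assumed page aligned (e.g. from mmap()).
 *
 *	#include <numaif.h>
 *
 *	// Process policy: interleave new allocations over nodes 0 and 1.
 *	unsigned long imask = 0x3;		// nodes 0-1
 *	set_mempolicy(MPOL_INTERLEAVE, &imask, 3);
 *
 *	// VMA policy: bind one mapping to node 0, overriding the
 *	// process policy for faults in [buf, buf + len).
 *	unsigned long bmask = 0x1;		// node 0
 *	mbind(buf, len, MPOL_BIND, &bmask, 2, 0);
 */
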
/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
   could replace all the switch()es with a mempolicy_ops structure.
*/

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
#define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2)		/* Gather statistics */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.policy = MPOL_DEFAULT,
};

static void mpol_rebind_policy(struct mempolicy *pol,
                               const nodemask_t *newmask);

/* Do sanity checking on a policy */
static int mpol_check_policy(int mode, nodemask_t *nodes)
{
	int empty = nodes_empty(*nodes);

	switch (mode) {
	case MPOL_DEFAULT:
		if (!empty)
			return -EINVAL;
		break;
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
		/* Preferred will only use the first bit, but allow
		   more for now. */
		if (empty)
			return -EINVAL;
		break;
	}
	return nodes_subset(*nodes, node_states[N_HIGH_MEMORY]) ? 0 : -EINVAL;
}

/* Generate a custom zonelist for the BIND policy. */
static struct zonelist *bind_zonelist(nodemask_t *nodes)
{
	struct zonelist *zl;
	int num, max, nd;
	enum zone_type k;

	max = 1 + MAX_NR_ZONES * nodes_weight(*nodes);
	max++;			/* space for zlcache_ptr (see mmzone.h) */
	zl = kmalloc(sizeof(struct zone *) * max, GFP_KERNEL);
	if (!zl)
		return ERR_PTR(-ENOMEM);
	zl->zlcache_ptr = NULL;
	num = 0;
	/* First put in the highest zones from all nodes, then all the next
	   lower zones etc. Avoid empty zones because the memory allocator
	   doesn't like them. If you implement node hot removal you
	   have to fix that. */
	k = MAX_NR_ZONES - 1;
	while (1) {
		for_each_node_mask(nd, *nodes) {
			struct zone *z = &NODE_DATA(nd)->node_zones[k];
			if (z->present_pages > 0)
				zl->zones[num++] = z;
		}
		if (k == 0)
			break;
		k--;
	}
	if (num == 0) {
		kfree(zl);
		return ERR_PTR(-EINVAL);
	}
	zl->zones[num] = NULL;
	return zl;
}
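
/*
 * Illustrative only: for a hypothetical two-node mask {0,1} where each
 * node has ZONE_NORMAL and ZONE_DMA populated, the loop above emits
 *
 *	node0/NORMAL, node1/NORMAL, node0/DMA, node1/DMA, NULL
 *
 * i.e. every node is tried at the highest zone before any node's lower
 * zones, so MPOL_BIND spreads over the bound nodes before falling back
 * to lower zones.
 */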

/* Create a new policy */
static struct mempolicy *mpol_new(int mode, nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d nodes[0] %lx\n",
		 mode, nodes ? nodes_addr(*nodes)[0] : -1);

	if (mode == MPOL_DEFAULT)
		return NULL;
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	switch (mode) {
	case MPOL_INTERLEAVE:
		policy->v.nodes = *nodes;
		nodes_and(policy->v.nodes, policy->v.nodes,
					node_states[N_HIGH_MEMORY]);
		if (nodes_weight(policy->v.nodes) == 0) {
			kmem_cache_free(policy_cache, policy);
			return ERR_PTR(-EINVAL);
		}
		break;
	case MPOL_PREFERRED:
		policy->v.preferred_node = first_node(*nodes);
		if (policy->v.preferred_node >= MAX_NUMNODES)
			policy->v.preferred_node = -1;
		break;
	case MPOL_BIND:
		policy->v.zonelist = bind_zonelist(nodes);
		if (IS_ERR(policy->v.zonelist)) {
			void *error_code = policy->v.zonelist;
			kmem_cache_free(policy_cache, policy);
			return error_code;
		}
		break;
	}
	policy->policy = mode;
	policy->cpuset_mems_allowed = cpuset_mems_allowed(current);
	return policy;
}

static void gather_stats(struct page *, void *, int pte_dirty);
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

/* Scan through pages checking if pages follow certain conditions. */
static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pte_t *orig_pte;
	pte_t *pte;
	spinlock_t *ptl;

	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		struct page *page;
		int nid;

		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * The check for PageReserved here is important to avoid
		 * handling zero pages and other pages that may have been
		 * marked special by the system.
		 *
		 * If the PageReserved would not be checked here then f.e.
		 * the location of the zero page could have an influence
		 * on MPOL_MF_STRICT, zero pages would be counted for
		 * the per node stats, and there would be useless attempts
		 * to put zero pages on the migration list.
		 */
		if (PageReserved(page))
			continue;
		nid = page_to_nid(page);
		if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
			continue;

		if (flags & MPOL_MF_STATS)
			gather_stats(page, private, pte_dirty(*pte));
		else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
			migrate_page_add(page, private, flags);
		else
			break;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return addr != end;
}

static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		if (check_pte_range(vma, pmd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		if (check_pmd_range(vma, pud, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static inline int check_pgd_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		if (check_pud_range(vma, pgd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pgd++, addr = next, addr != end);
	return 0;
}

/*
 * Check if all pages in a range are on a set of nodes.
 * If pagelist != NULL then isolate pages from the LRU and
 * put them on the pagelist.
 */
static struct vm_area_struct *
check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		const nodemask_t *nodes, unsigned long flags, void *private)
{
	int err;
	struct vm_area_struct *first, *vma, *prev;

	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {

		err = migrate_prep();
		if (err)
			return ERR_PTR(err);
	}

	first = find_vma(mm, start);
	if (!first)
		return ERR_PTR(-EFAULT);
	prev = NULL;
	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
			if (!vma->vm_next && vma->vm_end < end)
				return ERR_PTR(-EFAULT);
			if (prev && prev->vm_end < vma->vm_start)
				return ERR_PTR(-EFAULT);
		}
		if (!is_vm_hugetlb_page(vma) &&
		    ((flags & MPOL_MF_STRICT) ||
		     ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
				vma_migratable(vma)))) {
			unsigned long endvma = vma->vm_end;

			if (endvma > end)
				endvma = end;
			if (vma->vm_start > start)
				start = vma->vm_start;
			err = check_pgd_range(vma, start, endvma, nodes,
						flags, private);
			if (err) {
				first = ERR_PTR(err);
				break;
			}
		}
		prev = vma;
	}
	return first;
}

/* Apply policy to a single VMA */
static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
{
	int err = 0;
	struct mempolicy *old = vma->vm_policy;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	if (vma->vm_ops && vma->vm_ops->set_policy)
		err = vma->vm_ops->set_policy(vma, new);
	if (!err) {
		mpol_get(new);
		vma->vm_policy = new;
		mpol_free(old);
	}
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, struct mempolicy *new)
{
	struct vm_area_struct *next;
	int err;

	err = 0;
	for (; vma && vma->vm_start < end; vma = next) {
		next = vma->vm_next;
		if (vma->vm_start < start)
			err = split_vma(vma->vm_mm, vma, start, 1);
		if (!err && vma->vm_end > end)
			err = split_vma(vma->vm_mm, vma, end, 0);
		if (!err)
			err = policy_vma(vma, new);
		if (err)
			break;
	}
	return err;
}

static int contextualize_policy(int mode, nodemask_t *nodes)
{
	if (!nodes)
		return 0;

	cpuset_update_task_memory_state();
	if (!cpuset_nodes_subset_current_mems_allowed(*nodes))
		return -EINVAL;
	return mpol_check_policy(mode, nodes);
}

/*
 * Update task->flags PF_MEMPOLICY bit: set iff non-default
 * mempolicy.  Allows more rapid checking of this (combined perhaps
 * with other PF_* flag bits) on memory allocation hot code paths.
 *
 * If called from outside this file, the task 'p' should -only- be
 * a newly forked child not yet visible on the task list, because
 * manipulating the task flags of a visible task is not safe.
 *
 * The above limitation is why this routine has the funny name
 * mpol_fix_fork_child_flag().
 *
 * It is also safe to call this with a task pointer of current,
 * which the static wrapper mpol_set_task_struct_flag() does,
 * for use within this file.
 */

void mpol_fix_fork_child_flag(struct task_struct *p)
{
	if (p->mempolicy)
		p->flags |= PF_MEMPOLICY;
	else
		p->flags &= ~PF_MEMPOLICY;
}

static void mpol_set_task_struct_flag(void)
{
	mpol_fix_fork_child_flag(current);
}

/* Set the process memory policy */
static long do_set_mempolicy(int mode, nodemask_t *nodes)
{
	struct mempolicy *new;

	if (contextualize_policy(mode, nodes))
		return -EINVAL;
	new = mpol_new(mode, nodes);
	if (IS_ERR(new))
		return PTR_ERR(new);
	mpol_free(current->mempolicy);
	current->mempolicy = new;
	mpol_set_task_struct_flag();
	if (new && new->policy == MPOL_INTERLEAVE)
		current->il_next = first_node(new->v.nodes);
	return 0;
}

/* Fill a zone bitmap for a policy */
static void get_zonemask(struct mempolicy *p, nodemask_t *nodes)
{
	int i;

	nodes_clear(*nodes);
	switch (p->policy) {
	case MPOL_BIND:
		for (i = 0; p->v.zonelist->zones[i]; i++)
			node_set(zone_to_nid(p->v.zonelist->zones[i]),
				*nodes);
		break;
	case MPOL_DEFAULT:
		break;
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		/* or use current node instead of memory_map? */
		if (p->v.preferred_node < 0)
			*nodes = node_states[N_HIGH_MEMORY];
		else
			node_set(p->v.preferred_node, *nodes);
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p;
	int err;

	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	cpuset_update_task_memory_state();
	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		*nmask  = cpuset_current_mems_allowed;
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->policy == MPOL_INTERLEAVE) {
			*policy = current->il_next;
		} else {
			err = -EINVAL;
			goto out;
		}
	} else
		*policy = pol->policy;

	if (vma) {
		up_read(&current->mm->mmap_sem);
		vma = NULL;
	}

	err = 0;
	if (nmask)
		get_zonemask(pol, nmask);

 out:
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}

#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1)
		isolate_lru_page(page, pagelist);
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
	return alloc_pages_node(node, GFP_HIGHUSER_MOVABLE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;

	nodes_clear(nmask);
	node_set(source, nmask);

	check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist))
		err = migrate_pages(&pagelist, new_node_page, dest);

	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	LIST_HEAD(pagelist);
	int busy = 0;
	int err = 0;
	nodemask_t tmp;

	down_read(&mm->mmap_sem);

	err = migrate_vmas(mm, from_nodes, to_nodes, flags);
	if (err)
		goto out;

/*
 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
 * bit in 'tmp', and return that <source, dest> pair for migration.
 * The pair of nodemasks 'to' and 'from' define the map.
 *
 * If no pair of bits is found that way, fallback to picking some
 * pair of 'source' and 'dest' bits that are not the same.  If the
 * 'source' and 'dest' bits are the same, this represents a node
 * that will be migrating to itself, so no pages need move.
 *
 * If no bits are left in 'tmp', or if all remaining bits left
 * in 'tmp' correspond to the same bit in 'to', return false
 * (nothing left to migrate).
 *
 * This lets us pick a pair of nodes to migrate between, such that
 * if possible the dest node is not already occupied by some other
 * source node, minimizing the risk of overloading the memory on a
 * node that would happen if we migrated incoming memory to a node
 * before migrating outgoing memory sourced from that same node.
 *
 * A single scan of tmp is sufficient.  As we go, we remember the
 * most recent <s, d> pair that moved (s != d).  If we find a pair
 * that not only moved, but what's better, moved to an empty slot
 * (d is not set in tmp), then we break out then, with that pair.
 * Otherwise when we finish scanning from_tmp, we at least have the
 * most recent <s, d> pair that moved.  If we get all the way through
 * the scan of tmp without finding any node that moved, much less
 * moved to an empty node, then there is nothing left worth migrating.
 */

	tmp = *from_nodes;
	while (!nodes_empty(tmp)) {
		int s, d;
		int source = -1;
		int dest = 0;

		for_each_node_mask(s, tmp) {
			d = node_remap(s, *from_nodes, *to_nodes);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
				break;
		}
		if (source == -1)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;
		if (err < 0)
			break;
	}
out:
	up_read(&mm->mmap_sem);
	if (err < 0)
		return err;
	return busy;
}
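
/*
 * Illustrative only: a worked example of the pair picking above with
 * from_nodes = {0,1} and to_nodes = {1,2}.  node_remap() maps the n-th
 * set bit of 'from' to the n-th set bit of 'to', so 0 -> 1 and 1 -> 2.
 * The scan first finds <0,1>, but node 1 is still set in tmp, so it
 * keeps looking and settles on <1,2> since node 2 is not in tmp.  Thus
 * 1 -> 2 migrates first and 0 -> 1 second, never pushing pages onto a
 * node whose own pages have not yet been moved out.
 */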

static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	struct vm_area_struct *vma = (struct vm_area_struct *)private;

	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
					page_address_in_vma(page, vma));
}
#else

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
}

int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	return -ENOSYS;
}

static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	return NULL;
}
#endif

static long do_mbind(unsigned long start, unsigned long len,
		     unsigned long mode, nodemask_t *nmask,
		     unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	struct mempolicy *new;
	unsigned long end;
	int err;
	LIST_HEAD(pagelist);

	if ((flags & ~(unsigned long)(MPOL_MF_STRICT |
				      MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
	    || mode > MPOL_MAX)
		return -EINVAL;
	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	if (start & ~PAGE_MASK)
		return -EINVAL;

	if (mode == MPOL_DEFAULT)
		flags &= ~MPOL_MF_STRICT;

	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
	end = start + len;

	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;

	if (mpol_check_policy(mode, nmask))
		return -EINVAL;

	new = mpol_new(mode, nmask);
	if (IS_ERR(new))
		return PTR_ERR(new);

	/*
	 * If we are using the default policy then operation
	 * on discontinuous address spaces is okay after all
	 */
	if (!new)
		flags |= MPOL_MF_DISCONTIG_OK;

	pr_debug("mbind %lx-%lx mode:%ld nodes:%lx\n", start, start + len,
		 mode, nmask ? nodes_addr(*nmask)[0] : -1);

	down_write(&mm->mmap_sem);
	vma = check_range(mm, start, end, nmask,
			  flags | MPOL_MF_INVERT, &pagelist);

	err = PTR_ERR(vma);
	if (!IS_ERR(vma)) {
		int nr_failed = 0;

		err = mbind_range(vma, start, end, new);

		if (!list_empty(&pagelist))
			nr_failed = migrate_pages(&pagelist, new_vma_page,
						(unsigned long)vma);

		if (!err && nr_failed && (flags & MPOL_MF_STRICT))
			err = -EIO;
	}

	up_write(&mm->mmap_sem);
	mpol_free(new);
	return err;
}

/*
 * User space interface with variable sized bitmaps for nodelists.
 */

/* Copy a node mask from user space. */
static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
		     unsigned long maxnode)
{
	unsigned long k;
	unsigned long nlongs;
	unsigned long endmask;

	--maxnode;
	nodes_clear(*nodes);
	if (maxnode == 0 || !nmask)
		return 0;
	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
		return -EINVAL;

	nlongs = BITS_TO_LONGS(maxnode);
	if ((maxnode % BITS_PER_LONG) == 0)
		endmask = ~0UL;
	else
		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;

	/* When the user specified more nodes than supported just check
	   if the non supported part is all zero. */
	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
		if (nlongs > PAGE_SIZE/sizeof(long))
			return -EINVAL;
		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
			unsigned long t;
			if (get_user(t, nmask + k))
				return -EFAULT;
			if (k == nlongs - 1) {
				if (t & endmask)
					return -EINVAL;
			} else if (t)
				return -EINVAL;
		}
		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
		endmask = ~0UL;
	}

	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
		return -EFAULT;
	nodes_addr(*nodes)[nlongs-1] &= endmask;
	return 0;
}
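
/*
 * Illustrative only: with BITS_PER_LONG == 64, a caller passing
 * maxnode = 65 leaves 64 bits of interest after the decrement, so
 * nlongs = 1 and endmask = ~0UL.  Passing maxnode = 66 leaves 65 bits,
 * so nlongs = 2 and endmask = (1UL << 1) - 1 = 0x1, masking off the 63
 * unused bits of the second long.
 */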

/* Copy a kernel node mask to user space */
static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
			      nodemask_t *nodes)
{
	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);

	if (copy > nbytes) {
		if (copy > PAGE_SIZE)
			return -EINVAL;
		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
			return -EFAULT;
		copy = nbytes;
	}
	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
}

asmlinkage long sys_mbind(unsigned long start, unsigned long len,
			unsigned long mode,
			unsigned long __user *nmask, unsigned long maxnode,
			unsigned flags)
{
	nodemask_t nodes;
	int err;

	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
#ifdef CONFIG_CPUSETS
	/* Restrict the nodes to the allowed nodes in the cpuset */
	nodes_and(nodes, nodes, current->mems_allowed);
#endif
	return do_mbind(start, len, mode, &nodes, flags);
}
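
/*
 * Illustrative only: a userspace sketch of the move semantics routed
 * through this entry point, assuming libnuma's <numaif.h> wrapper and
 * a hypothetical page-aligned "addr"/"length".  MPOL_MF_MOVE also
 * migrates already-faulted pages mapped only by this process, instead
 * of just policing future faults:
 *
 *	unsigned long mask = 1UL << 1;		// node 1
 *	if (mbind(addr, length, MPOL_BIND, &mask, 3,
 *		  MPOL_MF_MOVE | MPOL_MF_STRICT) != 0)
 *		perror("mbind");
 */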

/* Set the process memory policy */
asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
		unsigned long maxnode)
{
	int err;
	nodemask_t nodes;

	if (mode < 0 || mode > MPOL_MAX)
		return -EINVAL;
	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_set_mempolicy(mode, &nodes);
}

asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
		const unsigned long __user *old_nodes,
		const unsigned long __user *new_nodes)
{
	struct mm_struct *mm;
	struct task_struct *task;
	nodemask_t old;
	nodemask_t new;
	nodemask_t task_nodes;
	int err;

	err = get_nodes(&old, old_nodes, maxnode);
	if (err)
		return err;

	err = get_nodes(&new, new_nodes, maxnode);
	if (err)
		return err;

	/* Find the mm_struct */
	read_lock(&tasklist_lock);
	task = pid ?
		find_task_by_pid_ns(pid, current->nsproxy->pid_ns) : current;
	if (!task) {
		read_unlock(&tasklist_lock);
		return -ESRCH;
	}
	mm = get_task_mm(task);
	read_unlock(&tasklist_lock);

	if (!mm)
		return -EINVAL;

	/*
	 * Check if this process has the right to modify the specified
	 * process. The right exists if the process has administrative
	 * capabilities, superuser privileges or the same
	 * userid as the target process.
	 */
	if ((current->euid != task->suid) && (current->euid != task->uid) &&
	    (current->uid != task->suid) && (current->uid != task->uid) &&
	    !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out;
	}

	task_nodes = cpuset_mems_allowed(task);
	/* Is the user allowed to access the target nodes? */
	if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out;
	}

	if (!nodes_subset(new, node_states[N_HIGH_MEMORY])) {
		err = -EINVAL;
		goto out;
	}

	err = security_task_movememory(task);
	if (err)
		goto out;

	err = do_migrate_pages(mm, &old, &new,
		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
out:
	mmput(mm);
	return err;
}
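
/*
 * Illustrative only: the same syscall from userspace via libnuma's
 * <numaif.h> wrapper, moving a target process's pages from node 0 to
 * node 2 ("target_pid" is hypothetical; maxnode = 4 covers bits 0-2):
 *
 *	unsigned long old = 1UL << 0, new = 1UL << 2;
 *	long left = migrate_pages(target_pid, 4, &old, &new);
 *	// left >= 0: pages that could not be moved; -1: check errno
 */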

/* Retrieve NUMA policy */
asmlinkage long sys_get_mempolicy(int __user *policy,
				unsigned long __user *nmask,
				unsigned long maxnode,
				unsigned long addr, unsigned long flags)
{
	int err;
	int uninitialized_var(pval);
	nodemask_t nodes;

	if (nmask != NULL && maxnode < MAX_NUMNODES)
		return -EINVAL;

	err = do_get_mempolicy(&pval, &nodes, addr, flags);

	if (err)
		return err;

	if (policy && put_user(pval, policy))
		return -EFAULT;

	if (nmask)
		err = copy_nodes_to_user(nmask, maxnode, &nodes);

	return err;
}
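
/*
 * Illustrative only: querying which node backs a page through this
 * call, assuming libnuma's <numaif.h> wrapper; the MPOL_F_NODE |
 * MPOL_F_ADDR path above resolves the page via get_user_pages(), so
 * it is faulted in if necessary.
 *
 *	int node;
 *	if (get_mempolicy(&node, NULL, 0, addr,
 *			  MPOL_F_NODE | MPOL_F_ADDR) == 0)
 *		printf("page at %p is on node %d\n", addr, node);
 */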
10198bccd85fSChristoph Lameter 
10201da177e4SLinus Torvalds #ifdef CONFIG_COMPAT
10211da177e4SLinus Torvalds 
10221da177e4SLinus Torvalds asmlinkage long compat_sys_get_mempolicy(int __user *policy,
10231da177e4SLinus Torvalds 				     compat_ulong_t __user *nmask,
10241da177e4SLinus Torvalds 				     compat_ulong_t maxnode,
10251da177e4SLinus Torvalds 				     compat_ulong_t addr, compat_ulong_t flags)
10261da177e4SLinus Torvalds {
10271da177e4SLinus Torvalds 	long err;
10281da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
10291da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
10301da177e4SLinus Torvalds 	DECLARE_BITMAP(bm, MAX_NUMNODES);
10311da177e4SLinus Torvalds 
10321da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
10331da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
10341da177e4SLinus Torvalds 
10351da177e4SLinus Torvalds 	if (nmask)
10361da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
10371da177e4SLinus Torvalds 
10381da177e4SLinus Torvalds 	err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
10391da177e4SLinus Torvalds 
10401da177e4SLinus Torvalds 	if (!err && nmask) {
10411da177e4SLinus Torvalds 		err = copy_from_user(bm, nm, alloc_size);
10421da177e4SLinus Torvalds 		/* ensure entire bitmap is zeroed */
10431da177e4SLinus Torvalds 		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
10441da177e4SLinus Torvalds 		err |= compat_put_bitmap(nmask, bm, nr_bits);
10451da177e4SLinus Torvalds 	}
10461da177e4SLinus Torvalds 
10471da177e4SLinus Torvalds 	return err;
10481da177e4SLinus Torvalds }
10491da177e4SLinus Torvalds 
10501da177e4SLinus Torvalds asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
10511da177e4SLinus Torvalds 				     compat_ulong_t maxnode)
10521da177e4SLinus Torvalds {
10531da177e4SLinus Torvalds 	long err = 0;
10541da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
10551da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
10561da177e4SLinus Torvalds 	DECLARE_BITMAP(bm, MAX_NUMNODES);
10571da177e4SLinus Torvalds 
10581da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
10591da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
10601da177e4SLinus Torvalds 
10611da177e4SLinus Torvalds 	if (nmask) {
10621da177e4SLinus Torvalds 		err = compat_get_bitmap(bm, nmask, nr_bits);
10631da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
10641da177e4SLinus Torvalds 		err |= copy_to_user(nm, bm, alloc_size);
10651da177e4SLinus Torvalds 	}
10661da177e4SLinus Torvalds 
10671da177e4SLinus Torvalds 	if (err)
10681da177e4SLinus Torvalds 		return -EFAULT;
10691da177e4SLinus Torvalds 
10701da177e4SLinus Torvalds 	return sys_set_mempolicy(mode, nm, nr_bits+1);
10711da177e4SLinus Torvalds }
10721da177e4SLinus Torvalds 
10731da177e4SLinus Torvalds asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
10741da177e4SLinus Torvalds 			     compat_ulong_t mode, compat_ulong_t __user *nmask,
10751da177e4SLinus Torvalds 			     compat_ulong_t maxnode, compat_ulong_t flags)
10761da177e4SLinus Torvalds {
10771da177e4SLinus Torvalds 	long err = 0;
10781da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
10791da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
1080dfcd3c0dSAndi Kleen 	nodemask_t bm;
10811da177e4SLinus Torvalds 
10821da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
10831da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
10841da177e4SLinus Torvalds 
10851da177e4SLinus Torvalds 	if (nmask) {
1086dfcd3c0dSAndi Kleen 		err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
10871da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
1088dfcd3c0dSAndi Kleen 		err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
10891da177e4SLinus Torvalds 	}
10901da177e4SLinus Torvalds 
10911da177e4SLinus Torvalds 	if (err)
10921da177e4SLinus Torvalds 		return -EFAULT;
10931da177e4SLinus Torvalds 
10941da177e4SLinus Torvalds 	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
10951da177e4SLinus Torvalds }
10961da177e4SLinus Torvalds 
10971da177e4SLinus Torvalds #endif
10981da177e4SLinus Torvalds 
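/*
 * Illustrative userspace sketch (editorial addition, not part of this
 * file): exercising the mbind() and set_mempolicy() entry points whose
 * compat wrappers appear above.  Assumes a machine with at least two
 * memory nodes; uses the libnuma <numaif.h> wrappers (link with -lnuma)
 * and trims error handling for brevity.
 */
#include <numaif.h>
#include <sys/mman.h>
#include <stddef.h>

static void example_bind_and_interleave(void)
{
	unsigned long bind_mask = 1UL << 0;		 /* node 0 only */
	unsigned long il_mask = (1UL << 0) | (1UL << 1); /* nodes 0 and 1 */
	size_t len = 1UL << 20;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	/* VMA policy: pages faulted in [p, p + len) come from node 0 */
	mbind(p, len, MPOL_BIND, &bind_mask, sizeof(bind_mask) * 8, 0);

	/* Task policy: interleave later allocations over nodes 0 and 1 */
	set_mempolicy(MPOL_INTERLEAVE, &il_mask, sizeof(il_mask) * 8);
}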
1099480eccf9SLee Schermerhorn /*
1100480eccf9SLee Schermerhorn  * get_vma_policy(@task, @vma, @addr)
1101480eccf9SLee Schermerhorn  * @task - task for fallback if vma policy == default
1102480eccf9SLee Schermerhorn  * @vma   - virtual memory area whose policy is sought
1103480eccf9SLee Schermerhorn  * @addr  - address in @vma for shared policy lookup
1104480eccf9SLee Schermerhorn  *
1105480eccf9SLee Schermerhorn  * Returns effective policy for a VMA at specified address.
1106480eccf9SLee Schermerhorn  * Falls back to @task or system default policy, as necessary.
1107480eccf9SLee Schermerhorn  * The returned policy carries an extra reference if it is a shared,
1108480eccf9SLee Schermerhorn  * vma, or some other task's policy [show_numa_map() can pass
1109480eccf9SLee Schermerhorn  * @task != current].  It is the caller's responsibility to
1110480eccf9SLee Schermerhorn  * free the reference in these cases.
1111480eccf9SLee Schermerhorn  */
111248fce342SChristoph Lameter static struct mempolicy * get_vma_policy(struct task_struct *task,
111348fce342SChristoph Lameter 		struct vm_area_struct *vma, unsigned long addr)
11141da177e4SLinus Torvalds {
11156e21c8f1SChristoph Lameter 	struct mempolicy *pol = task->mempolicy;
1116480eccf9SLee Schermerhorn 	int shared_pol = 0;
11171da177e4SLinus Torvalds 
11181da177e4SLinus Torvalds 	if (vma) {
1119480eccf9SLee Schermerhorn 		if (vma->vm_ops && vma->vm_ops->get_policy) {
11201da177e4SLinus Torvalds 			pol = vma->vm_ops->get_policy(vma, addr);
1121480eccf9SLee Schermerhorn 			shared_pol = 1;	/* if pol non-NULL, add ref below */
1122480eccf9SLee Schermerhorn 		} else if (vma->vm_policy &&
11231da177e4SLinus Torvalds 				vma->vm_policy->policy != MPOL_DEFAULT)
11241da177e4SLinus Torvalds 			pol = vma->vm_policy;
11251da177e4SLinus Torvalds 	}
11261da177e4SLinus Torvalds 	if (!pol)
11271da177e4SLinus Torvalds 		pol = &default_policy;
1128480eccf9SLee Schermerhorn 	else if (!shared_pol && pol != current->mempolicy)
1129480eccf9SLee Schermerhorn 		mpol_get(pol);	/* vma or other task's policy */
11301da177e4SLinus Torvalds 	return pol;
11311da177e4SLinus Torvalds }
11321da177e4SLinus Torvalds 
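/*
 * Illustrative caller pattern (editorial sketch): every consumer of
 * get_vma_policy() must mirror the unref logic used by alloc_page_vma()
 * and show_numa_map() below:
 *
 *	struct mempolicy *pol = get_vma_policy(current, vma, addr);
 *	... use pol ...
 *	if (pol != &default_policy && pol != current->mempolicy)
 *		__mpol_free(pol);	(drop the extra reference)
 */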
11331da177e4SLinus Torvalds /* Return a zonelist representing a mempolicy */
1134dd0fc66fSAl Viro static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy)
11351da177e4SLinus Torvalds {
11361da177e4SLinus Torvalds 	int nd;
11371da177e4SLinus Torvalds 
11381da177e4SLinus Torvalds 	switch (policy->policy) {
11391da177e4SLinus Torvalds 	case MPOL_PREFERRED:
11401da177e4SLinus Torvalds 		nd = policy->v.preferred_node;
11411da177e4SLinus Torvalds 		if (nd < 0)
11421da177e4SLinus Torvalds 			nd = numa_node_id();
11431da177e4SLinus Torvalds 		break;
11441da177e4SLinus Torvalds 	case MPOL_BIND:
11451da177e4SLinus Torvalds 		/* Lower zones don't get a policy applied */
11461da177e4SLinus Torvalds 		/* Careful: current->mems_allowed might have moved */
114719655d34SChristoph Lameter 		if (gfp_zone(gfp) >= policy_zone)
11481da177e4SLinus Torvalds 			if (cpuset_zonelist_valid_mems_allowed(policy->v.zonelist))
11491da177e4SLinus Torvalds 				return policy->v.zonelist;
11501da177e4SLinus Torvalds 		/*FALL THROUGH*/
11511da177e4SLinus Torvalds 	case MPOL_INTERLEAVE: /* should not happen */
11521da177e4SLinus Torvalds 	case MPOL_DEFAULT:
11531da177e4SLinus Torvalds 		nd = numa_node_id();
11541da177e4SLinus Torvalds 		break;
11551da177e4SLinus Torvalds 	default:
11561da177e4SLinus Torvalds 		nd = 0;
11571da177e4SLinus Torvalds 		BUG();
11581da177e4SLinus Torvalds 	}
1159af4ca457SAl Viro 	return NODE_DATA(nd)->node_zonelists + gfp_zone(gfp);
11601da177e4SLinus Torvalds }
11611da177e4SLinus Torvalds 
11621da177e4SLinus Torvalds /* Do dynamic interleaving for a process */
11631da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy)
11641da177e4SLinus Torvalds {
11651da177e4SLinus Torvalds 	unsigned nid, next;
11661da177e4SLinus Torvalds 	struct task_struct *me = current;
11671da177e4SLinus Torvalds 
11681da177e4SLinus Torvalds 	nid = me->il_next;
1169dfcd3c0dSAndi Kleen 	next = next_node(nid, policy->v.nodes);
11701da177e4SLinus Torvalds 	if (next >= MAX_NUMNODES)
1171dfcd3c0dSAndi Kleen 		next = first_node(policy->v.nodes);
11721da177e4SLinus Torvalds 	me->il_next = next;
11731da177e4SLinus Torvalds 	return nid;
11741da177e4SLinus Torvalds }
11751da177e4SLinus Torvalds 
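/*
 * Worked example (editorial): with policy->v.nodes = {0,2,3} and
 * il_next == 2, successive calls return 2, 3, 0, 2, ...; once
 * next_node() runs past the last set bit it returns MAX_NUMNODES and
 * the cursor wraps back via first_node().
 */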
1176dc85da15SChristoph Lameter /*
1177dc85da15SChristoph Lameter  * Depending on the memory policy provide a node from which to allocate the
1178dc85da15SChristoph Lameter  * next slab entry.
1179dc85da15SChristoph Lameter  */
1180dc85da15SChristoph Lameter unsigned slab_node(struct mempolicy *policy)
1181dc85da15SChristoph Lameter {
1182765c4507SChristoph Lameter 	int pol = policy ? policy->policy : MPOL_DEFAULT;
1183765c4507SChristoph Lameter 
1184765c4507SChristoph Lameter 	switch (pol) {
1185dc85da15SChristoph Lameter 	case MPOL_INTERLEAVE:
1186dc85da15SChristoph Lameter 		return interleave_nodes(policy);
1187dc85da15SChristoph Lameter 
1188dc85da15SChristoph Lameter 	case MPOL_BIND:
1189dc85da15SChristoph Lameter 		/*
1190dc85da15SChristoph Lameter 		 * Follow bind policy behavior and start allocation at the
1191dc85da15SChristoph Lameter 		 * first node.
1192dc85da15SChristoph Lameter 		 */
119389fa3024SChristoph Lameter 		return zone_to_nid(policy->v.zonelist->zones[0]);
1194dc85da15SChristoph Lameter 
1195dc85da15SChristoph Lameter 	case MPOL_PREFERRED:
1196dc85da15SChristoph Lameter 		if (policy->v.preferred_node >= 0)
1197dc85da15SChristoph Lameter 			return policy->v.preferred_node;
1198dc85da15SChristoph Lameter 		/* Fall through */
1199dc85da15SChristoph Lameter 
1200dc85da15SChristoph Lameter 	default:
1201dc85da15SChristoph Lameter 		return numa_node_id();
1202dc85da15SChristoph Lameter 	}
1203dc85da15SChristoph Lameter }
1204dc85da15SChristoph Lameter 
12051da177e4SLinus Torvalds /* Do static interleaving for a VMA with known offset. */
12061da177e4SLinus Torvalds static unsigned offset_il_node(struct mempolicy *pol,
12071da177e4SLinus Torvalds 		struct vm_area_struct *vma, unsigned long off)
12081da177e4SLinus Torvalds {
1209dfcd3c0dSAndi Kleen 	unsigned nnodes = nodes_weight(pol->v.nodes);
12101da177e4SLinus Torvalds 	unsigned target = (unsigned)off % nnodes;
12111da177e4SLinus Torvalds 	int c;
12121da177e4SLinus Torvalds 	int nid = -1;
12131da177e4SLinus Torvalds 
12141da177e4SLinus Torvalds 	c = 0;
12151da177e4SLinus Torvalds 	do {
1216dfcd3c0dSAndi Kleen 		nid = next_node(nid, pol->v.nodes);
12171da177e4SLinus Torvalds 		c++;
12181da177e4SLinus Torvalds 	} while (c <= target);
12191da177e4SLinus Torvalds 	return nid;
12201da177e4SLinus Torvalds }
12211da177e4SLinus Torvalds 
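/*
 * Worked example (editorial): with pol->v.nodes = {1,3,5} and off == 7,
 * nnodes == 3 and target == 7 % 3 == 1.  Starting from nid == -1 the
 * loop visits node 1 (c == 1), then node 3 (c == 2 > target) and stops:
 * the result is the target-th set bit, counting from zero.
 */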
12225da7ca86SChristoph Lameter /* Determine a node number for interleave */
12235da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol,
12245da7ca86SChristoph Lameter 		 struct vm_area_struct *vma, unsigned long addr, int shift)
12255da7ca86SChristoph Lameter {
12265da7ca86SChristoph Lameter 	if (vma) {
12275da7ca86SChristoph Lameter 		unsigned long off;
12285da7ca86SChristoph Lameter 
12293b98b087SNishanth Aravamudan 		/*
12303b98b087SNishanth Aravamudan 		 * for small pages, there is no difference between
12313b98b087SNishanth Aravamudan 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
12323b98b087SNishanth Aravamudan 		 * for huge pages, since vm_pgoff is in units of small
12333b98b087SNishanth Aravamudan 		 * pages, we need to shift off the always 0 bits to get
12343b98b087SNishanth Aravamudan 		 * a useful offset.
12353b98b087SNishanth Aravamudan 		 */
12363b98b087SNishanth Aravamudan 		BUG_ON(shift < PAGE_SHIFT);
12373b98b087SNishanth Aravamudan 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
12385da7ca86SChristoph Lameter 		off += (addr - vma->vm_start) >> shift;
12395da7ca86SChristoph Lameter 		return offset_il_node(pol, vma, off);
12405da7ca86SChristoph Lameter 	} else
12415da7ca86SChristoph Lameter 		return interleave_nodes(pol);
12425da7ca86SChristoph Lameter }
12435da7ca86SChristoph Lameter 
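/*
 * Worked example (editorial): on x86-64 with 2MB huge pages,
 * shift == HPAGE_SHIFT == 21 and PAGE_SHIFT == 12; vm_pgoff is kept in
 * 4KB units, so vm_pgoff >> 9 converts it to huge-page units before the
 * in-vma offset (addr - vm_start) >> 21 is added.
 */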
124400ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS
1245480eccf9SLee Schermerhorn /*
1246480eccf9SLee Schermerhorn  * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
1247480eccf9SLee Schermerhorn  * @vma = virtual memory area whose policy is sought
1248480eccf9SLee Schermerhorn  * @addr = address in @vma for shared policy lookup and interleave policy
1249480eccf9SLee Schermerhorn  * @gfp_flags = for requested zone
1250480eccf9SLee Schermerhorn  * @mpol = pointer to mempolicy pointer for reference counted 'BIND policy
1251480eccf9SLee Schermerhorn  *
1252480eccf9SLee Schermerhorn  * Returns a zonelist suitable for a huge page allocation.
1253480eccf9SLee Schermerhorn  * If the effective policy is 'BIND, returns pointer to policy's zonelist.
1254480eccf9SLee Schermerhorn  * If it is also a policy for which get_vma_policy() returns an extra
1255480eccf9SLee Schermerhorn  * reference, we must hold that reference until after allocation.
1256480eccf9SLee Schermerhorn  * In that case, return policy via @mpol so hugetlb allocation can drop
1257480eccf9SLee Schermerhorn  * the reference.  For non-'BIND referenced policies, we can/do drop the
1258480eccf9SLee Schermerhorn  * reference here, so the caller doesn't need to know about the special case
1259480eccf9SLee Schermerhorn  * for default and current task policy.
1260480eccf9SLee Schermerhorn  */
1261396faf03SMel Gorman struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
1262480eccf9SLee Schermerhorn 				gfp_t gfp_flags, struct mempolicy **mpol)
12635da7ca86SChristoph Lameter {
12645da7ca86SChristoph Lameter 	struct mempolicy *pol = get_vma_policy(current, vma, addr);
1265480eccf9SLee Schermerhorn 	struct zonelist *zl;
12665da7ca86SChristoph Lameter 
1267480eccf9SLee Schermerhorn 	*mpol = NULL;		/* probably no unref needed */
12685da7ca86SChristoph Lameter 	if (pol->policy == MPOL_INTERLEAVE) {
12695da7ca86SChristoph Lameter 		unsigned nid;
12705da7ca86SChristoph Lameter 
12715da7ca86SChristoph Lameter 		nid = interleave_nid(pol, vma, addr, HPAGE_SHIFT);
1272480eccf9SLee Schermerhorn 		__mpol_free(pol);		/* finished with pol */
1273396faf03SMel Gorman 		return NODE_DATA(nid)->node_zonelists + gfp_zone(gfp_flags);
12745da7ca86SChristoph Lameter 	}
1275480eccf9SLee Schermerhorn 
1276480eccf9SLee Schermerhorn 	zl = zonelist_policy(GFP_HIGHUSER, pol);
1277480eccf9SLee Schermerhorn 	if (unlikely(pol != &default_policy && pol != current->mempolicy)) {
1278480eccf9SLee Schermerhorn 		if (pol->policy != MPOL_BIND)
1279480eccf9SLee Schermerhorn 			__mpol_free(pol);	/* finished with pol */
1280480eccf9SLee Schermerhorn 		else
1281480eccf9SLee Schermerhorn 			*mpol = pol;	/* unref needed after allocation */
1282480eccf9SLee Schermerhorn 	}
1283480eccf9SLee Schermerhorn 	return zl;
12845da7ca86SChristoph Lameter }
128500ac59adSChen, Kenneth W #endif
12865da7ca86SChristoph Lameter 
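/*
 * Illustrative caller pattern (editorial sketch): the hugetlb side is
 * expected to allocate from the returned zonelist first and only then
 * drop any 'BIND reference handed back through @mpol:
 *
 *	struct mempolicy *mpol;
 *	struct zonelist *zl = huge_zonelist(vma, addr, gfp, &mpol);
 *	... allocate a huge page from zl ...
 *	if (mpol)
 *		__mpol_free(mpol);
 */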
12871da177e4SLinus Torvalds /* Allocate a page in interleaved policy.
12881da177e4SLinus Torvalds    Own path because it needs to do special accounting. */
1289662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1290662f3a0bSAndi Kleen 					unsigned nid)
12911da177e4SLinus Torvalds {
12921da177e4SLinus Torvalds 	struct zonelist *zl;
12931da177e4SLinus Torvalds 	struct page *page;
12941da177e4SLinus Torvalds 
1295af4ca457SAl Viro 	zl = NODE_DATA(nid)->node_zonelists + gfp_zone(gfp);
12961da177e4SLinus Torvalds 	page = __alloc_pages(gfp, order, zl);
1297ca889e6cSChristoph Lameter 	if (page && page_zone(page) == zl->zones[0])
1298ca889e6cSChristoph Lameter 		inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
12991da177e4SLinus Torvalds 	return page;
13001da177e4SLinus Torvalds }
13011da177e4SLinus Torvalds 
13021da177e4SLinus Torvalds /**
13031da177e4SLinus Torvalds  * 	alloc_page_vma	- Allocate a page for a VMA.
13041da177e4SLinus Torvalds  *
13051da177e4SLinus Torvalds  * 	@gfp:
13061da177e4SLinus Torvalds  *      %GFP_USER    user allocation,
13071da177e4SLinus Torvalds  *      %GFP_KERNEL  kernel allocations,
13081da177e4SLinus Torvalds  *      %GFP_HIGHMEM highmem/user allocations,
13091da177e4SLinus Torvalds  *      %GFP_FS      allocation should not call back into a file system,
13101da177e4SLinus Torvalds  *      %GFP_ATOMIC  don't sleep.
13111da177e4SLinus Torvalds  *
13121da177e4SLinus Torvalds  * 	@vma:  Pointer to VMA or NULL if not available.
13131da177e4SLinus Torvalds  *	@addr: Virtual Address of the allocation. Must be inside the VMA.
13141da177e4SLinus Torvalds  *
13151da177e4SLinus Torvalds  * 	This function allocates a page from the kernel page pool and applies
13161da177e4SLinus Torvalds  *	a NUMA policy associated with the VMA or the current process.
13171da177e4SLinus Torvalds  *	When VMA is not NULL caller must hold down_read on the mmap_sem of the
13181da177e4SLinus Torvalds  *	mm_struct of the VMA to prevent it from going away. Should be used for
13191da177e4SLinus Torvalds  *	all allocations for pages that will be mapped into
13201da177e4SLinus Torvalds  * 	user space. Returns NULL when no page can be allocated.
13211da177e4SLinus Torvalds  *
13221da177e4SLinus Torvalds  *	Should be called with the mmap_sem of the vma held.
13231da177e4SLinus Torvalds  */
13241da177e4SLinus Torvalds struct page *
1325dd0fc66fSAl Viro alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
13261da177e4SLinus Torvalds {
13276e21c8f1SChristoph Lameter 	struct mempolicy *pol = get_vma_policy(current, vma, addr);
1328480eccf9SLee Schermerhorn 	struct zonelist *zl;
13291da177e4SLinus Torvalds 
1330cf2a473cSPaul Jackson 	cpuset_update_task_memory_state();
13311da177e4SLinus Torvalds 
13321da177e4SLinus Torvalds 	if (unlikely(pol->policy == MPOL_INTERLEAVE)) {
13331da177e4SLinus Torvalds 		unsigned nid;
13345da7ca86SChristoph Lameter 
13355da7ca86SChristoph Lameter 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
13361da177e4SLinus Torvalds 		return alloc_page_interleave(gfp, 0, nid);
13371da177e4SLinus Torvalds 	}
1338480eccf9SLee Schermerhorn 	zl = zonelist_policy(gfp, pol);
1339480eccf9SLee Schermerhorn 	if (pol != &default_policy && pol != current->mempolicy) {
1340480eccf9SLee Schermerhorn 		/*
1341480eccf9SLee Schermerhorn 		 * slow path: ref counted policy -- shared or vma
1342480eccf9SLee Schermerhorn 		 */
1343480eccf9SLee Schermerhorn 		struct page *page =  __alloc_pages(gfp, 0, zl);
1344480eccf9SLee Schermerhorn 		__mpol_free(pol);
1345480eccf9SLee Schermerhorn 		return page;
1346480eccf9SLee Schermerhorn 	}
1347480eccf9SLee Schermerhorn 	/*
1348480eccf9SLee Schermerhorn 	 * fast path:  default or task policy
1349480eccf9SLee Schermerhorn 	 */
1350480eccf9SLee Schermerhorn 	return __alloc_pages(gfp, 0, zl);
13511da177e4SLinus Torvalds }
13521da177e4SLinus Torvalds 
13531da177e4SLinus Torvalds /**
13541da177e4SLinus Torvalds  * 	alloc_pages_current - Allocate pages.
13551da177e4SLinus Torvalds  *
13561da177e4SLinus Torvalds  *	@gfp:
13571da177e4SLinus Torvalds  *		%GFP_USER   user allocation,
13581da177e4SLinus Torvalds  *      	%GFP_KERNEL kernel allocation,
13591da177e4SLinus Torvalds  *      	%GFP_HIGHMEM highmem allocation,
13601da177e4SLinus Torvalds  *      	%GFP_FS     don't call back into a file system,
13611da177e4SLinus Torvalds  *      	%GFP_ATOMIC don't sleep.
13621da177e4SLinus Torvalds  *	@order: Power of two of allocation size in pages. 0 is a single page.
13631da177e4SLinus Torvalds  *
13641da177e4SLinus Torvalds  *	Allocate a page from the kernel page pool.  When not in
13651da177e4SLinus Torvalds  *	interrupt context, apply the current process' NUMA policy.
13661da177e4SLinus Torvalds  *	Returns NULL when no page can be allocated.
13671da177e4SLinus Torvalds  *
1368cf2a473cSPaul Jackson  *	Don't call cpuset_update_task_memory_state() unless
13691da177e4SLinus Torvalds  *	1) it's ok to take cpuset_sem (can WAIT), and
13701da177e4SLinus Torvalds  *	2) allocating for current task (not interrupt).
13711da177e4SLinus Torvalds  */
1372dd0fc66fSAl Viro struct page *alloc_pages_current(gfp_t gfp, unsigned order)
13731da177e4SLinus Torvalds {
13741da177e4SLinus Torvalds 	struct mempolicy *pol = current->mempolicy;
13751da177e4SLinus Torvalds 
13761da177e4SLinus Torvalds 	if ((gfp & __GFP_WAIT) && !in_interrupt())
1377cf2a473cSPaul Jackson 		cpuset_update_task_memory_state();
13789b819d20SChristoph Lameter 	if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
13791da177e4SLinus Torvalds 		pol = &default_policy;
13801da177e4SLinus Torvalds 	if (pol->policy == MPOL_INTERLEAVE)
13811da177e4SLinus Torvalds 		return alloc_page_interleave(gfp, order, interleave_nodes(pol));
13821da177e4SLinus Torvalds 	return __alloc_pages(gfp, order, zonelist_policy(gfp, pol));
13831da177e4SLinus Torvalds }
13841da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_pages_current);
13851da177e4SLinus Torvalds 
13864225399aSPaul Jackson /*
13874225399aSPaul Jackson  * If mpol_copy() sees current->cpuset == cpuset_being_rebound, then it
13884225399aSPaul Jackson  * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
13894225399aSPaul Jackson  * with the mems_allowed returned by cpuset_mems_allowed().  This
13904225399aSPaul Jackson  * keeps mempolicies cpuset relative after its cpuset moves.  See
13914225399aSPaul Jackson  * further kernel/cpuset.c update_nodemask().
13924225399aSPaul Jackson  */
13934225399aSPaul Jackson 
13941da177e4SLinus Torvalds /* Slow path of a mempolicy copy */
13951da177e4SLinus Torvalds struct mempolicy *__mpol_copy(struct mempolicy *old)
13961da177e4SLinus Torvalds {
13971da177e4SLinus Torvalds 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
13981da177e4SLinus Torvalds 
13991da177e4SLinus Torvalds 	if (!new)
14001da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
14014225399aSPaul Jackson 	if (current_cpuset_is_being_rebound()) {
14024225399aSPaul Jackson 		nodemask_t mems = cpuset_mems_allowed(current);
14034225399aSPaul Jackson 		mpol_rebind_policy(old, &mems);
14044225399aSPaul Jackson 	}
14051da177e4SLinus Torvalds 	*new = *old;
14061da177e4SLinus Torvalds 	atomic_set(&new->refcnt, 1);
14071da177e4SLinus Torvalds 	if (new->policy == MPOL_BIND) {
14081da177e4SLinus Torvalds 		int sz = ksize(old->v.zonelist);
1409e94b1766SChristoph Lameter 		new->v.zonelist = kmemdup(old->v.zonelist, sz, GFP_KERNEL);
14101da177e4SLinus Torvalds 		if (!new->v.zonelist) {
14111da177e4SLinus Torvalds 			kmem_cache_free(policy_cache, new);
14121da177e4SLinus Torvalds 			return ERR_PTR(-ENOMEM);
14131da177e4SLinus Torvalds 		}
14141da177e4SLinus Torvalds 	}
14151da177e4SLinus Torvalds 	return new;
14161da177e4SLinus Torvalds }
14171da177e4SLinus Torvalds 
14181da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */
14191da177e4SLinus Torvalds int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
14201da177e4SLinus Torvalds {
14211da177e4SLinus Torvalds 	if (!a || !b)
14221da177e4SLinus Torvalds 		return 0;
14231da177e4SLinus Torvalds 	if (a->policy != b->policy)
14241da177e4SLinus Torvalds 		return 0;
14251da177e4SLinus Torvalds 	switch (a->policy) {
14261da177e4SLinus Torvalds 	case MPOL_DEFAULT:
14271da177e4SLinus Torvalds 		return 1;
14281da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
1429dfcd3c0dSAndi Kleen 		return nodes_equal(a->v.nodes, b->v.nodes);
14301da177e4SLinus Torvalds 	case MPOL_PREFERRED:
14311da177e4SLinus Torvalds 		return a->v.preferred_node == b->v.preferred_node;
14321da177e4SLinus Torvalds 	case MPOL_BIND: {
14331da177e4SLinus Torvalds 		int i;
14341da177e4SLinus Torvalds 		for (i = 0; a->v.zonelist->zones[i]; i++)
14351da177e4SLinus Torvalds 			if (a->v.zonelist->zones[i] != b->v.zonelist->zones[i])
14361da177e4SLinus Torvalds 				return 0;
14371da177e4SLinus Torvalds 		return b->v.zonelist->zones[i] == NULL;
14381da177e4SLinus Torvalds 	}
14391da177e4SLinus Torvalds 	default:
14401da177e4SLinus Torvalds 		BUG();
14411da177e4SLinus Torvalds 		return 0;
14421da177e4SLinus Torvalds 	}
14431da177e4SLinus Torvalds }
14441da177e4SLinus Torvalds 
14451da177e4SLinus Torvalds /* Slow path of a mpol destructor. */
14461da177e4SLinus Torvalds void __mpol_free(struct mempolicy *p)
14471da177e4SLinus Torvalds {
14481da177e4SLinus Torvalds 	if (!atomic_dec_and_test(&p->refcnt))
14491da177e4SLinus Torvalds 		return;
14501da177e4SLinus Torvalds 	if (p->policy == MPOL_BIND)
14511da177e4SLinus Torvalds 		kfree(p->v.zonelist);
14521da177e4SLinus Torvalds 	p->policy = MPOL_DEFAULT;
14531da177e4SLinus Torvalds 	kmem_cache_free(policy_cache, p);
14541da177e4SLinus Torvalds }
14551da177e4SLinus Torvalds 
14561da177e4SLinus Torvalds /*
14571da177e4SLinus Torvalds  * Shared memory backing store policy support.
14581da177e4SLinus Torvalds  *
14591da177e4SLinus Torvalds  * Remember policies even when nobody has shared memory mapped.
14601da177e4SLinus Torvalds  * The policies are kept in Red-Black tree linked from the inode.
14611da177e4SLinus Torvalds  * They are protected by the sp->lock spinlock, which should be held
14621da177e4SLinus Torvalds  * for any accesses to the tree.
14631da177e4SLinus Torvalds  */
14641da177e4SLinus Torvalds 
14651da177e4SLinus Torvalds /* lookup first element intersecting start-end */
14661da177e4SLinus Torvalds /* Caller holds sp->lock */
14671da177e4SLinus Torvalds static struct sp_node *
14681da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
14691da177e4SLinus Torvalds {
14701da177e4SLinus Torvalds 	struct rb_node *n = sp->root.rb_node;
14711da177e4SLinus Torvalds 
14721da177e4SLinus Torvalds 	while (n) {
14731da177e4SLinus Torvalds 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
14741da177e4SLinus Torvalds 
14751da177e4SLinus Torvalds 		if (start >= p->end)
14761da177e4SLinus Torvalds 			n = n->rb_right;
14771da177e4SLinus Torvalds 		else if (end <= p->start)
14781da177e4SLinus Torvalds 			n = n->rb_left;
14791da177e4SLinus Torvalds 		else
14801da177e4SLinus Torvalds 			break;
14811da177e4SLinus Torvalds 	}
14821da177e4SLinus Torvalds 	if (!n)
14831da177e4SLinus Torvalds 		return NULL;
14841da177e4SLinus Torvalds 	for (;;) {
14851da177e4SLinus Torvalds 		struct sp_node *w = NULL;
14861da177e4SLinus Torvalds 		struct rb_node *prev = rb_prev(n);
14871da177e4SLinus Torvalds 		if (!prev)
14881da177e4SLinus Torvalds 			break;
14891da177e4SLinus Torvalds 		w = rb_entry(prev, struct sp_node, nd);
14901da177e4SLinus Torvalds 		if (w->end <= start)
14911da177e4SLinus Torvalds 			break;
14921da177e4SLinus Torvalds 		n = prev;
14931da177e4SLinus Torvalds 	}
14941da177e4SLinus Torvalds 	return rb_entry(n, struct sp_node, nd);
14951da177e4SLinus Torvalds }
14961da177e4SLinus Torvalds 
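/*
 * Worked example (editorial): with stored ranges [0,4) [4,8) [8,12) and
 * a lookup for [3,9), the rb-tree descent can stop on [4,8); the
 * backtracking loop then steps to the predecessor [0,4), which still
 * intersects (its end 4 > start 3), and returns it as the first
 * intersecting node.
 */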
14971da177e4SLinus Torvalds /* Insert a new shared policy into the list. */
14981da177e4SLinus Torvalds /* Caller holds sp->lock */
14991da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new)
15001da177e4SLinus Torvalds {
15011da177e4SLinus Torvalds 	struct rb_node **p = &sp->root.rb_node;
15021da177e4SLinus Torvalds 	struct rb_node *parent = NULL;
15031da177e4SLinus Torvalds 	struct sp_node *nd;
15041da177e4SLinus Torvalds 
15051da177e4SLinus Torvalds 	while (*p) {
15061da177e4SLinus Torvalds 		parent = *p;
15071da177e4SLinus Torvalds 		nd = rb_entry(parent, struct sp_node, nd);
15081da177e4SLinus Torvalds 		if (new->start < nd->start)
15091da177e4SLinus Torvalds 			p = &(*p)->rb_left;
15101da177e4SLinus Torvalds 		else if (new->end > nd->end)
15111da177e4SLinus Torvalds 			p = &(*p)->rb_right;
15121da177e4SLinus Torvalds 		else
15131da177e4SLinus Torvalds 			BUG();
15141da177e4SLinus Torvalds 	}
15151da177e4SLinus Torvalds 	rb_link_node(&new->nd, parent, p);
15161da177e4SLinus Torvalds 	rb_insert_color(&new->nd, &sp->root);
1517140d5a49SPaul Mundt 	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
15181da177e4SLinus Torvalds 		 new->policy ? new->policy->policy : 0);
15191da177e4SLinus Torvalds }
15201da177e4SLinus Torvalds 
15211da177e4SLinus Torvalds /* Find shared policy intersecting idx */
15221da177e4SLinus Torvalds struct mempolicy *
15231da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
15241da177e4SLinus Torvalds {
15251da177e4SLinus Torvalds 	struct mempolicy *pol = NULL;
15261da177e4SLinus Torvalds 	struct sp_node *sn;
15271da177e4SLinus Torvalds 
15281da177e4SLinus Torvalds 	if (!sp->root.rb_node)
15291da177e4SLinus Torvalds 		return NULL;
15301da177e4SLinus Torvalds 	spin_lock(&sp->lock);
15311da177e4SLinus Torvalds 	sn = sp_lookup(sp, idx, idx+1);
15321da177e4SLinus Torvalds 	if (sn) {
15331da177e4SLinus Torvalds 		mpol_get(sn->policy);
15341da177e4SLinus Torvalds 		pol = sn->policy;
15351da177e4SLinus Torvalds 	}
15361da177e4SLinus Torvalds 	spin_unlock(&sp->lock);
15371da177e4SLinus Torvalds 	return pol;
15381da177e4SLinus Torvalds }
15391da177e4SLinus Torvalds 
15401da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n)
15411da177e4SLinus Torvalds {
1542140d5a49SPaul Mundt 	pr_debug("deleting %lx-%lx\n", n->start, n->end);
15431da177e4SLinus Torvalds 	rb_erase(&n->nd, &sp->root);
15441da177e4SLinus Torvalds 	mpol_free(n->policy);
15451da177e4SLinus Torvalds 	kmem_cache_free(sn_cache, n);
15461da177e4SLinus Torvalds }
15471da177e4SLinus Torvalds 
1548dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
1549dbcb0f19SAdrian Bunk 				struct mempolicy *pol)
15501da177e4SLinus Torvalds {
15511da177e4SLinus Torvalds 	struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
15521da177e4SLinus Torvalds 
15531da177e4SLinus Torvalds 	if (!n)
15541da177e4SLinus Torvalds 		return NULL;
15551da177e4SLinus Torvalds 	n->start = start;
15561da177e4SLinus Torvalds 	n->end = end;
15571da177e4SLinus Torvalds 	mpol_get(pol);
15581da177e4SLinus Torvalds 	n->policy = pol;
15591da177e4SLinus Torvalds 	return n;
15601da177e4SLinus Torvalds }
15611da177e4SLinus Torvalds 
15621da177e4SLinus Torvalds /* Replace a policy range. */
15631da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
15641da177e4SLinus Torvalds 				 unsigned long end, struct sp_node *new)
15651da177e4SLinus Torvalds {
15661da177e4SLinus Torvalds 	struct sp_node *n, *new2 = NULL;
15671da177e4SLinus Torvalds 
15681da177e4SLinus Torvalds restart:
15691da177e4SLinus Torvalds 	spin_lock(&sp->lock);
15701da177e4SLinus Torvalds 	n = sp_lookup(sp, start, end);
15711da177e4SLinus Torvalds 	/* Take care of old policies in the same range. */
15721da177e4SLinus Torvalds 	while (n && n->start < end) {
15731da177e4SLinus Torvalds 		struct rb_node *next = rb_next(&n->nd);
15741da177e4SLinus Torvalds 		if (n->start >= start) {
15751da177e4SLinus Torvalds 			if (n->end <= end)
15761da177e4SLinus Torvalds 				sp_delete(sp, n);
15771da177e4SLinus Torvalds 			else
15781da177e4SLinus Torvalds 				n->start = end;
15791da177e4SLinus Torvalds 		} else {
15801da177e4SLinus Torvalds 			/* Old policy spanning whole new range. */
15811da177e4SLinus Torvalds 			if (n->end > end) {
15821da177e4SLinus Torvalds 				if (!new2) {
15831da177e4SLinus Torvalds 					spin_unlock(&sp->lock);
15841da177e4SLinus Torvalds 					new2 = sp_alloc(end, n->end, n->policy);
15851da177e4SLinus Torvalds 					if (!new2)
15861da177e4SLinus Torvalds 						return -ENOMEM;
15871da177e4SLinus Torvalds 					goto restart;
15881da177e4SLinus Torvalds 				}
15891da177e4SLinus Torvalds 				n->end = start;
15901da177e4SLinus Torvalds 				sp_insert(sp, new2);
15911da177e4SLinus Torvalds 				new2 = NULL;
15921da177e4SLinus Torvalds 				break;
15931da177e4SLinus Torvalds 			} else
15941da177e4SLinus Torvalds 				n->end = start;
15951da177e4SLinus Torvalds 		}
15961da177e4SLinus Torvalds 		if (!next)
15971da177e4SLinus Torvalds 			break;
15981da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
15991da177e4SLinus Torvalds 	}
16001da177e4SLinus Torvalds 	if (new)
16011da177e4SLinus Torvalds 		sp_insert(sp, new);
16021da177e4SLinus Torvalds 	spin_unlock(&sp->lock);
16031da177e4SLinus Torvalds 	if (new2) {
16041da177e4SLinus Torvalds 		mpol_free(new2->policy);
16051da177e4SLinus Torvalds 		kmem_cache_free(sn_cache, new2);
16061da177e4SLinus Torvalds 	}
16071da177e4SLinus Torvalds 	return 0;
16081da177e4SLinus Torvalds }
16091da177e4SLinus Torvalds 
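/*
 * Worked example (editorial): an existing node spans [0,10) and a new
 * policy is installed over [3,7).  The old node covers the whole new
 * range, so it is truncated to [0,3) and a second node new2 == [7,10)
 * carrying the old policy is inserted, leaving [0,3) old, [3,7) new,
 * [7,10) old.  Because new2 is allocated with sp->lock dropped, the
 * function restarts the scan after the allocation.
 */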
16107339ff83SRobin Holt void mpol_shared_policy_init(struct shared_policy *info, int policy,
16117339ff83SRobin Holt 				nodemask_t *policy_nodes)
16127339ff83SRobin Holt {
16137339ff83SRobin Holt 	info->root = RB_ROOT;
16147339ff83SRobin Holt 	spin_lock_init(&info->lock);
16157339ff83SRobin Holt 
16167339ff83SRobin Holt 	if (policy != MPOL_DEFAULT) {
16177339ff83SRobin Holt 		struct mempolicy *newpol;
16187339ff83SRobin Holt 
16197339ff83SRobin Holt 		/* Falls back to MPOL_DEFAULT on any error */
16207339ff83SRobin Holt 		newpol = mpol_new(policy, policy_nodes);
16217339ff83SRobin Holt 		if (!IS_ERR(newpol)) {
16227339ff83SRobin Holt 			/* Create pseudo-vma that contains just the policy */
16237339ff83SRobin Holt 			struct vm_area_struct pvma;
16247339ff83SRobin Holt 
16257339ff83SRobin Holt 			memset(&pvma, 0, sizeof(struct vm_area_struct));
16267339ff83SRobin Holt 			/* Policy covers entire file */
16277339ff83SRobin Holt 			pvma.vm_end = TASK_SIZE;
16287339ff83SRobin Holt 			mpol_set_shared_policy(info, &pvma, newpol);
16297339ff83SRobin Holt 			mpol_free(newpol);
16307339ff83SRobin Holt 		}
16317339ff83SRobin Holt 	}
16327339ff83SRobin Holt }
16337339ff83SRobin Holt 
16341da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info,
16351da177e4SLinus Torvalds 			struct vm_area_struct *vma, struct mempolicy *npol)
16361da177e4SLinus Torvalds {
16371da177e4SLinus Torvalds 	int err;
16381da177e4SLinus Torvalds 	struct sp_node *new = NULL;
16391da177e4SLinus Torvalds 	unsigned long sz = vma_pages(vma);
16401da177e4SLinus Torvalds 
1641140d5a49SPaul Mundt 	pr_debug("set_shared_policy %lx sz %lu %d %lx\n",
16421da177e4SLinus Torvalds 		 vma->vm_pgoff,
16431da177e4SLinus Torvalds 		 sz, npol ? npol->policy : -1,
1644dfcd3c0dSAndi Kleen 		 npol ? nodes_addr(npol->v.nodes)[0] : -1);
16451da177e4SLinus Torvalds 
16461da177e4SLinus Torvalds 	if (npol) {
16471da177e4SLinus Torvalds 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
16481da177e4SLinus Torvalds 		if (!new)
16491da177e4SLinus Torvalds 			return -ENOMEM;
16501da177e4SLinus Torvalds 	}
16511da177e4SLinus Torvalds 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
16521da177e4SLinus Torvalds 	if (err && new)
16531da177e4SLinus Torvalds 		kmem_cache_free(sn_cache, new);
16541da177e4SLinus Torvalds 	return err;
16551da177e4SLinus Torvalds }
16561da177e4SLinus Torvalds 
16571da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */
16581da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p)
16591da177e4SLinus Torvalds {
16601da177e4SLinus Torvalds 	struct sp_node *n;
16611da177e4SLinus Torvalds 	struct rb_node *next;
16621da177e4SLinus Torvalds 
16631da177e4SLinus Torvalds 	if (!p->root.rb_node)
16641da177e4SLinus Torvalds 		return;
16651da177e4SLinus Torvalds 	spin_lock(&p->lock);
16661da177e4SLinus Torvalds 	next = rb_first(&p->root);
16671da177e4SLinus Torvalds 	while (next) {
16681da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
16691da177e4SLinus Torvalds 		next = rb_next(&n->nd);
167090c5029eSAndi Kleen 		rb_erase(&n->nd, &p->root);
16711da177e4SLinus Torvalds 		mpol_free(n->policy);
16721da177e4SLinus Torvalds 		kmem_cache_free(sn_cache, n);
16731da177e4SLinus Torvalds 	}
16741da177e4SLinus Torvalds 	spin_unlock(&p->lock);
16751da177e4SLinus Torvalds }
16761da177e4SLinus Torvalds 
16771da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */
16781da177e4SLinus Torvalds void __init numa_policy_init(void)
16791da177e4SLinus Torvalds {
1680b71636e2SPaul Mundt 	nodemask_t interleave_nodes;
1681b71636e2SPaul Mundt 	unsigned long largest = 0;
1682b71636e2SPaul Mundt 	int nid, prefer = 0;
1683b71636e2SPaul Mundt 
16841da177e4SLinus Torvalds 	policy_cache = kmem_cache_create("numa_policy",
16851da177e4SLinus Torvalds 					 sizeof(struct mempolicy),
168620c2df83SPaul Mundt 					 0, SLAB_PANIC, NULL);
16871da177e4SLinus Torvalds 
16881da177e4SLinus Torvalds 	sn_cache = kmem_cache_create("shared_policy_node",
16891da177e4SLinus Torvalds 				     sizeof(struct sp_node),
169020c2df83SPaul Mundt 				     0, SLAB_PANIC, NULL);
16911da177e4SLinus Torvalds 
1692b71636e2SPaul Mundt 	/*
1693b71636e2SPaul Mundt 	 * Set interleaving policy for system init. Interleaving is only
1694b71636e2SPaul Mundt 	 * enabled across suitably sized nodes (default is >= 16MB), or
1695b71636e2SPaul Mundt 	 * fall back to the largest node if they're all smaller.
1696b71636e2SPaul Mundt 	 */
1697b71636e2SPaul Mundt 	nodes_clear(interleave_nodes);
169856bbd65dSChristoph Lameter 	for_each_node_state(nid, N_HIGH_MEMORY) {
1699b71636e2SPaul Mundt 		unsigned long total_pages = node_present_pages(nid);
17001da177e4SLinus Torvalds 
1701b71636e2SPaul Mundt 		/* Preserve the largest node */
1702b71636e2SPaul Mundt 		if (largest < total_pages) {
1703b71636e2SPaul Mundt 			largest = total_pages;
1704b71636e2SPaul Mundt 			prefer = nid;
1705b71636e2SPaul Mundt 		}
1706b71636e2SPaul Mundt 
1707b71636e2SPaul Mundt 		/* Interleave this node? */
1708b71636e2SPaul Mundt 		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
1709b71636e2SPaul Mundt 			node_set(nid, interleave_nodes);
1710b71636e2SPaul Mundt 	}
1711b71636e2SPaul Mundt 
1712b71636e2SPaul Mundt 	/* All too small, use the largest */
1713b71636e2SPaul Mundt 	if (unlikely(nodes_empty(interleave_nodes)))
1714b71636e2SPaul Mundt 		node_set(prefer, interleave_nodes);
1715b71636e2SPaul Mundt 
1716b71636e2SPaul Mundt 	if (do_set_mempolicy(MPOL_INTERLEAVE, &interleave_nodes))
17171da177e4SLinus Torvalds 		printk(KERN_WARNING "numa_policy_init: interleaving failed\n");
17181da177e4SLinus Torvalds }
17191da177e4SLinus Torvalds 
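/*
 * Worked example (editorial): with 4KB pages, a node holding 4096
 * present pages has total_pages << PAGE_SHIFT == 4096 << 12 == 16MB,
 * exactly the (16 << 20) cutoff, so it still joins interleave_nodes;
 * anything smaller only survives as the fall-back "prefer" node.
 */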
17208bccd85fSChristoph Lameter /* Reset policy of current process to default */
17211da177e4SLinus Torvalds void numa_default_policy(void)
17221da177e4SLinus Torvalds {
17238bccd85fSChristoph Lameter 	do_set_mempolicy(MPOL_DEFAULT, NULL);
17241da177e4SLinus Torvalds }
172568860ec1SPaul Jackson 
172668860ec1SPaul Jackson /* Migrate a policy to a different set of nodes */
1727dbcb0f19SAdrian Bunk static void mpol_rebind_policy(struct mempolicy *pol,
1728dbcb0f19SAdrian Bunk 			       const nodemask_t *newmask)
172968860ec1SPaul Jackson {
173074cb2155SPaul Jackson 	nodemask_t *mpolmask;
173168860ec1SPaul Jackson 	nodemask_t tmp;
173268860ec1SPaul Jackson 
173368860ec1SPaul Jackson 	if (!pol)
173468860ec1SPaul Jackson 		return;
173574cb2155SPaul Jackson 	mpolmask = &pol->cpuset_mems_allowed;
173674cb2155SPaul Jackson 	if (nodes_equal(*mpolmask, *newmask))
173774cb2155SPaul Jackson 		return;
173868860ec1SPaul Jackson 
173968860ec1SPaul Jackson 	switch (pol->policy) {
174068860ec1SPaul Jackson 	case MPOL_DEFAULT:
174168860ec1SPaul Jackson 		break;
174268860ec1SPaul Jackson 	case MPOL_INTERLEAVE:
174374cb2155SPaul Jackson 		nodes_remap(tmp, pol->v.nodes, *mpolmask, *newmask);
174468860ec1SPaul Jackson 		pol->v.nodes = tmp;
174574cb2155SPaul Jackson 		*mpolmask = *newmask;
174674cb2155SPaul Jackson 		current->il_next = node_remap(current->il_next,
174774cb2155SPaul Jackson 						*mpolmask, *newmask);
174868860ec1SPaul Jackson 		break;
174968860ec1SPaul Jackson 	case MPOL_PREFERRED:
175068860ec1SPaul Jackson 		pol->v.preferred_node = node_remap(pol->v.preferred_node,
175174cb2155SPaul Jackson 						*mpolmask, *newmask);
175274cb2155SPaul Jackson 		*mpolmask = *newmask;
175368860ec1SPaul Jackson 		break;
175468860ec1SPaul Jackson 	case MPOL_BIND: {
175568860ec1SPaul Jackson 		nodemask_t nodes;
175668860ec1SPaul Jackson 		struct zone **z;
175768860ec1SPaul Jackson 		struct zonelist *zonelist;
175868860ec1SPaul Jackson 
175968860ec1SPaul Jackson 		nodes_clear(nodes);
176068860ec1SPaul Jackson 		for (z = pol->v.zonelist->zones; *z; z++)
176189fa3024SChristoph Lameter 			node_set(zone_to_nid(*z), nodes);
176274cb2155SPaul Jackson 		nodes_remap(tmp, nodes, *mpolmask, *newmask);
176368860ec1SPaul Jackson 		nodes = tmp;
176468860ec1SPaul Jackson 
176568860ec1SPaul Jackson 		zonelist = bind_zonelist(&nodes);
176668860ec1SPaul Jackson 
176768860ec1SPaul Jackson 		/* If no mem, bind_zonelist() fails and we keep the old
176868860ec1SPaul Jackson 		 * zonelist.  If that old zonelist has no remaining mems_allowed
176968860ec1SPaul Jackson 		 * nodes, then zonelist_policy() will "FALL THROUGH" to MPOL_DEFAULT.
177068860ec1SPaul Jackson 		 */
177168860ec1SPaul Jackson 
17728af5e2ebSKAMEZAWA Hiroyuki 		if (!IS_ERR(zonelist)) {
177368860ec1SPaul Jackson 			/* Good - got mem - substitute new zonelist */
177468860ec1SPaul Jackson 			kfree(pol->v.zonelist);
177568860ec1SPaul Jackson 			pol->v.zonelist = zonelist;
177668860ec1SPaul Jackson 		}
177774cb2155SPaul Jackson 		*mpolmask = *newmask;
177868860ec1SPaul Jackson 		break;
177968860ec1SPaul Jackson 	}
178068860ec1SPaul Jackson 	default:
178168860ec1SPaul Jackson 		BUG();
178268860ec1SPaul Jackson 		break;
178368860ec1SPaul Jackson 	}
178468860ec1SPaul Jackson }
178568860ec1SPaul Jackson 
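/*
 * Worked example (editorial): an MPOL_INTERLEAVE policy built while
 * cpuset_mems_allowed was {0,1} holds v.nodes = {1}.  After the cpuset
 * moves to {2,3}, nodes_remap() carries relative position 1 across, so
 * v.nodes becomes {3}; il_next is remapped the same way.
 */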
178668860ec1SPaul Jackson /*
178774cb2155SPaul Jackson  * Wrapper for mpol_rebind_policy() that just requires task
178874cb2155SPaul Jackson  * pointer, and updates task mempolicy.
178968860ec1SPaul Jackson  */
179074cb2155SPaul Jackson 
179174cb2155SPaul Jackson void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
179268860ec1SPaul Jackson {
179374cb2155SPaul Jackson 	mpol_rebind_policy(tsk->mempolicy, new);
179468860ec1SPaul Jackson }
17951a75a6c8SChristoph Lameter 
17961a75a6c8SChristoph Lameter /*
17974225399aSPaul Jackson  * Rebind each vma in mm to new nodemask.
17984225399aSPaul Jackson  *
17994225399aSPaul Jackson  * Call holding a reference to mm.  Takes mm->mmap_sem during call.
18004225399aSPaul Jackson  */
18014225399aSPaul Jackson 
18024225399aSPaul Jackson void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
18034225399aSPaul Jackson {
18044225399aSPaul Jackson 	struct vm_area_struct *vma;
18054225399aSPaul Jackson 
18064225399aSPaul Jackson 	down_write(&mm->mmap_sem);
18074225399aSPaul Jackson 	for (vma = mm->mmap; vma; vma = vma->vm_next)
18084225399aSPaul Jackson 		mpol_rebind_policy(vma->vm_policy, new);
18094225399aSPaul Jackson 	up_write(&mm->mmap_sem);
18104225399aSPaul Jackson }
18114225399aSPaul Jackson 
18124225399aSPaul Jackson /*
18131a75a6c8SChristoph Lameter  * Display pages allocated per node and memory policy via /proc.
18141a75a6c8SChristoph Lameter  */
18151a75a6c8SChristoph Lameter 
181615ad7cdcSHelge Deller static const char * const policy_types[] =
181715ad7cdcSHelge Deller 	{ "default", "prefer", "bind", "interleave" };
18181a75a6c8SChristoph Lameter 
18191a75a6c8SChristoph Lameter /*
18201a75a6c8SChristoph Lameter  * Convert a mempolicy into a string.
18211a75a6c8SChristoph Lameter  * Returns the number of characters in buffer (if positive)
18221a75a6c8SChristoph Lameter  * or an error (negative)
18231a75a6c8SChristoph Lameter  */
18241a75a6c8SChristoph Lameter static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
18251a75a6c8SChristoph Lameter {
18261a75a6c8SChristoph Lameter 	char *p = buffer;
18271a75a6c8SChristoph Lameter 	int l;
18281a75a6c8SChristoph Lameter 	nodemask_t nodes;
18291a75a6c8SChristoph Lameter 	int mode = pol ? pol->policy : MPOL_DEFAULT;
18301a75a6c8SChristoph Lameter 
18311a75a6c8SChristoph Lameter 	switch (mode) {
18321a75a6c8SChristoph Lameter 	case MPOL_DEFAULT:
18331a75a6c8SChristoph Lameter 		nodes_clear(nodes);
18341a75a6c8SChristoph Lameter 		break;
18351a75a6c8SChristoph Lameter 
18361a75a6c8SChristoph Lameter 	case MPOL_PREFERRED:
18371a75a6c8SChristoph Lameter 		nodes_clear(nodes);
18381a75a6c8SChristoph Lameter 		node_set(pol->v.preferred_node, nodes);
18391a75a6c8SChristoph Lameter 		break;
18401a75a6c8SChristoph Lameter 
18411a75a6c8SChristoph Lameter 	case MPOL_BIND:
18421a75a6c8SChristoph Lameter 		get_zonemask(pol, &nodes);
18431a75a6c8SChristoph Lameter 		break;
18441a75a6c8SChristoph Lameter 
18451a75a6c8SChristoph Lameter 	case MPOL_INTERLEAVE:
18461a75a6c8SChristoph Lameter 		nodes = pol->v.nodes;
18471a75a6c8SChristoph Lameter 		break;
18481a75a6c8SChristoph Lameter 
18491a75a6c8SChristoph Lameter 	default:
18501a75a6c8SChristoph Lameter 		BUG();
18511a75a6c8SChristoph Lameter 		return -EFAULT;
18521a75a6c8SChristoph Lameter 	}
18531a75a6c8SChristoph Lameter 
18541a75a6c8SChristoph Lameter 	l = strlen(policy_types[mode]);
18551a75a6c8SChristoph Lameter 	if (buffer + maxlen < p + l + 1)
18561a75a6c8SChristoph Lameter 		return -ENOSPC;
18571a75a6c8SChristoph Lameter 
18581a75a6c8SChristoph Lameter 	strcpy(p, policy_types[mode]);
18591a75a6c8SChristoph Lameter 	p += l;
18601a75a6c8SChristoph Lameter 
18611a75a6c8SChristoph Lameter 	if (!nodes_empty(nodes)) {
18621a75a6c8SChristoph Lameter 		if (buffer + maxlen < p + 2)
18631a75a6c8SChristoph Lameter 			return -ENOSPC;
18641a75a6c8SChristoph Lameter 		*p++ = '=';
18651a75a6c8SChristoph Lameter 		p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
18661a75a6c8SChristoph Lameter 	}
18671a75a6c8SChristoph Lameter 	return p - buffer;
18681a75a6c8SChristoph Lameter }
18691a75a6c8SChristoph Lameter 
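/*
 * Example output (editorial): "default" for MPOL_DEFAULT, "prefer=3"
 * for MPOL_PREFERRED on node 3, and "interleave=0-3" for MPOL_INTERLEAVE
 * over nodes 0-3 -- nodelist_scnprintf() emits the usual ranged
 * nodelist syntax.
 */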
18701a75a6c8SChristoph Lameter struct numa_maps {
18711a75a6c8SChristoph Lameter 	unsigned long pages;
18721a75a6c8SChristoph Lameter 	unsigned long anon;
1873397874dfSChristoph Lameter 	unsigned long active;
1874397874dfSChristoph Lameter 	unsigned long writeback;
18751a75a6c8SChristoph Lameter 	unsigned long mapcount_max;
1876397874dfSChristoph Lameter 	unsigned long dirty;
1877397874dfSChristoph Lameter 	unsigned long swapcache;
18781a75a6c8SChristoph Lameter 	unsigned long node[MAX_NUMNODES];
18791a75a6c8SChristoph Lameter };
18801a75a6c8SChristoph Lameter 
1881397874dfSChristoph Lameter static void gather_stats(struct page *page, void *private, int pte_dirty)
18821a75a6c8SChristoph Lameter {
18831a75a6c8SChristoph Lameter 	struct numa_maps *md = private;
18841a75a6c8SChristoph Lameter 	int count = page_mapcount(page);
18851a75a6c8SChristoph Lameter 
18861a75a6c8SChristoph Lameter 	md->pages++;
1887397874dfSChristoph Lameter 	if (pte_dirty || PageDirty(page))
1888397874dfSChristoph Lameter 		md->dirty++;
1889397874dfSChristoph Lameter 
1890397874dfSChristoph Lameter 	if (PageSwapCache(page))
1891397874dfSChristoph Lameter 		md->swapcache++;
1892397874dfSChristoph Lameter 
1893397874dfSChristoph Lameter 	if (PageActive(page))
1894397874dfSChristoph Lameter 		md->active++;
1895397874dfSChristoph Lameter 
1896397874dfSChristoph Lameter 	if (PageWriteback(page))
1897397874dfSChristoph Lameter 		md->writeback++;
18981a75a6c8SChristoph Lameter 
18991a75a6c8SChristoph Lameter 	if (PageAnon(page))
19001a75a6c8SChristoph Lameter 		md->anon++;
19011a75a6c8SChristoph Lameter 
1902397874dfSChristoph Lameter 	if (count > md->mapcount_max)
1903397874dfSChristoph Lameter 		md->mapcount_max = count;
1904397874dfSChristoph Lameter 
19051a75a6c8SChristoph Lameter 	md->node[page_to_nid(page)]++;
19061a75a6c8SChristoph Lameter }
19071a75a6c8SChristoph Lameter 
19087f709ed0SAndrew Morton #ifdef CONFIG_HUGETLB_PAGE
1909397874dfSChristoph Lameter static void check_huge_range(struct vm_area_struct *vma,
1910397874dfSChristoph Lameter 		unsigned long start, unsigned long end,
1911397874dfSChristoph Lameter 		struct numa_maps *md)
1912397874dfSChristoph Lameter {
1913397874dfSChristoph Lameter 	unsigned long addr;
1914397874dfSChristoph Lameter 	struct page *page;
1915397874dfSChristoph Lameter 
1916397874dfSChristoph Lameter 	for (addr = start; addr < end; addr += HPAGE_SIZE) {
1917397874dfSChristoph Lameter 		pte_t *ptep = huge_pte_offset(vma->vm_mm, addr & HPAGE_MASK);
1918397874dfSChristoph Lameter 		pte_t pte;
1919397874dfSChristoph Lameter 
1920397874dfSChristoph Lameter 		if (!ptep)
1921397874dfSChristoph Lameter 			continue;
1922397874dfSChristoph Lameter 
1923397874dfSChristoph Lameter 		pte = *ptep;
1924397874dfSChristoph Lameter 		if (pte_none(pte))
1925397874dfSChristoph Lameter 			continue;
1926397874dfSChristoph Lameter 
1927397874dfSChristoph Lameter 		page = pte_page(pte);
1928397874dfSChristoph Lameter 		if (!page)
1929397874dfSChristoph Lameter 			continue;
1930397874dfSChristoph Lameter 
1931397874dfSChristoph Lameter 		gather_stats(page, md, pte_dirty(*ptep));
1932397874dfSChristoph Lameter 	}
1933397874dfSChristoph Lameter }
19347f709ed0SAndrew Morton #else
19357f709ed0SAndrew Morton static inline void check_huge_range(struct vm_area_struct *vma,
19367f709ed0SAndrew Morton 		unsigned long start, unsigned long end,
19377f709ed0SAndrew Morton 		struct numa_maps *md)
19387f709ed0SAndrew Morton {
19397f709ed0SAndrew Morton }
19407f709ed0SAndrew Morton #endif
1941397874dfSChristoph Lameter 
19421a75a6c8SChristoph Lameter int show_numa_map(struct seq_file *m, void *v)
19431a75a6c8SChristoph Lameter {
194499f89551SEric W. Biederman 	struct proc_maps_private *priv = m->private;
19451a75a6c8SChristoph Lameter 	struct vm_area_struct *vma = v;
19461a75a6c8SChristoph Lameter 	struct numa_maps *md;
1947397874dfSChristoph Lameter 	struct file *file = vma->vm_file;
1948397874dfSChristoph Lameter 	struct mm_struct *mm = vma->vm_mm;
1949480eccf9SLee Schermerhorn 	struct mempolicy *pol;
19501a75a6c8SChristoph Lameter 	int n;
19511a75a6c8SChristoph Lameter 	char buffer[50];
19521a75a6c8SChristoph Lameter 
1953397874dfSChristoph Lameter 	if (!mm)
19541a75a6c8SChristoph Lameter 		return 0;
19551a75a6c8SChristoph Lameter 
19561a75a6c8SChristoph Lameter 	md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
19571a75a6c8SChristoph Lameter 	if (!md)
19581a75a6c8SChristoph Lameter 		return 0;
19591a75a6c8SChristoph Lameter 
1960480eccf9SLee Schermerhorn 	pol = get_vma_policy(priv->task, vma, vma->vm_start);
1961480eccf9SLee Schermerhorn 	mpol_to_str(buffer, sizeof(buffer), pol);
1962480eccf9SLee Schermerhorn 	/*
1963480eccf9SLee Schermerhorn 	 * unref shared or other task's mempolicy
1964480eccf9SLee Schermerhorn 	 */
1965480eccf9SLee Schermerhorn 	if (pol != &default_policy && pol != current->mempolicy)
1966480eccf9SLee Schermerhorn 		__mpol_free(pol);
19671a75a6c8SChristoph Lameter 
1968397874dfSChristoph Lameter 	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
1969397874dfSChristoph Lameter 
1970397874dfSChristoph Lameter 	if (file) {
1971397874dfSChristoph Lameter 		seq_printf(m, " file=");
1972e9536ae7SJosef Sipek 		seq_path(m, file->f_path.mnt, file->f_path.dentry, "\n\t= ");
1973397874dfSChristoph Lameter 	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
1974397874dfSChristoph Lameter 		seq_printf(m, " heap");
1975397874dfSChristoph Lameter 	} else if (vma->vm_start <= mm->start_stack &&
1976397874dfSChristoph Lameter 			vma->vm_end >= mm->start_stack) {
1977397874dfSChristoph Lameter 		seq_printf(m, " stack");
1978397874dfSChristoph Lameter 	}
1979397874dfSChristoph Lameter 
1980397874dfSChristoph Lameter 	if (is_vm_hugetlb_page(vma)) {
1981397874dfSChristoph Lameter 		check_huge_range(vma, vma->vm_start, vma->vm_end, md);
1982397874dfSChristoph Lameter 		seq_printf(m, " huge");
1983397874dfSChristoph Lameter 	} else {
1984397874dfSChristoph Lameter 		check_pgd_range(vma, vma->vm_start, vma->vm_end,
198556bbd65dSChristoph Lameter 			&node_states[N_HIGH_MEMORY], MPOL_MF_STATS, md);
1986397874dfSChristoph Lameter 	}
1987397874dfSChristoph Lameter 
1988397874dfSChristoph Lameter 	if (!md->pages)
1989397874dfSChristoph Lameter 		goto out;
19901a75a6c8SChristoph Lameter 
19911a75a6c8SChristoph Lameter 	if (md->anon)
19921a75a6c8SChristoph Lameter 		seq_printf(m, " anon=%lu", md->anon);
19931a75a6c8SChristoph Lameter 
1994397874dfSChristoph Lameter 	if (md->dirty)
1995397874dfSChristoph Lameter 		seq_printf(m, " dirty=%lu", md->dirty);
1996397874dfSChristoph Lameter 
1997397874dfSChristoph Lameter 	if (md->pages != md->anon && md->pages != md->dirty)
1998397874dfSChristoph Lameter 		seq_printf(m, " mapped=%lu", md->pages);
1999397874dfSChristoph Lameter 
2000397874dfSChristoph Lameter 	if (md->mapcount_max > 1)
2001397874dfSChristoph Lameter 		seq_printf(m, " mapmax=%lu", md->mapcount_max);
2002397874dfSChristoph Lameter 
2003397874dfSChristoph Lameter 	if (md->swapcache)
2004397874dfSChristoph Lameter 		seq_printf(m, " swapcache=%lu", md->swapcache);
2005397874dfSChristoph Lameter 
2006397874dfSChristoph Lameter 	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
2007397874dfSChristoph Lameter 		seq_printf(m, " active=%lu", md->active);
2008397874dfSChristoph Lameter 
2009397874dfSChristoph Lameter 	if (md->writeback)
2010397874dfSChristoph Lameter 		seq_printf(m, " writeback=%lu", md->writeback);
2011397874dfSChristoph Lameter 
201256bbd65dSChristoph Lameter 	for_each_node_state(n, N_HIGH_MEMORY)
20131a75a6c8SChristoph Lameter 		if (md->node[n])
20141a75a6c8SChristoph Lameter 			seq_printf(m, " N%d=%lu", n, md->node[n]);
2015397874dfSChristoph Lameter out:
20161a75a6c8SChristoph Lameter 	seq_putc(m, '\n');
20171a75a6c8SChristoph Lameter 	kfree(md);
20181a75a6c8SChristoph Lameter 
20191a75a6c8SChristoph Lameter 	if (m->count < m->size)
202099f89551SEric W. Biederman 		m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
20211a75a6c8SChristoph Lameter 	return 0;
20221a75a6c8SChristoph Lameter }
2023