xref: /openbmc/linux/mm/mempolicy.c (revision 3ad33b2436b545cbe8b28e53f3710432cad457ab)
/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a process counter
 *                is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead.
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case node -1 here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non-default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non-interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem kernels, lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
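
/*
 * Illustrative userspace sketch (not part of this file): how the four
 * policies above are requested through the syscalls implemented below.
 * Assumes the MPOL_* constants and syscall wrappers from libnuma's
 * <numaif.h>; `addr' and `len' stand for some existing mapping.
 *
 *	unsigned long both = 0x3;	// nodes 0 and 1
 *	unsigned long node0 = 0x1;	// node 0 only
 *
 *	// process policy: interleave future allocations over nodes 0 and 1
 *	set_mempolicy(MPOL_INTERLEAVE, &both, 3);
 *
 *	// VMA policy: bind one mapping to node 0 only, no fallback
 *	mbind(addr, len, MPOL_BIND, &node0, 2, MPOL_MF_STRICT);
 */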

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
   could replace all the switch()es with a mempolicy_ops structure.
*/

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
#define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2)		/* Gather statistics */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.policy = MPOL_DEFAULT,
};

static void mpol_rebind_policy(struct mempolicy *pol,
			       const nodemask_t *newmask);

/* Do sanity checking on a policy */
static int mpol_check_policy(int mode, nodemask_t *nodes)
{
	int empty = nodes_empty(*nodes);

	switch (mode) {
	case MPOL_DEFAULT:
		if (!empty)
			return -EINVAL;
		break;
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
		/* Preferred will only use the first bit, but allow
		   more for now. */
		if (empty)
			return -EINVAL;
		break;
	}
	return nodes_subset(*nodes, node_states[N_HIGH_MEMORY]) ? 0 : -EINVAL;
}

/* Generate a custom zonelist for the BIND policy. */
static struct zonelist *bind_zonelist(nodemask_t *nodes)
{
	struct zonelist *zl;
	int num, max, nd;
	enum zone_type k;

	max = 1 + MAX_NR_ZONES * nodes_weight(*nodes);
	max++;			/* space for zlcache_ptr (see mmzone.h) */
	zl = kmalloc(sizeof(struct zone *) * max, GFP_KERNEL);
	if (!zl)
		return ERR_PTR(-ENOMEM);
	zl->zlcache_ptr = NULL;
	num = 0;
	/* First put in the highest zones from all nodes, then all the next
	   lower zones etc. Avoid empty zones because the memory allocator
	   doesn't like them. If you implement node hot removal you
	   have to fix that. */
	k = MAX_NR_ZONES - 1;
	while (1) {
		for_each_node_mask(nd, *nodes) {
			struct zone *z = &NODE_DATA(nd)->node_zones[k];
			if (z->present_pages > 0)
				zl->zones[num++] = z;
		}
		if (k == 0)
			break;
		k--;
	}
	if (num == 0) {
		kfree(zl);
		return ERR_PTR(-EINVAL);
	}
	zl->zones[num] = NULL;
	return zl;
}
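
/*
 * Worked example for bind_zonelist() above, assuming a hypothetical
 * machine with two nodes and only ZONE_DMA and ZONE_NORMAL populated:
 * for nodes = {0,1} the emitted order is
 *
 *	node0/ZONE_NORMAL, node1/ZONE_NORMAL, node0/ZONE_DMA, node1/ZONE_DMA
 *
 * i.e. all the highest zones first, then the next lower ones, with
 * empty zones skipped.
 */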

/* Create a new policy */
static struct mempolicy *mpol_new(int mode, nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d nodes[0] %lx\n",
		 mode, nodes ? nodes_addr(*nodes)[0] : -1);

	if (mode == MPOL_DEFAULT)
		return NULL;
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	switch (mode) {
	case MPOL_INTERLEAVE:
		policy->v.nodes = *nodes;
		nodes_and(policy->v.nodes, policy->v.nodes,
					node_states[N_HIGH_MEMORY]);
		if (nodes_weight(policy->v.nodes) == 0) {
			kmem_cache_free(policy_cache, policy);
			return ERR_PTR(-EINVAL);
		}
		break;
	case MPOL_PREFERRED:
		policy->v.preferred_node = first_node(*nodes);
		if (policy->v.preferred_node >= MAX_NUMNODES)
			policy->v.preferred_node = -1;
		break;
	case MPOL_BIND:
		policy->v.zonelist = bind_zonelist(nodes);
		if (IS_ERR(policy->v.zonelist)) {
			void *error_code = policy->v.zonelist;
			kmem_cache_free(policy_cache, policy);
			return error_code;
		}
		break;
	}
	policy->policy = mode;
	policy->cpuset_mems_allowed = cpuset_mems_allowed(current);
	return policy;
}

static void gather_stats(struct page *, void *, int pte_dirty);
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

/* Scan through pages, checking if they match certain conditions. */
static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pte_t *orig_pte;
	pte_t *pte;
	spinlock_t *ptl;

	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		struct page *page;
		int nid;

		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * The check for PageReserved here is important to avoid
		 * handling zero pages and other pages that may have been
		 * marked special by the system.
		 *
		 * If the PageReserved would not be checked here then f.e.
		 * the location of the zero page could have an influence
		 * on MPOL_MF_STRICT, zero pages would be counted for
		 * the per node stats, and there would be useless attempts
		 * to put zero pages on the migration list.
		 */
		if (PageReserved(page))
			continue;
		nid = page_to_nid(page);
		if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
			continue;

		if (flags & MPOL_MF_STATS)
			gather_stats(page, private, pte_dirty(*pte));
		else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
			migrate_page_add(page, private, flags);
		else
			break;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return addr != end;
}

static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		if (check_pte_range(vma, pmd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		if (check_pmd_range(vma, pud, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static inline int check_pgd_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		if (check_pud_range(vma, pgd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pgd++, addr = next, addr != end);
	return 0;
}

/*
 * Check if all pages in a range are on a set of nodes.
 * If pagelist != NULL then isolate pages from the LRU and
 * put them on the pagelist.
 */
static struct vm_area_struct *
check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		const nodemask_t *nodes, unsigned long flags, void *private)
{
	int err;
	struct vm_area_struct *first, *vma, *prev;

	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {

		err = migrate_prep();
		if (err)
			return ERR_PTR(err);
	}

	first = find_vma(mm, start);
	if (!first)
		return ERR_PTR(-EFAULT);
	prev = NULL;
	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
			if (!vma->vm_next && vma->vm_end < end)
				return ERR_PTR(-EFAULT);
			if (prev && prev->vm_end < vma->vm_start)
				return ERR_PTR(-EFAULT);
		}
		if (!is_vm_hugetlb_page(vma) &&
		    ((flags & MPOL_MF_STRICT) ||
		     ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
				vma_migratable(vma)))) {
			unsigned long endvma = vma->vm_end;

			if (endvma > end)
				endvma = end;
			if (vma->vm_start > start)
				start = vma->vm_start;
			err = check_pgd_range(vma, start, endvma, nodes,
						flags, private);
			if (err) {
				first = ERR_PTR(err);
				break;
			}
		}
		prev = vma;
	}
	return first;
}
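
/*
 * Note on MPOL_MF_INVERT: do_mbind() below calls check_range() with
 * flags | MPOL_MF_INVERT, so check_pte_range() collects exactly the
 * pages that are *not* yet on the requested nodes -- the candidates
 * for migration.
 */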

/* Apply policy to a single VMA */
static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
{
	int err = 0;
	struct mempolicy *old = vma->vm_policy;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	if (vma->vm_ops && vma->vm_ops->set_policy)
		err = vma->vm_ops->set_policy(vma, new);
	if (!err) {
		mpol_get(new);
		vma->vm_policy = new;
		mpol_free(old);
	}
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, struct mempolicy *new)
{
	struct vm_area_struct *next;
	int err;

	err = 0;
	for (; vma && vma->vm_start < end; vma = next) {
		next = vma->vm_next;
		if (vma->vm_start < start)
			err = split_vma(vma->vm_mm, vma, start, 1);
		if (!err && vma->vm_end > end)
			err = split_vma(vma->vm_mm, vma, end, 0);
		if (!err)
			err = policy_vma(vma, new);
		if (err)
			break;
	}
	return err;
}
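
/*
 * Hypothetical illustration of the splits above: mbind() on
 * [0x2000, 0x4000) of a VMA spanning [0x1000, 0x5000) first splits off
 * [0x1000, 0x2000), then [0x4000, 0x5000), and installs the new policy
 * on the middle piece only.
 */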

static int contextualize_policy(int mode, nodemask_t *nodes)
{
	if (!nodes)
		return 0;

	cpuset_update_task_memory_state();
	if (!cpuset_nodes_subset_current_mems_allowed(*nodes))
		return -EINVAL;
	return mpol_check_policy(mode, nodes);
}


/*
 * Update task->flags PF_MEMPOLICY bit: set iff non-default
 * mempolicy.  Allows more rapid checking of this (combined perhaps
 * with other PF_* flag bits) on memory allocation hot code paths.
 *
 * If called from outside this file, the task 'p' should -only- be
 * a newly forked child not yet visible on the task list, because
 * manipulating the task flags of a visible task is not safe.
 *
 * The above limitation is why this routine has the funny name
 * mpol_fix_fork_child_flag().
 *
 * It is also safe to call this with a task pointer of current,
 * which the static wrapper mpol_set_task_struct_flag() does,
 * for use within this file.
 */

void mpol_fix_fork_child_flag(struct task_struct *p)
{
	if (p->mempolicy)
		p->flags |= PF_MEMPOLICY;
	else
		p->flags &= ~PF_MEMPOLICY;
}

static void mpol_set_task_struct_flag(void)
{
	mpol_fix_fork_child_flag(current);
}

/* Set the process memory policy */
static long do_set_mempolicy(int mode, nodemask_t *nodes)
{
	struct mempolicy *new;

	if (contextualize_policy(mode, nodes))
		return -EINVAL;
	new = mpol_new(mode, nodes);
	if (IS_ERR(new))
		return PTR_ERR(new);
	mpol_free(current->mempolicy);
	current->mempolicy = new;
	mpol_set_task_struct_flag();
	if (new && new->policy == MPOL_INTERLEAVE)
		current->il_next = first_node(new->v.nodes);
	return 0;
}

/* Fill a zone bitmap for a policy */
static void get_zonemask(struct mempolicy *p, nodemask_t *nodes)
{
	int i;

	nodes_clear(*nodes);
	switch (p->policy) {
	case MPOL_BIND:
		for (i = 0; p->v.zonelist->zones[i]; i++)
			node_set(zone_to_nid(p->v.zonelist->zones[i]),
				*nodes);
		break;
	case MPOL_DEFAULT:
		break;
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		/* or use current node instead of memory_map? */
		if (p->v.preferred_node < 0)
			*nodes = node_states[N_HIGH_MEMORY];
		else
			node_set(p->v.preferred_node, *nodes);
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p;
	int err;

	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	cpuset_update_task_memory_state();
	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		*nmask  = cpuset_current_mems_allowed;
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->policy == MPOL_INTERLEAVE) {
			*policy = current->il_next;
		} else {
			err = -EINVAL;
			goto out;
		}
	} else
		*policy = pol->policy;

	if (vma) {
		up_read(&current->mm->mmap_sem);
		vma = NULL;
	}

	err = 0;
	if (nmask)
		get_zonemask(pol, nmask);

 out:
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}
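
/*
 * Illustrative userspace sketch (assumes libnuma's <numaif.h>): query
 * which node currently backs a given address, which exercises the
 * MPOL_F_NODE | MPOL_F_ADDR path above.
 *
 *	int node;
 *	if (get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR) == 0)
 *		printf("page at %p is on node %d\n", addr, node);
 */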

#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1)
		isolate_lru_page(page, pagelist);
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
	return alloc_pages_node(node, GFP_HIGHUSER_MOVABLE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;

	nodes_clear(nmask);
	node_set(source, nmask);

	check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist))
		err = migrate_pages(&pagelist, new_node_page, dest);

	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	LIST_HEAD(pagelist);
	int busy = 0;
	int err = 0;
	nodemask_t tmp;

	down_read(&mm->mmap_sem);

	err = migrate_vmas(mm, from_nodes, to_nodes, flags);
	if (err)
		goto out;

/*
 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
 * bit in 'tmp', and return that <source, dest> pair for migration.
 * The pair of nodemasks 'to' and 'from' define the map.
 *
 * If no pair of bits is found that way, fall back to picking some
 * pair of 'source' and 'dest' bits that are not the same.  If the
 * 'source' and 'dest' bits are the same, this represents a node
 * that will be migrating to itself, so no pages need move.
 *
 * If no bits are left in 'tmp', or if all remaining bits left
 * in 'tmp' correspond to the same bit in 'to', return false
 * (nothing left to migrate).
 *
 * This lets us pick a pair of nodes to migrate between, such that
 * if possible the dest node is not already occupied by some other
 * source node, minimizing the risk of overloading the memory on a
 * node that would happen if we migrated incoming memory to a node
 * before migrating outgoing memory sourced from that same node.
 *
 * A single scan of tmp is sufficient.  As we go, we remember the
 * most recent <s, d> pair that moved (s != d).  If we find a pair
 * that not only moved, but what's better, moved to an empty slot
 * (d is not set in tmp), then we break out then, with that pair.
 * Otherwise when we finish scanning tmp, we at least have the
 * most recent <s, d> pair that moved.  If we get all the way through
 * the scan of tmp without finding any node that moved, much less
 * moved to an empty node, then there is nothing left worth migrating.
 */
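
/*
 * Hypothetical worked example of the pairing above: from = {0,1},
 * to = {1,2}.  Scanning tmp = {0,1}: s=0 remaps to d=1, but node 1 is
 * still set in tmp, so remember <0,1> and keep going; s=1 remaps to
 * d=2, which is not in tmp, so migrate 1 -> 2 first and clear node 1.
 * The next pass then moves 0 -> 1 into the now-vacated node.
 */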

	tmp = *from_nodes;
	while (!nodes_empty(tmp)) {
		int s, d;
		int source = -1;
		int dest = 0;

		for_each_node_mask(s, tmp) {
			d = node_remap(s, *from_nodes, *to_nodes);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
				break;
		}
		if (source == -1)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;
		if (err < 0)
			break;
	}
out:
	up_read(&mm->mmap_sem);
	if (err < 0)
		return err;
	return busy;
}

/*
 * Allocate a new page for page migration based on vma policy.
 * Start assuming that page is mapped by vma pointed to by @private.
 * Search forward from there, if not.  N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
 */
static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	struct vm_area_struct *vma = (struct vm_area_struct *)private;
	unsigned long uninitialized_var(address);

	while (vma) {
		address = page_address_in_vma(page, vma);
		if (address != -EFAULT)
			break;
		vma = vma->vm_next;
	}

	/*
	 * if !vma, alloc_page_vma() will use task or system default policy
	 */
	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
}
#else

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
}

int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	return -ENOSYS;
}

static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	return NULL;
}
#endif

static long do_mbind(unsigned long start, unsigned long len,
		     unsigned long mode, nodemask_t *nmask,
		     unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	struct mempolicy *new;
	unsigned long end;
	int err;
	LIST_HEAD(pagelist);

	if ((flags & ~(unsigned long)(MPOL_MF_STRICT |
				      MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
	    || mode > MPOL_MAX)
		return -EINVAL;
	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	if (start & ~PAGE_MASK)
		return -EINVAL;

	if (mode == MPOL_DEFAULT)
		flags &= ~MPOL_MF_STRICT;

	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
	end = start + len;

	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;

	if (mpol_check_policy(mode, nmask))
		return -EINVAL;

	new = mpol_new(mode, nmask);
	if (IS_ERR(new))
		return PTR_ERR(new);

	/*
	 * If we are using the default policy then operation
	 * on discontinuous address spaces is okay after all
	 */
	if (!new)
		flags |= MPOL_MF_DISCONTIG_OK;

	pr_debug("mbind %lx-%lx mode:%ld nodes:%lx\n", start, start + len,
		 mode, nmask ? nodes_addr(*nmask)[0] : -1);

	down_write(&mm->mmap_sem);
	vma = check_range(mm, start, end, nmask,
			  flags | MPOL_MF_INVERT, &pagelist);

	err = PTR_ERR(vma);
	if (!IS_ERR(vma)) {
		int nr_failed = 0;

		err = mbind_range(vma, start, end, new);

		if (!list_empty(&pagelist))
			nr_failed = migrate_pages(&pagelist, new_vma_page,
						(unsigned long)vma);

		if (!err && nr_failed && (flags & MPOL_MF_STRICT))
			err = -EIO;
	}

	up_write(&mm->mmap_sem);
	mpol_free(new);
	return err;
}

/*
 * User space interface with variable sized bitmaps for nodelists.
 */

/* Copy a node mask from user space. */
static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
		     unsigned long maxnode)
{
	unsigned long k;
	unsigned long nlongs;
	unsigned long endmask;

	--maxnode;
	nodes_clear(*nodes);
	if (maxnode == 0 || !nmask)
		return 0;
	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
		return -EINVAL;

	nlongs = BITS_TO_LONGS(maxnode);
	if ((maxnode % BITS_PER_LONG) == 0)
		endmask = ~0UL;
	else
		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;

	/* When the user specified more nodes than supported, just check
	   if the non-supported part is all zero. */
	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
		if (nlongs > PAGE_SIZE/sizeof(long))
			return -EINVAL;
		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
			unsigned long t;
			if (get_user(t, nmask + k))
				return -EFAULT;
			if (k == nlongs - 1) {
				if (t & endmask)
					return -EINVAL;
			} else if (t)
				return -EINVAL;
		}
		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
		endmask = ~0UL;
	}

	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
		return -EFAULT;
	nodes_addr(*nodes)[nlongs-1] &= endmask;
	return 0;
}
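
/*
 * Worked example for get_nodes() above (64-bit kernel, hypothetical):
 * a user passing maxnode = 9 describes bits 0..7, so after --maxnode
 * we have maxnode = 8, nlongs = 1 and endmask = (1UL << 8) - 1 = 0xff;
 * any bits above bit 7 in the copied word are masked off.
 */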

/* Copy a kernel node mask to user space */
static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
			      nodemask_t *nodes)
{
	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);

	if (copy > nbytes) {
		if (copy > PAGE_SIZE)
			return -EINVAL;
		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
			return -EFAULT;
		copy = nbytes;
	}
	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
}

asmlinkage long sys_mbind(unsigned long start, unsigned long len,
			unsigned long mode,
			unsigned long __user *nmask, unsigned long maxnode,
			unsigned flags)
{
	nodemask_t nodes;
	int err;

	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
#ifdef CONFIG_CPUSETS
	/* Restrict the nodes to the allowed nodes in the cpuset */
	nodes_and(nodes, nodes, current->mems_allowed);
#endif
	return do_mbind(start, len, mode, &nodes, flags);
}

/* Set the process memory policy */
asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
		unsigned long maxnode)
{
	int err;
	nodemask_t nodes;

	if (mode < 0 || mode > MPOL_MAX)
		return -EINVAL;
	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_set_mempolicy(mode, &nodes);
}

asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
		const unsigned long __user *old_nodes,
		const unsigned long __user *new_nodes)
{
	struct mm_struct *mm;
	struct task_struct *task;
	nodemask_t old;
	nodemask_t new;
	nodemask_t task_nodes;
	int err;

	err = get_nodes(&old, old_nodes, maxnode);
	if (err)
		return err;

	err = get_nodes(&new, new_nodes, maxnode);
	if (err)
		return err;

	/* Find the mm_struct */
	read_lock(&tasklist_lock);
	task = pid ? find_task_by_vpid(pid) : current;
	if (!task) {
		read_unlock(&tasklist_lock);
		return -ESRCH;
	}
	mm = get_task_mm(task);
	read_unlock(&tasklist_lock);

	if (!mm)
		return -EINVAL;

	/*
	 * Check if this process has the right to modify the specified
	 * process. The right exists if the process has administrative
	 * capabilities, superuser privileges or the same
	 * userid as the target process.
	 */
	if ((current->euid != task->suid) && (current->euid != task->uid) &&
	    (current->uid != task->suid) && (current->uid != task->uid) &&
	    !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out;
	}

	task_nodes = cpuset_mems_allowed(task);
	/* Is the user allowed to access the target nodes? */
	if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out;
	}

	if (!nodes_subset(new, node_states[N_HIGH_MEMORY])) {
		err = -EINVAL;
		goto out;
	}

	err = security_task_movememory(task);
	if (err)
		goto out;

	err = do_migrate_pages(mm, &old, &new,
		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
out:
	mmput(mm);
	return err;
}
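
/*
 * Illustrative userspace sketch (assumes libnuma's <numaif.h>): ask the
 * kernel to move the calling process's pages from node 0 to node 1.
 * maxnode = 3 covers bits 0..1 after the syscall's internal --maxnode.
 *
 *	unsigned long from = 0x1, to = 0x2;
 *	migrate_pages(0, 3, &from, &to);	// pid 0 means "current"
 */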


/* Retrieve NUMA policy */
asmlinkage long sys_get_mempolicy(int __user *policy,
				unsigned long __user *nmask,
				unsigned long maxnode,
				unsigned long addr, unsigned long flags)
{
	int err;
	int uninitialized_var(pval);
	nodemask_t nodes;

	if (nmask != NULL && maxnode < MAX_NUMNODES)
		return -EINVAL;

	err = do_get_mempolicy(&pval, &nodes, addr, flags);

	if (err)
		return err;

	if (policy && put_user(pval, policy))
		return -EFAULT;

	if (nmask)
		err = copy_nodes_to_user(nmask, maxnode, &nodes);

	return err;
}

#ifdef CONFIG_COMPAT

asmlinkage long compat_sys_get_mempolicy(int __user *policy,
				     compat_ulong_t __user *nmask,
				     compat_ulong_t maxnode,
				     compat_ulong_t addr, compat_ulong_t flags)
{
	long err;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask)
		nm = compat_alloc_user_space(alloc_size);

	err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);

	if (!err && nmask) {
		err = copy_from_user(bm, nm, alloc_size);
		/* ensure entire bitmap is zeroed */
		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
		err |= compat_put_bitmap(nmask, bm, nr_bits);
	}

	return err;
}

asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
				     compat_ulong_t maxnode)
{
	long err = 0;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		err = compat_get_bitmap(bm, nmask, nr_bits);
		nm = compat_alloc_user_space(alloc_size);
		err |= copy_to_user(nm, bm, alloc_size);
	}

	if (err)
		return -EFAULT;

	return sys_set_mempolicy(mode, nm, nr_bits+1);
}

asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
			     compat_ulong_t mode, compat_ulong_t __user *nmask,
			     compat_ulong_t maxnode, compat_ulong_t flags)
{
	long err = 0;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	nodemask_t bm;

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
		nm = compat_alloc_user_space(alloc_size);
		err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
	}

	if (err)
		return -EFAULT;

	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
}
11121da177e4SLinus Torvalds 
11131da177e4SLinus Torvalds #endif
11141da177e4SLinus Torvalds 
1115480eccf9SLee Schermerhorn /*
1116480eccf9SLee Schermerhorn  * get_vma_policy(@task, @vma, @addr)
1117480eccf9SLee Schermerhorn  * @task - task for fallback if vma policy == default
1118480eccf9SLee Schermerhorn  * @vma   - virtual memory area whose policy is sought
1119480eccf9SLee Schermerhorn  * @addr  - address in @vma for shared policy lookup
1120480eccf9SLee Schermerhorn  *
1121480eccf9SLee Schermerhorn  * Returns effective policy for a VMA at specified address.
1122480eccf9SLee Schermerhorn  * Falls back to @task or system default policy, as necessary.
1123480eccf9SLee Schermerhorn  * The returned policy has an extra reference count if it is shared,
1124480eccf9SLee Schermerhorn  * a vma policy, or some other task's policy [show_numa_maps() can
1125480eccf9SLee Schermerhorn  * pass @task != current].  It is the caller's responsibility to
1126480eccf9SLee Schermerhorn  * free the reference in these cases.
1127480eccf9SLee Schermerhorn  */
112848fce342SChristoph Lameter static struct mempolicy * get_vma_policy(struct task_struct *task,
112948fce342SChristoph Lameter 		struct vm_area_struct *vma, unsigned long addr)
11301da177e4SLinus Torvalds {
11316e21c8f1SChristoph Lameter 	struct mempolicy *pol = task->mempolicy;
1132480eccf9SLee Schermerhorn 	int shared_pol = 0;
11331da177e4SLinus Torvalds 
11341da177e4SLinus Torvalds 	if (vma) {
1135480eccf9SLee Schermerhorn 		if (vma->vm_ops && vma->vm_ops->get_policy) {
11361da177e4SLinus Torvalds 			pol = vma->vm_ops->get_policy(vma, addr);
1137480eccf9SLee Schermerhorn 			shared_pol = 1;	/* if pol non-NULL, add ref below */
1138480eccf9SLee Schermerhorn 		} else if (vma->vm_policy &&
11391da177e4SLinus Torvalds 				vma->vm_policy->policy != MPOL_DEFAULT)
11401da177e4SLinus Torvalds 			pol = vma->vm_policy;
11411da177e4SLinus Torvalds 	}
11421da177e4SLinus Torvalds 	if (!pol)
11431da177e4SLinus Torvalds 		pol = &default_policy;
1144480eccf9SLee Schermerhorn 	else if (!shared_pol && pol != current->mempolicy)
1145480eccf9SLee Schermerhorn 		mpol_get(pol);	/* vma or other task's policy */
11461da177e4SLinus Torvalds 	return pol;
11471da177e4SLinus Torvalds }
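
/*
 * Hedged caller sketch (not compiled): the unref protocol from the
 * comment above.  Only a policy other than the static default and the
 * caller's own task policy carries an extra reference to drop; the
 * same test appears in alloc_page_vma() and show_numa_map() below.
 */
#if 0
static int example_vma_policy_mode(struct vm_area_struct *vma,
				   unsigned long addr)
{
	struct mempolicy *pol = get_vma_policy(current, vma, addr);
	int mode = pol->policy;

	if (pol != &default_policy && pol != current->mempolicy)
		__mpol_free(pol);	/* drop the extra reference */
	return mode;
}
#endif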
11481da177e4SLinus Torvalds 
11491da177e4SLinus Torvalds /* Return a zonelist representing a mempolicy */
1150dd0fc66fSAl Viro static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy)
11511da177e4SLinus Torvalds {
11521da177e4SLinus Torvalds 	int nd;
11531da177e4SLinus Torvalds 
11541da177e4SLinus Torvalds 	switch (policy->policy) {
11551da177e4SLinus Torvalds 	case MPOL_PREFERRED:
11561da177e4SLinus Torvalds 		nd = policy->v.preferred_node;
11571da177e4SLinus Torvalds 		if (nd < 0)
11581da177e4SLinus Torvalds 			nd = numa_node_id();
11591da177e4SLinus Torvalds 		break;
11601da177e4SLinus Torvalds 	case MPOL_BIND:
11611da177e4SLinus Torvalds 		/* Lower zones don't get a policy applied */
11621da177e4SLinus Torvalds 		/* Careful: current->mems_allowed might have moved */
116319655d34SChristoph Lameter 		if (gfp_zone(gfp) >= policy_zone)
11641da177e4SLinus Torvalds 			if (cpuset_zonelist_valid_mems_allowed(policy->v.zonelist))
11651da177e4SLinus Torvalds 				return policy->v.zonelist;
11661da177e4SLinus Torvalds 		/*FALL THROUGH*/
11671da177e4SLinus Torvalds 	case MPOL_INTERLEAVE: /* should not happen */
11681da177e4SLinus Torvalds 	case MPOL_DEFAULT:
11691da177e4SLinus Torvalds 		nd = numa_node_id();
11701da177e4SLinus Torvalds 		break;
11711da177e4SLinus Torvalds 	default:
11721da177e4SLinus Torvalds 		nd = 0;
11731da177e4SLinus Torvalds 		BUG();
11741da177e4SLinus Torvalds 	}
1175af4ca457SAl Viro 	return NODE_DATA(nd)->node_zonelists + gfp_zone(gfp);
11761da177e4SLinus Torvalds }
11771da177e4SLinus Torvalds 
11781da177e4SLinus Torvalds /* Do dynamic interleaving for a process */
11791da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy)
11801da177e4SLinus Torvalds {
11811da177e4SLinus Torvalds 	unsigned nid, next;
11821da177e4SLinus Torvalds 	struct task_struct *me = current;
11831da177e4SLinus Torvalds 
11841da177e4SLinus Torvalds 	nid = me->il_next;
1185dfcd3c0dSAndi Kleen 	next = next_node(nid, policy->v.nodes);
11861da177e4SLinus Torvalds 	if (next >= MAX_NUMNODES)
1187dfcd3c0dSAndi Kleen 		next = first_node(policy->v.nodes);
11881da177e4SLinus Torvalds 	me->il_next = next;
11891da177e4SLinus Torvalds 	return nid;
11901da177e4SLinus Torvalds }
11911da177e4SLinus Torvalds 
1192dc85da15SChristoph Lameter /*
1193dc85da15SChristoph Lameter  * Depending on the memory policy provide a node from which to allocate the
1194dc85da15SChristoph Lameter  * next slab entry.
1195dc85da15SChristoph Lameter  */
1196dc85da15SChristoph Lameter unsigned slab_node(struct mempolicy *policy)
1197dc85da15SChristoph Lameter {
1198765c4507SChristoph Lameter 	int pol = policy ? policy->policy : MPOL_DEFAULT;
1199765c4507SChristoph Lameter 
1200765c4507SChristoph Lameter 	switch (pol) {
1201dc85da15SChristoph Lameter 	case MPOL_INTERLEAVE:
1202dc85da15SChristoph Lameter 		return interleave_nodes(policy);
1203dc85da15SChristoph Lameter 
1204dc85da15SChristoph Lameter 	case MPOL_BIND:
1205dc85da15SChristoph Lameter 		/*
1206dc85da15SChristoph Lameter 		 * Follow bind policy behavior and start allocation at the
1207dc85da15SChristoph Lameter 		 * first node.
1208dc85da15SChristoph Lameter 		 */
120989fa3024SChristoph Lameter 		return zone_to_nid(policy->v.zonelist->zones[0]);
1210dc85da15SChristoph Lameter 
1211dc85da15SChristoph Lameter 	case MPOL_PREFERRED:
1212dc85da15SChristoph Lameter 		if (policy->v.preferred_node >= 0)
1213dc85da15SChristoph Lameter 			return policy->v.preferred_node;
1214dc85da15SChristoph Lameter 		/* Fall through */
1215dc85da15SChristoph Lameter 
1216dc85da15SChristoph Lameter 	default:
1217dc85da15SChristoph Lameter 		return numa_node_id();
1218dc85da15SChristoph Lameter 	}
1219dc85da15SChristoph Lameter }
1220dc85da15SChristoph Lameter 
12211da177e4SLinus Torvalds /* Do static interleaving for a VMA with known offset. */
12221da177e4SLinus Torvalds static unsigned offset_il_node(struct mempolicy *pol,
12231da177e4SLinus Torvalds 		struct vm_area_struct *vma, unsigned long off)
12241da177e4SLinus Torvalds {
1225dfcd3c0dSAndi Kleen 	unsigned nnodes = nodes_weight(pol->v.nodes);
12261da177e4SLinus Torvalds 	unsigned target = (unsigned)off % nnodes;
12271da177e4SLinus Torvalds 	int c;
12281da177e4SLinus Torvalds 	int nid = -1;
12291da177e4SLinus Torvalds 
12301da177e4SLinus Torvalds 	c = 0;
12311da177e4SLinus Torvalds 	do {
1232dfcd3c0dSAndi Kleen 		nid = next_node(nid, pol->v.nodes);
12331da177e4SLinus Torvalds 		c++;
12341da177e4SLinus Torvalds 	} while (c <= target);
12351da177e4SLinus Torvalds 	return nid;
12361da177e4SLinus Torvalds }
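
/*
 * Worked example of the walk above: with pol->v.nodes = {0,2,5} and
 * off = 7, nnodes = 3 and target = 7 % 3 = 1, so the loop visits node 0
 * (c = 1), then node 2 (c = 2 > target) and returns nid = 2, the node
 * at index 1 of the set.
 */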
12371da177e4SLinus Torvalds 
12385da7ca86SChristoph Lameter /* Determine a node number for interleave */
12395da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol,
12405da7ca86SChristoph Lameter 		 struct vm_area_struct *vma, unsigned long addr, int shift)
12415da7ca86SChristoph Lameter {
12425da7ca86SChristoph Lameter 	if (vma) {
12435da7ca86SChristoph Lameter 		unsigned long off;
12445da7ca86SChristoph Lameter 
12453b98b087SNishanth Aravamudan 		/*
12463b98b087SNishanth Aravamudan 		 * for small pages, there is no difference between
12473b98b087SNishanth Aravamudan 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
12483b98b087SNishanth Aravamudan 		 * for huge pages, since vm_pgoff is in units of small
12493b98b087SNishanth Aravamudan 		 * pages, we need to shift off the always 0 bits to get
12503b98b087SNishanth Aravamudan 		 * a useful offset.
12513b98b087SNishanth Aravamudan 		 */
12523b98b087SNishanth Aravamudan 		BUG_ON(shift < PAGE_SHIFT);
12533b98b087SNishanth Aravamudan 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
12545da7ca86SChristoph Lameter 		off += (addr - vma->vm_start) >> shift;
12555da7ca86SChristoph Lameter 		return offset_il_node(pol, vma, off);
12565da7ca86SChristoph Lameter 	} else
12575da7ca86SChristoph Lameter 		return interleave_nodes(pol);
12585da7ca86SChristoph Lameter }
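
/*
 * Example of the huge page case above with PAGE_SHIFT = 12 and
 * HPAGE_SHIFT = 21: vm_pgoff is in 4KB units, so its low 9 bits are
 * the "always 0 bits"; off = (vm_pgoff >> 9) plus the huge-page index
 * of addr within the VMA advances by one per huge page, which is what
 * the interleave arithmetic needs.
 */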
12595da7ca86SChristoph Lameter 
126000ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS
1261480eccf9SLee Schermerhorn /*
1262480eccf9SLee Schermerhorn  * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
1263480eccf9SLee Schermerhorn  * @vma = virtual memory area whose policy is sought
1264480eccf9SLee Schermerhorn  * @addr = address in @vma for shared policy lookup and interleave policy
1265480eccf9SLee Schermerhorn  * @gfp_flags = for requested zone
1266480eccf9SLee Schermerhorn  * @mpol = pointer to mempolicy pointer for reference counted 'BIND policy
1267480eccf9SLee Schermerhorn  *
1268480eccf9SLee Schermerhorn  * Returns a zonelist suitable for a huge page allocation.
1269480eccf9SLee Schermerhorn  * If the effective policy is 'BIND, returns pointer to policy's zonelist.
1270480eccf9SLee Schermerhorn  * If it is also a policy for which get_vma_policy() returns an extra
1271480eccf9SLee Schermerhorn  * reference, we must hold that reference until after allocation.
1272480eccf9SLee Schermerhorn  * In that case, return policy via @mpol so hugetlb allocation can drop
1273480eccf9SLee Schermerhorn  * the reference.  For non-'BIND referenced policies, we can/do drop the
1274480eccf9SLee Schermerhorn  * reference here, so the caller doesn't need to know about the special case
1275480eccf9SLee Schermerhorn  * for default and current task policy.
1276480eccf9SLee Schermerhorn  */
1277396faf03SMel Gorman struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
1278480eccf9SLee Schermerhorn 				gfp_t gfp_flags, struct mempolicy **mpol)
12795da7ca86SChristoph Lameter {
12805da7ca86SChristoph Lameter 	struct mempolicy *pol = get_vma_policy(current, vma, addr);
1281480eccf9SLee Schermerhorn 	struct zonelist *zl;
12825da7ca86SChristoph Lameter 
1283480eccf9SLee Schermerhorn 	*mpol = NULL;		/* probably no unref needed */
12845da7ca86SChristoph Lameter 	if (pol->policy == MPOL_INTERLEAVE) {
12855da7ca86SChristoph Lameter 		unsigned nid;
12865da7ca86SChristoph Lameter 
12875da7ca86SChristoph Lameter 		nid = interleave_nid(pol, vma, addr, HPAGE_SHIFT);
1288480eccf9SLee Schermerhorn 		__mpol_free(pol);		/* finished with pol */
1289396faf03SMel Gorman 		return NODE_DATA(nid)->node_zonelists + gfp_zone(gfp_flags);
12905da7ca86SChristoph Lameter 	}
1291480eccf9SLee Schermerhorn 
1292480eccf9SLee Schermerhorn 	zl = zonelist_policy(GFP_HIGHUSER, pol);
1293480eccf9SLee Schermerhorn 	if (unlikely(pol != &default_policy && pol != current->mempolicy)) {
1294480eccf9SLee Schermerhorn 		if (pol->policy != MPOL_BIND)
1295480eccf9SLee Schermerhorn 			__mpol_free(pol);	/* finished with pol */
1296480eccf9SLee Schermerhorn 		else
1297480eccf9SLee Schermerhorn 			*mpol = pol;	/* unref needed after allocation */
1298480eccf9SLee Schermerhorn 	}
1299480eccf9SLee Schermerhorn 	return zl;
13005da7ca86SChristoph Lameter }
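
/*
 * Hedged caller sketch (not compiled): how a hugetlb allocator might
 * honour the @mpol contract above.  grab_huge_page_from() is a
 * hypothetical stand-in for the real dequeue step in mm/hugetlb.c.
 */
#if 0
static struct page *example_alloc_huge(struct vm_area_struct *vma,
				       unsigned long addr)
{
	struct mempolicy *mpol;
	struct zonelist *zl = huge_zonelist(vma, addr, htlb_alloc_mask, &mpol);
	struct page *page = grab_huge_page_from(zl);	/* hypothetical */

	if (mpol)		/* set only for ref-counted 'BIND */
		__mpol_free(mpol);
	return page;
}
#endif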
130100ac59adSChen, Kenneth W #endif
13025da7ca86SChristoph Lameter 
13031da177e4SLinus Torvalds /* Allocate a page under interleave policy.
13041da177e4SLinus Torvalds    Separate path because it needs to do special accounting. */
1305662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1306662f3a0bSAndi Kleen 					unsigned nid)
13071da177e4SLinus Torvalds {
13081da177e4SLinus Torvalds 	struct zonelist *zl;
13091da177e4SLinus Torvalds 	struct page *page;
13101da177e4SLinus Torvalds 
1311af4ca457SAl Viro 	zl = NODE_DATA(nid)->node_zonelists + gfp_zone(gfp);
13121da177e4SLinus Torvalds 	page = __alloc_pages(gfp, order, zl);
1313ca889e6cSChristoph Lameter 	if (page && page_zone(page) == zl->zones[0])
1314ca889e6cSChristoph Lameter 		inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
13151da177e4SLinus Torvalds 	return page;
13161da177e4SLinus Torvalds }
13171da177e4SLinus Torvalds 
13181da177e4SLinus Torvalds /**
13191da177e4SLinus Torvalds  * 	alloc_page_vma	- Allocate a page for a VMA.
13201da177e4SLinus Torvalds  *
13211da177e4SLinus Torvalds  * 	@gfp:
13221da177e4SLinus Torvalds  *      %GFP_USER    user allocation.
13231da177e4SLinus Torvalds  *      %GFP_KERNEL  kernel allocations,
13241da177e4SLinus Torvalds  *      %GFP_HIGHMEM highmem/user allocations,
13251da177e4SLinus Torvalds  *      %GFP_FS      allocation should not call back into a file system.
13261da177e4SLinus Torvalds  *      %GFP_ATOMIC  don't sleep.
13271da177e4SLinus Torvalds  *
13281da177e4SLinus Torvalds  * 	@vma:  Pointer to VMA or NULL if not available.
13291da177e4SLinus Torvalds  *	@addr: Virtual Address of the allocation. Must be inside the VMA.
13301da177e4SLinus Torvalds  *
13311da177e4SLinus Torvalds  * 	This function allocates a page from the kernel page pool and applies
13321da177e4SLinus Torvalds  *	a NUMA policy associated with the VMA or the current process.
13331da177e4SLinus Torvalds  *	When VMA is not NULL caller must hold down_read on the mmap_sem of the
13341da177e4SLinus Torvalds  *	mm_struct of the VMA to prevent it from going away. Should be used for
13351da177e4SLinus Torvalds  *	all allocations for pages that will be mapped into
13361da177e4SLinus Torvalds  * 	user space. Returns NULL when no page can be allocated.
13371da177e4SLinus Torvalds  *
13381da177e4SLinus Torvalds  *	Should be called with the mmap_sem of the vma held.
13391da177e4SLinus Torvalds  */
13401da177e4SLinus Torvalds struct page *
1341dd0fc66fSAl Viro alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
13421da177e4SLinus Torvalds {
13436e21c8f1SChristoph Lameter 	struct mempolicy *pol = get_vma_policy(current, vma, addr);
1344480eccf9SLee Schermerhorn 	struct zonelist *zl;
13451da177e4SLinus Torvalds 
1346cf2a473cSPaul Jackson 	cpuset_update_task_memory_state();
13471da177e4SLinus Torvalds 
13481da177e4SLinus Torvalds 	if (unlikely(pol->policy == MPOL_INTERLEAVE)) {
13491da177e4SLinus Torvalds 		unsigned nid;
13505da7ca86SChristoph Lameter 
13515da7ca86SChristoph Lameter 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
13521da177e4SLinus Torvalds 		return alloc_page_interleave(gfp, 0, nid);
13531da177e4SLinus Torvalds 	}
1354480eccf9SLee Schermerhorn 	zl = zonelist_policy(gfp, pol);
1355480eccf9SLee Schermerhorn 	if (pol != &default_policy && pol != current->mempolicy) {
1356480eccf9SLee Schermerhorn 		/*
1357480eccf9SLee Schermerhorn 		 * slow path: ref counted policy -- shared or vma
1358480eccf9SLee Schermerhorn 		 */
1359480eccf9SLee Schermerhorn 		struct page *page =  __alloc_pages(gfp, 0, zl);
1360480eccf9SLee Schermerhorn 		__mpol_free(pol);
1361480eccf9SLee Schermerhorn 		return page;
1362480eccf9SLee Schermerhorn 	}
1363480eccf9SLee Schermerhorn 	/*
1364480eccf9SLee Schermerhorn 	 * fast path:  default or task policy
1365480eccf9SLee Schermerhorn 	 */
1366480eccf9SLee Schermerhorn 	return __alloc_pages(gfp, 0, zl);
13671da177e4SLinus Torvalds }
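
/*
 * Hedged caller sketch (not compiled): the typical fault-path use of
 * alloc_page_vma() described above, with mmap_sem held for read.
 */
#if 0
static struct page *example_fault_page(struct vm_area_struct *vma,
				       unsigned long address)
{
	/* caller already did down_read(&vma->vm_mm->mmap_sem) */
	return alloc_page_vma(GFP_HIGHUSER, vma, address);
}
#endif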
13681da177e4SLinus Torvalds 
13691da177e4SLinus Torvalds /**
13701da177e4SLinus Torvalds  * 	alloc_pages_current - Allocate pages.
13711da177e4SLinus Torvalds  *
13721da177e4SLinus Torvalds  *	@gfp:
13731da177e4SLinus Torvalds  *		%GFP_USER   user allocation,
13741da177e4SLinus Torvalds  *      	%GFP_KERNEL kernel allocation,
13751da177e4SLinus Torvalds  *      	%GFP_HIGHMEM highmem allocation,
13761da177e4SLinus Torvalds  *      	%GFP_FS     don't call back into a file system.
13771da177e4SLinus Torvalds  *      	%GFP_ATOMIC don't sleep.
13781da177e4SLinus Torvalds  *	@order: Power of two of allocation size in pages. 0 is a single page.
13791da177e4SLinus Torvalds  *
13801da177e4SLinus Torvalds  *	Allocate a page from the kernel page pool.  When not in
13811da177e4SLinus Torvalds  *	interrupt context, apply the current process' NUMA policy.
13821da177e4SLinus Torvalds  *	Returns NULL when no page can be allocated.
13831da177e4SLinus Torvalds  *
1384cf2a473cSPaul Jackson  *	Don't call cpuset_update_task_memory_state() unless
13851da177e4SLinus Torvalds  *	1) it's ok to take cpuset_sem (can WAIT), and
13861da177e4SLinus Torvalds  *	2) allocating for current task (not interrupt).
13871da177e4SLinus Torvalds  */
1388dd0fc66fSAl Viro struct page *alloc_pages_current(gfp_t gfp, unsigned order)
13891da177e4SLinus Torvalds {
13901da177e4SLinus Torvalds 	struct mempolicy *pol = current->mempolicy;
13911da177e4SLinus Torvalds 
13921da177e4SLinus Torvalds 	if ((gfp & __GFP_WAIT) && !in_interrupt())
1393cf2a473cSPaul Jackson 		cpuset_update_task_memory_state();
13949b819d20SChristoph Lameter 	if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
13951da177e4SLinus Torvalds 		pol = &default_policy;
13961da177e4SLinus Torvalds 	if (pol->policy == MPOL_INTERLEAVE)
13971da177e4SLinus Torvalds 		return alloc_page_interleave(gfp, order, interleave_nodes(pol));
13981da177e4SLinus Torvalds 	return __alloc_pages(gfp, order, zonelist_policy(gfp, pol));
13991da177e4SLinus Torvalds }
14001da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_pages_current);
14011da177e4SLinus Torvalds 
14024225399aSPaul Jackson /*
14034225399aSPaul Jackson  * If mpol_copy() sees current->cpuset == cpuset_being_rebound, then it
14044225399aSPaul Jackson  * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
14054225399aSPaul Jackson  * with the mems_allowed returned by cpuset_mems_allowed().  This
14064225399aSPaul Jackson  * keeps mempolicies cpuset relative after its cpuset moves.  See
14074225399aSPaul Jackson  * further kernel/cpuset.c update_nodemask().
14084225399aSPaul Jackson  */
14094225399aSPaul Jackson 
14101da177e4SLinus Torvalds /* Slow path of a mempolicy copy */
14111da177e4SLinus Torvalds struct mempolicy *__mpol_copy(struct mempolicy *old)
14121da177e4SLinus Torvalds {
14131da177e4SLinus Torvalds 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
14141da177e4SLinus Torvalds 
14151da177e4SLinus Torvalds 	if (!new)
14161da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
14174225399aSPaul Jackson 	if (current_cpuset_is_being_rebound()) {
14184225399aSPaul Jackson 		nodemask_t mems = cpuset_mems_allowed(current);
14194225399aSPaul Jackson 		mpol_rebind_policy(old, &mems);
14204225399aSPaul Jackson 	}
14211da177e4SLinus Torvalds 	*new = *old;
14221da177e4SLinus Torvalds 	atomic_set(&new->refcnt, 1);
14231da177e4SLinus Torvalds 	if (new->policy == MPOL_BIND) {
14241da177e4SLinus Torvalds 		int sz = ksize(old->v.zonelist);
1425e94b1766SChristoph Lameter 		new->v.zonelist = kmemdup(old->v.zonelist, sz, GFP_KERNEL);
14261da177e4SLinus Torvalds 		if (!new->v.zonelist) {
14271da177e4SLinus Torvalds 			kmem_cache_free(policy_cache, new);
14281da177e4SLinus Torvalds 			return ERR_PTR(-ENOMEM);
14291da177e4SLinus Torvalds 		}
14301da177e4SLinus Torvalds 	}
14311da177e4SLinus Torvalds 	return new;
14321da177e4SLinus Torvalds }
14331da177e4SLinus Torvalds 
14341da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */
14351da177e4SLinus Torvalds int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
14361da177e4SLinus Torvalds {
14371da177e4SLinus Torvalds 	if (!a || !b)
14381da177e4SLinus Torvalds 		return 0;
14391da177e4SLinus Torvalds 	if (a->policy != b->policy)
14401da177e4SLinus Torvalds 		return 0;
14411da177e4SLinus Torvalds 	switch (a->policy) {
14421da177e4SLinus Torvalds 	case MPOL_DEFAULT:
14431da177e4SLinus Torvalds 		return 1;
14441da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
1445dfcd3c0dSAndi Kleen 		return nodes_equal(a->v.nodes, b->v.nodes);
14461da177e4SLinus Torvalds 	case MPOL_PREFERRED:
14471da177e4SLinus Torvalds 		return a->v.preferred_node == b->v.preferred_node;
14481da177e4SLinus Torvalds 	case MPOL_BIND: {
14491da177e4SLinus Torvalds 		int i;
14501da177e4SLinus Torvalds 		for (i = 0; a->v.zonelist->zones[i]; i++)
14511da177e4SLinus Torvalds 			if (a->v.zonelist->zones[i] != b->v.zonelist->zones[i])
14521da177e4SLinus Torvalds 				return 0;
14531da177e4SLinus Torvalds 		return b->v.zonelist->zones[i] == NULL;
14541da177e4SLinus Torvalds 	}
14551da177e4SLinus Torvalds 	default:
14561da177e4SLinus Torvalds 		BUG();
14571da177e4SLinus Torvalds 		return 0;
14581da177e4SLinus Torvalds 	}
14591da177e4SLinus Torvalds }
14601da177e4SLinus Torvalds 
14611da177e4SLinus Torvalds /* Slow path of a mpol destructor. */
14621da177e4SLinus Torvalds void __mpol_free(struct mempolicy *p)
14631da177e4SLinus Torvalds {
14641da177e4SLinus Torvalds 	if (!atomic_dec_and_test(&p->refcnt))
14651da177e4SLinus Torvalds 		return;
14661da177e4SLinus Torvalds 	if (p->policy == MPOL_BIND)
14671da177e4SLinus Torvalds 		kfree(p->v.zonelist);
14681da177e4SLinus Torvalds 	p->policy = MPOL_DEFAULT;
14691da177e4SLinus Torvalds 	kmem_cache_free(policy_cache, p);
14701da177e4SLinus Torvalds }
14711da177e4SLinus Torvalds 
14721da177e4SLinus Torvalds /*
14731da177e4SLinus Torvalds  * Shared memory backing store policy support.
14741da177e4SLinus Torvalds  *
14751da177e4SLinus Torvalds  * Remember policies even when nobody has shared memory mapped.
14761da177e4SLinus Torvalds  * The policies are kept in Red-Black tree linked from the inode.
14771da177e4SLinus Torvalds  * They are protected by the sp->lock spinlock, which should be held
14781da177e4SLinus Torvalds  * for any accesses to the tree.
14791da177e4SLinus Torvalds  */
14801da177e4SLinus Torvalds 
14811da177e4SLinus Torvalds /* lookup first element intersecting start-end */
14821da177e4SLinus Torvalds /* Caller holds sp->lock */
14831da177e4SLinus Torvalds static struct sp_node *
14841da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
14851da177e4SLinus Torvalds {
14861da177e4SLinus Torvalds 	struct rb_node *n = sp->root.rb_node;
14871da177e4SLinus Torvalds 
14881da177e4SLinus Torvalds 	while (n) {
14891da177e4SLinus Torvalds 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
14901da177e4SLinus Torvalds 
14911da177e4SLinus Torvalds 		if (start >= p->end)
14921da177e4SLinus Torvalds 			n = n->rb_right;
14931da177e4SLinus Torvalds 		else if (end <= p->start)
14941da177e4SLinus Torvalds 			n = n->rb_left;
14951da177e4SLinus Torvalds 		else
14961da177e4SLinus Torvalds 			break;
14971da177e4SLinus Torvalds 	}
14981da177e4SLinus Torvalds 	if (!n)
14991da177e4SLinus Torvalds 		return NULL;
15001da177e4SLinus Torvalds 	for (;;) {
15011da177e4SLinus Torvalds 		struct sp_node *w = NULL;
15021da177e4SLinus Torvalds 		struct rb_node *prev = rb_prev(n);
15031da177e4SLinus Torvalds 		if (!prev)
15041da177e4SLinus Torvalds 			break;
15051da177e4SLinus Torvalds 		w = rb_entry(prev, struct sp_node, nd);
15061da177e4SLinus Torvalds 		if (w->end <= start)
15071da177e4SLinus Torvalds 			break;
15081da177e4SLinus Torvalds 		n = prev;
15091da177e4SLinus Torvalds 	}
15101da177e4SLinus Torvalds 	return rb_entry(n, struct sp_node, nd);
15111da177e4SLinus Torvalds }
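
/*
 * Example of the backward walk above: with stored ranges [2,4), [5,8)
 * and [9,12), a lookup for [3,10) may first land on [5,8); rb_prev()
 * steps to [2,4), which still intersects (end 4 > start 3), then finds
 * no earlier node, so [2,4) - the first intersecting range - is
 * returned.
 */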
15121da177e4SLinus Torvalds 
15131da177e4SLinus Torvalds /* Insert a new shared policy into the list. */
15141da177e4SLinus Torvalds /* Caller holds sp->lock */
15151da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new)
15161da177e4SLinus Torvalds {
15171da177e4SLinus Torvalds 	struct rb_node **p = &sp->root.rb_node;
15181da177e4SLinus Torvalds 	struct rb_node *parent = NULL;
15191da177e4SLinus Torvalds 	struct sp_node *nd;
15201da177e4SLinus Torvalds 
15211da177e4SLinus Torvalds 	while (*p) {
15221da177e4SLinus Torvalds 		parent = *p;
15231da177e4SLinus Torvalds 		nd = rb_entry(parent, struct sp_node, nd);
15241da177e4SLinus Torvalds 		if (new->start < nd->start)
15251da177e4SLinus Torvalds 			p = &(*p)->rb_left;
15261da177e4SLinus Torvalds 		else if (new->end > nd->end)
15271da177e4SLinus Torvalds 			p = &(*p)->rb_right;
15281da177e4SLinus Torvalds 		else
15291da177e4SLinus Torvalds 			BUG();
15301da177e4SLinus Torvalds 	}
15311da177e4SLinus Torvalds 	rb_link_node(&new->nd, parent, p);
15321da177e4SLinus Torvalds 	rb_insert_color(&new->nd, &sp->root);
1533140d5a49SPaul Mundt 	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
15341da177e4SLinus Torvalds 		 new->policy ? new->policy->policy : 0);
15351da177e4SLinus Torvalds }
15361da177e4SLinus Torvalds 
15371da177e4SLinus Torvalds /* Find shared policy intersecting idx */
15381da177e4SLinus Torvalds struct mempolicy *
15391da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
15401da177e4SLinus Torvalds {
15411da177e4SLinus Torvalds 	struct mempolicy *pol = NULL;
15421da177e4SLinus Torvalds 	struct sp_node *sn;
15431da177e4SLinus Torvalds 
15441da177e4SLinus Torvalds 	if (!sp->root.rb_node)
15451da177e4SLinus Torvalds 		return NULL;
15461da177e4SLinus Torvalds 	spin_lock(&sp->lock);
15471da177e4SLinus Torvalds 	sn = sp_lookup(sp, idx, idx+1);
15481da177e4SLinus Torvalds 	if (sn) {
15491da177e4SLinus Torvalds 		mpol_get(sn->policy);
15501da177e4SLinus Torvalds 		pol = sn->policy;
15511da177e4SLinus Torvalds 	}
15521da177e4SLinus Torvalds 	spin_unlock(&sp->lock);
15531da177e4SLinus Torvalds 	return pol;
15541da177e4SLinus Torvalds }
15551da177e4SLinus Torvalds 
15561da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n)
15571da177e4SLinus Torvalds {
1558140d5a49SPaul Mundt 	pr_debug("deleting %lx-%lx\n", n->start, n->end);
15591da177e4SLinus Torvalds 	rb_erase(&n->nd, &sp->root);
15601da177e4SLinus Torvalds 	mpol_free(n->policy);
15611da177e4SLinus Torvalds 	kmem_cache_free(sn_cache, n);
15621da177e4SLinus Torvalds }
15631da177e4SLinus Torvalds 
1564dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
1565dbcb0f19SAdrian Bunk 				struct mempolicy *pol)
15661da177e4SLinus Torvalds {
15671da177e4SLinus Torvalds 	struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
15681da177e4SLinus Torvalds 
15691da177e4SLinus Torvalds 	if (!n)
15701da177e4SLinus Torvalds 		return NULL;
15711da177e4SLinus Torvalds 	n->start = start;
15721da177e4SLinus Torvalds 	n->end = end;
15731da177e4SLinus Torvalds 	mpol_get(pol);
15741da177e4SLinus Torvalds 	n->policy = pol;
15751da177e4SLinus Torvalds 	return n;
15761da177e4SLinus Torvalds }
15771da177e4SLinus Torvalds 
15781da177e4SLinus Torvalds /* Replace a policy range. */
15791da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
15801da177e4SLinus Torvalds 				 unsigned long end, struct sp_node *new)
15811da177e4SLinus Torvalds {
15821da177e4SLinus Torvalds 	struct sp_node *n, *new2 = NULL;
15831da177e4SLinus Torvalds 
15841da177e4SLinus Torvalds restart:
15851da177e4SLinus Torvalds 	spin_lock(&sp->lock);
15861da177e4SLinus Torvalds 	n = sp_lookup(sp, start, end);
15871da177e4SLinus Torvalds 	/* Take care of old policies in the same range. */
15881da177e4SLinus Torvalds 	while (n && n->start < end) {
15891da177e4SLinus Torvalds 		struct rb_node *next = rb_next(&n->nd);
15901da177e4SLinus Torvalds 		if (n->start >= start) {
15911da177e4SLinus Torvalds 			if (n->end <= end)
15921da177e4SLinus Torvalds 				sp_delete(sp, n);
15931da177e4SLinus Torvalds 			else
15941da177e4SLinus Torvalds 				n->start = end;
15951da177e4SLinus Torvalds 		} else {
15961da177e4SLinus Torvalds 			/* Old policy spanning whole new range. */
15971da177e4SLinus Torvalds 			if (n->end > end) {
15981da177e4SLinus Torvalds 				if (!new2) {
15991da177e4SLinus Torvalds 					spin_unlock(&sp->lock);
16001da177e4SLinus Torvalds 					new2 = sp_alloc(end, n->end, n->policy);
16011da177e4SLinus Torvalds 					if (!new2)
16021da177e4SLinus Torvalds 						return -ENOMEM;
16031da177e4SLinus Torvalds 					goto restart;
16041da177e4SLinus Torvalds 				}
16051da177e4SLinus Torvalds 				n->end = start;
16061da177e4SLinus Torvalds 				sp_insert(sp, new2);
16071da177e4SLinus Torvalds 				new2 = NULL;
16081da177e4SLinus Torvalds 				break;
16091da177e4SLinus Torvalds 			} else
16101da177e4SLinus Torvalds 				n->end = start;
16111da177e4SLinus Torvalds 		}
16121da177e4SLinus Torvalds 		if (!next)
16131da177e4SLinus Torvalds 			break;
16141da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
16151da177e4SLinus Torvalds 	}
16161da177e4SLinus Torvalds 	if (new)
16171da177e4SLinus Torvalds 		sp_insert(sp, new);
16181da177e4SLinus Torvalds 	spin_unlock(&sp->lock);
16191da177e4SLinus Torvalds 	if (new2) {
16201da177e4SLinus Torvalds 		mpol_free(new2->policy);
16211da177e4SLinus Torvalds 		kmem_cache_free(sn_cache, new2);
16221da177e4SLinus Torvalds 	}
16231da177e4SLinus Torvalds 	return 0;
16241da177e4SLinus Torvalds }
16251da177e4SLinus Torvalds 
16267339ff83SRobin Holt void mpol_shared_policy_init(struct shared_policy *info, int policy,
16277339ff83SRobin Holt 				nodemask_t *policy_nodes)
16287339ff83SRobin Holt {
16297339ff83SRobin Holt 	info->root = RB_ROOT;
16307339ff83SRobin Holt 	spin_lock_init(&info->lock);
16317339ff83SRobin Holt 
16327339ff83SRobin Holt 	if (policy != MPOL_DEFAULT) {
16337339ff83SRobin Holt 		struct mempolicy *newpol;
16347339ff83SRobin Holt 
16357339ff83SRobin Holt 		/* Falls back to MPOL_DEFAULT on any error */
16367339ff83SRobin Holt 		newpol = mpol_new(policy, policy_nodes);
16377339ff83SRobin Holt 		if (!IS_ERR(newpol)) {
16387339ff83SRobin Holt 			/* Create pseudo-vma that contains just the policy */
16397339ff83SRobin Holt 			struct vm_area_struct pvma;
16407339ff83SRobin Holt 
16417339ff83SRobin Holt 			memset(&pvma, 0, sizeof(struct vm_area_struct));
16427339ff83SRobin Holt 			/* Policy covers entire file */
16437339ff83SRobin Holt 			pvma.vm_end = TASK_SIZE;
16447339ff83SRobin Holt 			mpol_set_shared_policy(info, &pvma, newpol);
16457339ff83SRobin Holt 			mpol_free(newpol);
16467339ff83SRobin Holt 		}
16477339ff83SRobin Holt 	}
16487339ff83SRobin Holt }
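
/*
 * Hedged caller sketch (not compiled): roughly how tmpfs (mm/shmem.c)
 * seeds a per-inode shared policy from mount options; the info/sbinfo
 * field names follow shmem but are illustrative here.
 */
#if 0
	mpol_shared_policy_init(&info->policy, sbinfo->policy,
				&sbinfo->policy_nodes);
#endif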
16497339ff83SRobin Holt 
16501da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info,
16511da177e4SLinus Torvalds 			struct vm_area_struct *vma, struct mempolicy *npol)
16521da177e4SLinus Torvalds {
16531da177e4SLinus Torvalds 	int err;
16541da177e4SLinus Torvalds 	struct sp_node *new = NULL;
16551da177e4SLinus Torvalds 	unsigned long sz = vma_pages(vma);
16561da177e4SLinus Torvalds 
1657140d5a49SPaul Mundt 	pr_debug("set_shared_policy %lx sz %lu %d %lx\n",
16581da177e4SLinus Torvalds 		 vma->vm_pgoff,
16591da177e4SLinus Torvalds 		 sz, npol ? npol->policy : -1,
1660dfcd3c0dSAndi Kleen 		 npol ? nodes_addr(npol->v.nodes)[0] : -1);
16611da177e4SLinus Torvalds 
16621da177e4SLinus Torvalds 	if (npol) {
16631da177e4SLinus Torvalds 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
16641da177e4SLinus Torvalds 		if (!new)
16651da177e4SLinus Torvalds 			return -ENOMEM;
16661da177e4SLinus Torvalds 	}
16671da177e4SLinus Torvalds 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
16681da177e4SLinus Torvalds 	if (err && new)
16691da177e4SLinus Torvalds 		kmem_cache_free(sn_cache, new);
16701da177e4SLinus Torvalds 	return err;
16711da177e4SLinus Torvalds }
16721da177e4SLinus Torvalds 
16731da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */
16741da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p)
16751da177e4SLinus Torvalds {
16761da177e4SLinus Torvalds 	struct sp_node *n;
16771da177e4SLinus Torvalds 	struct rb_node *next;
16781da177e4SLinus Torvalds 
16791da177e4SLinus Torvalds 	if (!p->root.rb_node)
16801da177e4SLinus Torvalds 		return;
16811da177e4SLinus Torvalds 	spin_lock(&p->lock);
16821da177e4SLinus Torvalds 	next = rb_first(&p->root);
16831da177e4SLinus Torvalds 	while (next) {
16841da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
16851da177e4SLinus Torvalds 		next = rb_next(&n->nd);
168690c5029eSAndi Kleen 		rb_erase(&n->nd, &p->root);
16871da177e4SLinus Torvalds 		mpol_free(n->policy);
16881da177e4SLinus Torvalds 		kmem_cache_free(sn_cache, n);
16891da177e4SLinus Torvalds 	}
16901da177e4SLinus Torvalds 	spin_unlock(&p->lock);
16911da177e4SLinus Torvalds }
16921da177e4SLinus Torvalds 
16931da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */
16941da177e4SLinus Torvalds void __init numa_policy_init(void)
16951da177e4SLinus Torvalds {
1696b71636e2SPaul Mundt 	nodemask_t interleave_nodes;
1697b71636e2SPaul Mundt 	unsigned long largest = 0;
1698b71636e2SPaul Mundt 	int nid, prefer = 0;
1699b71636e2SPaul Mundt 
17001da177e4SLinus Torvalds 	policy_cache = kmem_cache_create("numa_policy",
17011da177e4SLinus Torvalds 					 sizeof(struct mempolicy),
170220c2df83SPaul Mundt 					 0, SLAB_PANIC, NULL);
17031da177e4SLinus Torvalds 
17041da177e4SLinus Torvalds 	sn_cache = kmem_cache_create("shared_policy_node",
17051da177e4SLinus Torvalds 				     sizeof(struct sp_node),
170620c2df83SPaul Mundt 				     0, SLAB_PANIC, NULL);
17071da177e4SLinus Torvalds 
1708b71636e2SPaul Mundt 	/*
1709b71636e2SPaul Mundt 	 * Set interleaving policy for system init. Interleaving is only
1710b71636e2SPaul Mundt 	 * enabled across suitably sized nodes (default is >= 16MB); we
1711b71636e2SPaul Mundt 	 * fall back to the largest node if they're all smaller.
1712b71636e2SPaul Mundt 	 */
1713b71636e2SPaul Mundt 	nodes_clear(interleave_nodes);
171456bbd65dSChristoph Lameter 	for_each_node_state(nid, N_HIGH_MEMORY) {
1715b71636e2SPaul Mundt 		unsigned long total_pages = node_present_pages(nid);
17161da177e4SLinus Torvalds 
1717b71636e2SPaul Mundt 		/* Preserve the largest node */
1718b71636e2SPaul Mundt 		if (largest < total_pages) {
1719b71636e2SPaul Mundt 			largest = total_pages;
1720b71636e2SPaul Mundt 			prefer = nid;
1721b71636e2SPaul Mundt 		}
1722b71636e2SPaul Mundt 
1723b71636e2SPaul Mundt 		/* Interleave this node? */
1724b71636e2SPaul Mundt 		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
1725b71636e2SPaul Mundt 			node_set(nid, interleave_nodes);
1726b71636e2SPaul Mundt 	}
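
	/*
	 * Worked example of the threshold above: with 4KB pages,
	 * 16MB is (16 << 20) >> 12 = 4096 present pages, so a node
	 * needs at least that many present pages to join the
	 * boot-time interleave set.
	 */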
1727b71636e2SPaul Mundt 
1728b71636e2SPaul Mundt 	/* All too small, use the largest */
1729b71636e2SPaul Mundt 	if (unlikely(nodes_empty(interleave_nodes)))
1730b71636e2SPaul Mundt 		node_set(prefer, interleave_nodes);
1731b71636e2SPaul Mundt 
1732b71636e2SPaul Mundt 	if (do_set_mempolicy(MPOL_INTERLEAVE, &interleave_nodes))
17331da177e4SLinus Torvalds 		printk(KERN_WARNING "numa_policy_init: interleaving failed\n");
17341da177e4SLinus Torvalds }
17351da177e4SLinus Torvalds 
17368bccd85fSChristoph Lameter /* Reset policy of current process to default */
17371da177e4SLinus Torvalds void numa_default_policy(void)
17381da177e4SLinus Torvalds {
17398bccd85fSChristoph Lameter 	do_set_mempolicy(MPOL_DEFAULT, NULL);
17401da177e4SLinus Torvalds }
174168860ec1SPaul Jackson 
174268860ec1SPaul Jackson /* Migrate a policy to a different set of nodes */
1743dbcb0f19SAdrian Bunk static void mpol_rebind_policy(struct mempolicy *pol,
1744dbcb0f19SAdrian Bunk 			       const nodemask_t *newmask)
174568860ec1SPaul Jackson {
174674cb2155SPaul Jackson 	nodemask_t *mpolmask;
174768860ec1SPaul Jackson 	nodemask_t tmp;
174868860ec1SPaul Jackson 
174968860ec1SPaul Jackson 	if (!pol)
175068860ec1SPaul Jackson 		return;
175174cb2155SPaul Jackson 	mpolmask = &pol->cpuset_mems_allowed;
175274cb2155SPaul Jackson 	if (nodes_equal(*mpolmask, *newmask))
175374cb2155SPaul Jackson 		return;
175468860ec1SPaul Jackson 
175568860ec1SPaul Jackson 	switch (pol->policy) {
175668860ec1SPaul Jackson 	case MPOL_DEFAULT:
175768860ec1SPaul Jackson 		break;
175868860ec1SPaul Jackson 	case MPOL_INTERLEAVE:
175974cb2155SPaul Jackson 		nodes_remap(tmp, pol->v.nodes, *mpolmask, *newmask);
176068860ec1SPaul Jackson 		pol->v.nodes = tmp;
176174cb2155SPaul Jackson 		*mpolmask = *newmask;
176274cb2155SPaul Jackson 		current->il_next = node_remap(current->il_next,
176374cb2155SPaul Jackson 						*mpolmask, *newmask);
176468860ec1SPaul Jackson 		break;
176568860ec1SPaul Jackson 	case MPOL_PREFERRED:
176668860ec1SPaul Jackson 		pol->v.preferred_node = node_remap(pol->v.preferred_node,
176774cb2155SPaul Jackson 						*mpolmask, *newmask);
176874cb2155SPaul Jackson 		*mpolmask = *newmask;
176968860ec1SPaul Jackson 		break;
177068860ec1SPaul Jackson 	case MPOL_BIND: {
177168860ec1SPaul Jackson 		nodemask_t nodes;
177268860ec1SPaul Jackson 		struct zone **z;
177368860ec1SPaul Jackson 		struct zonelist *zonelist;
177468860ec1SPaul Jackson 
177568860ec1SPaul Jackson 		nodes_clear(nodes);
177668860ec1SPaul Jackson 		for (z = pol->v.zonelist->zones; *z; z++)
177789fa3024SChristoph Lameter 			node_set(zone_to_nid(*z), nodes);
177874cb2155SPaul Jackson 		nodes_remap(tmp, nodes, *mpolmask, *newmask);
177968860ec1SPaul Jackson 		nodes = tmp;
178068860ec1SPaul Jackson 
178168860ec1SPaul Jackson 		zonelist = bind_zonelist(&nodes);
178268860ec1SPaul Jackson 
178368860ec1SPaul Jackson 		/* If no mem, bind_zonelist() fails and we keep the old zonelist.
178468860ec1SPaul Jackson 		 * If that old zonelist has no remaining mems_allowed nodes,
178568860ec1SPaul Jackson 		 * then zonelist_policy() will "FALL THROUGH" to MPOL_DEFAULT.
178668860ec1SPaul Jackson 		 */
178768860ec1SPaul Jackson 
17888af5e2ebSKAMEZAWA Hiroyuki 		if (!IS_ERR(zonelist)) {
178968860ec1SPaul Jackson 			/* Good - got mem - substitute new zonelist */
179068860ec1SPaul Jackson 			kfree(pol->v.zonelist);
179168860ec1SPaul Jackson 			pol->v.zonelist = zonelist;
179268860ec1SPaul Jackson 		}
179374cb2155SPaul Jackson 		*mpolmask = *newmask;
179468860ec1SPaul Jackson 		break;
179568860ec1SPaul Jackson 	}
179668860ec1SPaul Jackson 	default:
179768860ec1SPaul Jackson 		BUG();
179868860ec1SPaul Jackson 		break;
179968860ec1SPaul Jackson 	}
180068860ec1SPaul Jackson }
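
/*
 * Worked example of the remaps above: if a task's cpuset moves from
 * mems {1,3} to mems {5,7}, node_remap()/nodes_remap() map by bit
 * position, so an MPOL_INTERLEAVE set {3} (the second old node)
 * becomes {7}, and an MPOL_PREFERRED node 1 becomes 5.
 */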
180168860ec1SPaul Jackson 
180268860ec1SPaul Jackson /*
180374cb2155SPaul Jackson  * Wrapper for mpol_rebind_policy() that just requires a task
180474cb2155SPaul Jackson  * pointer, and updates the task's mempolicy.
180568860ec1SPaul Jackson  */
180674cb2155SPaul Jackson 
180774cb2155SPaul Jackson void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
180868860ec1SPaul Jackson {
180974cb2155SPaul Jackson 	mpol_rebind_policy(tsk->mempolicy, new);
181068860ec1SPaul Jackson }
18111a75a6c8SChristoph Lameter 
18121a75a6c8SChristoph Lameter /*
18134225399aSPaul Jackson  * Rebind each vma in mm to new nodemask.
18144225399aSPaul Jackson  *
18154225399aSPaul Jackson  * Call holding a reference to mm.  Takes mm->mmap_sem during call.
18164225399aSPaul Jackson  */
18174225399aSPaul Jackson 
18184225399aSPaul Jackson void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
18194225399aSPaul Jackson {
18204225399aSPaul Jackson 	struct vm_area_struct *vma;
18214225399aSPaul Jackson 
18224225399aSPaul Jackson 	down_write(&mm->mmap_sem);
18234225399aSPaul Jackson 	for (vma = mm->mmap; vma; vma = vma->vm_next)
18244225399aSPaul Jackson 		mpol_rebind_policy(vma->vm_policy, new);
18254225399aSPaul Jackson 	up_write(&mm->mmap_sem);
18264225399aSPaul Jackson }
18274225399aSPaul Jackson 
18284225399aSPaul Jackson /*
18291a75a6c8SChristoph Lameter  * Display pages allocated per node and memory policy via /proc.
18301a75a6c8SChristoph Lameter  */
18311a75a6c8SChristoph Lameter 
183215ad7cdcSHelge Deller static const char * const policy_types[] =
183315ad7cdcSHelge Deller 	{ "default", "prefer", "bind", "interleave" };
18341a75a6c8SChristoph Lameter 
18351a75a6c8SChristoph Lameter /*
18361a75a6c8SChristoph Lameter  * Convert a mempolicy into a string.
18371a75a6c8SChristoph Lameter  * Returns the number of characters in buffer (if positive)
18381a75a6c8SChristoph Lameter  * or an error (negative)
18391a75a6c8SChristoph Lameter  */
18401a75a6c8SChristoph Lameter static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
18411a75a6c8SChristoph Lameter {
18421a75a6c8SChristoph Lameter 	char *p = buffer;
18431a75a6c8SChristoph Lameter 	int l;
18441a75a6c8SChristoph Lameter 	nodemask_t nodes;
18451a75a6c8SChristoph Lameter 	int mode = pol ? pol->policy : MPOL_DEFAULT;
18461a75a6c8SChristoph Lameter 
18471a75a6c8SChristoph Lameter 	switch (mode) {
18481a75a6c8SChristoph Lameter 	case MPOL_DEFAULT:
18491a75a6c8SChristoph Lameter 		nodes_clear(nodes);
18501a75a6c8SChristoph Lameter 		break;
18511a75a6c8SChristoph Lameter 
18521a75a6c8SChristoph Lameter 	case MPOL_PREFERRED:
18531a75a6c8SChristoph Lameter 		nodes_clear(nodes);
18541a75a6c8SChristoph Lameter 		node_set(pol->v.preferred_node, nodes);
18551a75a6c8SChristoph Lameter 		break;
18561a75a6c8SChristoph Lameter 
18571a75a6c8SChristoph Lameter 	case MPOL_BIND:
18581a75a6c8SChristoph Lameter 		get_zonemask(pol, &nodes);
18591a75a6c8SChristoph Lameter 		break;
18601a75a6c8SChristoph Lameter 
18611a75a6c8SChristoph Lameter 	case MPOL_INTERLEAVE:
18621a75a6c8SChristoph Lameter 		nodes = pol->v.nodes;
18631a75a6c8SChristoph Lameter 		break;
18641a75a6c8SChristoph Lameter 
18651a75a6c8SChristoph Lameter 	default:
18661a75a6c8SChristoph Lameter 		BUG();
18671a75a6c8SChristoph Lameter 		return -EFAULT;
18681a75a6c8SChristoph Lameter 	}
18691a75a6c8SChristoph Lameter 
18701a75a6c8SChristoph Lameter 	l = strlen(policy_types[mode]);
18711a75a6c8SChristoph Lameter 	if (buffer + maxlen < p + l + 1)
18721a75a6c8SChristoph Lameter 		return -ENOSPC;
18731a75a6c8SChristoph Lameter 
18741a75a6c8SChristoph Lameter 	strcpy(p, policy_types[mode]);
18751a75a6c8SChristoph Lameter 	p += l;
18761a75a6c8SChristoph Lameter 
18771a75a6c8SChristoph Lameter 	if (!nodes_empty(nodes)) {
18781a75a6c8SChristoph Lameter 		if (buffer + maxlen < p + 2)
18791a75a6c8SChristoph Lameter 			return -ENOSPC;
18801a75a6c8SChristoph Lameter 		*p++ = '=';
18811a75a6c8SChristoph Lameter 		p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
18821a75a6c8SChristoph Lameter 	}
18831a75a6c8SChristoph Lameter 	return p - buffer;
18841a75a6c8SChristoph Lameter }
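
/*
 * Example outputs from the above: MPOL_INTERLEAVE over nodes 0-3
 * formats as "interleave=0-3" (nodelist_scnprintf() compresses runs),
 * while MPOL_DEFAULT prints just "default" with no nodelist.
 */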
18851a75a6c8SChristoph Lameter 
18861a75a6c8SChristoph Lameter struct numa_maps {
18871a75a6c8SChristoph Lameter 	unsigned long pages;
18881a75a6c8SChristoph Lameter 	unsigned long anon;
1889397874dfSChristoph Lameter 	unsigned long active;
1890397874dfSChristoph Lameter 	unsigned long writeback;
18911a75a6c8SChristoph Lameter 	unsigned long mapcount_max;
1892397874dfSChristoph Lameter 	unsigned long dirty;
1893397874dfSChristoph Lameter 	unsigned long swapcache;
18941a75a6c8SChristoph Lameter 	unsigned long node[MAX_NUMNODES];
18951a75a6c8SChristoph Lameter };
18961a75a6c8SChristoph Lameter 
1897397874dfSChristoph Lameter static void gather_stats(struct page *page, void *private, int pte_dirty)
18981a75a6c8SChristoph Lameter {
18991a75a6c8SChristoph Lameter 	struct numa_maps *md = private;
19001a75a6c8SChristoph Lameter 	int count = page_mapcount(page);
19011a75a6c8SChristoph Lameter 
19021a75a6c8SChristoph Lameter 	md->pages++;
1903397874dfSChristoph Lameter 	if (pte_dirty || PageDirty(page))
1904397874dfSChristoph Lameter 		md->dirty++;
1905397874dfSChristoph Lameter 
1906397874dfSChristoph Lameter 	if (PageSwapCache(page))
1907397874dfSChristoph Lameter 		md->swapcache++;
1908397874dfSChristoph Lameter 
1909397874dfSChristoph Lameter 	if (PageActive(page))
1910397874dfSChristoph Lameter 		md->active++;
1911397874dfSChristoph Lameter 
1912397874dfSChristoph Lameter 	if (PageWriteback(page))
1913397874dfSChristoph Lameter 		md->writeback++;
19141a75a6c8SChristoph Lameter 
19151a75a6c8SChristoph Lameter 	if (PageAnon(page))
19161a75a6c8SChristoph Lameter 		md->anon++;
19171a75a6c8SChristoph Lameter 
1918397874dfSChristoph Lameter 	if (count > md->mapcount_max)
1919397874dfSChristoph Lameter 		md->mapcount_max = count;
1920397874dfSChristoph Lameter 
19211a75a6c8SChristoph Lameter 	md->node[page_to_nid(page)]++;
19221a75a6c8SChristoph Lameter }
19231a75a6c8SChristoph Lameter 
19247f709ed0SAndrew Morton #ifdef CONFIG_HUGETLB_PAGE
1925397874dfSChristoph Lameter static void check_huge_range(struct vm_area_struct *vma,
1926397874dfSChristoph Lameter 		unsigned long start, unsigned long end,
1927397874dfSChristoph Lameter 		struct numa_maps *md)
1928397874dfSChristoph Lameter {
1929397874dfSChristoph Lameter 	unsigned long addr;
1930397874dfSChristoph Lameter 	struct page *page;
1931397874dfSChristoph Lameter 
1932397874dfSChristoph Lameter 	for (addr = start; addr < end; addr += HPAGE_SIZE) {
1933397874dfSChristoph Lameter 		pte_t *ptep = huge_pte_offset(vma->vm_mm, addr & HPAGE_MASK);
1934397874dfSChristoph Lameter 		pte_t pte;
1935397874dfSChristoph Lameter 
1936397874dfSChristoph Lameter 		if (!ptep)
1937397874dfSChristoph Lameter 			continue;
1938397874dfSChristoph Lameter 
1939397874dfSChristoph Lameter 		pte = *ptep;
1940397874dfSChristoph Lameter 		if (pte_none(pte))
1941397874dfSChristoph Lameter 			continue;
1942397874dfSChristoph Lameter 
1943397874dfSChristoph Lameter 		page = pte_page(pte);
1944397874dfSChristoph Lameter 		if (!page)
1945397874dfSChristoph Lameter 			continue;
1946397874dfSChristoph Lameter 
1947397874dfSChristoph Lameter 		gather_stats(page, md, pte_dirty(*ptep));
1948397874dfSChristoph Lameter 	}
1949397874dfSChristoph Lameter }
19507f709ed0SAndrew Morton #else
19517f709ed0SAndrew Morton static inline void check_huge_range(struct vm_area_struct *vma,
19527f709ed0SAndrew Morton 		unsigned long start, unsigned long end,
19537f709ed0SAndrew Morton 		struct numa_maps *md)
19547f709ed0SAndrew Morton {
19557f709ed0SAndrew Morton }
19567f709ed0SAndrew Morton #endif
1957397874dfSChristoph Lameter 
19581a75a6c8SChristoph Lameter int show_numa_map(struct seq_file *m, void *v)
19591a75a6c8SChristoph Lameter {
196099f89551SEric W. Biederman 	struct proc_maps_private *priv = m->private;
19611a75a6c8SChristoph Lameter 	struct vm_area_struct *vma = v;
19621a75a6c8SChristoph Lameter 	struct numa_maps *md;
1963397874dfSChristoph Lameter 	struct file *file = vma->vm_file;
1964397874dfSChristoph Lameter 	struct mm_struct *mm = vma->vm_mm;
1965480eccf9SLee Schermerhorn 	struct mempolicy *pol;
19661a75a6c8SChristoph Lameter 	int n;
19671a75a6c8SChristoph Lameter 	char buffer[50];
19681a75a6c8SChristoph Lameter 
1969397874dfSChristoph Lameter 	if (!mm)
19701a75a6c8SChristoph Lameter 		return 0;
19711a75a6c8SChristoph Lameter 
19721a75a6c8SChristoph Lameter 	md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
19731a75a6c8SChristoph Lameter 	if (!md)
19741a75a6c8SChristoph Lameter 		return 0;
19751a75a6c8SChristoph Lameter 
1976480eccf9SLee Schermerhorn 	pol = get_vma_policy(priv->task, vma, vma->vm_start);
1977480eccf9SLee Schermerhorn 	mpol_to_str(buffer, sizeof(buffer), pol);
1978480eccf9SLee Schermerhorn 	/*
1979480eccf9SLee Schermerhorn 	 * unref shared or other task's mempolicy
1980480eccf9SLee Schermerhorn 	 */
1981480eccf9SLee Schermerhorn 	if (pol != &default_policy && pol != current->mempolicy)
1982480eccf9SLee Schermerhorn 		__mpol_free(pol);
19831a75a6c8SChristoph Lameter 
1984397874dfSChristoph Lameter 	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
1985397874dfSChristoph Lameter 
1986397874dfSChristoph Lameter 	if (file) {
1987397874dfSChristoph Lameter 		seq_printf(m, " file=");
1988e9536ae7SJosef Sipek 		seq_path(m, file->f_path.mnt, file->f_path.dentry, "\n\t= ");
1989397874dfSChristoph Lameter 	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
1990397874dfSChristoph Lameter 		seq_printf(m, " heap");
1991397874dfSChristoph Lameter 	} else if (vma->vm_start <= mm->start_stack &&
1992397874dfSChristoph Lameter 			vma->vm_end >= mm->start_stack) {
1993397874dfSChristoph Lameter 		seq_printf(m, " stack");
1994397874dfSChristoph Lameter 	}
1995397874dfSChristoph Lameter 
1996397874dfSChristoph Lameter 	if (is_vm_hugetlb_page(vma)) {
1997397874dfSChristoph Lameter 		check_huge_range(vma, vma->vm_start, vma->vm_end, md);
1998397874dfSChristoph Lameter 		seq_printf(m, " huge");
1999397874dfSChristoph Lameter 	} else {
2000397874dfSChristoph Lameter 		check_pgd_range(vma, vma->vm_start, vma->vm_end,
200156bbd65dSChristoph Lameter 			&node_states[N_HIGH_MEMORY], MPOL_MF_STATS, md);
2002397874dfSChristoph Lameter 	}
2003397874dfSChristoph Lameter 
2004397874dfSChristoph Lameter 	if (!md->pages)
2005397874dfSChristoph Lameter 		goto out;
20061a75a6c8SChristoph Lameter 
20071a75a6c8SChristoph Lameter 	if (md->anon)
20081a75a6c8SChristoph Lameter 		seq_printf(m, " anon=%lu", md->anon);
20091a75a6c8SChristoph Lameter 
2010397874dfSChristoph Lameter 	if (md->dirty)
2011397874dfSChristoph Lameter 		seq_printf(m, " dirty=%lu", md->dirty);
2012397874dfSChristoph Lameter 
2013397874dfSChristoph Lameter 	if (md->pages != md->anon && md->pages != md->dirty)
2014397874dfSChristoph Lameter 		seq_printf(m, " mapped=%lu", md->pages);
2015397874dfSChristoph Lameter 
2016397874dfSChristoph Lameter 	if (md->mapcount_max > 1)
2017397874dfSChristoph Lameter 		seq_printf(m, " mapmax=%lu", md->mapcount_max);
2018397874dfSChristoph Lameter 
2019397874dfSChristoph Lameter 	if (md->swapcache)
2020397874dfSChristoph Lameter 		seq_printf(m, " swapcache=%lu", md->swapcache);
2021397874dfSChristoph Lameter 
2022397874dfSChristoph Lameter 	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
2023397874dfSChristoph Lameter 		seq_printf(m, " active=%lu", md->active);
2024397874dfSChristoph Lameter 
2025397874dfSChristoph Lameter 	if (md->writeback)
2026397874dfSChristoph Lameter 		seq_printf(m, " writeback=%lu", md->writeback);
2027397874dfSChristoph Lameter 
202856bbd65dSChristoph Lameter 	for_each_node_state(n, N_HIGH_MEMORY)
20291a75a6c8SChristoph Lameter 		if (md->node[n])
20301a75a6c8SChristoph Lameter 			seq_printf(m, " N%d=%lu", n, md->node[n]);
2031397874dfSChristoph Lameter out:
20321a75a6c8SChristoph Lameter 	seq_putc(m, '\n');
20331a75a6c8SChristoph Lameter 	kfree(md);
20341a75a6c8SChristoph Lameter 
20351a75a6c8SChristoph Lameter 	if (m->count < m->size)
203699f89551SEric W. Biederman 		m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
20371a75a6c8SChristoph Lameter 	return 0;
20381a75a6c8SChristoph Lameter }
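
/*
 * Illustrative /proc/<pid>/numa_maps line produced by the above; the
 * path and counts are made up:
 *
 *   2000000000 interleave=0-3 file=/lib/libfoo.so mapped=16 mapmax=4 N0=4 N1=4 N2=4 N3=4
 */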
2039