xref: /openbmc/linux/mm/mempolicy.c (revision f5b087b52f1710eb0bf15a2d2b030c51a6a1ca9e)
/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a per-process
 *                counter is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case node -1 here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non-interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
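
/*
 * Userspace usage sketch (illustrative, not part of the original file):
 * the policies documented above are installed from user space with
 * set_mempolicy(2) for the process policy and mbind(2) for a VMA policy.
 * A minimal example, assuming the <numaif.h> wrappers from libnuma and a
 * machine where nodes 0 and 1 exist:
 *
 *	#include <numaif.h>
 *	#include <sys/mman.h>
 *
 *	unsigned long interleave_mask = (1UL << 0) | (1UL << 1);
 *	unsigned long bind_mask = 1UL << 0;
 *	size_t len = 1 << 20;
 *	void *buf;
 *
 *	// Process policy: interleave future allocations over nodes 0-1.
 *	set_mempolicy(MPOL_INTERLEAVE, &interleave_mask,
 *		      8 * sizeof(interleave_mask));
 *
 *	// VMA policy: restrict one mapping to node 0, no fallback.
 *	buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	mbind(buf, len, MPOL_BIND, &bind_mask, 8 * sizeof(bind_mask), 0);
 *
 * mbind() requires a page-aligned start address (see do_mbind() below),
 * which is why the sketch uses mmap() rather than malloc().
 */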

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
   could replace all the switch()es with a mempolicy_ops structure.
*/

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
#define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2)		/* Gather statistics */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.policy = MPOL_DEFAULT,
};

static void mpol_rebind_policy(struct mempolicy *pol,
			       const nodemask_t *newmask);

/* Check that the nodemask contains at least one populated zone */
static int is_valid_nodemask(nodemask_t *nodemask)
{
	int nd, k;

	/* Check that there is something useful in this mask */
	k = policy_zone;

	for_each_node_mask(nd, *nodemask) {
		struct zone *z;

		for (k = 0; k <= policy_zone; k++) {
			z = &NODE_DATA(nd)->node_zones[k];
			if (z->present_pages > 0)
				return 1;
		}
	}

	return 0;
}

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_F_STATIC_NODES;
}
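
/*
 * Note (illustrative, not from the original source): the w union filled
 * in by mpol_new() below records what a later mpol_rebind_policy() needs
 * when the task's cpuset changes.  With MPOL_F_STATIC_NODES the nodemask
 * the user passed in is remembered verbatim, so e.g. an interleave over
 * nodes {0,2} stays anchored to {0,2} across cpuset moves; without the
 * flag, the cpuset's mems_allowed at creation time is remembered so the
 * policy nodes can be remapped relative to each new cpuset.
 */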

/* Create a new policy */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;
	nodemask_t cpuset_context_nmask;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : -1);

	if (mode == MPOL_DEFAULT)
		return (nodes && nodes_weight(*nodes)) ? ERR_PTR(-EINVAL) :
							 NULL;
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	cpuset_update_task_memory_state();
	nodes_and(cpuset_context_nmask, *nodes, cpuset_current_mems_allowed);
	switch (mode) {
	case MPOL_INTERLEAVE:
		if (nodes_empty(*nodes) || nodes_empty(cpuset_context_nmask))
			goto free;
		policy->v.nodes = cpuset_context_nmask;
		break;
	case MPOL_PREFERRED:
		policy->v.preferred_node = first_node(cpuset_context_nmask);
		if (policy->v.preferred_node >= MAX_NUMNODES)
			goto free;
		break;
	case MPOL_BIND:
		if (!is_valid_nodemask(&cpuset_context_nmask))
			goto free;
		policy->v.nodes = cpuset_context_nmask;
		break;
	default:
		BUG();
	}
	policy->policy = mode;
	policy->flags = flags;
	if (mpol_store_user_nodemask(policy))
		policy->w.user_nodemask = *nodes;
	else
		policy->w.cpuset_mems_allowed = cpuset_mems_allowed(current);
	return policy;

free:
	kmem_cache_free(policy_cache, policy);
	return ERR_PTR(-EINVAL);
}

static void gather_stats(struct page *, void *, int pte_dirty);
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

/* Scan through pages checking if pages follow certain conditions. */
static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pte_t *orig_pte;
	pte_t *pte;
	spinlock_t *ptl;

	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		struct page *page;
		int nid;

		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * The check for PageReserved here is important to avoid
		 * handling zero pages and other pages that may have been
		 * marked special by the system.
		 *
		 * If PageReserved were not checked here then e.g.
		 * the location of the zero page could have an influence
		 * on MPOL_MF_STRICT, zero pages would be counted for
		 * the per node stats, and there would be useless attempts
		 * to put zero pages on the migration list.
		 */
		if (PageReserved(page))
			continue;
		nid = page_to_nid(page);
		if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
			continue;

		if (flags & MPOL_MF_STATS)
			gather_stats(page, private, pte_dirty(*pte));
		else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
			migrate_page_add(page, private, flags);
		else
			break;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return addr != end;
}

static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		if (check_pte_range(vma, pmd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		if (check_pmd_range(vma, pud, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static inline int check_pgd_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		if (check_pud_range(vma, pgd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pgd++, addr = next, addr != end);
	return 0;
}

/*
 * Check if all pages in a range are on a set of nodes.
 * If pagelist != NULL then isolate pages from the LRU and
 * put them on the pagelist.
 */
static struct vm_area_struct *
check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		const nodemask_t *nodes, unsigned long flags, void *private)
{
	int err;
	struct vm_area_struct *first, *vma, *prev;

	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
		err = migrate_prep();
		if (err)
			return ERR_PTR(err);
	}

	first = find_vma(mm, start);
	if (!first)
		return ERR_PTR(-EFAULT);
	prev = NULL;
	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
			if (!vma->vm_next && vma->vm_end < end)
				return ERR_PTR(-EFAULT);
			if (prev && prev->vm_end < vma->vm_start)
				return ERR_PTR(-EFAULT);
		}
		if (!is_vm_hugetlb_page(vma) &&
		    ((flags & MPOL_MF_STRICT) ||
		     ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
				vma_migratable(vma)))) {
			unsigned long endvma = vma->vm_end;

			if (endvma > end)
				endvma = end;
			if (vma->vm_start > start)
				start = vma->vm_start;
			err = check_pgd_range(vma, start, endvma, nodes,
						flags, private);
			if (err) {
				first = ERR_PTR(err);
				break;
			}
		}
		prev = vma;
	}
	return first;
}

/* Apply policy to a single VMA */
static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
{
	int err = 0;
	struct mempolicy *old = vma->vm_policy;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	if (vma->vm_ops && vma->vm_ops->set_policy)
		err = vma->vm_ops->set_policy(vma, new);
	if (!err) {
		mpol_get(new);
		vma->vm_policy = new;
		mpol_free(old);
	}
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, struct mempolicy *new)
{
	struct vm_area_struct *next;
	int err;

	err = 0;
	for (; vma && vma->vm_start < end; vma = next) {
		next = vma->vm_next;
		if (vma->vm_start < start)
			err = split_vma(vma->vm_mm, vma, start, 1);
		if (!err && vma->vm_end > end)
			err = split_vma(vma->vm_mm, vma, end, 0);
		if (!err)
			err = policy_vma(vma, new);
		if (err)
			break;
	}
	return err;
}

/*
 * Update task->flags PF_MEMPOLICY bit: set iff non-default
 * mempolicy.  Allows more rapid checking of this (combined perhaps
 * with other PF_* flag bits) on memory allocation hot code paths.
 *
 * If called from outside this file, the task 'p' should -only- be
 * a newly forked child not yet visible on the task list, because
 * manipulating the task flags of a visible task is not safe.
 *
 * The above limitation is why this routine has the funny name
 * mpol_fix_fork_child_flag().
 *
 * It is also safe to call this with a task pointer of current,
 * which the static wrapper mpol_set_task_struct_flag() does,
 * for use within this file.
 */

void mpol_fix_fork_child_flag(struct task_struct *p)
{
	if (p->mempolicy)
		p->flags |= PF_MEMPOLICY;
	else
		p->flags &= ~PF_MEMPOLICY;
}

static void mpol_set_task_struct_flag(void)
{
	mpol_fix_fork_child_flag(current);
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new))
		return PTR_ERR(new);
	mpol_free(current->mempolicy);
	current->mempolicy = new;
	mpol_set_task_struct_flag();
	if (new && new->policy == MPOL_INTERLEAVE &&
	    nodes_weight(new->v.nodes))
		current->il_next = first_node(new->v.nodes);
	return 0;
}

/* Fill a zone bitmap for a policy */
static void get_zonemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	switch (p->policy) {
	case MPOL_DEFAULT:
		break;
	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		/* or use current node instead of memory_map? */
		if (p->v.preferred_node < 0)
			*nodes = node_states[N_HIGH_MEMORY];
		else
			node_set(p->v.preferred_node, *nodes);
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p;
	int err;

	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	cpuset_update_task_memory_state();
	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		*nmask  = cpuset_current_mems_allowed;
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->policy == MPOL_INTERLEAVE) {
			*policy = current->il_next;
		} else {
			err = -EINVAL;
			goto out;
		}
	} else
		*policy = pol->policy | pol->flags;

	if (vma) {
		up_read(&current->mm->mmap_sem);
		vma = NULL;
	}

	err = 0;
	if (nmask)
		get_zonemask(pol, nmask);

 out:
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}

#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1)
		isolate_lru_page(page, pagelist);
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
	return alloc_pages_node(node, GFP_HIGHUSER_MOVABLE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;

	nodes_clear(nmask);
	node_set(source, nmask);

	check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist))
		err = migrate_pages(&pagelist, new_node_page, dest);

	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	LIST_HEAD(pagelist);
	int busy = 0;
	int err = 0;
	nodemask_t tmp;

	down_read(&mm->mmap_sem);

	err = migrate_vmas(mm, from_nodes, to_nodes, flags);
	if (err)
		goto out;

/*
 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
 * bit in 'tmp', and return that <source, dest> pair for migration.
 * The pair of nodemasks 'to' and 'from' define the map.
 *
 * If no pair of bits is found that way, fall back to picking some
 * pair of 'source' and 'dest' bits that are not the same.  If the
 * 'source' and 'dest' bits are the same, this represents a node
 * that will be migrating to itself, so no pages need move.
 *
 * If no bits are left in 'tmp', or if all remaining bits left
 * in 'tmp' correspond to the same bit in 'to', return false
 * (nothing left to migrate).
 *
 * This lets us pick a pair of nodes to migrate between, such that
 * if possible the dest node is not already occupied by some other
 * source node, minimizing the risk of overloading the memory on a
 * node that would happen if we migrated incoming memory to a node
 * before migrating outgoing memory sourced from that same node.
 *
 * A single scan of tmp is sufficient.  As we go, we remember the
 * most recent <s, d> pair that moved (s != d).  If we find a pair
 * that not only moved, but what's better, moved to an empty slot
 * (d is not set in tmp), then we break out then, with that pair.
 * Otherwise when we finish scanning tmp, we at least have the
 * most recent <s, d> pair that moved.  If we get all the way through
 * the scan of tmp without finding any node that moved, much less
 * moved to an empty node, then there is nothing left worth migrating.
 */
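
/*
 * Worked example (illustrative): from = {0,1}, to = {1,2}.  Scanning
 * tmp = {0,1}: s=0 remaps to d=1, but node 1 is still in tmp, so keep
 * looking; s=1 remaps to d=2, which is not in tmp, so break out and
 * migrate 1 -> 2 first.  On the next pass tmp = {0} and 0 -> 1 moves.
 * Node 1 is drained into node 2 before node 0's pages land on it,
 * which is exactly the ordering the comment above is after.
 */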

	tmp = *from_nodes;
	while (!nodes_empty(tmp)) {
		int s, d;
		int source = -1;
		int dest = 0;

		for_each_node_mask(s, tmp) {
			d = node_remap(s, *from_nodes, *to_nodes);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
				break;
		}
		if (source == -1)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;
		if (err < 0)
			break;
	}
out:
	up_read(&mm->mmap_sem);
	if (err < 0)
		return err;
	return busy;
}

/*
 * Allocate a new page for page migration based on vma policy.
 * Start assuming that page is mapped by vma pointed to by @private.
 * Search forward from there, if not.  N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
 */
static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	struct vm_area_struct *vma = (struct vm_area_struct *)private;
	unsigned long uninitialized_var(address);

	while (vma) {
		address = page_address_in_vma(page, vma);
		if (address != -EFAULT)
			break;
		vma = vma->vm_next;
	}

	/*
	 * if !vma, alloc_page_vma() will use task or system default policy
	 */
	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
}
#else

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
}

int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	return -ENOSYS;
}

static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	return NULL;
}
#endif

static long do_mbind(unsigned long start, unsigned long len,
		     unsigned short mode, unsigned short mode_flags,
		     nodemask_t *nmask, unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	struct mempolicy *new;
	unsigned long end;
	int err;
	LIST_HEAD(pagelist);

	if (flags & ~(unsigned long)(MPOL_MF_STRICT |
				     MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
		return -EINVAL;
	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	if (start & ~PAGE_MASK)
		return -EINVAL;

	if (mode == MPOL_DEFAULT)
		flags &= ~MPOL_MF_STRICT;

	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
	end = start + len;

	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;

	new = mpol_new(mode, mode_flags, nmask);
	if (IS_ERR(new))
		return PTR_ERR(new);

	/*
	 * If we are using the default policy then operation
	 * on discontinuous address spaces is okay after all
	 */
	if (!new)
		flags |= MPOL_MF_DISCONTIG_OK;

	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
		 start, start + len, mode, mode_flags,
		 nmask ? nodes_addr(*nmask)[0] : -1);

	down_write(&mm->mmap_sem);
	vma = check_range(mm, start, end, nmask,
			  flags | MPOL_MF_INVERT, &pagelist);

	err = PTR_ERR(vma);
	if (!IS_ERR(vma)) {
		int nr_failed = 0;

		err = mbind_range(vma, start, end, new);

		if (!list_empty(&pagelist))
			nr_failed = migrate_pages(&pagelist, new_vma_page,
						(unsigned long)vma);

		if (!err && nr_failed && (flags & MPOL_MF_STRICT))
			err = -EIO;
	}

	up_write(&mm->mmap_sem);
	mpol_free(new);
	return err;
}

/*
 * User space interface with variable sized bitmaps for nodelists.
 */

/* Copy a node mask from user space. */
static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
		     unsigned long maxnode)
{
	unsigned long k;
	unsigned long nlongs;
	unsigned long endmask;

	--maxnode;
	nodes_clear(*nodes);
	if (maxnode == 0 || !nmask)
		return 0;
	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
		return -EINVAL;

	nlongs = BITS_TO_LONGS(maxnode);
	if ((maxnode % BITS_PER_LONG) == 0)
		endmask = ~0UL;
	else
		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;

	/* When the user specified more nodes than supported, just check
	   that the unsupported part is all zero. */
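	/*
	 * Worked example (illustrative, not from the original source):
	 * on a 64-bit kernel with MAX_NUMNODES = 64, a call with
	 * maxnode = 1025 leaves 1024 bits after the decrement, so
	 * nlongs = 16 and endmask = ~0UL.  Longs 1..15 are only
	 * verified to be zero below; any set bit there means -EINVAL.
	 * nlongs is then clamped to BITS_TO_LONGS(64) = 1 and a single
	 * long is actually copied into *nodes.
	 */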
	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
		if (nlongs > PAGE_SIZE/sizeof(long))
			return -EINVAL;
		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
			unsigned long t;
			if (get_user(t, nmask + k))
				return -EFAULT;
			if (k == nlongs - 1) {
				if (t & endmask)
					return -EINVAL;
			} else if (t)
				return -EINVAL;
		}
		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
		endmask = ~0UL;
	}

	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
		return -EFAULT;
	nodes_addr(*nodes)[nlongs-1] &= endmask;
	return 0;
}

/* Copy a kernel node mask to user space */
static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
			      nodemask_t *nodes)
{
	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);

	if (copy > nbytes) {
		if (copy > PAGE_SIZE)
			return -EINVAL;
		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
			return -EFAULT;
		copy = nbytes;
	}
	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
}

asmlinkage long sys_mbind(unsigned long start, unsigned long len,
			unsigned long mode,
			unsigned long __user *nmask, unsigned long maxnode,
			unsigned flags)
{
	nodemask_t nodes;
	int err;
	unsigned short mode_flags;

	mode_flags = mode & MPOL_MODE_FLAGS;
	mode &= ~MPOL_MODE_FLAGS;
	if (mode >= MPOL_MAX)
		return -EINVAL;
	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_mbind(start, len, mode, mode_flags, &nodes, flags);
}

/* Set the process memory policy */
asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
		unsigned long maxnode)
{
	int err;
	nodemask_t nodes;
	unsigned short flags;

	flags = mode & MPOL_MODE_FLAGS;
	mode &= ~MPOL_MODE_FLAGS;
	if ((unsigned int)mode >= MPOL_MAX)
		return -EINVAL;
	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_set_mempolicy(mode, flags, &nodes);
}

asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
		const unsigned long __user *old_nodes,
		const unsigned long __user *new_nodes)
{
	struct mm_struct *mm;
	struct task_struct *task;
	nodemask_t old;
	nodemask_t new;
	nodemask_t task_nodes;
	int err;

	err = get_nodes(&old, old_nodes, maxnode);
	if (err)
		return err;

	err = get_nodes(&new, new_nodes, maxnode);
	if (err)
		return err;

	/* Find the mm_struct */
	read_lock(&tasklist_lock);
	task = pid ? find_task_by_vpid(pid) : current;
	if (!task) {
		read_unlock(&tasklist_lock);
		return -ESRCH;
	}
	mm = get_task_mm(task);
	read_unlock(&tasklist_lock);

	if (!mm)
		return -EINVAL;

	/*
	 * Check if this process has the right to modify the specified
	 * process. The right exists if the process has administrative
	 * capabilities, superuser privileges or the same
	 * userid as the target process.
	 */
	if ((current->euid != task->suid) && (current->euid != task->uid) &&
	    (current->uid != task->suid) && (current->uid != task->uid) &&
	    !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out;
	}

	task_nodes = cpuset_mems_allowed(task);
	/* Is the user allowed to access the target nodes? */
	if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out;
	}

	if (!nodes_subset(new, node_states[N_HIGH_MEMORY])) {
		err = -EINVAL;
		goto out;
	}

	err = security_task_movememory(task);
	if (err)
		goto out;

	err = do_migrate_pages(mm, &old, &new,
		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
out:
	mmput(mm);
	return err;
}

/* Retrieve NUMA policy */
asmlinkage long sys_get_mempolicy(int __user *policy,
				unsigned long __user *nmask,
				unsigned long maxnode,
				unsigned long addr, unsigned long flags)
{
	int err;
	int uninitialized_var(pval);
	nodemask_t nodes;

	if (nmask != NULL && maxnode < MAX_NUMNODES)
		return -EINVAL;

	err = do_get_mempolicy(&pval, &nodes, addr, flags);

	if (err)
		return err;

	if (policy && put_user(pval, policy))
		return -EFAULT;

	if (nmask)
		err = copy_nodes_to_user(nmask, maxnode, &nodes);

	return err;
}

9961da177e4SLinus Torvalds #ifdef CONFIG_COMPAT
9971da177e4SLinus Torvalds 
9981da177e4SLinus Torvalds asmlinkage long compat_sys_get_mempolicy(int __user *policy,
9991da177e4SLinus Torvalds 				     compat_ulong_t __user *nmask,
10001da177e4SLinus Torvalds 				     compat_ulong_t maxnode,
10011da177e4SLinus Torvalds 				     compat_ulong_t addr, compat_ulong_t flags)
10021da177e4SLinus Torvalds {
10031da177e4SLinus Torvalds 	long err;
10041da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
10051da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
10061da177e4SLinus Torvalds 	DECLARE_BITMAP(bm, MAX_NUMNODES);
10071da177e4SLinus Torvalds 
10081da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
10091da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
10101da177e4SLinus Torvalds 
10111da177e4SLinus Torvalds 	if (nmask)
10121da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
10131da177e4SLinus Torvalds 
10141da177e4SLinus Torvalds 	err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
10151da177e4SLinus Torvalds 
10161da177e4SLinus Torvalds 	if (!err && nmask) {
10171da177e4SLinus Torvalds 		err = copy_from_user(bm, nm, alloc_size);
10181da177e4SLinus Torvalds 		/* ensure entire bitmap is zeroed */
10191da177e4SLinus Torvalds 		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
10201da177e4SLinus Torvalds 		err |= compat_put_bitmap(nmask, bm, nr_bits);
10211da177e4SLinus Torvalds 	}
10221da177e4SLinus Torvalds 
10231da177e4SLinus Torvalds 	return err;
10241da177e4SLinus Torvalds }
10251da177e4SLinus Torvalds 
10261da177e4SLinus Torvalds asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
10271da177e4SLinus Torvalds 				     compat_ulong_t maxnode)
10281da177e4SLinus Torvalds {
10291da177e4SLinus Torvalds 	long err = 0;
10301da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
10311da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
10321da177e4SLinus Torvalds 	DECLARE_BITMAP(bm, MAX_NUMNODES);
10331da177e4SLinus Torvalds 
10341da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
10351da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
10361da177e4SLinus Torvalds 
10371da177e4SLinus Torvalds 	if (nmask) {
10381da177e4SLinus Torvalds 		err = compat_get_bitmap(bm, nmask, nr_bits);
10391da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
10401da177e4SLinus Torvalds 		err |= copy_to_user(nm, bm, alloc_size);
10411da177e4SLinus Torvalds 	}
10421da177e4SLinus Torvalds 
10431da177e4SLinus Torvalds 	if (err)
10441da177e4SLinus Torvalds 		return -EFAULT;
10451da177e4SLinus Torvalds 
10461da177e4SLinus Torvalds 	return sys_set_mempolicy(mode, nm, nr_bits+1);
10471da177e4SLinus Torvalds }
10481da177e4SLinus Torvalds 
10491da177e4SLinus Torvalds asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
10501da177e4SLinus Torvalds 			     compat_ulong_t mode, compat_ulong_t __user *nmask,
10511da177e4SLinus Torvalds 			     compat_ulong_t maxnode, compat_ulong_t flags)
10521da177e4SLinus Torvalds {
10531da177e4SLinus Torvalds 	long err = 0;
10541da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
10551da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
1056dfcd3c0dSAndi Kleen 	nodemask_t bm;
10571da177e4SLinus Torvalds 
10581da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
10591da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
10601da177e4SLinus Torvalds 
10611da177e4SLinus Torvalds 	if (nmask) {
1062dfcd3c0dSAndi Kleen 		err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
10631da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
1064dfcd3c0dSAndi Kleen 		err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
10651da177e4SLinus Torvalds 	}
10661da177e4SLinus Torvalds 
10671da177e4SLinus Torvalds 	if (err)
10681da177e4SLinus Torvalds 		return -EFAULT;
10691da177e4SLinus Torvalds 
10701da177e4SLinus Torvalds 	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
10711da177e4SLinus Torvalds }
10721da177e4SLinus Torvalds 
10731da177e4SLinus Torvalds #endif
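
/*
 * The wrappers above repack 32-bit user bitmaps into native unsigned
 * longs via compat_get_bitmap()/compat_put_bitmap().  A userspace model
 * of that packing (editor's sketch; pack_compat_bitmap is a made-up
 * name):
 */
#if 0	/* standalone illustration, not kernel code */
#include <stdint.h>
#include <stdio.h>

/* Pack 32-bit mask words into 64-bit words, low word first. */
static void pack_compat_bitmap(uint64_t *dst, const uint32_t *src,
			       unsigned int nr_bits)
{
	unsigned int i, words32 = (nr_bits + 31) / 32;

	for (i = 0; i < words32; i++) {
		if (i & 1)
			dst[i / 2] |= (uint64_t)src[i] << 32;
		else
			dst[i / 2] = src[i];
	}
}

int main(void)
{
	uint32_t src[2] = { 0x5, 0x1 };	/* nodes 0, 2 and 32 */
	uint64_t dst[1];

	pack_compat_bitmap(dst, src, 64);
	printf("%llx\n", (unsigned long long)dst[0]);	/* 100000005 */
	return 0;
}
#endif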
10741da177e4SLinus Torvalds 
1075480eccf9SLee Schermerhorn /*
1076480eccf9SLee Schermerhorn  * get_vma_policy(@task, @vma, @addr)
1077480eccf9SLee Schermerhorn  * @task - task for fallback if vma policy == default
1078480eccf9SLee Schermerhorn  * @vma   - virtual memory area whose policy is sought
1079480eccf9SLee Schermerhorn  * @addr  - address in @vma for shared policy lookup
1080480eccf9SLee Schermerhorn  *
1081480eccf9SLee Schermerhorn  * Returns effective policy for a VMA at specified address.
1082480eccf9SLee Schermerhorn  * Falls back to @task or system default policy, as necessary.
1083480eccf9SLee Schermerhorn  * The returned policy has an extra reference count if it is shared,
1084480eccf9SLee Schermerhorn  * a vma policy, or some other task's policy [show_numa_maps() can
1085480eccf9SLee Schermerhorn  * pass @task != current].  It is the caller's responsibility to
1086480eccf9SLee Schermerhorn  * free the reference in these cases.
1087480eccf9SLee Schermerhorn  */
108848fce342SChristoph Lameter static struct mempolicy * get_vma_policy(struct task_struct *task,
108948fce342SChristoph Lameter 		struct vm_area_struct *vma, unsigned long addr)
10901da177e4SLinus Torvalds {
10916e21c8f1SChristoph Lameter 	struct mempolicy *pol = task->mempolicy;
1092480eccf9SLee Schermerhorn 	int shared_pol = 0;
10931da177e4SLinus Torvalds 
10941da177e4SLinus Torvalds 	if (vma) {
1095480eccf9SLee Schermerhorn 		if (vma->vm_ops && vma->vm_ops->get_policy) {
10961da177e4SLinus Torvalds 			pol = vma->vm_ops->get_policy(vma, addr);
1097480eccf9SLee Schermerhorn 			shared_pol = 1;	/* if pol non-NULL, add ref below */
1098480eccf9SLee Schermerhorn 		} else if (vma->vm_policy &&
10991da177e4SLinus Torvalds 				vma->vm_policy->policy != MPOL_DEFAULT)
11001da177e4SLinus Torvalds 			pol = vma->vm_policy;
11011da177e4SLinus Torvalds 	}
11021da177e4SLinus Torvalds 	if (!pol)
11031da177e4SLinus Torvalds 		pol = &default_policy;
1104480eccf9SLee Schermerhorn 	else if (!shared_pol && pol != current->mempolicy)
1105480eccf9SLee Schermerhorn 		mpol_get(pol);	/* vma or other task's policy */
11061da177e4SLinus Torvalds 	return pol;
11071da177e4SLinus Torvalds }
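
/*
 * Caller-side sketch of the reference rule documented above (editor's
 * illustration; use_vma_policy is a made-up name).  show_numa_map()
 * follows this pattern later in the file; huge_zonelist() uses a
 * variant that keeps the reference for MPOL_BIND.
 */
#if 0
static void use_vma_policy(struct task_struct *task,
			   struct vm_area_struct *vma, unsigned long addr)
{
	struct mempolicy *pol = get_vma_policy(task, vma, addr);

	/* ... consult pol->policy, pol->v.nodes, ... */

	/*
	 * Only &default_policy and current's own policy come back
	 * without an extra reference; anything else must be dropped.
	 */
	if (pol != &default_policy && pol != current->mempolicy)
		__mpol_free(pol);
}
#endif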
11081da177e4SLinus Torvalds 
110919770b32SMel Gorman /* Return a nodemask representing a mempolicy */
111019770b32SMel Gorman static nodemask_t *nodemask_policy(gfp_t gfp, struct mempolicy *policy)
111119770b32SMel Gorman {
111219770b32SMel Gorman 	/* Lower zones don't get a nodemask applied for MPOL_BIND */
111319770b32SMel Gorman 	if (unlikely(policy->policy == MPOL_BIND) &&
111419770b32SMel Gorman 			gfp_zone(gfp) >= policy_zone &&
111519770b32SMel Gorman 			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
111619770b32SMel Gorman 		return &policy->v.nodes;
111719770b32SMel Gorman 
111819770b32SMel Gorman 	return NULL;
111919770b32SMel Gorman }
112019770b32SMel Gorman 
11211da177e4SLinus Torvalds /* Return a zonelist representing a mempolicy */
1122dd0fc66fSAl Viro static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy)
11231da177e4SLinus Torvalds {
11241da177e4SLinus Torvalds 	int nd;
11251da177e4SLinus Torvalds 
11261da177e4SLinus Torvalds 	switch (policy->policy) {
11271da177e4SLinus Torvalds 	case MPOL_PREFERRED:
11281da177e4SLinus Torvalds 		nd = policy->v.preferred_node;
11291da177e4SLinus Torvalds 		if (nd < 0)
11301da177e4SLinus Torvalds 			nd = numa_node_id();
11311da177e4SLinus Torvalds 		break;
11321da177e4SLinus Torvalds 	case MPOL_BIND:
113319770b32SMel Gorman 		/*
113419770b32SMel Gorman 		 * Normally, MPOL_BIND allocations are node-local within the
113519770b32SMel Gorman 		 * allowed nodemask. However, if __GFP_THISNODE is set and the
113619770b32SMel Gorman 		 * current node is not part of the mask, we use the zonelist
113719770b32SMel Gorman 		 * for the first node in the mask instead.
113819770b32SMel Gorman 		 */
113919770b32SMel Gorman 		nd = numa_node_id();
114019770b32SMel Gorman 		if (unlikely(gfp & __GFP_THISNODE) &&
114119770b32SMel Gorman 				unlikely(!node_isset(nd, policy->v.nodes)))
114219770b32SMel Gorman 			nd = first_node(policy->v.nodes);
114319770b32SMel Gorman 		break;
11441da177e4SLinus Torvalds 	case MPOL_INTERLEAVE: /* should not happen */
11451da177e4SLinus Torvalds 	case MPOL_DEFAULT:
11461da177e4SLinus Torvalds 		nd = numa_node_id();
11471da177e4SLinus Torvalds 		break;
11481da177e4SLinus Torvalds 	default:
11491da177e4SLinus Torvalds 		nd = 0;
11501da177e4SLinus Torvalds 		BUG();
11511da177e4SLinus Torvalds 	}
11520e88460dSMel Gorman 	return node_zonelist(nd, gfp);
11531da177e4SLinus Torvalds }
11541da177e4SLinus Torvalds 
11551da177e4SLinus Torvalds /* Do dynamic interleaving for a process */
11561da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy)
11571da177e4SLinus Torvalds {
11581da177e4SLinus Torvalds 	unsigned nid, next;
11591da177e4SLinus Torvalds 	struct task_struct *me = current;
11601da177e4SLinus Torvalds 
11611da177e4SLinus Torvalds 	nid = me->il_next;
1162dfcd3c0dSAndi Kleen 	next = next_node(nid, policy->v.nodes);
11631da177e4SLinus Torvalds 	if (next >= MAX_NUMNODES)
1164dfcd3c0dSAndi Kleen 		next = first_node(policy->v.nodes);
1165*f5b087b5SDavid Rientjes 	if (next < MAX_NUMNODES)
11661da177e4SLinus Torvalds 		me->il_next = next;
11671da177e4SLinus Torvalds 	return nid;
11681da177e4SLinus Torvalds }
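
/*
 * Worked illustration of the wrap-around above (editor's addition):
 * with v.nodes = {0,2,5} and il_next = 2, one call returns 2 and
 * advances il_next to 5; the next call returns 5 and first_node()
 * wraps il_next back to 0.
 */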
11691da177e4SLinus Torvalds 
1170dc85da15SChristoph Lameter /*
1171dc85da15SChristoph Lameter  * Depending on the memory policy, provide a node from which to allocate the
1172dc85da15SChristoph Lameter  * next slab entry.
1173dc85da15SChristoph Lameter  */
1174dc85da15SChristoph Lameter unsigned slab_node(struct mempolicy *policy)
1175dc85da15SChristoph Lameter {
1176a3b51e01SDavid Rientjes 	unsigned short pol = policy ? policy->policy : MPOL_DEFAULT;
1177765c4507SChristoph Lameter 
1178765c4507SChristoph Lameter 	switch (pol) {
1179dc85da15SChristoph Lameter 	case MPOL_INTERLEAVE:
1180dc85da15SChristoph Lameter 		return interleave_nodes(policy);
1181dc85da15SChristoph Lameter 
1182dd1a239fSMel Gorman 	case MPOL_BIND: {
1183dc85da15SChristoph Lameter 		/*
1184dc85da15SChristoph Lameter 		 * Follow bind policy behavior and start allocation at the
1185dc85da15SChristoph Lameter 		 * first node.
1186dc85da15SChristoph Lameter 		 */
118719770b32SMel Gorman 		struct zonelist *zonelist;
118819770b32SMel Gorman 		struct zone *zone;
118919770b32SMel Gorman 		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
119019770b32SMel Gorman 		zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
119119770b32SMel Gorman 		(void)first_zones_zonelist(zonelist, highest_zoneidx,
119219770b32SMel Gorman 							&policy->v.nodes,
119319770b32SMel Gorman 							&zone);
119419770b32SMel Gorman 		return zone->node;
1195dd1a239fSMel Gorman 	}
1196dc85da15SChristoph Lameter 
1197dc85da15SChristoph Lameter 	case MPOL_PREFERRED:
1198dc85da15SChristoph Lameter 		if (policy->v.preferred_node >= 0)
1199dc85da15SChristoph Lameter 			return policy->v.preferred_node;
1200dc85da15SChristoph Lameter 		/* Fall through */
1201dc85da15SChristoph Lameter 
1202dc85da15SChristoph Lameter 	default:
1203dc85da15SChristoph Lameter 		return numa_node_id();
1204dc85da15SChristoph Lameter 	}
1205dc85da15SChristoph Lameter }
1206dc85da15SChristoph Lameter 
12071da177e4SLinus Torvalds /* Do static interleaving for a VMA with known offset. */
12081da177e4SLinus Torvalds static unsigned offset_il_node(struct mempolicy *pol,
12091da177e4SLinus Torvalds 		struct vm_area_struct *vma, unsigned long off)
12101da177e4SLinus Torvalds {
1211dfcd3c0dSAndi Kleen 	unsigned nnodes = nodes_weight(pol->v.nodes);
1212*f5b087b5SDavid Rientjes 	unsigned target;
12131da177e4SLinus Torvalds 	int c;
12141da177e4SLinus Torvalds 	int nid = -1;
12151da177e4SLinus Torvalds 
1216*f5b087b5SDavid Rientjes 	if (!nnodes)
1217*f5b087b5SDavid Rientjes 		return numa_node_id();
1218*f5b087b5SDavid Rientjes 	target = (unsigned int)off % nnodes;
12191da177e4SLinus Torvalds 	c = 0;
12201da177e4SLinus Torvalds 	do {
1221dfcd3c0dSAndi Kleen 		nid = next_node(nid, pol->v.nodes);
12221da177e4SLinus Torvalds 		c++;
12231da177e4SLinus Torvalds 	} while (c <= target);
12241da177e4SLinus Torvalds 	return nid;
12251da177e4SLinus Torvalds }
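
/*
 * A self-contained model of the offset-to-node walk above, using a
 * plain unsigned long instead of a nodemask_t (editor's sketch;
 * offset_to_node is a made-up name, __builtin_popcountl is GCC's).
 */
#if 0	/* standalone illustration, not kernel code */
#include <stdio.h>

static int offset_to_node(unsigned long mask, unsigned long off)
{
	int nid = -1, c = 0;
	int nnodes = __builtin_popcountl(mask);
	int target;

	if (!nnodes)
		return 0;			/* fallback */
	target = off % nnodes;
	do {
		do
			nid++;
		while (!(mask & (1UL << nid)));	/* advance to next set bit */
		c++;
	} while (c <= target);
	return nid;
}

int main(void)
{
	/* nodes {1,3,4}: offsets 0,1,2,3 map to nodes 1,3,4,1 */
	for (unsigned long off = 0; off < 4; off++)
		printf("off %lu -> node %d\n", off,
		       offset_to_node(0x1aUL, off));
	return 0;
}
#endif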
12261da177e4SLinus Torvalds 
12275da7ca86SChristoph Lameter /* Determine a node number for interleave */
12285da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol,
12295da7ca86SChristoph Lameter 		 struct vm_area_struct *vma, unsigned long addr, int shift)
12305da7ca86SChristoph Lameter {
12315da7ca86SChristoph Lameter 	if (vma) {
12325da7ca86SChristoph Lameter 		unsigned long off;
12335da7ca86SChristoph Lameter 
12343b98b087SNishanth Aravamudan 		/*
12353b98b087SNishanth Aravamudan 		 * for small pages, there is no difference between
12363b98b087SNishanth Aravamudan 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
12373b98b087SNishanth Aravamudan 		 * for huge pages, since vm_pgoff is in units of small
12383b98b087SNishanth Aravamudan 		 * pages, we need to shift off the always 0 bits to get
12393b98b087SNishanth Aravamudan 		 * a useful offset.
12403b98b087SNishanth Aravamudan 		 */
12413b98b087SNishanth Aravamudan 		BUG_ON(shift < PAGE_SHIFT);
12423b98b087SNishanth Aravamudan 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
12435da7ca86SChristoph Lameter 		off += (addr - vma->vm_start) >> shift;
12445da7ca86SChristoph Lameter 		return offset_il_node(pol, vma, off);
12455da7ca86SChristoph Lameter 	} else
12465da7ca86SChristoph Lameter 		return interleave_nodes(pol);
12475da7ca86SChristoph Lameter }
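
/*
 * Worked illustration of the shift above (editor's addition): with
 * 4KB base pages (PAGE_SHIFT = 12) and 2MB huge pages (shift = 21),
 * vm_pgoff is in 4KB units, so vm_pgoff >> 9 counts whole huge pages;
 * a fault at vma->vm_start + 6MB then adds (6MB >> 21) = 3 to off.
 */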
12485da7ca86SChristoph Lameter 
124900ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS
1250480eccf9SLee Schermerhorn /*
1251480eccf9SLee Schermerhorn  * huge_zonelist(@vma, @addr, @gfp_flags, @mpol, @nodemask)
1252480eccf9SLee Schermerhorn  * @vma = virtual memory area whose policy is sought
1253480eccf9SLee Schermerhorn  * @addr = address in @vma for shared policy lookup and interleave policy
1254480eccf9SLee Schermerhorn  * @gfp_flags = for requested zone
125519770b32SMel Gorman  * @mpol = pointer to mempolicy pointer for reference counted mempolicy
125619770b32SMel Gorman  * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
1257480eccf9SLee Schermerhorn  *
1258480eccf9SLee Schermerhorn  * Returns a zonelist suitable for a huge page allocation.
125919770b32SMel Gorman  * If the effective policy is 'BIND, returns pointer to local node's zonelist,
126019770b32SMel Gorman  * and a pointer to the mempolicy's @nodemask for filtering the zonelist.
1261480eccf9SLee Schermerhorn  * If it is also a policy for which get_vma_policy() returns an extra
126219770b32SMel Gorman  * reference, we must hold that reference until after the allocation.
1263480eccf9SLee Schermerhorn  * In that case, return policy via @mpol so hugetlb allocation can drop
1264480eccf9SLee Schermerhorn  * the reference. For non-'BIND referenced policies, we can/do drop the
1265480eccf9SLee Schermerhorn  * reference here, so the caller doesn't need to know about the special case
1266480eccf9SLee Schermerhorn  * for default and current task policy.
1267480eccf9SLee Schermerhorn  */
1268396faf03SMel Gorman struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
126919770b32SMel Gorman 				gfp_t gfp_flags, struct mempolicy **mpol,
127019770b32SMel Gorman 				nodemask_t **nodemask)
12715da7ca86SChristoph Lameter {
12725da7ca86SChristoph Lameter 	struct mempolicy *pol = get_vma_policy(current, vma, addr);
1273480eccf9SLee Schermerhorn 	struct zonelist *zl;
12745da7ca86SChristoph Lameter 
1275480eccf9SLee Schermerhorn 	*mpol = NULL;		/* probably no unref needed */
127619770b32SMel Gorman 	*nodemask = NULL;	/* assume !MPOL_BIND */
127719770b32SMel Gorman 	if (pol->policy == MPOL_BIND) {
127819770b32SMel Gorman 		*nodemask = &pol->v.nodes;
127919770b32SMel Gorman 	} else if (pol->policy == MPOL_INTERLEAVE) {
12805da7ca86SChristoph Lameter 		unsigned nid;
12815da7ca86SChristoph Lameter 
12825da7ca86SChristoph Lameter 		nid = interleave_nid(pol, vma, addr, HPAGE_SHIFT);
128369682d85SLee Schermerhorn 		if (unlikely(pol != &default_policy &&
128469682d85SLee Schermerhorn 				pol != current->mempolicy))
1285480eccf9SLee Schermerhorn 			__mpol_free(pol);	/* finished with pol */
12860e88460dSMel Gorman 		return node_zonelist(nid, gfp_flags);
12875da7ca86SChristoph Lameter 	}
1288480eccf9SLee Schermerhorn 
1289480eccf9SLee Schermerhorn 	zl = zonelist_policy(GFP_HIGHUSER, pol);
1290480eccf9SLee Schermerhorn 	if (unlikely(pol != &default_policy && pol != current->mempolicy)) {
1291480eccf9SLee Schermerhorn 		if (pol->policy != MPOL_BIND)
1292480eccf9SLee Schermerhorn 			__mpol_free(pol);	/* finished with pol */
1293480eccf9SLee Schermerhorn 		else
1294480eccf9SLee Schermerhorn 			*mpol = pol;	/* unref needed after allocation */
1295480eccf9SLee Schermerhorn 	}
1296480eccf9SLee Schermerhorn 	return zl;
12975da7ca86SChristoph Lameter }
129800ac59adSChen, Kenneth W #endif
12995da7ca86SChristoph Lameter 
13001da177e4SLinus Torvalds /* Allocate a page in interleaved policy.
13011da177e4SLinus Torvalds    Own path because it needs to do special accounting. */
1302662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1303662f3a0bSAndi Kleen 					unsigned nid)
13041da177e4SLinus Torvalds {
13051da177e4SLinus Torvalds 	struct zonelist *zl;
13061da177e4SLinus Torvalds 	struct page *page;
13071da177e4SLinus Torvalds 
13080e88460dSMel Gorman 	zl = node_zonelist(nid, gfp);
13091da177e4SLinus Torvalds 	page = __alloc_pages(gfp, order, zl);
1310dd1a239fSMel Gorman 	if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
1311ca889e6cSChristoph Lameter 		inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
13121da177e4SLinus Torvalds 	return page;
13131da177e4SLinus Torvalds }
13141da177e4SLinus Torvalds 
13151da177e4SLinus Torvalds /**
13161da177e4SLinus Torvalds  * 	alloc_page_vma	- Allocate a page for a VMA.
13171da177e4SLinus Torvalds  *
13181da177e4SLinus Torvalds  * 	@gfp:
13191da177e4SLinus Torvalds  *      %GFP_USER    user allocation.
13201da177e4SLinus Torvalds  *      %GFP_KERNEL  kernel allocations,
13211da177e4SLinus Torvalds  *      %GFP_HIGHMEM highmem/user allocations,
13221da177e4SLinus Torvalds  *      %GFP_FS      allocation should not call back into a file system.
13231da177e4SLinus Torvalds  *      %GFP_ATOMIC  don't sleep.
13241da177e4SLinus Torvalds  *
13251da177e4SLinus Torvalds  * 	@vma:  Pointer to VMA or NULL if not available.
13261da177e4SLinus Torvalds  *	@addr: Virtual Address of the allocation. Must be inside the VMA.
13271da177e4SLinus Torvalds  *
13281da177e4SLinus Torvalds  * 	This function allocates a page from the kernel page pool and applies
13291da177e4SLinus Torvalds  *	a NUMA policy associated with the VMA or the current process.
13301da177e4SLinus Torvalds  *	When VMA is not NULL, the caller must hold down_read on the mmap_sem
13311da177e4SLinus Torvalds  *	of the VMA's mm_struct to prevent it from going away. Should be used
13321da177e4SLinus Torvalds  *	for all allocations for pages that will be mapped into
13331da177e4SLinus Torvalds  *	user space. Returns NULL when no page can be allocated.
13341da177e4SLinus Torvalds  *
13351da177e4SLinus Torvalds  *	Should be called with the mmap_sem of the vma held.
13361da177e4SLinus Torvalds  */
13371da177e4SLinus Torvalds struct page *
1338dd0fc66fSAl Viro alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
13391da177e4SLinus Torvalds {
13406e21c8f1SChristoph Lameter 	struct mempolicy *pol = get_vma_policy(current, vma, addr);
1341480eccf9SLee Schermerhorn 	struct zonelist *zl;
13421da177e4SLinus Torvalds 
1343cf2a473cSPaul Jackson 	cpuset_update_task_memory_state();
13441da177e4SLinus Torvalds 
13451da177e4SLinus Torvalds 	if (unlikely(pol->policy == MPOL_INTERLEAVE)) {
13461da177e4SLinus Torvalds 		unsigned nid;
13475da7ca86SChristoph Lameter 
13485da7ca86SChristoph Lameter 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
134969682d85SLee Schermerhorn 		if (unlikely(pol != &default_policy &&
135069682d85SLee Schermerhorn 				pol != current->mempolicy))
135169682d85SLee Schermerhorn 			__mpol_free(pol);	/* finished with pol */
13521da177e4SLinus Torvalds 		return alloc_page_interleave(gfp, 0, nid);
13531da177e4SLinus Torvalds 	}
1354480eccf9SLee Schermerhorn 	zl = zonelist_policy(gfp, pol);
1355480eccf9SLee Schermerhorn 	if (pol != &default_policy && pol != current->mempolicy) {
1356480eccf9SLee Schermerhorn 		/*
1357480eccf9SLee Schermerhorn 		 * slow path: ref counted policy -- shared or vma
1358480eccf9SLee Schermerhorn 		 */
135919770b32SMel Gorman 		struct page *page =  __alloc_pages_nodemask(gfp, 0,
136019770b32SMel Gorman 						zl, nodemask_policy(gfp, pol));
1361480eccf9SLee Schermerhorn 		__mpol_free(pol);
1362480eccf9SLee Schermerhorn 		return page;
1363480eccf9SLee Schermerhorn 	}
1364480eccf9SLee Schermerhorn 	/*
1365480eccf9SLee Schermerhorn 	 * fast path:  default or task policy
1366480eccf9SLee Schermerhorn 	 */
136719770b32SMel Gorman 	return __alloc_pages_nodemask(gfp, 0, zl, nodemask_policy(gfp, pol));
13681da177e4SLinus Torvalds }
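
/*
 * Sketch of the locking contract documented above, in the shape of a
 * typical fault-path caller (editor's illustration; fault_in_page is a
 * made-up name).
 */
#if 0
static struct page *fault_in_page(struct vm_area_struct *vma,
				  unsigned long addr)
{
	/* The fault path already holds down_read(&mm->mmap_sem). */
	struct page *page = alloc_page_vma(GFP_HIGHUSER, vma, addr);

	if (!page)
		return NULL;	/* OOM; caller must cope */
	return page;
}
#endif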
13691da177e4SLinus Torvalds 
13701da177e4SLinus Torvalds /**
13711da177e4SLinus Torvalds  * 	alloc_pages_current - Allocate pages.
13721da177e4SLinus Torvalds  *
13731da177e4SLinus Torvalds  *	@gfp:
13741da177e4SLinus Torvalds  *		%GFP_USER   user allocation,
13751da177e4SLinus Torvalds  *      	%GFP_KERNEL kernel allocation,
13761da177e4SLinus Torvalds  *      	%GFP_HIGHMEM highmem allocation,
13771da177e4SLinus Torvalds  *      	%GFP_FS     don't call back into a file system.
13781da177e4SLinus Torvalds  *      	%GFP_ATOMIC don't sleep.
13791da177e4SLinus Torvalds  *	@order: Power of two of allocation size in pages. 0 is a single page.
13801da177e4SLinus Torvalds  *
13811da177e4SLinus Torvalds  *	Allocate a page from the kernel page pool.  When not in
13821da177e4SLinus Torvalds  *	interrupt context, apply the current process's NUMA policy.
13831da177e4SLinus Torvalds  *	Returns NULL when no page can be allocated.
13841da177e4SLinus Torvalds  *
1385cf2a473cSPaul Jackson  *	Don't call cpuset_update_task_memory_state() unless
13861da177e4SLinus Torvalds  *	1) it's ok to take cpuset_sem (can WAIT), and
13871da177e4SLinus Torvalds  *	2) allocating for current task (not interrupt).
13881da177e4SLinus Torvalds  */
1389dd0fc66fSAl Viro struct page *alloc_pages_current(gfp_t gfp, unsigned order)
13901da177e4SLinus Torvalds {
13911da177e4SLinus Torvalds 	struct mempolicy *pol = current->mempolicy;
13921da177e4SLinus Torvalds 
13931da177e4SLinus Torvalds 	if ((gfp & __GFP_WAIT) && !in_interrupt())
1394cf2a473cSPaul Jackson 		cpuset_update_task_memory_state();
13959b819d20SChristoph Lameter 	if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
13961da177e4SLinus Torvalds 		pol = &default_policy;
13971da177e4SLinus Torvalds 	if (pol->policy == MPOL_INTERLEAVE)
13981da177e4SLinus Torvalds 		return alloc_page_interleave(gfp, order, interleave_nodes(pol));
139919770b32SMel Gorman 	return __alloc_pages_nodemask(gfp, order,
140019770b32SMel Gorman 			zonelist_policy(gfp, pol), nodemask_policy(gfp, pol));
14011da177e4SLinus Torvalds }
14021da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_pages_current);
14031da177e4SLinus Torvalds 
14044225399aSPaul Jackson /*
14054225399aSPaul Jackson  * If mpol_copy() sees current->cpuset == cpuset_being_rebound, then it
14064225399aSPaul Jackson  * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
14074225399aSPaul Jackson  * with the mems_allowed returned by cpuset_mems_allowed().  This
14084225399aSPaul Jackson  * keeps mempolicies cpuset-relative after their cpuset moves.  See
14094225399aSPaul Jackson  * also update_nodemask() in kernel/cpuset.c.
14104225399aSPaul Jackson  */
14114225399aSPaul Jackson 
14121da177e4SLinus Torvalds /* Slow path of a mempolicy copy */
14131da177e4SLinus Torvalds struct mempolicy *__mpol_copy(struct mempolicy *old)
14141da177e4SLinus Torvalds {
14151da177e4SLinus Torvalds 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
14161da177e4SLinus Torvalds 
14171da177e4SLinus Torvalds 	if (!new)
14181da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
14194225399aSPaul Jackson 	if (current_cpuset_is_being_rebound()) {
14204225399aSPaul Jackson 		nodemask_t mems = cpuset_mems_allowed(current);
14214225399aSPaul Jackson 		mpol_rebind_policy(old, &mems);
14224225399aSPaul Jackson 	}
14231da177e4SLinus Torvalds 	*new = *old;
14241da177e4SLinus Torvalds 	atomic_set(&new->refcnt, 1);
14251da177e4SLinus Torvalds 	return new;
14261da177e4SLinus Torvalds }
14271da177e4SLinus Torvalds 
1428*f5b087b5SDavid Rientjes static int mpol_match_intent(const struct mempolicy *a,
1429*f5b087b5SDavid Rientjes 			     const struct mempolicy *b)
1430*f5b087b5SDavid Rientjes {
1431*f5b087b5SDavid Rientjes 	if (a->flags != b->flags)
1432*f5b087b5SDavid Rientjes 		return 0;
1433*f5b087b5SDavid Rientjes 	if (!mpol_store_user_nodemask(a))
1434*f5b087b5SDavid Rientjes 		return 1;
1435*f5b087b5SDavid Rientjes 	return nodes_equal(a->w.user_nodemask, b->w.user_nodemask);
1436*f5b087b5SDavid Rientjes }
1437*f5b087b5SDavid Rientjes 
14381da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */
14391da177e4SLinus Torvalds int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
14401da177e4SLinus Torvalds {
14411da177e4SLinus Torvalds 	if (!a || !b)
14421da177e4SLinus Torvalds 		return 0;
14431da177e4SLinus Torvalds 	if (a->policy != b->policy)
14441da177e4SLinus Torvalds 		return 0;
1445*f5b087b5SDavid Rientjes 	if (a->policy != MPOL_DEFAULT && !mpol_match_intent(a, b))
1446*f5b087b5SDavid Rientjes 		return 0;
14471da177e4SLinus Torvalds 	switch (a->policy) {
14481da177e4SLinus Torvalds 	case MPOL_DEFAULT:
14491da177e4SLinus Torvalds 		return 1;
145019770b32SMel Gorman 	case MPOL_BIND:
145119770b32SMel Gorman 		/* Fall through */
14521da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
1453dfcd3c0dSAndi Kleen 		return nodes_equal(a->v.nodes, b->v.nodes);
14541da177e4SLinus Torvalds 	case MPOL_PREFERRED:
14551da177e4SLinus Torvalds 		return a->v.preferred_node == b->v.preferred_node;
14561da177e4SLinus Torvalds 	default:
14571da177e4SLinus Torvalds 		BUG();
14581da177e4SLinus Torvalds 		return 0;
14591da177e4SLinus Torvalds 	}
14601da177e4SLinus Torvalds }
14611da177e4SLinus Torvalds 
14621da177e4SLinus Torvalds /* Slow path of a mpol destructor. */
14631da177e4SLinus Torvalds void __mpol_free(struct mempolicy *p)
14641da177e4SLinus Torvalds {
14651da177e4SLinus Torvalds 	if (!atomic_dec_and_test(&p->refcnt))
14661da177e4SLinus Torvalds 		return;
14671da177e4SLinus Torvalds 	p->policy = MPOL_DEFAULT;
14681da177e4SLinus Torvalds 	kmem_cache_free(policy_cache, p);
14691da177e4SLinus Torvalds }
14701da177e4SLinus Torvalds 
14711da177e4SLinus Torvalds /*
14721da177e4SLinus Torvalds  * Shared memory backing store policy support.
14731da177e4SLinus Torvalds  *
14741da177e4SLinus Torvalds  * Remember policies even when nobody has shared memory mapped.
14751da177e4SLinus Torvalds  * The policies are kept in Red-Black tree linked from the inode.
14761da177e4SLinus Torvalds  * They are protected by the sp->lock spinlock, which should be held
14771da177e4SLinus Torvalds  * for any accesses to the tree.
14781da177e4SLinus Torvalds  */
14791da177e4SLinus Torvalds 
14801da177e4SLinus Torvalds /* lookup first element intersecting start-end */
14811da177e4SLinus Torvalds /* Caller holds sp->lock */
14821da177e4SLinus Torvalds static struct sp_node *
14831da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
14841da177e4SLinus Torvalds {
14851da177e4SLinus Torvalds 	struct rb_node *n = sp->root.rb_node;
14861da177e4SLinus Torvalds 
14871da177e4SLinus Torvalds 	while (n) {
14881da177e4SLinus Torvalds 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
14891da177e4SLinus Torvalds 
14901da177e4SLinus Torvalds 		if (start >= p->end)
14911da177e4SLinus Torvalds 			n = n->rb_right;
14921da177e4SLinus Torvalds 		else if (end <= p->start)
14931da177e4SLinus Torvalds 			n = n->rb_left;
14941da177e4SLinus Torvalds 		else
14951da177e4SLinus Torvalds 			break;
14961da177e4SLinus Torvalds 	}
14971da177e4SLinus Torvalds 	if (!n)
14981da177e4SLinus Torvalds 		return NULL;
14991da177e4SLinus Torvalds 	for (;;) {
15001da177e4SLinus Torvalds 		struct sp_node *w = NULL;
15011da177e4SLinus Torvalds 		struct rb_node *prev = rb_prev(n);
15021da177e4SLinus Torvalds 		if (!prev)
15031da177e4SLinus Torvalds 			break;
15041da177e4SLinus Torvalds 		w = rb_entry(prev, struct sp_node, nd);
15051da177e4SLinus Torvalds 		if (w->end <= start)
15061da177e4SLinus Torvalds 			break;
15071da177e4SLinus Torvalds 		n = prev;
15081da177e4SLinus Torvalds 	}
15091da177e4SLinus Torvalds 	return rb_entry(n, struct sp_node, nd);
15101da177e4SLinus Torvalds }
15111da177e4SLinus Torvalds 
15121da177e4SLinus Torvalds /* Insert a new shared policy into the list. */
15131da177e4SLinus Torvalds /* Caller holds sp->lock */
15141da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new)
15151da177e4SLinus Torvalds {
15161da177e4SLinus Torvalds 	struct rb_node **p = &sp->root.rb_node;
15171da177e4SLinus Torvalds 	struct rb_node *parent = NULL;
15181da177e4SLinus Torvalds 	struct sp_node *nd;
15191da177e4SLinus Torvalds 
15201da177e4SLinus Torvalds 	while (*p) {
15211da177e4SLinus Torvalds 		parent = *p;
15221da177e4SLinus Torvalds 		nd = rb_entry(parent, struct sp_node, nd);
15231da177e4SLinus Torvalds 		if (new->start < nd->start)
15241da177e4SLinus Torvalds 			p = &(*p)->rb_left;
15251da177e4SLinus Torvalds 		else if (new->end > nd->end)
15261da177e4SLinus Torvalds 			p = &(*p)->rb_right;
15271da177e4SLinus Torvalds 		else
15281da177e4SLinus Torvalds 			BUG();
15291da177e4SLinus Torvalds 	}
15301da177e4SLinus Torvalds 	rb_link_node(&new->nd, parent, p);
15311da177e4SLinus Torvalds 	rb_insert_color(&new->nd, &sp->root);
1532140d5a49SPaul Mundt 	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
15331da177e4SLinus Torvalds 		 new->policy ? new->policy->policy : 0);
15341da177e4SLinus Torvalds }
15351da177e4SLinus Torvalds 
15361da177e4SLinus Torvalds /* Find shared policy intersecting idx */
15371da177e4SLinus Torvalds struct mempolicy *
15381da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
15391da177e4SLinus Torvalds {
15401da177e4SLinus Torvalds 	struct mempolicy *pol = NULL;
15411da177e4SLinus Torvalds 	struct sp_node *sn;
15421da177e4SLinus Torvalds 
15431da177e4SLinus Torvalds 	if (!sp->root.rb_node)
15441da177e4SLinus Torvalds 		return NULL;
15451da177e4SLinus Torvalds 	spin_lock(&sp->lock);
15461da177e4SLinus Torvalds 	sn = sp_lookup(sp, idx, idx+1);
15471da177e4SLinus Torvalds 	if (sn) {
15481da177e4SLinus Torvalds 		mpol_get(sn->policy);
15491da177e4SLinus Torvalds 		pol = sn->policy;
15501da177e4SLinus Torvalds 	}
15511da177e4SLinus Torvalds 	spin_unlock(&sp->lock);
15521da177e4SLinus Torvalds 	return pol;
15531da177e4SLinus Torvalds }
15541da177e4SLinus Torvalds 
15551da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n)
15561da177e4SLinus Torvalds {
1557140d5a49SPaul Mundt 	pr_debug("deleting %lx-%lx\n", n->start, n->end);
15581da177e4SLinus Torvalds 	rb_erase(&n->nd, &sp->root);
15591da177e4SLinus Torvalds 	mpol_free(n->policy);
15601da177e4SLinus Torvalds 	kmem_cache_free(sn_cache, n);
15611da177e4SLinus Torvalds }
15621da177e4SLinus Torvalds 
1563dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
1564dbcb0f19SAdrian Bunk 				struct mempolicy *pol)
15651da177e4SLinus Torvalds {
15661da177e4SLinus Torvalds 	struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
15671da177e4SLinus Torvalds 
15681da177e4SLinus Torvalds 	if (!n)
15691da177e4SLinus Torvalds 		return NULL;
15701da177e4SLinus Torvalds 	n->start = start;
15711da177e4SLinus Torvalds 	n->end = end;
15721da177e4SLinus Torvalds 	mpol_get(pol);
15731da177e4SLinus Torvalds 	n->policy = pol;
15741da177e4SLinus Torvalds 	return n;
15751da177e4SLinus Torvalds }
15761da177e4SLinus Torvalds 
15771da177e4SLinus Torvalds /* Replace a policy range. */
15781da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
15791da177e4SLinus Torvalds 				 unsigned long end, struct sp_node *new)
15801da177e4SLinus Torvalds {
15811da177e4SLinus Torvalds 	struct sp_node *n, *new2 = NULL;
15821da177e4SLinus Torvalds 
15831da177e4SLinus Torvalds restart:
15841da177e4SLinus Torvalds 	spin_lock(&sp->lock);
15851da177e4SLinus Torvalds 	n = sp_lookup(sp, start, end);
15861da177e4SLinus Torvalds 	/* Take care of old policies in the same range. */
15871da177e4SLinus Torvalds 	while (n && n->start < end) {
15881da177e4SLinus Torvalds 		struct rb_node *next = rb_next(&n->nd);
15891da177e4SLinus Torvalds 		if (n->start >= start) {
15901da177e4SLinus Torvalds 			if (n->end <= end)
15911da177e4SLinus Torvalds 				sp_delete(sp, n);
15921da177e4SLinus Torvalds 			else
15931da177e4SLinus Torvalds 				n->start = end;
15941da177e4SLinus Torvalds 		} else {
15951da177e4SLinus Torvalds 			/* Old policy spanning whole new range. */
15961da177e4SLinus Torvalds 			if (n->end > end) {
15971da177e4SLinus Torvalds 				if (!new2) {
15981da177e4SLinus Torvalds 					spin_unlock(&sp->lock);
15991da177e4SLinus Torvalds 					new2 = sp_alloc(end, n->end, n->policy);
16001da177e4SLinus Torvalds 					if (!new2)
16011da177e4SLinus Torvalds 						return -ENOMEM;
16021da177e4SLinus Torvalds 					goto restart;
16031da177e4SLinus Torvalds 				}
16041da177e4SLinus Torvalds 				n->end = start;
16051da177e4SLinus Torvalds 				sp_insert(sp, new2);
16061da177e4SLinus Torvalds 				new2 = NULL;
16071da177e4SLinus Torvalds 				break;
16081da177e4SLinus Torvalds 			} else
16091da177e4SLinus Torvalds 				n->end = start;
16101da177e4SLinus Torvalds 		}
16111da177e4SLinus Torvalds 		if (!next)
16121da177e4SLinus Torvalds 			break;
16131da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
16141da177e4SLinus Torvalds 	}
16151da177e4SLinus Torvalds 	if (new)
16161da177e4SLinus Torvalds 		sp_insert(sp, new);
16171da177e4SLinus Torvalds 	spin_unlock(&sp->lock);
16181da177e4SLinus Torvalds 	if (new2) {
16191da177e4SLinus Torvalds 		mpol_free(new2->policy);
16201da177e4SLinus Torvalds 		kmem_cache_free(sn_cache, new2);
16211da177e4SLinus Torvalds 	}
16221da177e4SLinus Torvalds 	return 0;
16231da177e4SLinus Torvalds }
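
/*
 * Worked illustration of the splitting above (editor's addition):
 * replacing [2,5) in a tree holding a single node [0,10) first
 * allocates new2 = [5,10) with the old policy, truncates the old node
 * to [0,2), inserts new2, then inserts the new [2,5) node -- leaving
 * [0,2)old, [2,5)new, [5,10)old.
 */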
16241da177e4SLinus Torvalds 
1625a3b51e01SDavid Rientjes void mpol_shared_policy_init(struct shared_policy *info, unsigned short policy,
1626028fec41SDavid Rientjes 			unsigned short flags, nodemask_t *policy_nodes)
16277339ff83SRobin Holt {
16287339ff83SRobin Holt 	info->root = RB_ROOT;
16297339ff83SRobin Holt 	spin_lock_init(&info->lock);
16307339ff83SRobin Holt 
16317339ff83SRobin Holt 	if (policy != MPOL_DEFAULT) {
16327339ff83SRobin Holt 		struct mempolicy *newpol;
16337339ff83SRobin Holt 
16347339ff83SRobin Holt 		/* Falls back to MPOL_DEFAULT on any error */
1635028fec41SDavid Rientjes 		newpol = mpol_new(policy, flags, policy_nodes);
16367339ff83SRobin Holt 		if (!IS_ERR(newpol)) {
16377339ff83SRobin Holt 			/* Create pseudo-vma that contains just the policy */
16387339ff83SRobin Holt 			struct vm_area_struct pvma;
16397339ff83SRobin Holt 
16407339ff83SRobin Holt 			memset(&pvma, 0, sizeof(struct vm_area_struct));
16417339ff83SRobin Holt 			/* Policy covers entire file */
16427339ff83SRobin Holt 			pvma.vm_end = TASK_SIZE;
16437339ff83SRobin Holt 			mpol_set_shared_policy(info, &pvma, newpol);
16447339ff83SRobin Holt 			mpol_free(newpol);
16457339ff83SRobin Holt 		}
16467339ff83SRobin Holt 	}
16477339ff83SRobin Holt }
16487339ff83SRobin Holt 
16491da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info,
16501da177e4SLinus Torvalds 			struct vm_area_struct *vma, struct mempolicy *npol)
16511da177e4SLinus Torvalds {
16521da177e4SLinus Torvalds 	int err;
16531da177e4SLinus Torvalds 	struct sp_node *new = NULL;
16541da177e4SLinus Torvalds 	unsigned long sz = vma_pages(vma);
16551da177e4SLinus Torvalds 
1656028fec41SDavid Rientjes 	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
16571da177e4SLinus Torvalds 		 vma->vm_pgoff,
16581da177e4SLinus Torvalds 		 sz, npol ? npol->policy : -1,
1659028fec41SDavid Rientjes 		 npol ? npol->flags : -1,
1660dfcd3c0dSAndi Kleen 		 npol ? nodes_addr(npol->v.nodes)[0] : -1);
16611da177e4SLinus Torvalds 
16621da177e4SLinus Torvalds 	if (npol) {
16631da177e4SLinus Torvalds 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
16641da177e4SLinus Torvalds 		if (!new)
16651da177e4SLinus Torvalds 			return -ENOMEM;
16661da177e4SLinus Torvalds 	}
16671da177e4SLinus Torvalds 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
16681da177e4SLinus Torvalds 	if (err && new)
16691da177e4SLinus Torvalds 		kmem_cache_free(sn_cache, new);
16701da177e4SLinus Torvalds 	return err;
16711da177e4SLinus Torvalds }
16721da177e4SLinus Torvalds 
16731da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */
16741da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p)
16751da177e4SLinus Torvalds {
16761da177e4SLinus Torvalds 	struct sp_node *n;
16771da177e4SLinus Torvalds 	struct rb_node *next;
16781da177e4SLinus Torvalds 
16791da177e4SLinus Torvalds 	if (!p->root.rb_node)
16801da177e4SLinus Torvalds 		return;
16811da177e4SLinus Torvalds 	spin_lock(&p->lock);
16821da177e4SLinus Torvalds 	next = rb_first(&p->root);
16831da177e4SLinus Torvalds 	while (next) {
16841da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
16851da177e4SLinus Torvalds 		next = rb_next(&n->nd);
168690c5029eSAndi Kleen 		rb_erase(&n->nd, &p->root);
16871da177e4SLinus Torvalds 		mpol_free(n->policy);
16881da177e4SLinus Torvalds 		kmem_cache_free(sn_cache, n);
16891da177e4SLinus Torvalds 	}
16901da177e4SLinus Torvalds 	spin_unlock(&p->lock);
16911da177e4SLinus Torvalds }
16921da177e4SLinus Torvalds 
16931da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */
16941da177e4SLinus Torvalds void __init numa_policy_init(void)
16951da177e4SLinus Torvalds {
1696b71636e2SPaul Mundt 	nodemask_t interleave_nodes;
1697b71636e2SPaul Mundt 	unsigned long largest = 0;
1698b71636e2SPaul Mundt 	int nid, prefer = 0;
1699b71636e2SPaul Mundt 
17001da177e4SLinus Torvalds 	policy_cache = kmem_cache_create("numa_policy",
17011da177e4SLinus Torvalds 					 sizeof(struct mempolicy),
170220c2df83SPaul Mundt 					 0, SLAB_PANIC, NULL);
17031da177e4SLinus Torvalds 
17041da177e4SLinus Torvalds 	sn_cache = kmem_cache_create("shared_policy_node",
17051da177e4SLinus Torvalds 				     sizeof(struct sp_node),
170620c2df83SPaul Mundt 				     0, SLAB_PANIC, NULL);
17071da177e4SLinus Torvalds 
1708b71636e2SPaul Mundt 	/*
1709b71636e2SPaul Mundt 	 * Set interleaving policy for system init. Interleaving is only
1710b71636e2SPaul Mundt 	 * enabled across suitably sized nodes (default is >= 16MB),
1711b71636e2SPaul Mundt 	 * falling back to the largest node if they're all smaller.
1712b71636e2SPaul Mundt 	 */
1713b71636e2SPaul Mundt 	nodes_clear(interleave_nodes);
171456bbd65dSChristoph Lameter 	for_each_node_state(nid, N_HIGH_MEMORY) {
1715b71636e2SPaul Mundt 		unsigned long total_pages = node_present_pages(nid);
17161da177e4SLinus Torvalds 
1717b71636e2SPaul Mundt 		/* Preserve the largest node */
1718b71636e2SPaul Mundt 		if (largest < total_pages) {
1719b71636e2SPaul Mundt 			largest = total_pages;
1720b71636e2SPaul Mundt 			prefer = nid;
1721b71636e2SPaul Mundt 		}
1722b71636e2SPaul Mundt 
1723b71636e2SPaul Mundt 		/* Interleave this node? */
1724b71636e2SPaul Mundt 		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
1725b71636e2SPaul Mundt 			node_set(nid, interleave_nodes);
1726b71636e2SPaul Mundt 	}
1727b71636e2SPaul Mundt 
1728b71636e2SPaul Mundt 	/* All too small, use the largest */
1729b71636e2SPaul Mundt 	if (unlikely(nodes_empty(interleave_nodes)))
1730b71636e2SPaul Mundt 		node_set(prefer, interleave_nodes);
1731b71636e2SPaul Mundt 
1732028fec41SDavid Rientjes 	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
17331da177e4SLinus Torvalds 		printk(KERN_ERR "numa_policy_init: interleaving failed\n");
17341da177e4SLinus Torvalds }
17351da177e4SLinus Torvalds 
17368bccd85fSChristoph Lameter /* Reset policy of current process to default */
17371da177e4SLinus Torvalds void numa_default_policy(void)
17381da177e4SLinus Torvalds {
1739028fec41SDavid Rientjes 	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
17401da177e4SLinus Torvalds }
174168860ec1SPaul Jackson 
174268860ec1SPaul Jackson /* Migrate a policy to a different set of nodes */
1743dbcb0f19SAdrian Bunk static void mpol_rebind_policy(struct mempolicy *pol,
1744dbcb0f19SAdrian Bunk 			       const nodemask_t *newmask)
174568860ec1SPaul Jackson {
174668860ec1SPaul Jackson 	nodemask_t tmp;
1747*f5b087b5SDavid Rientjes 	int static_nodes;
174868860ec1SPaul Jackson 
174968860ec1SPaul Jackson 	if (!pol)
175068860ec1SPaul Jackson 		return;
1751*f5b087b5SDavid Rientjes 	static_nodes = pol->flags & MPOL_F_STATIC_NODES;
1752*f5b087b5SDavid Rientjes 	if (!mpol_store_user_nodemask(pol) &&
1753*f5b087b5SDavid Rientjes 	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
175474cb2155SPaul Jackson 		return;
175568860ec1SPaul Jackson 
175668860ec1SPaul Jackson 	switch (pol->policy) {
175768860ec1SPaul Jackson 	case MPOL_DEFAULT:
175868860ec1SPaul Jackson 		break;
175919770b32SMel Gorman 	case MPOL_BIND:
176019770b32SMel Gorman 		/* Fall through */
176168860ec1SPaul Jackson 	case MPOL_INTERLEAVE:
1762*f5b087b5SDavid Rientjes 		if (static_nodes)
1763*f5b087b5SDavid Rientjes 			nodes_and(tmp, pol->w.user_nodemask, *newmask);
1764*f5b087b5SDavid Rientjes 		else {
1765*f5b087b5SDavid Rientjes 			nodes_remap(tmp, pol->v.nodes,
1766*f5b087b5SDavid Rientjes 				    pol->w.cpuset_mems_allowed, *newmask);
1767*f5b087b5SDavid Rientjes 			pol->w.cpuset_mems_allowed = *newmask;
1768*f5b087b5SDavid Rientjes 		}
176968860ec1SPaul Jackson 		pol->v.nodes = tmp;
1770*f5b087b5SDavid Rientjes 		if (!node_isset(current->il_next, tmp)) {
1771*f5b087b5SDavid Rientjes 			current->il_next = next_node(current->il_next, tmp);
1772*f5b087b5SDavid Rientjes 			if (current->il_next >= MAX_NUMNODES)
1773*f5b087b5SDavid Rientjes 				current->il_next = first_node(tmp);
1774*f5b087b5SDavid Rientjes 			if (current->il_next >= MAX_NUMNODES)
1775*f5b087b5SDavid Rientjes 				current->il_next = numa_node_id();
1776*f5b087b5SDavid Rientjes 		}
177768860ec1SPaul Jackson 		break;
177868860ec1SPaul Jackson 	case MPOL_PREFERRED:
1779*f5b087b5SDavid Rientjes 		if (static_nodes) {
1780*f5b087b5SDavid Rientjes 			int node = first_node(pol->w.user_nodemask);
1781*f5b087b5SDavid Rientjes 
1782*f5b087b5SDavid Rientjes 			if (node_isset(node, *newmask))
1783*f5b087b5SDavid Rientjes 				pol->v.preferred_node = node;
1784*f5b087b5SDavid Rientjes 			else
1785*f5b087b5SDavid Rientjes 				pol->v.preferred_node = -1;
1786*f5b087b5SDavid Rientjes 		} else {
178768860ec1SPaul Jackson 			pol->v.preferred_node = node_remap(pol->v.preferred_node,
1788*f5b087b5SDavid Rientjes 					pol->w.cpuset_mems_allowed, *newmask);
1789*f5b087b5SDavid Rientjes 			pol->w.cpuset_mems_allowed = *newmask;
1790*f5b087b5SDavid Rientjes 		}
179168860ec1SPaul Jackson 		break;
179268860ec1SPaul Jackson 	default:
179368860ec1SPaul Jackson 		BUG();
179468860ec1SPaul Jackson 		break;
179568860ec1SPaul Jackson 	}
179668860ec1SPaul Jackson }
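
/*
 * Worked illustration of the rebind above (editor's addition): an
 * interleave policy over {0,1}, created while cpuset_mems_allowed was
 * {0,1} and later moved to a cpuset allowing {2,3}, is remapped by
 * relative position to {2,3}.  With MPOL_F_STATIC_NODES the
 * user-supplied mask is instead intersected with the new mems, so
 * {0,1} & {2,3} leaves the policy with no nodes.
 */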
179768860ec1SPaul Jackson 
179868860ec1SPaul Jackson /*
179974cb2155SPaul Jackson  * Wrapper for mpol_rebind_policy() that just requires task
180074cb2155SPaul Jackson  * pointer, and updates task mempolicy.
180168860ec1SPaul Jackson  */
180274cb2155SPaul Jackson 
180374cb2155SPaul Jackson void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
180468860ec1SPaul Jackson {
180574cb2155SPaul Jackson 	mpol_rebind_policy(tsk->mempolicy, new);
180668860ec1SPaul Jackson }
18071a75a6c8SChristoph Lameter 
18081a75a6c8SChristoph Lameter /*
18094225399aSPaul Jackson  * Rebind each vma in mm to new nodemask.
18104225399aSPaul Jackson  *
18114225399aSPaul Jackson  * Call holding a reference to mm.  Takes mm->mmap_sem during call.
18124225399aSPaul Jackson  */
18134225399aSPaul Jackson 
18144225399aSPaul Jackson void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
18154225399aSPaul Jackson {
18164225399aSPaul Jackson 	struct vm_area_struct *vma;
18174225399aSPaul Jackson 
18184225399aSPaul Jackson 	down_write(&mm->mmap_sem);
18194225399aSPaul Jackson 	for (vma = mm->mmap; vma; vma = vma->vm_next)
18204225399aSPaul Jackson 		mpol_rebind_policy(vma->vm_policy, new);
18214225399aSPaul Jackson 	up_write(&mm->mmap_sem);
18224225399aSPaul Jackson }
18234225399aSPaul Jackson 
18244225399aSPaul Jackson /*
18251a75a6c8SChristoph Lameter  * Display pages allocated per node and memory policy via /proc.
18261a75a6c8SChristoph Lameter  */
18271a75a6c8SChristoph Lameter 
182815ad7cdcSHelge Deller static const char * const policy_types[] =
182915ad7cdcSHelge Deller 	{ "default", "prefer", "bind", "interleave" };
18301a75a6c8SChristoph Lameter 
18311a75a6c8SChristoph Lameter /*
18321a75a6c8SChristoph Lameter  * Convert a mempolicy into a string.
18331a75a6c8SChristoph Lameter  * Returns the number of characters in buffer (if positive)
18341a75a6c8SChristoph Lameter  * or an error (negative)
18351a75a6c8SChristoph Lameter  */
18361a75a6c8SChristoph Lameter static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
18371a75a6c8SChristoph Lameter {
18381a75a6c8SChristoph Lameter 	char *p = buffer;
18391a75a6c8SChristoph Lameter 	int l;
18401a75a6c8SChristoph Lameter 	nodemask_t nodes;
1841a3b51e01SDavid Rientjes 	unsigned short mode = pol ? pol->policy : MPOL_DEFAULT;
1842*f5b087b5SDavid Rientjes 	unsigned short flags = pol ? pol->flags : 0;
18431a75a6c8SChristoph Lameter 
18441a75a6c8SChristoph Lameter 	switch (mode) {
18451a75a6c8SChristoph Lameter 	case MPOL_DEFAULT:
18461a75a6c8SChristoph Lameter 		nodes_clear(nodes);
18471a75a6c8SChristoph Lameter 		break;
18481a75a6c8SChristoph Lameter 
18491a75a6c8SChristoph Lameter 	case MPOL_PREFERRED:
18501a75a6c8SChristoph Lameter 		nodes_clear(nodes);
18511a75a6c8SChristoph Lameter 		node_set(pol->v.preferred_node, nodes);
18521a75a6c8SChristoph Lameter 		break;
18531a75a6c8SChristoph Lameter 
18541a75a6c8SChristoph Lameter 	case MPOL_BIND:
185519770b32SMel Gorman 		/* Fall through */
18561a75a6c8SChristoph Lameter 	case MPOL_INTERLEAVE:
18571a75a6c8SChristoph Lameter 		nodes = pol->v.nodes;
18581a75a6c8SChristoph Lameter 		break;
18591a75a6c8SChristoph Lameter 
18601a75a6c8SChristoph Lameter 	default:
18611a75a6c8SChristoph Lameter 		BUG();
18621a75a6c8SChristoph Lameter 		return -EFAULT;
18631a75a6c8SChristoph Lameter 	}
18641a75a6c8SChristoph Lameter 
18651a75a6c8SChristoph Lameter 	l = strlen(policy_types[mode]);
18661a75a6c8SChristoph Lameter 	if (buffer + maxlen < p + l + 1)
18671a75a6c8SChristoph Lameter 		return -ENOSPC;
18681a75a6c8SChristoph Lameter 
18691a75a6c8SChristoph Lameter 	strcpy(p, policy_types[mode]);
18701a75a6c8SChristoph Lameter 	p += l;
18711a75a6c8SChristoph Lameter 
1872*f5b087b5SDavid Rientjes 	if (flags) {
1873*f5b087b5SDavid Rientjes 		int need_bar = 0;
1874*f5b087b5SDavid Rientjes 
1875*f5b087b5SDavid Rientjes 		if (buffer + maxlen < p + 2)
1876*f5b087b5SDavid Rientjes 			return -ENOSPC;
1877*f5b087b5SDavid Rientjes 		*p++ = '=';
1878*f5b087b5SDavid Rientjes 
1879*f5b087b5SDavid Rientjes 		if (flags & MPOL_F_STATIC_NODES)
1880*f5b087b5SDavid Rientjes 			p += sprintf(p, "%sstatic", need_bar++ ? "|" : "");
1881*f5b087b5SDavid Rientjes 	}
1882*f5b087b5SDavid Rientjes 
18831a75a6c8SChristoph Lameter 	if (!nodes_empty(nodes)) {
18841a75a6c8SChristoph Lameter 		if (buffer + maxlen < p + 2)
18851a75a6c8SChristoph Lameter 			return -ENOSPC;
18861a75a6c8SChristoph Lameter 		*p++ = '=';
18871a75a6c8SChristoph Lameter 		p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
18881a75a6c8SChristoph Lameter 	}
18891a75a6c8SChristoph Lameter 	return p - buffer;
18901a75a6c8SChristoph Lameter }
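
/*
 * Example outputs of the encoding above (editor's illustration):
 * "default", "prefer=3", "bind=0-1", "interleave=static=0-3" -- the
 * mode name, then "=<flags>" if any flags are set, then "=<nodelist>"
 * if the nodemask is non-empty.
 */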
18911a75a6c8SChristoph Lameter 
18921a75a6c8SChristoph Lameter struct numa_maps {
18931a75a6c8SChristoph Lameter 	unsigned long pages;
18941a75a6c8SChristoph Lameter 	unsigned long anon;
1895397874dfSChristoph Lameter 	unsigned long active;
1896397874dfSChristoph Lameter 	unsigned long writeback;
18971a75a6c8SChristoph Lameter 	unsigned long mapcount_max;
1898397874dfSChristoph Lameter 	unsigned long dirty;
1899397874dfSChristoph Lameter 	unsigned long swapcache;
19001a75a6c8SChristoph Lameter 	unsigned long node[MAX_NUMNODES];
19011a75a6c8SChristoph Lameter };
19021a75a6c8SChristoph Lameter 
1903397874dfSChristoph Lameter static void gather_stats(struct page *page, void *private, int pte_dirty)
19041a75a6c8SChristoph Lameter {
19051a75a6c8SChristoph Lameter 	struct numa_maps *md = private;
19061a75a6c8SChristoph Lameter 	int count = page_mapcount(page);
19071a75a6c8SChristoph Lameter 
19081a75a6c8SChristoph Lameter 	md->pages++;
1909397874dfSChristoph Lameter 	if (pte_dirty || PageDirty(page))
1910397874dfSChristoph Lameter 		md->dirty++;
1911397874dfSChristoph Lameter 
1912397874dfSChristoph Lameter 	if (PageSwapCache(page))
1913397874dfSChristoph Lameter 		md->swapcache++;
1914397874dfSChristoph Lameter 
1915397874dfSChristoph Lameter 	if (PageActive(page))
1916397874dfSChristoph Lameter 		md->active++;
1917397874dfSChristoph Lameter 
1918397874dfSChristoph Lameter 	if (PageWriteback(page))
1919397874dfSChristoph Lameter 		md->writeback++;
19201a75a6c8SChristoph Lameter 
19211a75a6c8SChristoph Lameter 	if (PageAnon(page))
19221a75a6c8SChristoph Lameter 		md->anon++;
19231a75a6c8SChristoph Lameter 
1924397874dfSChristoph Lameter 	if (count > md->mapcount_max)
1925397874dfSChristoph Lameter 		md->mapcount_max = count;
1926397874dfSChristoph Lameter 
19271a75a6c8SChristoph Lameter 	md->node[page_to_nid(page)]++;
19281a75a6c8SChristoph Lameter }
19291a75a6c8SChristoph Lameter 
19307f709ed0SAndrew Morton #ifdef CONFIG_HUGETLB_PAGE
1931397874dfSChristoph Lameter static void check_huge_range(struct vm_area_struct *vma,
1932397874dfSChristoph Lameter 		unsigned long start, unsigned long end,
1933397874dfSChristoph Lameter 		struct numa_maps *md)
1934397874dfSChristoph Lameter {
1935397874dfSChristoph Lameter 	unsigned long addr;
1936397874dfSChristoph Lameter 	struct page *page;
1937397874dfSChristoph Lameter 
1938397874dfSChristoph Lameter 	for (addr = start; addr < end; addr += HPAGE_SIZE) {
1939397874dfSChristoph Lameter 		pte_t *ptep = huge_pte_offset(vma->vm_mm, addr & HPAGE_MASK);
1940397874dfSChristoph Lameter 		pte_t pte;
1941397874dfSChristoph Lameter 
1942397874dfSChristoph Lameter 		if (!ptep)
1943397874dfSChristoph Lameter 			continue;
1944397874dfSChristoph Lameter 
1945397874dfSChristoph Lameter 		pte = *ptep;
1946397874dfSChristoph Lameter 		if (pte_none(pte))
1947397874dfSChristoph Lameter 			continue;
1948397874dfSChristoph Lameter 
1949397874dfSChristoph Lameter 		page = pte_page(pte);
1950397874dfSChristoph Lameter 		if (!page)
1951397874dfSChristoph Lameter 			continue;
1952397874dfSChristoph Lameter 
1953397874dfSChristoph Lameter 		gather_stats(page, md, pte_dirty(*ptep));
1954397874dfSChristoph Lameter 	}
1955397874dfSChristoph Lameter }
19567f709ed0SAndrew Morton #else
19577f709ed0SAndrew Morton static inline void check_huge_range(struct vm_area_struct *vma,
19587f709ed0SAndrew Morton 		unsigned long start, unsigned long end,
19597f709ed0SAndrew Morton 		struct numa_maps *md)
19607f709ed0SAndrew Morton {
19617f709ed0SAndrew Morton }
19627f709ed0SAndrew Morton #endif
1963397874dfSChristoph Lameter 
19641a75a6c8SChristoph Lameter int show_numa_map(struct seq_file *m, void *v)
19651a75a6c8SChristoph Lameter {
196699f89551SEric W. Biederman 	struct proc_maps_private *priv = m->private;
19671a75a6c8SChristoph Lameter 	struct vm_area_struct *vma = v;
19681a75a6c8SChristoph Lameter 	struct numa_maps *md;
1969397874dfSChristoph Lameter 	struct file *file = vma->vm_file;
1970397874dfSChristoph Lameter 	struct mm_struct *mm = vma->vm_mm;
1971480eccf9SLee Schermerhorn 	struct mempolicy *pol;
19721a75a6c8SChristoph Lameter 	int n;
19731a75a6c8SChristoph Lameter 	char buffer[50];
19741a75a6c8SChristoph Lameter 
1975397874dfSChristoph Lameter 	if (!mm)
19761a75a6c8SChristoph Lameter 		return 0;
19771a75a6c8SChristoph Lameter 
19781a75a6c8SChristoph Lameter 	md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
19791a75a6c8SChristoph Lameter 	if (!md)
19801a75a6c8SChristoph Lameter 		return 0;
19811a75a6c8SChristoph Lameter 
1982480eccf9SLee Schermerhorn 	pol = get_vma_policy(priv->task, vma, vma->vm_start);
1983480eccf9SLee Schermerhorn 	mpol_to_str(buffer, sizeof(buffer), pol);
1984480eccf9SLee Schermerhorn 	/*
1985480eccf9SLee Schermerhorn 	 * unref shared or other task's mempolicy
1986480eccf9SLee Schermerhorn 	 */
1987480eccf9SLee Schermerhorn 	if (pol != &default_policy && pol != current->mempolicy)
1988480eccf9SLee Schermerhorn 		__mpol_free(pol);
19891a75a6c8SChristoph Lameter 
1990397874dfSChristoph Lameter 	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
1991397874dfSChristoph Lameter 
1992397874dfSChristoph Lameter 	if (file) {
1993397874dfSChristoph Lameter 		seq_printf(m, " file=");
1994c32c2f63SJan Blunck 		seq_path(m, &file->f_path, "\n\t= ");
1995397874dfSChristoph Lameter 	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
1996397874dfSChristoph Lameter 		seq_printf(m, " heap");
1997397874dfSChristoph Lameter 	} else if (vma->vm_start <= mm->start_stack &&
1998397874dfSChristoph Lameter 			vma->vm_end >= mm->start_stack) {
1999397874dfSChristoph Lameter 		seq_printf(m, " stack");
2000397874dfSChristoph Lameter 	}
2001397874dfSChristoph Lameter 
2002397874dfSChristoph Lameter 	if (is_vm_hugetlb_page(vma)) {
2003397874dfSChristoph Lameter 		check_huge_range(vma, vma->vm_start, vma->vm_end, md);
2004397874dfSChristoph Lameter 		seq_printf(m, " huge");
2005397874dfSChristoph Lameter 	} else {
2006397874dfSChristoph Lameter 		check_pgd_range(vma, vma->vm_start, vma->vm_end,
200756bbd65dSChristoph Lameter 			&node_states[N_HIGH_MEMORY], MPOL_MF_STATS, md);
2008397874dfSChristoph Lameter 	}
2009397874dfSChristoph Lameter 
2010397874dfSChristoph Lameter 	if (!md->pages)
2011397874dfSChristoph Lameter 		goto out;
20121a75a6c8SChristoph Lameter 
20131a75a6c8SChristoph Lameter 	if (md->anon)
20141a75a6c8SChristoph Lameter 		seq_printf(m," anon=%lu",md->anon);
20151a75a6c8SChristoph Lameter 
2016397874dfSChristoph Lameter 	if (md->dirty)
2017397874dfSChristoph Lameter 		seq_printf(m, " dirty=%lu", md->dirty);
2018397874dfSChristoph Lameter 
2019397874dfSChristoph Lameter 	if (md->pages != md->anon && md->pages != md->dirty)
2020397874dfSChristoph Lameter 		seq_printf(m, " mapped=%lu", md->pages);
2021397874dfSChristoph Lameter 
2022397874dfSChristoph Lameter 	if (md->mapcount_max > 1)
2023397874dfSChristoph Lameter 		seq_printf(m, " mapmax=%lu", md->mapcount_max);
2024397874dfSChristoph Lameter 
2025397874dfSChristoph Lameter 	if (md->swapcache)
2026397874dfSChristoph Lameter 		seq_printf(m, " swapcache=%lu", md->swapcache);
2027397874dfSChristoph Lameter 
2028397874dfSChristoph Lameter 	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
2029397874dfSChristoph Lameter 		seq_printf(m, " active=%lu", md->active);
2030397874dfSChristoph Lameter 
2031397874dfSChristoph Lameter 	if (md->writeback)
2032397874dfSChristoph Lameter 		seq_printf(m, " writeback=%lu", md->writeback);
2033397874dfSChristoph Lameter 
203456bbd65dSChristoph Lameter 	for_each_node_state(n, N_HIGH_MEMORY)
20351a75a6c8SChristoph Lameter 		if (md->node[n])
20361a75a6c8SChristoph Lameter 			seq_printf(m, " N%d=%lu", n, md->node[n]);
2037397874dfSChristoph Lameter out:
20381a75a6c8SChristoph Lameter 	seq_putc(m, '\n');
20391a75a6c8SChristoph Lameter 	kfree(md);
20401a75a6c8SChristoph Lameter 
20411a75a6c8SChristoph Lameter 	if (m->count < m->size)
204299f89551SEric W. Biederman 		m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
20431a75a6c8SChristoph Lameter 	return 0;
20441a75a6c8SChristoph Lameter }
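
/*
 * Example of a resulting /proc/<pid>/numa_maps line (editor's
 * illustration; the values are invented):
 *
 *   2aaaaac00000 interleave=0-3 anon=256 dirty=256 mapmax=2 N0=64 N1=64 N2=64 N3=64
 */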
2045