/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a per-process
 *                counter is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case node -1 here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non-interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem, kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
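
/*
 * Illustrative sketch (not part of this file): how the syscalls defined
 * below are typically driven from userspace, assuming the <numaif.h>
 * wrappers from libnuma are available. The nodemask argument is a plain
 * bitmap of node numbers; maxnode gives the size of the mask in bits.
 *
 *	#include <numaif.h>
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 1);	// nodes 0 and 1
 *
 *	// Interleave all future allocations of this process over nodes 0-1.
 *	set_mempolicy(MPOL_INTERLEAVE, &mask, sizeof(mask) * 8);
 *
 *	// Restrict an existing mapping to node 0; with MPOL_MF_STRICT,
 *	// mbind() fails with EIO if already present pages violate the policy.
 *	unsigned long node0 = 1UL << 0;
 *	mbind(addr, length, MPOL_BIND, &node0, sizeof(node0) * 8,
 *	      MPOL_MF_STRICT);
 */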

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
   could replace all the switch()es with a mempolicy_ops structure.
*/

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>

static kmem_cache_t *policy_cache;
static kmem_cache_t *sn_cache;

#define PDprintk(fmt...)

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
static int policy_zone;

struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.policy = MPOL_DEFAULT,
};

/* Do sanity checking on a policy */
static int mpol_check_policy(int mode, nodemask_t *nodes)
{
	int empty = nodes_empty(*nodes);

	switch (mode) {
	case MPOL_DEFAULT:
		if (!empty)
			return -EINVAL;
		break;
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
		/* Preferred will only use the first bit, but allow
		   more for now. */
		if (empty)
			return -EINVAL;
		break;
	}
	return nodes_subset(*nodes, node_online_map) ? 0 : -EINVAL;
}
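
/*
 * Illustrative input/result pairs for the checks above (the masks are
 * hypothetical): MPOL_DEFAULT accepts only an empty mask, while
 * MPOL_BIND/MPOL_INTERLEAVE require a non-empty one, and the final
 * subset test rejects masks naming offline nodes for any mode:
 *
 *	mpol_check_policy(MPOL_DEFAULT, &empty_mask)	-> 0
 *	mpol_check_policy(MPOL_DEFAULT, &node0_mask)	-> -EINVAL
 *	mpol_check_policy(MPOL_BIND, &empty_mask)	-> -EINVAL
 *	mpol_check_policy(MPOL_BIND, &offline_mask)	-> -EINVAL
 */
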
/* Generate a custom zonelist for the BIND policy. */
static struct zonelist *bind_zonelist(nodemask_t *nodes)
{
	struct zonelist *zl;
	int num, max, nd;

	max = 1 + MAX_NR_ZONES * nodes_weight(*nodes);
	zl = kmalloc(sizeof(void *) * max, GFP_KERNEL);
	if (!zl)
		return NULL;
	num = 0;
	for_each_node_mask(nd, *nodes) {
		int k;
		for (k = MAX_NR_ZONES-1; k >= 0; k--) {
			struct zone *z = &NODE_DATA(nd)->node_zones[k];
			if (!z->present_pages)
				continue;
			zl->zones[num++] = z;
			if (k > policy_zone)
				policy_zone = k;
		}
	}
	zl->zones[num] = NULL;
	return zl;
}

/* Create a new policy */
static struct mempolicy *mpol_new(int mode, nodemask_t *nodes)
{
	struct mempolicy *policy;

	PDprintk("setting mode %d nodes[0] %lx\n", mode, nodes_addr(*nodes)[0]);
	if (mode == MPOL_DEFAULT)
		return NULL;
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	switch (mode) {
	case MPOL_INTERLEAVE:
		policy->v.nodes = *nodes;
		if (nodes_weight(*nodes) == 0) {
			kmem_cache_free(policy_cache, policy);
			return ERR_PTR(-EINVAL);
		}
		break;
	case MPOL_PREFERRED:
		policy->v.preferred_node = first_node(*nodes);
		if (policy->v.preferred_node >= MAX_NUMNODES)
			policy->v.preferred_node = -1;
		break;
	case MPOL_BIND:
		policy->v.zonelist = bind_zonelist(nodes);
		if (policy->v.zonelist == NULL) {
			kmem_cache_free(policy_cache, policy);
			return ERR_PTR(-ENOMEM);
		}
		break;
	}
	policy->policy = mode;
	return policy;
}

/* Ensure all existing pages follow the policy. */
static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end, nodemask_t *nodes)
{
	pte_t *orig_pte;
	pte_t *pte;
	spinlock_t *ptl;

	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		struct page *page;
		unsigned int nid;

		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		nid = page_to_nid(page);
		if (!node_isset(nid, *nodes))
			break;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return addr != end;
}

static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
		unsigned long addr, unsigned long end, nodemask_t *nodes)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		if (check_pte_range(vma, pmd, addr, next, nodes))
			return -EIO;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
		unsigned long addr, unsigned long end, nodemask_t *nodes)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		if (check_pmd_range(vma, pud, addr, next, nodes))
			return -EIO;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static inline int check_pgd_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, nodemask_t *nodes)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		if (check_pud_range(vma, pgd, addr, next, nodes))
			return -EIO;
	} while (pgd++, addr = next, addr != end);
	return 0;
}

/* Step 1: check the range */
static struct vm_area_struct *
check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
	    nodemask_t *nodes, unsigned long flags)
{
	int err;
	struct vm_area_struct *first, *vma, *prev;

	first = find_vma(mm, start);
	if (!first)
		return ERR_PTR(-EFAULT);
	prev = NULL;
	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
		if (!vma->vm_next && vma->vm_end < end)
			return ERR_PTR(-EFAULT);
		if (prev && prev->vm_end < vma->vm_start)
			return ERR_PTR(-EFAULT);
		if ((flags & MPOL_MF_STRICT) && !is_vm_hugetlb_page(vma)) {
			unsigned long endvma = vma->vm_end;
			if (endvma > end)
				endvma = end;
			if (vma->vm_start > start)
				start = vma->vm_start;
			err = check_pgd_range(vma, start, endvma, nodes);
			if (err) {
				first = ERR_PTR(err);
				break;
			}
		}
		prev = vma;
	}
	return first;
}

/* Apply policy to a single VMA */
static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
{
	int err = 0;
	struct mempolicy *old = vma->vm_policy;

	PDprintk("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	if (vma->vm_ops && vma->vm_ops->set_policy)
		err = vma->vm_ops->set_policy(vma, new);
	if (!err) {
		mpol_get(new);
		vma->vm_policy = new;
		mpol_free(old);
	}
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, struct mempolicy *new)
{
	struct vm_area_struct *next;
	int err;

	err = 0;
	for (; vma && vma->vm_start < end; vma = next) {
		next = vma->vm_next;
		if (vma->vm_start < start)
			err = split_vma(vma->vm_mm, vma, start, 1);
		if (!err && vma->vm_end > end)
			err = split_vma(vma->vm_mm, vma, end, 0);
		if (!err)
			err = policy_vma(vma, new);
		if (err)
			break;
	}
	return err;
}

3391da177e4SLinus Torvalds 
3408bccd85fSChristoph Lameter static int contextualize_policy(int mode, nodemask_t *nodes)
3418bccd85fSChristoph Lameter {
3428bccd85fSChristoph Lameter 	if (!nodes)
3438bccd85fSChristoph Lameter 		return 0;
3448bccd85fSChristoph Lameter 
3458bccd85fSChristoph Lameter 	/* Update current mems_allowed */
3468bccd85fSChristoph Lameter 	cpuset_update_current_mems_allowed();
3478bccd85fSChristoph Lameter 	/* Ignore nodes not set in current->mems_allowed */
3488bccd85fSChristoph Lameter 	cpuset_restrict_to_mems_allowed(nodes->bits);
3498bccd85fSChristoph Lameter 	return mpol_check_policy(mode, nodes);
3508bccd85fSChristoph Lameter }
3518bccd85fSChristoph Lameter 
3528bccd85fSChristoph Lameter long do_mbind(unsigned long start, unsigned long len,
3538bccd85fSChristoph Lameter 		unsigned long mode, nodemask_t *nmask, unsigned long flags)
3541da177e4SLinus Torvalds {
3551da177e4SLinus Torvalds 	struct vm_area_struct *vma;
3561da177e4SLinus Torvalds 	struct mm_struct *mm = current->mm;
3571da177e4SLinus Torvalds 	struct mempolicy *new;
3581da177e4SLinus Torvalds 	unsigned long end;
3591da177e4SLinus Torvalds 	int err;
3601da177e4SLinus Torvalds 
3611da177e4SLinus Torvalds 	if ((flags & ~(unsigned long)(MPOL_MF_STRICT)) || mode > MPOL_MAX)
3621da177e4SLinus Torvalds 		return -EINVAL;
3631da177e4SLinus Torvalds 	if (start & ~PAGE_MASK)
3641da177e4SLinus Torvalds 		return -EINVAL;
3651da177e4SLinus Torvalds 	if (mode == MPOL_DEFAULT)
3661da177e4SLinus Torvalds 		flags &= ~MPOL_MF_STRICT;
3671da177e4SLinus Torvalds 	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
3681da177e4SLinus Torvalds 	end = start + len;
3691da177e4SLinus Torvalds 	if (end < start)
3701da177e4SLinus Torvalds 		return -EINVAL;
3711da177e4SLinus Torvalds 	if (end == start)
3721da177e4SLinus Torvalds 		return 0;
3735fcbb230SChristoph Lameter 	if (mpol_check_policy(mode, nmask))
3748bccd85fSChristoph Lameter 		return -EINVAL;
3758bccd85fSChristoph Lameter 	new = mpol_new(mode, nmask);
3761da177e4SLinus Torvalds 	if (IS_ERR(new))
3771da177e4SLinus Torvalds 		return PTR_ERR(new);
3781da177e4SLinus Torvalds 
3791da177e4SLinus Torvalds 	PDprintk("mbind %lx-%lx mode:%ld nodes:%lx\n",start,start+len,
380dfcd3c0dSAndi Kleen 			mode,nodes_addr(nodes)[0]);
3811da177e4SLinus Torvalds 
3821da177e4SLinus Torvalds 	down_write(&mm->mmap_sem);
3838bccd85fSChristoph Lameter 	vma = check_range(mm, start, end, nmask, flags);
3841da177e4SLinus Torvalds 	err = PTR_ERR(vma);
3851da177e4SLinus Torvalds 	if (!IS_ERR(vma))
3861da177e4SLinus Torvalds 		err = mbind_range(vma, start, end, new);
3871da177e4SLinus Torvalds 	up_write(&mm->mmap_sem);
3881da177e4SLinus Torvalds 	mpol_free(new);
3891da177e4SLinus Torvalds 	return err;
3901da177e4SLinus Torvalds }
3911da177e4SLinus Torvalds 
3921da177e4SLinus Torvalds /* Set the process memory policy */
3938bccd85fSChristoph Lameter long do_set_mempolicy(int mode, nodemask_t *nodes)
3941da177e4SLinus Torvalds {
3951da177e4SLinus Torvalds 	struct mempolicy *new;
3961da177e4SLinus Torvalds 
3978bccd85fSChristoph Lameter 	if (contextualize_policy(mode, nodes))
3981da177e4SLinus Torvalds 		return -EINVAL;
3998bccd85fSChristoph Lameter 	new = mpol_new(mode, nodes);
4001da177e4SLinus Torvalds 	if (IS_ERR(new))
4011da177e4SLinus Torvalds 		return PTR_ERR(new);
4021da177e4SLinus Torvalds 	mpol_free(current->mempolicy);
4031da177e4SLinus Torvalds 	current->mempolicy = new;
4041da177e4SLinus Torvalds 	if (new && new->policy == MPOL_INTERLEAVE)
405dfcd3c0dSAndi Kleen 		current->il_next = first_node(new->v.nodes);
4061da177e4SLinus Torvalds 	return 0;
4071da177e4SLinus Torvalds }
4081da177e4SLinus Torvalds 
/* Fill a node mask for a policy */
static void get_zonemask(struct mempolicy *p, nodemask_t *nodes)
{
	int i;

	nodes_clear(*nodes);
	switch (p->policy) {
	case MPOL_BIND:
		for (i = 0; p->v.zonelist->zones[i]; i++)
			node_set(p->v.zonelist->zones[i]->zone_pgdat->node_id,
				*nodes);
		break;
	case MPOL_DEFAULT:
		break;
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		/* or use current node instead of online map? */
		if (p->v.preferred_node < 0)
			*nodes = node_online_map;
		else
			node_set(p->v.preferred_node, *nodes);
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p;
	int err;

	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	return err;
}

/* Retrieve NUMA policy */
long do_get_mempolicy(int *policy, nodemask_t *nmask,
			unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	cpuset_update_current_mems_allowed();
	if (flags & ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR))
		return -EINVAL;
	if (flags & MPOL_F_ADDR) {
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->policy == MPOL_INTERLEAVE) {
			*policy = current->il_next;
		} else {
			err = -EINVAL;
			goto out;
		}
	} else
		*policy = pol->policy;

	if (vma) {
		up_read(&current->mm->mmap_sem);
		vma = NULL;
	}

	err = 0;
	if (nmask)
		get_zonemask(pol, nmask);

 out:
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}

/*
 * User space interface with variable sized bitmaps for nodelists.
 */

/* Copy a node mask from user space. */
static int get_nodes(nodemask_t *nodes, unsigned long __user *nmask,
		     unsigned long maxnode)
{
	unsigned long k;
	unsigned long nlongs;
	unsigned long endmask;

	--maxnode;
	nodes_clear(*nodes);
	if (maxnode == 0 || !nmask)
		return 0;

	nlongs = BITS_TO_LONGS(maxnode);
	if ((maxnode % BITS_PER_LONG) == 0)
		endmask = ~0UL;
	else
		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;

	/* When the user specified more nodes than supported just check
	   that the unsupported part is all zero. */
	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
		if (nlongs > PAGE_SIZE/sizeof(long))
			return -EINVAL;
		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
			unsigned long t;
			if (get_user(t, nmask + k))
				return -EFAULT;
			if (k == nlongs - 1) {
				if (t & endmask)
					return -EINVAL;
			} else if (t)
				return -EINVAL;
		}
		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
		endmask = ~0UL;
	}

	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
		return -EFAULT;
	nodes_addr(*nodes)[nlongs-1] &= endmask;
	return 0;
}
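
/*
 * Worked example for the bit arithmetic above (hypothetical values,
 * assuming BITS_PER_LONG == 64): a caller passing maxnode == 65 means
 * bits 0..63 are usable, since the convention counts one past the
 * highest bit. After --maxnode we have 64, so nlongs == 1 and
 * endmask == ~0UL: one full long is copied unmasked. With
 * maxnode == 33 instead, endmask == (1UL << 32) - 1 and the upper
 * 32 bits of the copied long are cleared.
 */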

/* Copy a kernel node mask to user space */
static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
			      nodemask_t *nodes)
{
	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);

	if (copy > nbytes) {
		if (copy > PAGE_SIZE)
			return -EINVAL;
		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
			return -EFAULT;
		copy = nbytes;
	}
	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
}

asmlinkage long sys_mbind(unsigned long start, unsigned long len,
			unsigned long mode,
			unsigned long __user *nmask, unsigned long maxnode,
			unsigned flags)
{
	nodemask_t nodes;
	int err;

	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_mbind(start, len, mode, &nodes, flags);
}

/* Set the process memory policy */
asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
		unsigned long maxnode)
{
	int err;
	nodemask_t nodes;

	if (mode < 0 || mode > MPOL_MAX)
		return -EINVAL;
	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_set_mempolicy(mode, &nodes);
}

/* Retrieve NUMA policy */
asmlinkage long sys_get_mempolicy(int __user *policy,
				unsigned long __user *nmask,
				unsigned long maxnode,
				unsigned long addr, unsigned long flags)
{
	int err, pval;
	nodemask_t nodes;

	if (nmask != NULL && maxnode < MAX_NUMNODES)
		return -EINVAL;

	err = do_get_mempolicy(&pval, &nodes, addr, flags);

	if (err)
		return err;

	if (policy && put_user(pval, policy))
		return -EFAULT;

	if (nmask)
		err = copy_nodes_to_user(nmask, maxnode, &nodes);

	return err;
}

#ifdef CONFIG_COMPAT

asmlinkage long compat_sys_get_mempolicy(int __user *policy,
				     compat_ulong_t __user *nmask,
				     compat_ulong_t maxnode,
				     compat_ulong_t addr, compat_ulong_t flags)
{
	long err;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask)
		nm = compat_alloc_user_space(alloc_size);

	err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);

	if (!err && nmask) {
		err = copy_from_user(bm, nm, alloc_size);
		/* ensure entire bitmap is zeroed */
		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
		err |= compat_put_bitmap(nmask, bm, nr_bits);
	}

	return err;
}

asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
				     compat_ulong_t maxnode)
{
	long err = 0;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		err = compat_get_bitmap(bm, nmask, nr_bits);
		nm = compat_alloc_user_space(alloc_size);
		err |= copy_to_user(nm, bm, alloc_size);
	}

	if (err)
		return -EFAULT;

	return sys_set_mempolicy(mode, nm, nr_bits+1);
}

asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
			     compat_ulong_t mode, compat_ulong_t __user *nmask,
			     compat_ulong_t maxnode, compat_ulong_t flags)
{
	long err = 0;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	nodemask_t bm;

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
		nm = compat_alloc_user_space(alloc_size);
		err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
	}

	if (err)
		return -EFAULT;

	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
}

#endif

/* Return effective policy for a VMA */
struct mempolicy *
get_vma_policy(struct task_struct *task, struct vm_area_struct *vma, unsigned long addr)
{
	struct mempolicy *pol = task->mempolicy;

	if (vma) {
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else if (vma->vm_policy &&
				vma->vm_policy->policy != MPOL_DEFAULT)
			pol = vma->vm_policy;
	}
	if (!pol)
		pol = &default_policy;
	return pol;
}

/* Return a zonelist representing a mempolicy */
static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy)
{
	int nd;

	switch (policy->policy) {
	case MPOL_PREFERRED:
		nd = policy->v.preferred_node;
		if (nd < 0)
			nd = numa_node_id();
		break;
	case MPOL_BIND:
		/* Lower zones don't get a policy applied */
		/* Careful: current->mems_allowed might have moved */
		if (gfp_zone(gfp) >= policy_zone)
			if (cpuset_zonelist_valid_mems_allowed(policy->v.zonelist))
				return policy->v.zonelist;
		/*FALL THROUGH*/
	case MPOL_INTERLEAVE: /* should not happen */
	case MPOL_DEFAULT:
		nd = numa_node_id();
		break;
	default:
		nd = 0;
		BUG();
	}
	return NODE_DATA(nd)->node_zonelists + gfp_zone(gfp);
}

/* Do dynamic interleaving for a process */
static unsigned interleave_nodes(struct mempolicy *policy)
{
	unsigned nid, next;
	struct task_struct *me = current;

	nid = me->il_next;
	next = next_node(nid, policy->v.nodes);
	if (next >= MAX_NUMNODES)
		next = first_node(policy->v.nodes);
	me->il_next = next;
	return nid;
}

/* Do static interleaving for a VMA with known offset. */
static unsigned offset_il_node(struct mempolicy *pol,
		struct vm_area_struct *vma, unsigned long off)
{
	unsigned nnodes = nodes_weight(pol->v.nodes);
	unsigned target = (unsigned)off % nnodes;
	int c;
	int nid = -1;

	c = 0;
	do {
		nid = next_node(nid, pol->v.nodes);
		c++;
	} while (c <= target);
	return nid;
}
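
/*
 * Worked example (hypothetical mask): with pol->v.nodes = {0,2,3} and
 * off == 7, nnodes == 3 and target == 7 % 3 == 1, so the loop above
 * stops after the second set node and returns node 2.
 */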

/* Determine a node number for interleave */
static inline unsigned interleave_nid(struct mempolicy *pol,
		 struct vm_area_struct *vma, unsigned long addr, int shift)
{
	if (vma) {
		unsigned long off;

		off = vma->vm_pgoff;
		off += (addr - vma->vm_start) >> shift;
		return offset_il_node(pol, vma, off);
	} else
		return interleave_nodes(pol);
}

/* Return a zonelist suitable for a huge page allocation. */
struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr)
{
	struct mempolicy *pol = get_vma_policy(current, vma, addr);

	if (pol->policy == MPOL_INTERLEAVE) {
		unsigned nid;

		nid = interleave_nid(pol, vma, addr, HPAGE_SHIFT);
		return NODE_DATA(nid)->node_zonelists + gfp_zone(GFP_HIGHUSER);
	}
	return zonelist_policy(GFP_HIGHUSER, pol);
}

/* Allocate a page in interleaved policy.
   Own path because it needs to do special accounting. */
static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
					unsigned nid)
{
	struct zonelist *zl;
	struct page *page;

	zl = NODE_DATA(nid)->node_zonelists + gfp_zone(gfp);
	page = __alloc_pages(gfp, order, zl);
	if (page && page_zone(page) == zl->zones[0]) {
		zone_pcp(zl->zones[0], get_cpu())->interleave_hit++;
		put_cpu();
	}
	return page;
}

/**
 *	alloc_page_vma	- Allocate a page for a VMA.
 *
 *	@gfp:
 *	%GFP_USER    user allocation.
 *	%GFP_KERNEL  kernel allocations,
 *	%GFP_HIGHMEM highmem/user allocations,
 *	%GFP_FS      allocation should not call back into a file system.
 *	%GFP_ATOMIC  don't sleep.
 *
 *	@vma:  Pointer to VMA or NULL if not available.
 *	@addr: Virtual address of the allocation. Must be inside the VMA.
 *
 *	This function allocates a page from the kernel page pool and applies
 *	a NUMA policy associated with the VMA or the current process.
 *	When VMA is not NULL the caller must hold down_read on the mmap_sem
 *	of the mm_struct of the VMA to prevent it from going away. Should be
 *	used for all allocations for pages that will be mapped into user
 *	space. Returns NULL when no page can be allocated.
 */
struct page *
alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
{
	struct mempolicy *pol = get_vma_policy(current, vma, addr);

	cpuset_update_current_mems_allowed();

	if (unlikely(pol->policy == MPOL_INTERLEAVE)) {
		unsigned nid;

		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
		return alloc_page_interleave(gfp, 0, nid);
	}
	return __alloc_pages(gfp, 0, zonelist_policy(gfp, pol));
}

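/*
 * Illustrative sketch (not part of this file): a fault-handler style
 * caller, holding down_read(&mm->mmap_sem), would use it like this:
 *
 *	struct page *page;
 *
 *	page = alloc_page_vma(GFP_HIGHUSER, vma, address);
 *	if (!page)
 *		return VM_FAULT_OOM;
 */
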
/**
 *	alloc_pages_current - Allocate pages.
 *
 *	@gfp:
 *		%GFP_USER   user allocation,
 *		%GFP_KERNEL kernel allocation,
 *		%GFP_HIGHMEM highmem allocation,
 *		%GFP_FS     don't call back into a file system.
 *		%GFP_ATOMIC don't sleep.
 *	@order: Power of two of allocation size in pages. 0 is a single page.
 *
 *	Allocate a page from the kernel page pool. When not in
 *	interrupt context, apply the current process' NUMA policy.
 *	Returns NULL when no page can be allocated.
 *
 *	Don't call cpuset_update_current_mems_allowed() unless
 *	1) it's ok to take cpuset_sem (can WAIT), and
 *	2) allocating for current task (not interrupt).
 */
struct page *alloc_pages_current(gfp_t gfp, unsigned order)
{
	struct mempolicy *pol = current->mempolicy;

	if ((gfp & __GFP_WAIT) && !in_interrupt())
		cpuset_update_current_mems_allowed();
	if (!pol || in_interrupt())
		pol = &default_policy;
	if (pol->policy == MPOL_INTERLEAVE)
		return alloc_page_interleave(gfp, order, interleave_nodes(pol));
	return __alloc_pages(gfp, order, zonelist_policy(gfp, pol));
}
EXPORT_SYMBOL(alloc_pages_current);

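/*
 * On CONFIG_NUMA kernels the generic alloc_pages(gfp, order) helper in
 * include/linux/gfp.h resolves to alloc_pages_current(), so an ordinary
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 0);
 *
 * already goes through the current task's policy: interleaving via
 * alloc_page_interleave() or zonelist selection via zonelist_policy().
 */
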
/* Slow path of a mempolicy copy */
struct mempolicy *__mpol_copy(struct mempolicy *old)
{
	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);

	if (!new)
		return ERR_PTR(-ENOMEM);
	*new = *old;
	atomic_set(&new->refcnt, 1);
	if (new->policy == MPOL_BIND) {
		int sz = ksize(old->v.zonelist);
		new->v.zonelist = kmalloc(sz, SLAB_KERNEL);
		if (!new->v.zonelist) {
			kmem_cache_free(policy_cache, new);
			return ERR_PTR(-ENOMEM);
		}
		memcpy(new->v.zonelist, old->v.zonelist, sz);
	}
	return new;
}

/* Slow path of a mempolicy comparison */
int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (!a || !b)
		return 0;
	if (a->policy != b->policy)
		return 0;
	switch (a->policy) {
	case MPOL_DEFAULT:
		return 1;
	case MPOL_INTERLEAVE:
		return nodes_equal(a->v.nodes, b->v.nodes);
	case MPOL_PREFERRED:
		return a->v.preferred_node == b->v.preferred_node;
	case MPOL_BIND: {
		int i;
		for (i = 0; a->v.zonelist->zones[i]; i++)
			if (a->v.zonelist->zones[i] != b->v.zonelist->zones[i])
				return 0;
		return b->v.zonelist->zones[i] == NULL;
	}
	default:
		BUG();
		return 0;
	}
}

/* Slow path of a mpol destructor. */
void __mpol_free(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	if (p->policy == MPOL_BIND)
		kfree(p->v.zonelist);
	p->policy = MPOL_DEFAULT;
	kmem_cache_free(policy_cache, p);
}

/*
 * Hugetlb policy. Same as above, just works with node numbers instead of
 * zonelists.
 */

/* Find first node suitable for an allocation */
int mpol_first_node(struct vm_area_struct *vma, unsigned long addr)
{
	struct mempolicy *pol = get_vma_policy(current, vma, addr);

	switch (pol->policy) {
	case MPOL_DEFAULT:
		return numa_node_id();
	case MPOL_BIND:
		return pol->v.zonelist->zones[0]->zone_pgdat->node_id;
	case MPOL_INTERLEAVE:
		return interleave_nodes(pol);
	case MPOL_PREFERRED:
		return pol->v.preferred_node >= 0 ?
				pol->v.preferred_node : numa_node_id();
	}
	BUG();
	return 0;
}

/* Find secondary valid nodes for an allocation */
int mpol_node_valid(int nid, struct vm_area_struct *vma, unsigned long addr)
{
	struct mempolicy *pol = get_vma_policy(current, vma, addr);

	switch (pol->policy) {
	case MPOL_PREFERRED:
	case MPOL_DEFAULT:
	case MPOL_INTERLEAVE:
		return 1;
	case MPOL_BIND: {
		struct zone **z;
		for (z = pol->v.zonelist->zones; *z; z++)
			if ((*z)->zone_pgdat->node_id == nid)
				return 1;
		return 0;
	}
	default:
		BUG();
		return 0;
	}
}

/*
 * Shared memory backing store policy support.
 *
 * Remember policies even when nobody has shared memory mapped.
 * The policies are kept in Red-Black tree linked from the inode.
 * They are protected by the sp->lock spinlock, which should be held
 * for any accesses to the tree.
 */
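
/*
 * Illustrative sketch (hypothetical caller): a tmpfs-style filesystem
 * keeps a struct shared_policy in its per-inode info and resolves the
 * policy for a page cache index roughly like this:
 *
 *	struct mempolicy *pol;
 *
 *	pol = mpol_shared_policy_lookup(&info->policy, idx);
 *	// mpol_shared_policy_lookup() took a reference under sp->lock;
 *	// drop it with mpol_free() when the allocation is done.
 */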
10191da177e4SLinus Torvalds 
10201da177e4SLinus Torvalds /* lookup first element intersecting start-end */
10211da177e4SLinus Torvalds /* Caller holds sp->lock */
10221da177e4SLinus Torvalds static struct sp_node *
10231da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
10241da177e4SLinus Torvalds {
10251da177e4SLinus Torvalds 	struct rb_node *n = sp->root.rb_node;
10261da177e4SLinus Torvalds 
10271da177e4SLinus Torvalds 	while (n) {
10281da177e4SLinus Torvalds 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
10291da177e4SLinus Torvalds 
10301da177e4SLinus Torvalds 		if (start >= p->end)
10311da177e4SLinus Torvalds 			n = n->rb_right;
10321da177e4SLinus Torvalds 		else if (end <= p->start)
10331da177e4SLinus Torvalds 			n = n->rb_left;
10341da177e4SLinus Torvalds 		else
10351da177e4SLinus Torvalds 			break;
10361da177e4SLinus Torvalds 	}
10371da177e4SLinus Torvalds 	if (!n)
10381da177e4SLinus Torvalds 		return NULL;
10391da177e4SLinus Torvalds 	for (;;) {
10401da177e4SLinus Torvalds 		struct sp_node *w = NULL;
10411da177e4SLinus Torvalds 		struct rb_node *prev = rb_prev(n);
10421da177e4SLinus Torvalds 		if (!prev)
10431da177e4SLinus Torvalds 			break;
10441da177e4SLinus Torvalds 		w = rb_entry(prev, struct sp_node, nd);
10451da177e4SLinus Torvalds 		if (w->end <= start)
10461da177e4SLinus Torvalds 			break;
10471da177e4SLinus Torvalds 		n = prev;
10481da177e4SLinus Torvalds 	}
10491da177e4SLinus Torvalds 	return rb_entry(n, struct sp_node, nd);
10501da177e4SLinus Torvalds }
10511da177e4SLinus Torvalds 
10521da177e4SLinus Torvalds /* Insert a new shared policy into the list. */
10531da177e4SLinus Torvalds /* Caller holds sp->lock */
10541da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new)
10551da177e4SLinus Torvalds {
10561da177e4SLinus Torvalds 	struct rb_node **p = &sp->root.rb_node;
10571da177e4SLinus Torvalds 	struct rb_node *parent = NULL;
10581da177e4SLinus Torvalds 	struct sp_node *nd;
10591da177e4SLinus Torvalds 
10601da177e4SLinus Torvalds 	while (*p) {
10611da177e4SLinus Torvalds 		parent = *p;
10621da177e4SLinus Torvalds 		nd = rb_entry(parent, struct sp_node, nd);
10631da177e4SLinus Torvalds 		if (new->start < nd->start)
10641da177e4SLinus Torvalds 			p = &(*p)->rb_left;
10651da177e4SLinus Torvalds 		else if (new->end > nd->end)
10661da177e4SLinus Torvalds 			p = &(*p)->rb_right;
10671da177e4SLinus Torvalds 		else
10681da177e4SLinus Torvalds 			BUG();
10691da177e4SLinus Torvalds 	}
10701da177e4SLinus Torvalds 	rb_link_node(&new->nd, parent, p);
10711da177e4SLinus Torvalds 	rb_insert_color(&new->nd, &sp->root);
10721da177e4SLinus Torvalds 	PDprintk("inserting %lx-%lx: %d\n", new->start, new->end,
10731da177e4SLinus Torvalds 		 new->policy ? new->policy->policy : 0);
10741da177e4SLinus Torvalds }
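
/*
 * sp_insert() relies on the invariant that ranges in the tree never
 * overlap; shared_policy_replace() carves the new range out of any
 * old ones before inserting.  A node that is neither entirely to the
 * left nor entirely to the right of the one being examined would
 * overlap it, hence the BUG().
 */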
10751da177e4SLinus Torvalds 
10761da177e4SLinus Torvalds /* Find shared policy intersecting idx */
10771da177e4SLinus Torvalds struct mempolicy *
10781da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
10791da177e4SLinus Torvalds {
10801da177e4SLinus Torvalds 	struct mempolicy *pol = NULL;
10811da177e4SLinus Torvalds 	struct sp_node *sn;
10821da177e4SLinus Torvalds 
10831da177e4SLinus Torvalds 	if (!sp->root.rb_node)
10841da177e4SLinus Torvalds 		return NULL;
10851da177e4SLinus Torvalds 	spin_lock(&sp->lock);
10861da177e4SLinus Torvalds 	sn = sp_lookup(sp, idx, idx+1);
10871da177e4SLinus Torvalds 	if (sn) {
10881da177e4SLinus Torvalds 		mpol_get(sn->policy);
10891da177e4SLinus Torvalds 		pol = sn->policy;
10901da177e4SLinus Torvalds 	}
10911da177e4SLinus Torvalds 	spin_unlock(&sp->lock);
10921da177e4SLinus Torvalds 	return pol;
10931da177e4SLinus Torvalds }
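
/*
 * A minimal sketch of a caller, loosely modeled on how tmpfs resolves
 * the per-file policy for a fault (cf. shmem_get_policy() in
 * mm/shmem.c).  The helper below is illustrative only, not part of
 * this file.
 */
#if 0	/* example only */
static struct mempolicy *example_get_policy(struct shared_policy *sp,
					    unsigned long pgoff)
{
	/* The lookup takes a reference on the policy for us; the
	   caller must drop it again with mpol_free(). */
	return mpol_shared_policy_lookup(sp, pgoff);
}
#endif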
10941da177e4SLinus Torvalds 
10951da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n)
10961da177e4SLinus Torvalds {
10971da177e4SLinus Torvalds 	PDprintk("deleting %lx-%lx\n", n->start, n->end);
10981da177e4SLinus Torvalds 	rb_erase(&n->nd, &sp->root);
10991da177e4SLinus Torvalds 	mpol_free(n->policy);
11001da177e4SLinus Torvalds 	kmem_cache_free(sn_cache, n);
11011da177e4SLinus Torvalds }
11021da177e4SLinus Torvalds 
11031da177e4SLinus Torvalds static struct sp_node *
11041da177e4SLinus Torvalds sp_alloc(unsigned long start, unsigned long end, struct mempolicy *pol)
11051da177e4SLinus Torvalds {
11061da177e4SLinus Torvalds 	struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
11071da177e4SLinus Torvalds 
11081da177e4SLinus Torvalds 	if (!n)
11091da177e4SLinus Torvalds 		return NULL;
11101da177e4SLinus Torvalds 	n->start = start;
11111da177e4SLinus Torvalds 	n->end = end;
11121da177e4SLinus Torvalds 	mpol_get(pol);
11131da177e4SLinus Torvalds 	n->policy = pol;
11141da177e4SLinus Torvalds 	return n;
11151da177e4SLinus Torvalds }
11161da177e4SLinus Torvalds 
11171da177e4SLinus Torvalds /* Replace a policy range. */
11181da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
11191da177e4SLinus Torvalds 				 unsigned long end, struct sp_node *new)
11201da177e4SLinus Torvalds {
11211da177e4SLinus Torvalds 	struct sp_node *n, *new2 = NULL;
11221da177e4SLinus Torvalds 
11231da177e4SLinus Torvalds restart:
11241da177e4SLinus Torvalds 	spin_lock(&sp->lock);
11251da177e4SLinus Torvalds 	n = sp_lookup(sp, start, end);
11261da177e4SLinus Torvalds 	/* Take care of old policies in the same range. */
11271da177e4SLinus Torvalds 	while (n && n->start < end) {
11281da177e4SLinus Torvalds 		struct rb_node *next = rb_next(&n->nd);
11291da177e4SLinus Torvalds 		if (n->start >= start) {
11301da177e4SLinus Torvalds 			if (n->end <= end)
11311da177e4SLinus Torvalds 				sp_delete(sp, n);
11321da177e4SLinus Torvalds 			else
11331da177e4SLinus Torvalds 				n->start = end;
11341da177e4SLinus Torvalds 		} else {
11351da177e4SLinus Torvalds 			/* Old policy spanning whole new range. */
11361da177e4SLinus Torvalds 			if (n->end > end) {
11371da177e4SLinus Torvalds 				if (!new2) {
11381da177e4SLinus Torvalds 					spin_unlock(&sp->lock);
11391da177e4SLinus Torvalds 					new2 = sp_alloc(end, n->end, n->policy);
11401da177e4SLinus Torvalds 					if (!new2)
11411da177e4SLinus Torvalds 						return -ENOMEM;
11421da177e4SLinus Torvalds 					goto restart;
11431da177e4SLinus Torvalds 				}
11441da177e4SLinus Torvalds 				n->end = start;
11451da177e4SLinus Torvalds 				sp_insert(sp, new2);
11461da177e4SLinus Torvalds 				new2 = NULL;
11471da177e4SLinus Torvalds 				break;
11481da177e4SLinus Torvalds 			} else
11491da177e4SLinus Torvalds 				n->end = start;
11501da177e4SLinus Torvalds 		}
11511da177e4SLinus Torvalds 		if (!next)
11521da177e4SLinus Torvalds 			break;
11531da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
11541da177e4SLinus Torvalds 	}
11551da177e4SLinus Torvalds 	if (new)
11561da177e4SLinus Torvalds 		sp_insert(sp, new);
11571da177e4SLinus Torvalds 	spin_unlock(&sp->lock);
11581da177e4SLinus Torvalds 	if (new2) {
11591da177e4SLinus Torvalds 		mpol_free(new2->policy);
11601da177e4SLinus Torvalds 		kmem_cache_free(sn_cache, new2);
11611da177e4SLinus Torvalds 	}
11621da177e4SLinus Torvalds 	return 0;
11631da177e4SLinus Torvalds }
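
/*
 * The restart above is the usual "allocate outside the spinlock"
 * pattern: sp_alloc() allocates with GFP_KERNEL and may sleep, which
 * is not allowed under sp->lock.  Schematically (a sketch of the
 * pattern, not code from this file):
 *
 *	restart:
 *		spin_lock(&lock);
 *		if (must_split && !prealloc) {
 *			spin_unlock(&lock);
 *			prealloc = alloc_node(GFP_KERNEL);
 *			if (!prealloc)
 *				return -ENOMEM;
 *			goto restart;
 *		}
 *		... consume prealloc under the lock ...
 *		spin_unlock(&lock);
 *
 * A preallocation left unused (because the tree changed while the
 * lock was dropped) is freed after the unlock, as done for new2 here.
 */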
11641da177e4SLinus Torvalds 
11651da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info,
11661da177e4SLinus Torvalds 			struct vm_area_struct *vma, struct mempolicy *npol)
11671da177e4SLinus Torvalds {
11681da177e4SLinus Torvalds 	int err;
11691da177e4SLinus Torvalds 	struct sp_node *new = NULL;
11701da177e4SLinus Torvalds 	unsigned long sz = vma_pages(vma);
11711da177e4SLinus Torvalds 
11721da177e4SLinus Torvalds 	PDprintk("set_shared_policy %lx sz %lu %d %lx\n",
11731da177e4SLinus Torvalds 		 vma->vm_pgoff,
11741da177e4SLinus Torvalds 		 sz, npol ? npol->policy : -1,
1175dfcd3c0dSAndi Kleen 		 npol ? nodes_addr(npol->v.nodes)[0] : -1);
11761da177e4SLinus Torvalds 
11771da177e4SLinus Torvalds 	if (npol) {
11781da177e4SLinus Torvalds 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
11791da177e4SLinus Torvalds 		if (!new)
11801da177e4SLinus Torvalds 			return -ENOMEM;
11811da177e4SLinus Torvalds 	}
11821da177e4SLinus Torvalds 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
11831da177e4SLinus Torvalds 	if (err && new)
11841da177e4SLinus Torvalds 		kmem_cache_free(sn_cache, new);
11851da177e4SLinus Torvalds 	return err;
11861da177e4SLinus Torvalds }
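
/*
 * Hedged usage sketch, loosely modeled on shmem_set_policy() in
 * mm/shmem.c: a filesystem supporting per-file policies forwards an
 * mbind() on a mapped range to the inode's shared_policy.  The
 * EXAMPLE_I() accessor below is hypothetical.
 */
#if 0	/* example only */
static int example_set_policy(struct vm_area_struct *vma,
			      struct mempolicy *new)
{
	struct inode *inode = vma->vm_file->f_dentry->d_inode;

	return mpol_set_shared_policy(&EXAMPLE_I(inode)->policy, vma, new);
}
#endif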
11871da177e4SLinus Torvalds 
11881da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */
11891da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p)
11901da177e4SLinus Torvalds {
11911da177e4SLinus Torvalds 	struct sp_node *n;
11921da177e4SLinus Torvalds 	struct rb_node *next;
11931da177e4SLinus Torvalds 
11941da177e4SLinus Torvalds 	if (!p->root.rb_node)
11951da177e4SLinus Torvalds 		return;
11961da177e4SLinus Torvalds 	spin_lock(&p->lock);
11971da177e4SLinus Torvalds 	next = rb_first(&p->root);
11981da177e4SLinus Torvalds 	while (next) {
11991da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
12001da177e4SLinus Torvalds 		next = rb_next(&n->nd);
120190c5029eSAndi Kleen 		rb_erase(&n->nd, &p->root);
12021da177e4SLinus Torvalds 		mpol_free(n->policy);
12031da177e4SLinus Torvalds 		kmem_cache_free(sn_cache, n);
12041da177e4SLinus Torvalds 	}
12051da177e4SLinus Torvalds 	spin_unlock(&p->lock);
12061da177e4SLinus Torvalds }
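
/*
 * Note the teardown idiom above: rb_next() is fetched before the node
 * is erased and freed, so the walk stays valid while the tree is
 * dismantled.  New lookups cannot race here since the inode is being
 * deleted, but the lock is taken anyway for consistency.
 */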
12071da177e4SLinus Torvalds 
12081da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */
12091da177e4SLinus Torvalds void __init numa_policy_init(void)
12101da177e4SLinus Torvalds {
12111da177e4SLinus Torvalds 	policy_cache = kmem_cache_create("numa_policy",
12121da177e4SLinus Torvalds 					 sizeof(struct mempolicy),
12131da177e4SLinus Torvalds 					 0, SLAB_PANIC, NULL, NULL);
12141da177e4SLinus Torvalds 
12151da177e4SLinus Torvalds 	sn_cache = kmem_cache_create("shared_policy_node",
12161da177e4SLinus Torvalds 				     sizeof(struct sp_node),
12171da177e4SLinus Torvalds 				     0, SLAB_PANIC, NULL, NULL);
12181da177e4SLinus Torvalds 
12191da177e4SLinus Torvalds 	/* Set an interleaving policy for system init, so that the data
12201da177e4SLinus Torvalds 	   structures allocated at boot are not all placed on node zero. */
12211da177e4SLinus Torvalds 
12228bccd85fSChristoph Lameter 	if (do_set_mempolicy(MPOL_INTERLEAVE, &node_online_map))
12231da177e4SLinus Torvalds 		printk(KERN_ERR "numa_policy_init: interleaving failed\n");
12241da177e4SLinus Torvalds }
12251da177e4SLinus Torvalds 
12268bccd85fSChristoph Lameter /* Reset policy of current process to default */
12271da177e4SLinus Torvalds void numa_default_policy(void)
12281da177e4SLinus Torvalds {
12298bccd85fSChristoph Lameter 	do_set_mempolicy(MPOL_DEFAULT, NULL);
12301da177e4SLinus Torvalds }
123168860ec1SPaul Jackson 
123268860ec1SPaul Jackson /* Migrate a policy to a different set of nodes */
123368860ec1SPaul Jackson static void rebind_policy(struct mempolicy *pol, const nodemask_t *old,
123468860ec1SPaul Jackson 							const nodemask_t *new)
123568860ec1SPaul Jackson {
123668860ec1SPaul Jackson 	nodemask_t tmp;
123768860ec1SPaul Jackson 
123868860ec1SPaul Jackson 	if (!pol)
123968860ec1SPaul Jackson 		return;
124068860ec1SPaul Jackson 
124168860ec1SPaul Jackson 	switch (pol->policy) {
124268860ec1SPaul Jackson 	case MPOL_DEFAULT:
124368860ec1SPaul Jackson 		break;
124468860ec1SPaul Jackson 	case MPOL_INTERLEAVE:
124568860ec1SPaul Jackson 		nodes_remap(tmp, pol->v.nodes, *old, *new);
124668860ec1SPaul Jackson 		pol->v.nodes = tmp;
124768860ec1SPaul Jackson 		current->il_next = node_remap(current->il_next, *old, *new);
124868860ec1SPaul Jackson 		break;
124968860ec1SPaul Jackson 	case MPOL_PREFERRED:
125068860ec1SPaul Jackson 		pol->v.preferred_node = node_remap(pol->v.preferred_node,
125168860ec1SPaul Jackson 								*old, *new);
125268860ec1SPaul Jackson 		break;
125368860ec1SPaul Jackson 	case MPOL_BIND: {
125468860ec1SPaul Jackson 		nodemask_t nodes;
125568860ec1SPaul Jackson 		struct zone **z;
125668860ec1SPaul Jackson 		struct zonelist *zonelist;
125768860ec1SPaul Jackson 
125868860ec1SPaul Jackson 		nodes_clear(nodes);
125968860ec1SPaul Jackson 		for (z = pol->v.zonelist->zones; *z; z++)
126068860ec1SPaul Jackson 			node_set((*z)->zone_pgdat->node_id, nodes);
126168860ec1SPaul Jackson 		nodes_remap(tmp, nodes, *old, *new);
126268860ec1SPaul Jackson 		nodes = tmp;
126368860ec1SPaul Jackson 
126468860ec1SPaul Jackson 		zonelist = bind_zonelist(&nodes);
126568860ec1SPaul Jackson 
126668860ec1SPaul Jackson 		/* If no mem, then zonelist is NULL and we keep old zonelist.
126768860ec1SPaul Jackson 		 * If that old zonelist has no remaining mems_allowed nodes,
126868860ec1SPaul Jackson 		 * then zonelist_policy() will "FALL THROUGH" to MPOL_DEFAULT.
126968860ec1SPaul Jackson 		 */
127068860ec1SPaul Jackson 
127168860ec1SPaul Jackson 		if (zonelist) {
127268860ec1SPaul Jackson 			/* Good - got mem - substitute new zonelist */
127368860ec1SPaul Jackson 			kfree(pol->v.zonelist);
127468860ec1SPaul Jackson 			pol->v.zonelist = zonelist;
127568860ec1SPaul Jackson 		}
127668860ec1SPaul Jackson 		break;
127768860ec1SPaul Jackson 	}
127868860ec1SPaul Jackson 	default:
127968860ec1SPaul Jackson 		BUG();
128068860ec1SPaul Jackson 		break;
128168860ec1SPaul Jackson 	}
128268860ec1SPaul Jackson }
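
/*
 * Worked example of the remap: if an MPOL_INTERLEAVE policy uses
 * nodes {0,1} and the task moves from mems {0,1} to {2,3}, then
 * nodes_remap() maps the n-th set bit of the old mask to the n-th set
 * bit of the new one, so the policy becomes {2,3}.  For MPOL_BIND the
 * node set is first recovered from the zonelist, remapped the same
 * way, and a fresh zonelist is built from the result.
 */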
128368860ec1SPaul Jackson 
128468860ec1SPaul Jackson /*
128568860ec1SPaul Jackson  * Someone moved this task to different nodes.  Fixup mempolicies.
128668860ec1SPaul Jackson  *
128768860ec1SPaul Jackson  * TODO - fixup current->mm->vma and shmfs/tmpfs/hugetlbfs policies as well,
128868860ec1SPaul Jackson  * once we have a cpuset mechanism to mark which cpuset subtree is migrating.
128968860ec1SPaul Jackson  */
129068860ec1SPaul Jackson void numa_policy_rebind(const nodemask_t *old, const nodemask_t *new)
129168860ec1SPaul Jackson {
129268860ec1SPaul Jackson 	rebind_policy(current->mempolicy, old, new);
129368860ec1SPaul Jackson }
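
/*
 * The expected caller is the cpuset code (kernel/cpuset.c in this
 * era): when a task's cpuset is given a different mems_allowed, the
 * old and new nodemasks are passed here so the task's process policy
 * follows the move.
 */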
1294