/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a process counter
 *                is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead.
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case node -1 here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non-default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non-interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem, kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
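/*
 * Illustrative only -- not part of the original source: a minimal
 * userspace sketch of the policies described above, using the libnuma
 * syscall wrappers from <numaif.h>.  Node numbers are invented for the
 * example.
 *
 *	#include <numaif.h>
 *	#include <sys/mman.h>
 *
 *	unsigned long mask = 0x3;	// nodes 0 and 1
 *
 *	// process policy: interleave this task's allocations over nodes 0-1
 *	set_mempolicy(MPOL_INTERLEAVE, &mask, sizeof(mask) * 8);
 *
 *	// VMA policy: restrict one mapping to nodes 0-1, failing with
 *	// MPOL_MF_STRICT if existing pages already violate the policy
 *	void *buf = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	mbind(buf, 1 << 20, MPOL_BIND, &mask, sizeof(mask) * 8,
 *	      MPOL_MF_STRICT);
 *
 *	// back to the default local-allocation policy
 *	set_mempolicy(MPOL_DEFAULT, NULL, 0);
 */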
/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
#define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2)		/* Gather statistics */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
} mpol_ops[MPOL_MAX];

/* Check that the nodemask contains at least one populated zone */
static int is_valid_nodemask(const nodemask_t *nodemask)
{
	int nd, k;

	for_each_node_mask(nd, *nodemask) {
		struct zone *z;

		for (k = 0; k <= policy_zone; k++) {
			z = &NODE_DATA(nd)->node_zones[k];
			if (z->present_pages > 0)
				return 1;
		}
	}

	return 0;
}

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}
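/*
 * Worked example (illustrative, node numbers invented): with
 * MPOL_F_RELATIVE_NODES the user's mask is interpreted relative to the
 * set of nodes the task is actually allowed.  For orig = {0,2} and an
 * allowed set rel = {4,5,6} (weight 3), nodes_fold() first wraps orig
 * modulo 3, leaving {0,2}, and nodes_onto() then maps those positions
 * onto rel, yielding {4,6}.  A stray orig = {3} would fold to {0} and
 * land on {4}.
 */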
static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!nodes)
		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
	else if (nodes_empty(*nodes))
		return -EINVAL;			/* no allowed nodes */
	else
		pol->v.preferred_node = first_node(*nodes);
	return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!is_valid_nodemask(nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}
/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy.  mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.  But, we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy.  May also be called holding the mmap_semaphore for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
	if (pol == NULL)
		return 0;
	/* Check N_HIGH_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_HIGH_MEMORY]);

	VM_BUG_ON(!nodes);
	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
		nodes = NULL;	/* explicit local allocation */
	else {
		if (pol->flags & MPOL_F_RELATIVE_NODES)
			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
		else
			nodes_and(nsc->mask2, *nodes, nsc->mask1);

		if (mpol_store_user_nodemask(pol))
			pol->w.user_nodemask = *nodes;
		else
			pol->w.cpuset_mems_allowed =
						cpuset_current_mems_allowed;
	}

	if (nodes)
		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	else
		ret = mpol_ops[pol->mode].create(pol, NULL);
	return ret;
}
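/*
 * A minimal sketch of how the two construction phases combine,
 * mirroring what do_set_mempolicy() below actually does (illustrative
 * only):
 *
 *	NODEMASK_SCRATCH(scratch);
 *	struct mempolicy *new = mpol_new(MPOL_INTERLEAVE, 0, &nodes);
 *
 *	task_lock(current);		// protects mems_allowed/mempolicy
 *	err = mpol_set_nodemask(new, &nodes, scratch);
 *	task_unlock(current);
 *	NODEMASK_SCRATCH_FREE(scratch);
 */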
/*
 * This function just creates a new policy, does some checks and simple
 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : -1);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;	/* simply delete any existing policy */
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);
		}
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	return policy;
}

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
{
}

static void mpol_rebind_nodemask(struct mempolicy *pol,
				 const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed,
			    *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}

	pol->v.nodes = tmp;
	if (!node_isset(current->il_next, tmp)) {
		current->il_next = next_node(current->il_next, tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = first_node(tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = numa_node_id();
	}
}
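/*
 * Worked rebind example for mpol_rebind_nodemask() above (illustrative,
 * node numbers invented): a task interleaving over {1,2} was created
 * while its cpuset allowed {0-3}, and the cpuset then shrinks to {2,3}.
 *
 *	MPOL_F_STATIC_NODES:   {1,2} & {2,3}       -> interleave over {2}
 *	MPOL_F_RELATIVE_NODES: {1,2} folded/mapped -> interleave over {2,3}
 *	neither flag:          {1,2} remapped      -> interleave over {2,3}
 *
 * With neither flag the mask is remapped positionally from the old
 * cpuset to the new one; with MPOL_F_STATIC_NODES the user's absolute
 * mask is simply intersected with the new cpuset.
 */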
static void mpol_rebind_preferred(struct mempolicy *pol,
				  const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES) {
		int node = first_node(pol->w.user_nodemask);

		if (node_isset(node, *nodes)) {
			pol->v.preferred_node = node;
			pol->flags &= ~MPOL_F_LOCAL;
		} else
			pol->flags |= MPOL_F_LOCAL;
	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
		pol->v.preferred_node = first_node(tmp);
	} else if (!(pol->flags & MPOL_F_LOCAL)) {
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						   pol->w.cpuset_mems_allowed,
						   *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}
}

/* Migrate a policy to a different set of nodes */
static void mpol_rebind_policy(struct mempolicy *pol,
			       const nodemask_t *newmask)
{
	if (!pol)
		return;
	if (!mpol_store_user_nodemask(pol) &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;
	mpol_ops[pol->mode].rebind(pol, newmask);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */
void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
{
	mpol_rebind_policy(tsk->mempolicy, new);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
 */
void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new);
	up_write(&mm->mmap_sem);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_interleave,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,
	},
};

static void gather_stats(struct page *, void *, int pte_dirty);
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

/* Scan through pages checking if pages follow certain conditions. */
static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pte_t *orig_pte;
	pte_t *pte;
	spinlock_t *ptl;

	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		struct page *page;
		int nid;

		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * vm_normal_page() filters out zero pages, but there might
		 * still be PageReserved pages to skip, perhaps in a VDSO.
		 * And we cannot move PageKsm pages sensibly or safely yet.
		 */
		if (PageReserved(page) || PageKsm(page))
			continue;
		nid = page_to_nid(page);
		if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
			continue;

		if (flags & MPOL_MF_STATS)
			gather_stats(page, private, pte_dirty(*pte));
		else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
			migrate_page_add(page, private, flags);
		else
			break;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return addr != end;
}
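/*
 * Note on the MPOL_MF_INVERT test above (explanatory, not in the
 * original comments): the walk skips a page when
 *
 *	node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT)
 *
 * so without MPOL_MF_INVERT it acts on pages that ARE on the given
 * nodes (e.g. migrate_to_node() passing a single source node), while
 * do_mbind() sets MPOL_MF_INVERT to act on pages that are NOT on the
 * requested nodes, i.e. the misplaced ones.
 */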
static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		if (check_pte_range(vma, pmd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		if (check_pmd_range(vma, pud, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static inline int check_pgd_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		if (check_pud_range(vma, pgd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pgd++, addr = next, addr != end);
	return 0;
}
/*
 * Check if all pages in a range are on a set of nodes.
 * If pagelist != NULL then isolate pages from the LRU and
 * put them on the pagelist.
 */
static struct vm_area_struct *
check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		const nodemask_t *nodes, unsigned long flags, void *private)
{
	int err;
	struct vm_area_struct *first, *vma, *prev;

	first = find_vma(mm, start);
	if (!first)
		return ERR_PTR(-EFAULT);
	prev = NULL;
	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
			if (!vma->vm_next && vma->vm_end < end)
				return ERR_PTR(-EFAULT);
			if (prev && prev->vm_end < vma->vm_start)
				return ERR_PTR(-EFAULT);
		}
		if (!is_vm_hugetlb_page(vma) &&
		    ((flags & MPOL_MF_STRICT) ||
		     ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
		      vma_migratable(vma)))) {
			unsigned long endvma = vma->vm_end;

			if (endvma > end)
				endvma = end;
			if (vma->vm_start > start)
				start = vma->vm_start;
			err = check_pgd_range(vma, start, endvma, nodes,
					      flags, private);
			if (err) {
				first = ERR_PTR(err);
				break;
			}
		}
		prev = vma;
	}
	return first;
}
/* Apply policy to a single VMA */
static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
{
	int err = 0;
	struct mempolicy *old = vma->vm_policy;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	if (vma->vm_ops && vma->vm_ops->set_policy)
		err = vma->vm_ops->set_policy(vma, new);
	if (!err) {
		mpol_get(new);
		vma->vm_policy = new;
		mpol_put(old);
	}
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end, struct mempolicy *new_pol)
{
	struct vm_area_struct *next;
	struct vm_area_struct *prev;
	struct vm_area_struct *vma;
	int err = 0;
	pgoff_t pgoff;
	unsigned long vmstart;
	unsigned long vmend;

	vma = find_vma_prev(mm, start, &prev);
	if (!vma || vma->vm_start > start)
		return -EFAULT;

	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
		next = vma->vm_next;
		vmstart = max(start, vma->vm_start);
		vmend   = min(end, vma->vm_end);

		pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
				 vma->anon_vma, vma->vm_file, pgoff, new_pol);
		if (prev) {
			vma = prev;
			next = vma->vm_next;
			continue;
		}
		if (vma->vm_start != vmstart) {
			err = split_vma(vma->vm_mm, vma, vmstart, 1);
			if (err)
				goto out;
		}
		if (vma->vm_end != vmend) {
			err = split_vma(vma->vm_mm, vma, vmend, 0);
			if (err)
				goto out;
		}
		err = policy_vma(vma, new_pol);
		if (err)
			goto out;
	}

 out:
	return err;
}
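/*
 * Worked example (illustrative, addresses invented): with VMAs
 * [0x1000,0x4000) and [0x4000,0x8000), an mbind() over [0x2000,0x6000)
 * first tries vma_merge() for each overlapped piece; failing that, the
 * first VMA is split at 0x2000 and the second at 0x6000, and the new
 * policy is applied via policy_vma() to the two inner VMAs that now
 * exactly cover [0x2000,0x6000).
 */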
/*
 * Update task->flags PF_MEMPOLICY bit: set iff non-default
 * mempolicy.  Allows more rapid checking of this (combined perhaps
 * with other PF_* flag bits) on memory allocation hot code paths.
 *
 * If called from outside this file, the task 'p' should -only- be
 * a newly forked child not yet visible on the task list, because
 * manipulating the task flags of a visible task is not safe.
 *
 * The above limitation is why this routine has the funny name
 * mpol_fix_fork_child_flag().
 *
 * It is also safe to call this with a task pointer of current,
 * which the static wrapper mpol_set_task_struct_flag() does,
 * for use within this file.
 */
void mpol_fix_fork_child_flag(struct task_struct *p)
{
	if (p->mempolicy)
		p->flags |= PF_MEMPOLICY;
	else
		p->flags &= ~PF_MEMPOLICY;
}

static void mpol_set_task_struct_flag(void)
{
	mpol_fix_fork_child_flag(current);
}
/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new, *old;
	struct mm_struct *mm = current->mm;
	NODEMASK_SCRATCH(scratch);
	int ret;

	if (!scratch)
		return -ENOMEM;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new)) {
		ret = PTR_ERR(new);
		goto out;
	}
	/*
	 * prevent changing our mempolicy while show_numa_maps()
	 * is using it.
	 * Note:  do_set_mempolicy() can be called at init time
	 * with no 'mm'.
	 */
	if (mm)
		down_write(&mm->mmap_sem);
	task_lock(current);
	ret = mpol_set_nodemask(new, nodes, scratch);
	if (ret) {
		task_unlock(current);
		if (mm)
			up_write(&mm->mmap_sem);
		mpol_put(new);
		goto out;
	}
	old = current->mempolicy;
	current->mempolicy = new;
	mpol_set_task_struct_flag();
	if (new && new->mode == MPOL_INTERLEAVE &&
	    nodes_weight(new->v.nodes))
		current->il_next = first_node(new->v.nodes);
	task_unlock(current);
	if (mm)
		up_write(&mm->mmap_sem);

	mpol_put(old);
	ret = 0;
out:
	NODEMASK_SCRATCH_FREE(scratch);
	return ret;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (p == &default_policy)
		return;

	switch (p->mode) {
	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		if (!(p->flags & MPOL_F_LOCAL))
			node_set(p->v.preferred_node, *nodes);
		/* else return empty node mask for local allocation */
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p;
	int err;

	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	return err;
}
/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL.  We
		 * want to return MPOL_DEFAULT in this case.
		 */
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = current->il_next;
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	if (vma) {
		up_read(&current->mm->mmap_sem);
		vma = NULL;
	}

	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {
			*nmask = pol->w.user_nodemask;
		} else {
			task_lock(current);
			get_policy_nodemask(pol, nmask);
			task_unlock(current);
		}
	}

 out:
	mpol_cond_put(pol);
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}
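/*
 * Illustrative userspace queries for the above (not part of the
 * original source; uses the <numaif.h> wrappers):
 *
 *	int mode, node;
 *	unsigned long mask;
 *
 *	// policy and nodemask governing the mapping at addr
 *	get_mempolicy(&mode, &mask, sizeof(mask) * 8, addr, MPOL_F_ADDR);
 *
 *	// node backing the page at addr (faults it in if necessary)
 *	get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR);
 *
 *	// the set of nodes the task may allocate from
 *	get_mempolicy(NULL, &mask, sizeof(mask) * 8, NULL,
 *		      MPOL_F_MEMS_ALLOWED);
 */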
#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
		if (!isolate_lru_page(page)) {
			list_add_tail(&page->lru, pagelist);
			inc_zone_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));
		}
	}
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
	return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;

	nodes_clear(nmask);
	node_set(source, nmask);

	check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist))
		err = migrate_pages(&pagelist, new_node_page, dest, 0);

	return err;
}
/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	int busy = 0;
	int err;
	nodemask_t tmp;

	err = migrate_prep();
	if (err)
		return err;

	down_read(&mm->mmap_sem);

	err = migrate_vmas(mm, from_nodes, to_nodes, flags);
	if (err)
		goto out;

	/*
	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
	 * bit in 'tmp', and return that <source, dest> pair for migration.
	 * The pair of nodemasks 'to' and 'from' define the map.
	 *
	 * If no pair of bits is found that way, fallback to picking some
	 * pair of 'source' and 'dest' bits that are not the same.  If the
	 * 'source' and 'dest' bits are the same, this represents a node
	 * that will be migrating to itself, so no pages need move.
	 *
	 * If no bits are left in 'tmp', or if all remaining bits left
	 * in 'tmp' correspond to the same bit in 'to', return false
	 * (nothing left to migrate).
	 *
	 * This lets us pick a pair of nodes to migrate between, such that
	 * if possible the dest node is not already occupied by some other
	 * source node, minimizing the risk of overloading the memory on a
	 * node that would happen if we migrated incoming memory to a node
	 * before migrating outgoing memory source that same node.
	 *
	 * A single scan of tmp is sufficient.  As we go, we remember the
	 * most recent <s, d> pair that moved (s != d).  If we find a pair
	 * that not only moved, but what's better, moved to an empty slot
	 * (d is not set in tmp), then we break out then, with that pair.
	 * Otherwise when we finish scanning from_tmp, we at least have the
	 * most recent <s, d> pair that moved.  If we get all the way through
	 * the scan of tmp without finding any node that moved, much less
	 * moved to an empty node, then there is nothing left worth migrating.
	 */
	tmp = *from_nodes;
	while (!nodes_empty(tmp)) {
		int s, d;
		int source = -1;
		int dest = 0;

		for_each_node_mask(s, tmp) {
			d = node_remap(s, *from_nodes, *to_nodes);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
				break;
		}
		if (source == -1)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;
		if (err < 0)
			break;
	}
out:
	up_read(&mm->mmap_sem);
	if (err < 0)
		return err;
	return busy;

}
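/*
 * Worked example of the pair-picking loop above (illustrative, node
 * numbers invented): from = {0,1}, to = {1,2}, so node_remap() gives
 * 0 -> 1 and 1 -> 2.  On the first scan of tmp = {0,1}, the pair
 * <1,2> wins because dest 2 is not a remaining source, so node 1's
 * pages move to node 2 first; only then does <0,1> run.  Migrating
 * 0 -> 1 first would have piled node 0's pages onto a node whose own
 * pages had not yet left, risking exactly the overload the comment
 * above describes.
 */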
/*
 * Allocate a new page for page migration based on vma policy.
 * Start assuming that page is mapped by vma pointed to by @private.
 * Search forward from there, if not.  N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
 */
static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	struct vm_area_struct *vma = (struct vm_area_struct *)private;
	unsigned long uninitialized_var(address);

	while (vma) {
		address = page_address_in_vma(page, vma);
		if (address != -EFAULT)
			break;
		vma = vma->vm_next;
	}

	/*
	 * if !vma, alloc_page_vma() will use task or system default policy
	 */
	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
}
#else

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
}

int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	return -ENOSYS;
}

static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	return NULL;
}
#endif

static long do_mbind(unsigned long start, unsigned long len,
		     unsigned short mode, unsigned short mode_flags,
		     nodemask_t *nmask, unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	struct mempolicy *new;
	unsigned long end;
	int err;
	LIST_HEAD(pagelist);

	if (flags & ~(unsigned long)(MPOL_MF_STRICT |
				     MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
		return -EINVAL;
	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	if (start & ~PAGE_MASK)
		return -EINVAL;

	if (mode == MPOL_DEFAULT)
		flags &= ~MPOL_MF_STRICT;

	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
	end = start + len;

	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;

	new = mpol_new(mode, mode_flags, nmask);
	if (IS_ERR(new))
		return PTR_ERR(new);

	/*
	 * If we are using the default policy then operation
	 * on discontinuous address spaces is okay after all
	 */
	if (!new)
		flags |= MPOL_MF_DISCONTIG_OK;

	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
		 start, start + len, mode, mode_flags,
		 nmask ? nodes_addr(*nmask)[0] : -1);

	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {

		err = migrate_prep();
		if (err)
			goto mpol_out;
	}
	{
		NODEMASK_SCRATCH(scratch);
		if (scratch) {
			down_write(&mm->mmap_sem);
			task_lock(current);
			err = mpol_set_nodemask(new, nmask, scratch);
			task_unlock(current);
			if (err)
				up_write(&mm->mmap_sem);
		} else
			err = -ENOMEM;
		NODEMASK_SCRATCH_FREE(scratch);
	}
	if (err)
		goto mpol_out;

	vma = check_range(mm, start, end, nmask,
			  flags | MPOL_MF_INVERT, &pagelist);

	err = PTR_ERR(vma);
	if (!IS_ERR(vma)) {
		int nr_failed = 0;

		err = mbind_range(mm, start, end, new);

		if (!list_empty(&pagelist))
			nr_failed = migrate_pages(&pagelist, new_vma_page,
						  (unsigned long)vma, 0);

		if (!err && nr_failed && (flags & MPOL_MF_STRICT))
			err = -EIO;
	} else
		putback_lru_pages(&pagelist);

	up_write(&mm->mmap_sem);
 mpol_out:
	mpol_put(new);
	return err;
}

/*
 * User space interface with variable sized bitmaps for nodelists.
 */

/* Copy a node mask from user space. */
*/ 109839743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask, 10998bccd85fSChristoph Lameter unsigned long maxnode) 11008bccd85fSChristoph Lameter { 11018bccd85fSChristoph Lameter unsigned long k; 11028bccd85fSChristoph Lameter unsigned long nlongs; 11038bccd85fSChristoph Lameter unsigned long endmask; 11048bccd85fSChristoph Lameter 11058bccd85fSChristoph Lameter --maxnode; 11068bccd85fSChristoph Lameter nodes_clear(*nodes); 11078bccd85fSChristoph Lameter if (maxnode == 0 || !nmask) 11088bccd85fSChristoph Lameter return 0; 1109a9c930baSAndi Kleen if (maxnode > PAGE_SIZE*BITS_PER_BYTE) 1110636f13c1SChris Wright return -EINVAL; 11118bccd85fSChristoph Lameter 11128bccd85fSChristoph Lameter nlongs = BITS_TO_LONGS(maxnode); 11138bccd85fSChristoph Lameter if ((maxnode % BITS_PER_LONG) == 0) 11148bccd85fSChristoph Lameter endmask = ~0UL; 11158bccd85fSChristoph Lameter else 11168bccd85fSChristoph Lameter endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1; 11178bccd85fSChristoph Lameter 11188bccd85fSChristoph Lameter /* When the user specifies more nodes than supported, just check 11198bccd85fSChristoph Lameter that the unsupported part is all zero. */ 11208bccd85fSChristoph Lameter if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) { 11218bccd85fSChristoph Lameter if (nlongs > PAGE_SIZE/sizeof(long)) 11228bccd85fSChristoph Lameter return -EINVAL; 11238bccd85fSChristoph Lameter for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) { 11248bccd85fSChristoph Lameter unsigned long t; 11258bccd85fSChristoph Lameter if (get_user(t, nmask + k)) 11268bccd85fSChristoph Lameter return -EFAULT; 11278bccd85fSChristoph Lameter if (k == nlongs - 1) { 11288bccd85fSChristoph Lameter if (t & endmask) 11298bccd85fSChristoph Lameter return -EINVAL; 11308bccd85fSChristoph Lameter } else if (t) 11318bccd85fSChristoph Lameter return -EINVAL; 11328bccd85fSChristoph Lameter } 11338bccd85fSChristoph Lameter nlongs = BITS_TO_LONGS(MAX_NUMNODES); 11348bccd85fSChristoph Lameter endmask = ~0UL; 11358bccd85fSChristoph Lameter } 11368bccd85fSChristoph Lameter 11378bccd85fSChristoph Lameter if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long))) 11388bccd85fSChristoph Lameter return -EFAULT; 11398bccd85fSChristoph Lameter nodes_addr(*nodes)[nlongs-1] &= endmask; 11408bccd85fSChristoph Lameter return 0; 11418bccd85fSChristoph Lameter } 11428bccd85fSChristoph Lameter 11438bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */ 11448bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode, 11458bccd85fSChristoph Lameter nodemask_t *nodes) 11468bccd85fSChristoph Lameter { 11478bccd85fSChristoph Lameter unsigned long copy = ALIGN(maxnode-1, 64) / 8; 11488bccd85fSChristoph Lameter const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long); 11498bccd85fSChristoph Lameter 11508bccd85fSChristoph Lameter if (copy > nbytes) { 11518bccd85fSChristoph Lameter if (copy > PAGE_SIZE) 11528bccd85fSChristoph Lameter return -EINVAL; 11538bccd85fSChristoph Lameter if (clear_user((char __user *)mask + nbytes, copy - nbytes)) 11548bccd85fSChristoph Lameter return -EFAULT; 11558bccd85fSChristoph Lameter copy = nbytes; 11568bccd85fSChristoph Lameter } 11578bccd85fSChristoph Lameter return copy_to_user(mask, nodes_addr(*nodes), copy) ?
-EFAULT : 0; 11588bccd85fSChristoph Lameter } 11598bccd85fSChristoph Lameter 1160938bb9f5SHeiko Carstens SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len, 1161938bb9f5SHeiko Carstens unsigned long, mode, unsigned long __user *, nmask, 1162938bb9f5SHeiko Carstens unsigned long, maxnode, unsigned, flags) 11638bccd85fSChristoph Lameter { 11648bccd85fSChristoph Lameter nodemask_t nodes; 11658bccd85fSChristoph Lameter int err; 1166028fec41SDavid Rientjes unsigned short mode_flags; 11678bccd85fSChristoph Lameter 1168028fec41SDavid Rientjes mode_flags = mode & MPOL_MODE_FLAGS; 1169028fec41SDavid Rientjes mode &= ~MPOL_MODE_FLAGS; 1170a3b51e01SDavid Rientjes if (mode >= MPOL_MAX) 1171a3b51e01SDavid Rientjes return -EINVAL; 11724c50bc01SDavid Rientjes if ((mode_flags & MPOL_F_STATIC_NODES) && 11734c50bc01SDavid Rientjes (mode_flags & MPOL_F_RELATIVE_NODES)) 11744c50bc01SDavid Rientjes return -EINVAL; 11758bccd85fSChristoph Lameter err = get_nodes(&nodes, nmask, maxnode); 11768bccd85fSChristoph Lameter if (err) 11778bccd85fSChristoph Lameter return err; 1178028fec41SDavid Rientjes return do_mbind(start, len, mode, mode_flags, &nodes, flags); 11798bccd85fSChristoph Lameter } 11808bccd85fSChristoph Lameter 11818bccd85fSChristoph Lameter /* Set the process memory policy */ 1182938bb9f5SHeiko Carstens SYSCALL_DEFINE3(set_mempolicy, int, mode, unsigned long __user *, nmask, 1183938bb9f5SHeiko Carstens unsigned long, maxnode) 11848bccd85fSChristoph Lameter { 11858bccd85fSChristoph Lameter int err; 11868bccd85fSChristoph Lameter nodemask_t nodes; 1187028fec41SDavid Rientjes unsigned short flags; 11888bccd85fSChristoph Lameter 1189028fec41SDavid Rientjes flags = mode & MPOL_MODE_FLAGS; 1190028fec41SDavid Rientjes mode &= ~MPOL_MODE_FLAGS; 1191028fec41SDavid Rientjes if ((unsigned int)mode >= MPOL_MAX) 11928bccd85fSChristoph Lameter return -EINVAL; 11934c50bc01SDavid Rientjes if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES)) 11944c50bc01SDavid Rientjes return -EINVAL; 11958bccd85fSChristoph Lameter err = get_nodes(&nodes, nmask, maxnode); 11968bccd85fSChristoph Lameter if (err) 11978bccd85fSChristoph Lameter return err; 1198028fec41SDavid Rientjes return do_set_mempolicy(mode, flags, &nodes); 11998bccd85fSChristoph Lameter } 12008bccd85fSChristoph Lameter 1201938bb9f5SHeiko Carstens SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode, 1202938bb9f5SHeiko Carstens const unsigned long __user *, old_nodes, 1203938bb9f5SHeiko Carstens const unsigned long __user *, new_nodes) 120439743889SChristoph Lameter { 1205c69e8d9cSDavid Howells const struct cred *cred = current_cred(), *tcred; 120639743889SChristoph Lameter struct mm_struct *mm; 120739743889SChristoph Lameter struct task_struct *task; 120839743889SChristoph Lameter nodemask_t old; 120939743889SChristoph Lameter nodemask_t new; 121039743889SChristoph Lameter nodemask_t task_nodes; 121139743889SChristoph Lameter int err; 121239743889SChristoph Lameter 121339743889SChristoph Lameter err = get_nodes(&old, old_nodes, maxnode); 121439743889SChristoph Lameter if (err) 121539743889SChristoph Lameter return err; 121639743889SChristoph Lameter 121739743889SChristoph Lameter err = get_nodes(&new, new_nodes, maxnode); 121839743889SChristoph Lameter if (err) 121939743889SChristoph Lameter return err; 122039743889SChristoph Lameter 122139743889SChristoph Lameter /* Find the mm_struct */ 122239743889SChristoph Lameter read_lock(&tasklist_lock); 1223228ebcbeSPavel Emelyanov task = pid ? 
find_task_by_vpid(pid) : current; 122439743889SChristoph Lameter if (!task) { 122539743889SChristoph Lameter read_unlock(&tasklist_lock); 122639743889SChristoph Lameter return -ESRCH; 122739743889SChristoph Lameter } 122839743889SChristoph Lameter mm = get_task_mm(task); 122939743889SChristoph Lameter read_unlock(&tasklist_lock); 123039743889SChristoph Lameter 123139743889SChristoph Lameter if (!mm) 123239743889SChristoph Lameter return -EINVAL; 123339743889SChristoph Lameter 123439743889SChristoph Lameter /* 123539743889SChristoph Lameter * Check if this process has the right to modify the specified 123639743889SChristoph Lameter * process. The right exists if the process has administrative 12377f927fccSAlexey Dobriyan * capabilities, superuser privileges or the same 123839743889SChristoph Lameter * userid as the target process. 123939743889SChristoph Lameter */ 1240c69e8d9cSDavid Howells rcu_read_lock(); 1241c69e8d9cSDavid Howells tcred = __task_cred(task); 1242b6dff3ecSDavid Howells if (cred->euid != tcred->suid && cred->euid != tcred->uid && 1243b6dff3ecSDavid Howells cred->uid != tcred->suid && cred->uid != tcred->uid && 124474c00241SChristoph Lameter !capable(CAP_SYS_NICE)) { 1245c69e8d9cSDavid Howells rcu_read_unlock(); 124639743889SChristoph Lameter err = -EPERM; 124739743889SChristoph Lameter goto out; 124839743889SChristoph Lameter } 1249c69e8d9cSDavid Howells rcu_read_unlock(); 125039743889SChristoph Lameter 125139743889SChristoph Lameter task_nodes = cpuset_mems_allowed(task); 125239743889SChristoph Lameter /* Is the user allowed to access the target nodes? */ 125374c00241SChristoph Lameter if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_NICE)) { 125439743889SChristoph Lameter err = -EPERM; 125539743889SChristoph Lameter goto out; 125639743889SChristoph Lameter } 125739743889SChristoph Lameter 125837b07e41SLee Schermerhorn if (!nodes_subset(new, node_states[N_HIGH_MEMORY])) { 12593b42d28bSChristoph Lameter err = -EINVAL; 12603b42d28bSChristoph Lameter goto out; 12613b42d28bSChristoph Lameter } 12623b42d28bSChristoph Lameter 126386c3a764SDavid Quigley err = security_task_movememory(task); 126486c3a764SDavid Quigley if (err) 126586c3a764SDavid Quigley goto out; 126686c3a764SDavid Quigley 1267511030bcSChristoph Lameter err = do_migrate_pages(mm, &old, &new, 126874c00241SChristoph Lameter capable(CAP_SYS_NICE) ? 
MPOL_MF_MOVE_ALL : MPOL_MF_MOVE); 126939743889SChristoph Lameter out: 127039743889SChristoph Lameter mmput(mm); 127139743889SChristoph Lameter return err; 127239743889SChristoph Lameter } 127339743889SChristoph Lameter 127439743889SChristoph Lameter 12758bccd85fSChristoph Lameter /* Retrieve NUMA policy */ 1276938bb9f5SHeiko Carstens SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, 1277938bb9f5SHeiko Carstens unsigned long __user *, nmask, unsigned long, maxnode, 1278938bb9f5SHeiko Carstens unsigned long, addr, unsigned long, flags) 12798bccd85fSChristoph Lameter { 1280dbcb0f19SAdrian Bunk int err; 1281dbcb0f19SAdrian Bunk int uninitialized_var(pval); 12828bccd85fSChristoph Lameter nodemask_t nodes; 12838bccd85fSChristoph Lameter 12848bccd85fSChristoph Lameter if (nmask != NULL && maxnode < MAX_NUMNODES) 12858bccd85fSChristoph Lameter return -EINVAL; 12868bccd85fSChristoph Lameter 12878bccd85fSChristoph Lameter err = do_get_mempolicy(&pval, &nodes, addr, flags); 12888bccd85fSChristoph Lameter 12898bccd85fSChristoph Lameter if (err) 12908bccd85fSChristoph Lameter return err; 12918bccd85fSChristoph Lameter 12928bccd85fSChristoph Lameter if (policy && put_user(pval, policy)) 12938bccd85fSChristoph Lameter return -EFAULT; 12948bccd85fSChristoph Lameter 12958bccd85fSChristoph Lameter if (nmask) 12968bccd85fSChristoph Lameter err = copy_nodes_to_user(nmask, maxnode, &nodes); 12978bccd85fSChristoph Lameter 12988bccd85fSChristoph Lameter return err; 12998bccd85fSChristoph Lameter } 13008bccd85fSChristoph Lameter 13011da177e4SLinus Torvalds #ifdef CONFIG_COMPAT 13021da177e4SLinus Torvalds 13031da177e4SLinus Torvalds asmlinkage long compat_sys_get_mempolicy(int __user *policy, 13041da177e4SLinus Torvalds compat_ulong_t __user *nmask, 13051da177e4SLinus Torvalds compat_ulong_t maxnode, 13061da177e4SLinus Torvalds compat_ulong_t addr, compat_ulong_t flags) 13071da177e4SLinus Torvalds { 13081da177e4SLinus Torvalds long err; 13091da177e4SLinus Torvalds unsigned long __user *nm = NULL; 13101da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 13111da177e4SLinus Torvalds DECLARE_BITMAP(bm, MAX_NUMNODES); 13121da177e4SLinus Torvalds 13131da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 13141da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 13151da177e4SLinus Torvalds 13161da177e4SLinus Torvalds if (nmask) 13171da177e4SLinus Torvalds nm = compat_alloc_user_space(alloc_size); 13181da177e4SLinus Torvalds 13191da177e4SLinus Torvalds err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags); 13201da177e4SLinus Torvalds 13211da177e4SLinus Torvalds if (!err && nmask) { 13221da177e4SLinus Torvalds err = copy_from_user(bm, nm, alloc_size); 13231da177e4SLinus Torvalds /* ensure entire bitmap is zeroed */ 13241da177e4SLinus Torvalds err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8); 13251da177e4SLinus Torvalds err |= compat_put_bitmap(nmask, bm, nr_bits); 13261da177e4SLinus Torvalds } 13271da177e4SLinus Torvalds 13281da177e4SLinus Torvalds return err; 13291da177e4SLinus Torvalds } 13301da177e4SLinus Torvalds 13311da177e4SLinus Torvalds asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask, 13321da177e4SLinus Torvalds compat_ulong_t maxnode) 13331da177e4SLinus Torvalds { 13341da177e4SLinus Torvalds long err = 0; 13351da177e4SLinus Torvalds unsigned long __user *nm = NULL; 13361da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 13371da177e4SLinus Torvalds DECLARE_BITMAP(bm, MAX_NUMNODES); 13381da177e4SLinus Torvalds 
13391da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 13401da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 13411da177e4SLinus Torvalds 13421da177e4SLinus Torvalds if (nmask) { 13431da177e4SLinus Torvalds err = compat_get_bitmap(bm, nmask, nr_bits); 13441da177e4SLinus Torvalds nm = compat_alloc_user_space(alloc_size); 13451da177e4SLinus Torvalds err |= copy_to_user(nm, bm, alloc_size); 13461da177e4SLinus Torvalds } 13471da177e4SLinus Torvalds 13481da177e4SLinus Torvalds if (err) 13491da177e4SLinus Torvalds return -EFAULT; 13501da177e4SLinus Torvalds 13511da177e4SLinus Torvalds return sys_set_mempolicy(mode, nm, nr_bits+1); 13521da177e4SLinus Torvalds } 13531da177e4SLinus Torvalds 13541da177e4SLinus Torvalds asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len, 13551da177e4SLinus Torvalds compat_ulong_t mode, compat_ulong_t __user *nmask, 13561da177e4SLinus Torvalds compat_ulong_t maxnode, compat_ulong_t flags) 13571da177e4SLinus Torvalds { 13581da177e4SLinus Torvalds long err = 0; 13591da177e4SLinus Torvalds unsigned long __user *nm = NULL; 13601da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 1361dfcd3c0dSAndi Kleen nodemask_t bm; 13621da177e4SLinus Torvalds 13631da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 13641da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 13651da177e4SLinus Torvalds 13661da177e4SLinus Torvalds if (nmask) { 1367dfcd3c0dSAndi Kleen err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits); 13681da177e4SLinus Torvalds nm = compat_alloc_user_space(alloc_size); 1369dfcd3c0dSAndi Kleen err |= copy_to_user(nm, nodes_addr(bm), alloc_size); 13701da177e4SLinus Torvalds } 13711da177e4SLinus Torvalds 13721da177e4SLinus Torvalds if (err) 13731da177e4SLinus Torvalds return -EFAULT; 13741da177e4SLinus Torvalds 13751da177e4SLinus Torvalds return sys_mbind(start, len, mode, nm, nr_bits+1, flags); 13761da177e4SLinus Torvalds } 13771da177e4SLinus Torvalds 13781da177e4SLinus Torvalds #endif 13791da177e4SLinus Torvalds 1380480eccf9SLee Schermerhorn /* 1381480eccf9SLee Schermerhorn * get_vma_policy(@task, @vma, @addr) 1382480eccf9SLee Schermerhorn * @task - task for fallback if vma policy == default 1383480eccf9SLee Schermerhorn * @vma - virtual memory area whose policy is sought 1384480eccf9SLee Schermerhorn * @addr - address in @vma for shared policy lookup 1385480eccf9SLee Schermerhorn * 1386480eccf9SLee Schermerhorn * Returns effective policy for a VMA at specified address. 1387480eccf9SLee Schermerhorn * Falls back to @task or system default policy, as necessary. 138852cd3b07SLee Schermerhorn * Current or other task's task mempolicy and non-shared vma policies 138952cd3b07SLee Schermerhorn * are protected by the task's mmap_sem, which must be held for read by 139052cd3b07SLee Schermerhorn * the caller. 139152cd3b07SLee Schermerhorn * Shared policies [those marked as MPOL_F_SHARED] require an extra reference 139252cd3b07SLee Schermerhorn * count--added by the get_policy() vm_op, as appropriate--to protect against 139352cd3b07SLee Schermerhorn * freeing by another task. It is the caller's responsibility to free the 139452cd3b07SLee Schermerhorn * extra reference for shared policies. 
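 *
 * A hedged usage sketch, illustrative rather than quoted from a caller
 * (mmap_sem is assumed held for read, as required above):
 *
 *	pol = get_vma_policy(current, vma, addr);
 *	... allocate according to pol ...
 *	mpol_cond_put(pol);	- drops the ref only for MPOL_F_SHARED policies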
1395480eccf9SLee Schermerhorn */ 139648fce342SChristoph Lameter static struct mempolicy *get_vma_policy(struct task_struct *task, 139748fce342SChristoph Lameter struct vm_area_struct *vma, unsigned long addr) 13981da177e4SLinus Torvalds { 13996e21c8f1SChristoph Lameter struct mempolicy *pol = task->mempolicy; 14001da177e4SLinus Torvalds 14011da177e4SLinus Torvalds if (vma) { 1402480eccf9SLee Schermerhorn if (vma->vm_ops && vma->vm_ops->get_policy) { 1403ae4d8c16SLee Schermerhorn struct mempolicy *vpol = vma->vm_ops->get_policy(vma, 1404ae4d8c16SLee Schermerhorn addr); 1405ae4d8c16SLee Schermerhorn if (vpol) 1406ae4d8c16SLee Schermerhorn pol = vpol; 1407bea904d5SLee Schermerhorn } else if (vma->vm_policy) 14081da177e4SLinus Torvalds pol = vma->vm_policy; 14091da177e4SLinus Torvalds } 14101da177e4SLinus Torvalds if (!pol) 14111da177e4SLinus Torvalds pol = &default_policy; 14121da177e4SLinus Torvalds return pol; 14131da177e4SLinus Torvalds } 14141da177e4SLinus Torvalds 141552cd3b07SLee Schermerhorn /* 141652cd3b07SLee Schermerhorn * Return a nodemask representing a mempolicy for filtering nodes for 141752cd3b07SLee Schermerhorn * page allocation 141852cd3b07SLee Schermerhorn */ 141952cd3b07SLee Schermerhorn static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy) 142019770b32SMel Gorman { 142119770b32SMel Gorman /* Lower zones don't get a nodemask applied for MPOL_BIND */ 142245c4745aSLee Schermerhorn if (unlikely(policy->mode == MPOL_BIND) && 142319770b32SMel Gorman gfp_zone(gfp) >= policy_zone && 142419770b32SMel Gorman cpuset_nodemask_valid_mems_allowed(&policy->v.nodes)) 142519770b32SMel Gorman return &policy->v.nodes; 142619770b32SMel Gorman 142719770b32SMel Gorman return NULL; 142819770b32SMel Gorman } 142919770b32SMel Gorman 143052cd3b07SLee Schermerhorn /* Return a zonelist indicated by gfp for node representing a mempolicy */ 143152cd3b07SLee Schermerhorn static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy) 14321da177e4SLinus Torvalds { 1433fc36b8d3SLee Schermerhorn int nd = numa_node_id(); 14341da177e4SLinus Torvalds 143545c4745aSLee Schermerhorn switch (policy->mode) { 14361da177e4SLinus Torvalds case MPOL_PREFERRED: 1437fc36b8d3SLee Schermerhorn if (!(policy->flags & MPOL_F_LOCAL)) 14381da177e4SLinus Torvalds nd = policy->v.preferred_node; 14391da177e4SLinus Torvalds break; 14401da177e4SLinus Torvalds case MPOL_BIND: 144119770b32SMel Gorman /* 144252cd3b07SLee Schermerhorn * Normally, MPOL_BIND allocations are node-local within the 144352cd3b07SLee Schermerhorn * allowed nodemask. However, if __GFP_THISNODE is set and the 14446eb27e1fSBob Liu * current node isn't part of the mask, we use the zonelist for 144552cd3b07SLee Schermerhorn * the first node in the mask instead. 
144619770b32SMel Gorman */ 144719770b32SMel Gorman if (unlikely(gfp & __GFP_THISNODE) && 144819770b32SMel Gorman unlikely(!node_isset(nd, policy->v.nodes))) 144919770b32SMel Gorman nd = first_node(policy->v.nodes); 145019770b32SMel Gorman break; 14511da177e4SLinus Torvalds default: 14521da177e4SLinus Torvalds BUG(); 14531da177e4SLinus Torvalds } 14540e88460dSMel Gorman return node_zonelist(nd, gfp); 14551da177e4SLinus Torvalds } 14561da177e4SLinus Torvalds 14571da177e4SLinus Torvalds /* Do dynamic interleaving for a process */ 14581da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy) 14591da177e4SLinus Torvalds { 14601da177e4SLinus Torvalds unsigned nid, next; 14611da177e4SLinus Torvalds struct task_struct *me = current; 14621da177e4SLinus Torvalds 14631da177e4SLinus Torvalds nid = me->il_next; 1464dfcd3c0dSAndi Kleen next = next_node(nid, policy->v.nodes); 14651da177e4SLinus Torvalds if (next >= MAX_NUMNODES) 1466dfcd3c0dSAndi Kleen next = first_node(policy->v.nodes); 1467f5b087b5SDavid Rientjes if (next < MAX_NUMNODES) 14681da177e4SLinus Torvalds me->il_next = next; 14691da177e4SLinus Torvalds return nid; 14701da177e4SLinus Torvalds } 14711da177e4SLinus Torvalds 1472dc85da15SChristoph Lameter /* 1473dc85da15SChristoph Lameter * Depending on the memory policy provide a node from which to allocate the 1474dc85da15SChristoph Lameter * next slab entry. 147552cd3b07SLee Schermerhorn * @policy must be protected from freeing by the caller. If @policy is 147652cd3b07SLee Schermerhorn * the current task's mempolicy, this protection is implicit, as only the 147752cd3b07SLee Schermerhorn * task can change its policy. The system default policy requires no 147852cd3b07SLee Schermerhorn * such protection. 1479dc85da15SChristoph Lameter */ 1480dc85da15SChristoph Lameter unsigned slab_node(struct mempolicy *policy) 1481dc85da15SChristoph Lameter { 1482fc36b8d3SLee Schermerhorn if (!policy || policy->flags & MPOL_F_LOCAL) 1483bea904d5SLee Schermerhorn return numa_node_id(); 1484765c4507SChristoph Lameter 1485bea904d5SLee Schermerhorn switch (policy->mode) { 1486bea904d5SLee Schermerhorn case MPOL_PREFERRED: 1487fc36b8d3SLee Schermerhorn /* 1488fc36b8d3SLee Schermerhorn * handled MPOL_F_LOCAL above 1489fc36b8d3SLee Schermerhorn */ 1490bea904d5SLee Schermerhorn return policy->v.preferred_node; 1491bea904d5SLee Schermerhorn 1492dc85da15SChristoph Lameter case MPOL_INTERLEAVE: 1493dc85da15SChristoph Lameter return interleave_nodes(policy); 1494dc85da15SChristoph Lameter 1495dd1a239fSMel Gorman case MPOL_BIND: { 1496dc85da15SChristoph Lameter /* 1497dc85da15SChristoph Lameter * Follow bind policy behavior and start allocation at the 1498dc85da15SChristoph Lameter * first node. 1499dc85da15SChristoph Lameter */ 150019770b32SMel Gorman struct zonelist *zonelist; 150119770b32SMel Gorman struct zone *zone; 150219770b32SMel Gorman enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL); 150319770b32SMel Gorman zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0]; 150419770b32SMel Gorman (void)first_zones_zonelist(zonelist, highest_zoneidx, 150519770b32SMel Gorman &policy->v.nodes, 150619770b32SMel Gorman &zone); 150719770b32SMel Gorman return zone->node; 1508dd1a239fSMel Gorman } 1509dc85da15SChristoph Lameter 1510dc85da15SChristoph Lameter default: 1511bea904d5SLee Schermerhorn BUG(); 1512dc85da15SChristoph Lameter } 1513dc85da15SChristoph Lameter } 1514dc85da15SChristoph Lameter 15151da177e4SLinus Torvalds /* Do static interleaving for a VMA with known offset.
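   Hedged example (the values are illustrative only): with
   pol->v.nodes = {0,2,5} we have nnodes == 3, so off == 7 gives
   target == 7 % 3 == 1 and the do/while walk below stops on the
   second set bit, i.e. node 2.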
*/ 15161da177e4SLinus Torvalds static unsigned offset_il_node(struct mempolicy *pol, 15171da177e4SLinus Torvalds struct vm_area_struct *vma, unsigned long off) 15181da177e4SLinus Torvalds { 1519dfcd3c0dSAndi Kleen unsigned nnodes = nodes_weight(pol->v.nodes); 1520f5b087b5SDavid Rientjes unsigned target; 15211da177e4SLinus Torvalds int c; 15221da177e4SLinus Torvalds int nid = -1; 15231da177e4SLinus Torvalds 1524f5b087b5SDavid Rientjes if (!nnodes) 1525f5b087b5SDavid Rientjes return numa_node_id(); 1526f5b087b5SDavid Rientjes target = (unsigned int)off % nnodes; 15271da177e4SLinus Torvalds c = 0; 15281da177e4SLinus Torvalds do { 1529dfcd3c0dSAndi Kleen nid = next_node(nid, pol->v.nodes); 15301da177e4SLinus Torvalds c++; 15311da177e4SLinus Torvalds } while (c <= target); 15321da177e4SLinus Torvalds return nid; 15331da177e4SLinus Torvalds } 15341da177e4SLinus Torvalds 15355da7ca86SChristoph Lameter /* Determine a node number for interleave */ 15365da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol, 15375da7ca86SChristoph Lameter struct vm_area_struct *vma, unsigned long addr, int shift) 15385da7ca86SChristoph Lameter { 15395da7ca86SChristoph Lameter if (vma) { 15405da7ca86SChristoph Lameter unsigned long off; 15415da7ca86SChristoph Lameter 15423b98b087SNishanth Aravamudan /* 15433b98b087SNishanth Aravamudan * for small pages, there is no difference between 15443b98b087SNishanth Aravamudan * shift and PAGE_SHIFT, so the bit-shift is safe. 15453b98b087SNishanth Aravamudan * for huge pages, since vm_pgoff is in units of small 15463b98b087SNishanth Aravamudan * pages, we need to shift off the always 0 bits to get 15473b98b087SNishanth Aravamudan * a useful offset. 15483b98b087SNishanth Aravamudan */ 15493b98b087SNishanth Aravamudan BUG_ON(shift < PAGE_SHIFT); 15503b98b087SNishanth Aravamudan off = vma->vm_pgoff >> (shift - PAGE_SHIFT); 15515da7ca86SChristoph Lameter off += (addr - vma->vm_start) >> shift; 15525da7ca86SChristoph Lameter return offset_il_node(pol, vma, off); 15535da7ca86SChristoph Lameter } else 15545da7ca86SChristoph Lameter return interleave_nodes(pol); 15555da7ca86SChristoph Lameter } 15565da7ca86SChristoph Lameter 155700ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS 1558480eccf9SLee Schermerhorn /* 1559480eccf9SLee Schermerhorn * huge_zonelist(@vma, @addr, @gfp_flags, @mpol) 1560480eccf9SLee Schermerhorn * @vma = virtual memory area whose policy is sought 1561480eccf9SLee Schermerhorn * @addr = address in @vma for shared policy lookup and interleave policy 1562480eccf9SLee Schermerhorn * @gfp_flags = for requested zone 156319770b32SMel Gorman * @mpol = pointer to mempolicy pointer for reference counted mempolicy 156419770b32SMel Gorman * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask 1565480eccf9SLee Schermerhorn * 156652cd3b07SLee Schermerhorn * Returns a zonelist suitable for a huge page allocation and a pointer 156752cd3b07SLee Schermerhorn * to the struct mempolicy for conditional unref after allocation. 156852cd3b07SLee Schermerhorn * If the effective policy is 'BIND', returns a pointer to the mempolicy's 156952cd3b07SLee Schermerhorn * @nodemask for filtering the zonelist.
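 *
 * Hedged usage sketch (the real callers live in mm/hugetlb.c; this
 * snippet is illustrative and htlb_alloc_mask is assumed):
 *
 *	struct mempolicy *mpol;
 *	nodemask_t *nodemask;
 *	struct zonelist *zl = huge_zonelist(vma, addr, htlb_alloc_mask,
 *					    &mpol, &nodemask);
 *	... walk zl, filtering on nodemask when it is non-NULL ...
 *	mpol_cond_put(mpol);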
1570480eccf9SLee Schermerhorn */ 1571396faf03SMel Gorman struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr, 157219770b32SMel Gorman gfp_t gfp_flags, struct mempolicy **mpol, 157319770b32SMel Gorman nodemask_t **nodemask) 15745da7ca86SChristoph Lameter { 1575480eccf9SLee Schermerhorn struct zonelist *zl; 15765da7ca86SChristoph Lameter 157752cd3b07SLee Schermerhorn *mpol = get_vma_policy(current, vma, addr); 157819770b32SMel Gorman *nodemask = NULL; /* assume !MPOL_BIND */ 15795da7ca86SChristoph Lameter 158052cd3b07SLee Schermerhorn if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) { 158152cd3b07SLee Schermerhorn zl = node_zonelist(interleave_nid(*mpol, vma, addr, 1582a5516438SAndi Kleen huge_page_shift(hstate_vma(vma))), gfp_flags); 158352cd3b07SLee Schermerhorn } else { 158452cd3b07SLee Schermerhorn zl = policy_zonelist(gfp_flags, *mpol); 158552cd3b07SLee Schermerhorn if ((*mpol)->mode == MPOL_BIND) 158652cd3b07SLee Schermerhorn *nodemask = &(*mpol)->v.nodes; 1587480eccf9SLee Schermerhorn } 1588480eccf9SLee Schermerhorn return zl; 15895da7ca86SChristoph Lameter } 159006808b08SLee Schermerhorn 159106808b08SLee Schermerhorn /* 159206808b08SLee Schermerhorn * init_nodemask_of_mempolicy 159306808b08SLee Schermerhorn * 159406808b08SLee Schermerhorn * If the current task's mempolicy is "default" [NULL], return 'false' 159506808b08SLee Schermerhorn * to indicate default policy. Otherwise, extract the policy nodemask 159606808b08SLee Schermerhorn * for 'bind' or 'interleave' policy into the argument nodemask, or 159706808b08SLee Schermerhorn * initialize the argument nodemask to contain the single node for 159806808b08SLee Schermerhorn * 'preferred' or 'local' policy and return 'true' to indicate presence 159906808b08SLee Schermerhorn * of non-default mempolicy. 160006808b08SLee Schermerhorn * 160106808b08SLee Schermerhorn * We don't bother with reference counting the mempolicy [mpol_get/put] 160206808b08SLee Schermerhorn * because the current task is examining its own mempolicy and a task's 160306808b08SLee Schermerhorn * mempolicy is only ever changed by the task itself. 160406808b08SLee Schermerhorn * 160506808b08SLee Schermerhorn * N.B., it is the caller's responsibility to free a returned nodemask.
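 *
 * Hedged caller sketch (modeled on the hugetlb sysctl path; the exact
 * fallback shown here is an assumption, not a quote from that code):
 *
 *	NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL);
 *	if (nodes_allowed && !init_nodemask_of_mempolicy(nodes_allowed)) {
 *		NODEMASK_FREE(nodes_allowed);
 *		nodes_allowed = &node_states[N_HIGH_MEMORY];
 *	}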
160606808b08SLee Schermerhorn */ 160706808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask) 160806808b08SLee Schermerhorn { 160906808b08SLee Schermerhorn struct mempolicy *mempolicy; 161006808b08SLee Schermerhorn int nid; 161106808b08SLee Schermerhorn 161206808b08SLee Schermerhorn if (!(mask && current->mempolicy)) 161306808b08SLee Schermerhorn return false; 161406808b08SLee Schermerhorn 161506808b08SLee Schermerhorn mempolicy = current->mempolicy; 161606808b08SLee Schermerhorn switch (mempolicy->mode) { 161706808b08SLee Schermerhorn case MPOL_PREFERRED: 161806808b08SLee Schermerhorn if (mempolicy->flags & MPOL_F_LOCAL) 161906808b08SLee Schermerhorn nid = numa_node_id(); 162006808b08SLee Schermerhorn else 162106808b08SLee Schermerhorn nid = mempolicy->v.preferred_node; 162206808b08SLee Schermerhorn init_nodemask_of_node(mask, nid); 162306808b08SLee Schermerhorn break; 162406808b08SLee Schermerhorn 162506808b08SLee Schermerhorn case MPOL_BIND: 162606808b08SLee Schermerhorn /* Fall through */ 162706808b08SLee Schermerhorn case MPOL_INTERLEAVE: 162806808b08SLee Schermerhorn *mask = mempolicy->v.nodes; 162906808b08SLee Schermerhorn break; 163006808b08SLee Schermerhorn 163106808b08SLee Schermerhorn default: 163206808b08SLee Schermerhorn BUG(); 163306808b08SLee Schermerhorn } 163406808b08SLee Schermerhorn 163506808b08SLee Schermerhorn return true; 163606808b08SLee Schermerhorn } 163700ac59adSChen, Kenneth W #endif 16385da7ca86SChristoph Lameter 16391da177e4SLinus Torvalds /* Allocate a page in interleaved policy. 16401da177e4SLinus Torvalds Own path because it needs to do special accounting. */ 1641662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, 1642662f3a0bSAndi Kleen unsigned nid) 16431da177e4SLinus Torvalds { 16441da177e4SLinus Torvalds struct zonelist *zl; 16451da177e4SLinus Torvalds struct page *page; 16461da177e4SLinus Torvalds 16470e88460dSMel Gorman zl = node_zonelist(nid, gfp); 16481da177e4SLinus Torvalds page = __alloc_pages(gfp, order, zl); 1649dd1a239fSMel Gorman if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0])) 1650ca889e6cSChristoph Lameter inc_zone_page_state(page, NUMA_INTERLEAVE_HIT); 16511da177e4SLinus Torvalds return page; 16521da177e4SLinus Torvalds } 16531da177e4SLinus Torvalds 16541da177e4SLinus Torvalds /** 16551da177e4SLinus Torvalds * alloc_page_vma - Allocate a page for a VMA. 16561da177e4SLinus Torvalds * 16571da177e4SLinus Torvalds * @gfp: 16581da177e4SLinus Torvalds * %GFP_USER user allocation. 16591da177e4SLinus Torvalds * %GFP_KERNEL kernel allocations, 16601da177e4SLinus Torvalds * %GFP_HIGHMEM highmem/user allocations, 16611da177e4SLinus Torvalds * %GFP_FS allocation should not call back into a file system. 16621da177e4SLinus Torvalds * %GFP_ATOMIC don't sleep. 16631da177e4SLinus Torvalds * 16641da177e4SLinus Torvalds * @vma: Pointer to VMA or NULL if not available. 16651da177e4SLinus Torvalds * @addr: Virtual Address of the allocation. Must be inside the VMA. 16661da177e4SLinus Torvalds * 16671da177e4SLinus Torvalds * This function allocates a page from the kernel page pool and applies 16681da177e4SLinus Torvalds * a NUMA policy associated with the VMA or the current process. 16691da177e4SLinus Torvalds * When VMA is not NULL caller must hold down_read on the mmap_sem of the 16701da177e4SLinus Torvalds * mm_struct of the VMA to prevent it from going away. 
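 * (A hedged illustration, not from this file: a fault path typically
 * does something like
 *	page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
 * with down_read(&mm->mmap_sem) already taken by the fault handler.)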
Should be used for 16711da177e4SLinus Torvalds * all allocations for pages that will be mapped into 16721da177e4SLinus Torvalds * user space. Returns NULL when no page can be allocated. 16731da177e4SLinus Torvalds * 16741da177e4SLinus Torvalds * Should be called with the mmap_sem of the vma held. 16751da177e4SLinus Torvalds */ 16761da177e4SLinus Torvalds struct page * 1677dd0fc66fSAl Viro alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr) 16781da177e4SLinus Torvalds { 16796e21c8f1SChristoph Lameter struct mempolicy *pol = get_vma_policy(current, vma, addr); 1680480eccf9SLee Schermerhorn struct zonelist *zl; 16811da177e4SLinus Torvalds 168245c4745aSLee Schermerhorn if (unlikely(pol->mode == MPOL_INTERLEAVE)) { 16831da177e4SLinus Torvalds unsigned nid; 16845da7ca86SChristoph Lameter 16855da7ca86SChristoph Lameter nid = interleave_nid(pol, vma, addr, PAGE_SHIFT); 168652cd3b07SLee Schermerhorn mpol_cond_put(pol); 16871da177e4SLinus Torvalds return alloc_page_interleave(gfp, 0, nid); 16881da177e4SLinus Torvalds } 168952cd3b07SLee Schermerhorn zl = policy_zonelist(gfp, pol); 169052cd3b07SLee Schermerhorn if (unlikely(mpol_needs_cond_ref(pol))) { 1691480eccf9SLee Schermerhorn /* 169252cd3b07SLee Schermerhorn * slow path: ref counted shared policy 1693480eccf9SLee Schermerhorn */ 169419770b32SMel Gorman struct page *page = __alloc_pages_nodemask(gfp, 0, 169552cd3b07SLee Schermerhorn zl, policy_nodemask(gfp, pol)); 1696f0be3d32SLee Schermerhorn __mpol_put(pol); 1697480eccf9SLee Schermerhorn return page; 1698480eccf9SLee Schermerhorn } 1699480eccf9SLee Schermerhorn /* 1700480eccf9SLee Schermerhorn * fast path: default or task policy 1701480eccf9SLee Schermerhorn */ 170252cd3b07SLee Schermerhorn return __alloc_pages_nodemask(gfp, 0, zl, policy_nodemask(gfp, pol)); 17031da177e4SLinus Torvalds } 17041da177e4SLinus Torvalds 17051da177e4SLinus Torvalds /** 17061da177e4SLinus Torvalds * alloc_pages_current - Allocate pages. 17071da177e4SLinus Torvalds * 17081da177e4SLinus Torvalds * @gfp: 17091da177e4SLinus Torvalds * %GFP_USER user allocation, 17101da177e4SLinus Torvalds * %GFP_KERNEL kernel allocation, 17111da177e4SLinus Torvalds * %GFP_HIGHMEM highmem allocation, 17121da177e4SLinus Torvalds * %GFP_FS don't call back into a file system. 17131da177e4SLinus Torvalds * %GFP_ATOMIC don't sleep. 17141da177e4SLinus Torvalds * @order: Power of two of allocation size in pages. 0 is a single page. 17151da177e4SLinus Torvalds * 17161da177e4SLinus Torvalds * Allocate a page from the kernel page pool. When not in 17171da177e4SLinus Torvalds * interrupt context, apply the current process' NUMA policy. 17181da177e4SLinus Torvalds * Returns NULL when no page can be allocated. 17191da177e4SLinus Torvalds * 1720cf2a473cSPaul Jackson * Don't call cpuset_update_task_memory_state() unless 17211da177e4SLinus Torvalds * 1) it's ok to take cpuset_sem (can WAIT), and 17221da177e4SLinus Torvalds * 2) allocating for current task (not interrupt).
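 *
 * Minimal hedged example (illustrative; on CONFIG_NUMA kernels this
 * function is normally reached through the alloc_pages() wrapper):
 *
 *	struct page *page = alloc_pages_current(GFP_KERNEL, 0);
 *	if (page)
 *		__free_pages(page, 0);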
17231da177e4SLinus Torvalds */ 1724dd0fc66fSAl Viro struct page *alloc_pages_current(gfp_t gfp, unsigned order) 17251da177e4SLinus Torvalds { 17261da177e4SLinus Torvalds struct mempolicy *pol = current->mempolicy; 17271da177e4SLinus Torvalds 17289b819d20SChristoph Lameter if (!pol || in_interrupt() || (gfp & __GFP_THISNODE)) 17291da177e4SLinus Torvalds pol = &default_policy; 173052cd3b07SLee Schermerhorn 173152cd3b07SLee Schermerhorn /* 173252cd3b07SLee Schermerhorn * No reference counting needed for current->mempolicy 173352cd3b07SLee Schermerhorn * nor system default_policy 173452cd3b07SLee Schermerhorn */ 173545c4745aSLee Schermerhorn if (pol->mode == MPOL_INTERLEAVE) 17361da177e4SLinus Torvalds return alloc_page_interleave(gfp, order, interleave_nodes(pol)); 173719770b32SMel Gorman return __alloc_pages_nodemask(gfp, order, 173852cd3b07SLee Schermerhorn policy_zonelist(gfp, pol), policy_nodemask(gfp, pol)); 17391da177e4SLinus Torvalds } 17401da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_pages_current); 17411da177e4SLinus Torvalds 17424225399aSPaul Jackson /* 1743846a16bfSLee Schermerhorn * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it 17444225399aSPaul Jackson * rebinds the mempolicy it's copying by calling mpol_rebind_policy() 17454225399aSPaul Jackson * with the mems_allowed returned by cpuset_mems_allowed(). This 17464225399aSPaul Jackson * keeps mempolicies cpuset relative after its cpuset moves. See 17474225399aSPaul Jackson * further kernel/cpuset.c update_nodemask(). 17484225399aSPaul Jackson */ 17494225399aSPaul Jackson 1750846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */ 1751846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old) 17521da177e4SLinus Torvalds { 17531da177e4SLinus Torvalds struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL); 17541da177e4SLinus Torvalds 17551da177e4SLinus Torvalds if (!new) 17561da177e4SLinus Torvalds return ERR_PTR(-ENOMEM); 175799ee4ca7SPaul E. McKenney rcu_read_lock(); 17584225399aSPaul Jackson if (current_cpuset_is_being_rebound()) { 17594225399aSPaul Jackson nodemask_t mems = cpuset_mems_allowed(current); 17604225399aSPaul Jackson mpol_rebind_policy(old, &mems); 17614225399aSPaul Jackson } 176299ee4ca7SPaul E. McKenney rcu_read_unlock(); 17631da177e4SLinus Torvalds *new = *old; 17641da177e4SLinus Torvalds atomic_set(&new->refcnt, 1); 17651da177e4SLinus Torvalds return new; 17661da177e4SLinus Torvalds } 17671da177e4SLinus Torvalds 176852cd3b07SLee Schermerhorn /* 176952cd3b07SLee Schermerhorn * If *frompol needs [has] an extra ref, copy *frompol to *tompol, 177052cd3b07SLee Schermerhorn * eliminate the MPOL_F_* flags that require conditional ref and 177152cd3b07SLee Schermerhorn * [NOTE!!!] drop the extra ref. Not safe to reference *frompol directly 177252cd3b07SLee Schermerhorn * after return. Use the returned value. 177352cd3b07SLee Schermerhorn * 177452cd3b07SLee Schermerhorn * Allows use of a mempolicy for, e.g., multiple allocations with a single 177552cd3b07SLee Schermerhorn * policy lookup, even if the policy needs/has extra ref on lookup. 177652cd3b07SLee Schermerhorn * shmem_readahead needs this.
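 *
 * Hedged sketch of the shmem-style pattern (illustrative; see mm/shmem.c
 * for the real caller):
 *
 *	struct mempolicy mpol, *spol;
 *	spol = mpol_cond_copy(&mpol,
 *			mpol_shared_policy_lookup(&info->policy, idx));
 *	- spol can now back several allocations with no further unref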
177752cd3b07SLee Schermerhorn */ 177852cd3b07SLee Schermerhorn struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol, 177952cd3b07SLee Schermerhorn struct mempolicy *frompol) 178052cd3b07SLee Schermerhorn { 178152cd3b07SLee Schermerhorn if (!mpol_needs_cond_ref(frompol)) 178252cd3b07SLee Schermerhorn return frompol; 178352cd3b07SLee Schermerhorn 178452cd3b07SLee Schermerhorn *tompol = *frompol; 178552cd3b07SLee Schermerhorn tompol->flags &= ~MPOL_F_SHARED; /* copy doesn't need unref */ 178652cd3b07SLee Schermerhorn __mpol_put(frompol); 178752cd3b07SLee Schermerhorn return tompol; 178852cd3b07SLee Schermerhorn } 178952cd3b07SLee Schermerhorn 17901da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */ 17911da177e4SLinus Torvalds int __mpol_equal(struct mempolicy *a, struct mempolicy *b) 17921da177e4SLinus Torvalds { 17931da177e4SLinus Torvalds if (!a || !b) 17941da177e4SLinus Torvalds return 0; 179545c4745aSLee Schermerhorn if (a->mode != b->mode) 17961da177e4SLinus Torvalds return 0; 179719800502SBob Liu if (a->flags != b->flags) 1798f5b087b5SDavid Rientjes return 0; 179919800502SBob Liu if (mpol_store_user_nodemask(a)) 180019800502SBob Liu if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask)) 180119800502SBob Liu return 0; 180219800502SBob Liu 180345c4745aSLee Schermerhorn switch (a->mode) { 180419770b32SMel Gorman case MPOL_BIND: 180519770b32SMel Gorman /* Fall through */ 18061da177e4SLinus Torvalds case MPOL_INTERLEAVE: 1807dfcd3c0dSAndi Kleen return nodes_equal(a->v.nodes, b->v.nodes); 18081da177e4SLinus Torvalds case MPOL_PREFERRED: 1809fc36b8d3SLee Schermerhorn return a->v.preferred_node == b->v.preferred_node && 1810fc36b8d3SLee Schermerhorn a->flags == b->flags; 18111da177e4SLinus Torvalds default: 18121da177e4SLinus Torvalds BUG(); 18131da177e4SLinus Torvalds return 0; 18141da177e4SLinus Torvalds } 18151da177e4SLinus Torvalds } 18161da177e4SLinus Torvalds 18171da177e4SLinus Torvalds /* 18181da177e4SLinus Torvalds * Shared memory backing store policy support. 18191da177e4SLinus Torvalds * 18201da177e4SLinus Torvalds * Remember policies even when nobody has shared memory mapped. 18211da177e4SLinus Torvalds * The policies are kept in a Red-Black tree linked from the inode. 18221da177e4SLinus Torvalds * They are protected by the sp->lock spinlock, which should be held 18231da177e4SLinus Torvalds * for any accesses to the tree.
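 *
 * Hedged illustration (values invented): after two mbind() calls on
 * disjoint ranges of one shared mapping, the tree could hold the
 * sp_nodes [0,4) -> interleave and [4,8) -> bind, each covering a
 * [start, end) range of page offsets that lookups walk by range.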
18241da177e4SLinus Torvalds */ 18251da177e4SLinus Torvalds 18261da177e4SLinus Torvalds /* lookup first element intersecting start-end */ 18271da177e4SLinus Torvalds /* Caller holds sp->lock */ 18281da177e4SLinus Torvalds static struct sp_node * 18291da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end) 18301da177e4SLinus Torvalds { 18311da177e4SLinus Torvalds struct rb_node *n = sp->root.rb_node; 18321da177e4SLinus Torvalds 18331da177e4SLinus Torvalds while (n) { 18341da177e4SLinus Torvalds struct sp_node *p = rb_entry(n, struct sp_node, nd); 18351da177e4SLinus Torvalds 18361da177e4SLinus Torvalds if (start >= p->end) 18371da177e4SLinus Torvalds n = n->rb_right; 18381da177e4SLinus Torvalds else if (end <= p->start) 18391da177e4SLinus Torvalds n = n->rb_left; 18401da177e4SLinus Torvalds else 18411da177e4SLinus Torvalds break; 18421da177e4SLinus Torvalds } 18431da177e4SLinus Torvalds if (!n) 18441da177e4SLinus Torvalds return NULL; 18451da177e4SLinus Torvalds for (;;) { 18461da177e4SLinus Torvalds struct sp_node *w = NULL; 18471da177e4SLinus Torvalds struct rb_node *prev = rb_prev(n); 18481da177e4SLinus Torvalds if (!prev) 18491da177e4SLinus Torvalds break; 18501da177e4SLinus Torvalds w = rb_entry(prev, struct sp_node, nd); 18511da177e4SLinus Torvalds if (w->end <= start) 18521da177e4SLinus Torvalds break; 18531da177e4SLinus Torvalds n = prev; 18541da177e4SLinus Torvalds } 18551da177e4SLinus Torvalds return rb_entry(n, struct sp_node, nd); 18561da177e4SLinus Torvalds } 18571da177e4SLinus Torvalds 18581da177e4SLinus Torvalds /* Insert a new shared policy into the list. */ 18591da177e4SLinus Torvalds /* Caller holds sp->lock */ 18601da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new) 18611da177e4SLinus Torvalds { 18621da177e4SLinus Torvalds struct rb_node **p = &sp->root.rb_node; 18631da177e4SLinus Torvalds struct rb_node *parent = NULL; 18641da177e4SLinus Torvalds struct sp_node *nd; 18651da177e4SLinus Torvalds 18661da177e4SLinus Torvalds while (*p) { 18671da177e4SLinus Torvalds parent = *p; 18681da177e4SLinus Torvalds nd = rb_entry(parent, struct sp_node, nd); 18691da177e4SLinus Torvalds if (new->start < nd->start) 18701da177e4SLinus Torvalds p = &(*p)->rb_left; 18711da177e4SLinus Torvalds else if (new->end > nd->end) 18721da177e4SLinus Torvalds p = &(*p)->rb_right; 18731da177e4SLinus Torvalds else 18741da177e4SLinus Torvalds BUG(); 18751da177e4SLinus Torvalds } 18761da177e4SLinus Torvalds rb_link_node(&new->nd, parent, p); 18771da177e4SLinus Torvalds rb_insert_color(&new->nd, &sp->root); 1878140d5a49SPaul Mundt pr_debug("inserting %lx-%lx: %d\n", new->start, new->end, 187945c4745aSLee Schermerhorn new->policy ? 
new->policy->mode : 0); 18801da177e4SLinus Torvalds } 18811da177e4SLinus Torvalds 18821da177e4SLinus Torvalds /* Find shared policy intersecting idx */ 18831da177e4SLinus Torvalds struct mempolicy * 18841da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx) 18851da177e4SLinus Torvalds { 18861da177e4SLinus Torvalds struct mempolicy *pol = NULL; 18871da177e4SLinus Torvalds struct sp_node *sn; 18881da177e4SLinus Torvalds 18891da177e4SLinus Torvalds if (!sp->root.rb_node) 18901da177e4SLinus Torvalds return NULL; 18911da177e4SLinus Torvalds spin_lock(&sp->lock); 18921da177e4SLinus Torvalds sn = sp_lookup(sp, idx, idx+1); 18931da177e4SLinus Torvalds if (sn) { 18941da177e4SLinus Torvalds mpol_get(sn->policy); 18951da177e4SLinus Torvalds pol = sn->policy; 18961da177e4SLinus Torvalds } 18971da177e4SLinus Torvalds spin_unlock(&sp->lock); 18981da177e4SLinus Torvalds return pol; 18991da177e4SLinus Torvalds } 19001da177e4SLinus Torvalds 19011da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n) 19021da177e4SLinus Torvalds { 1903140d5a49SPaul Mundt pr_debug("deleting %lx-%lx\n", n->start, n->end); 19041da177e4SLinus Torvalds rb_erase(&n->nd, &sp->root); 1905f0be3d32SLee Schermerhorn mpol_put(n->policy); 19061da177e4SLinus Torvalds kmem_cache_free(sn_cache, n); 19071da177e4SLinus Torvalds } 19081da177e4SLinus Torvalds 1909dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end, 1910dbcb0f19SAdrian Bunk struct mempolicy *pol) 19111da177e4SLinus Torvalds { 19121da177e4SLinus Torvalds struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL); 19131da177e4SLinus Torvalds 19141da177e4SLinus Torvalds if (!n) 19151da177e4SLinus Torvalds return NULL; 19161da177e4SLinus Torvalds n->start = start; 19171da177e4SLinus Torvalds n->end = end; 19181da177e4SLinus Torvalds mpol_get(pol); 1919aab0b102SLee Schermerhorn pol->flags |= MPOL_F_SHARED; /* for unref */ 19201da177e4SLinus Torvalds n->policy = pol; 19211da177e4SLinus Torvalds return n; 19221da177e4SLinus Torvalds } 19231da177e4SLinus Torvalds 19241da177e4SLinus Torvalds /* Replace a policy range. */ 19251da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start, 19261da177e4SLinus Torvalds unsigned long end, struct sp_node *new) 19271da177e4SLinus Torvalds { 19281da177e4SLinus Torvalds struct sp_node *n, *new2 = NULL; 19291da177e4SLinus Torvalds 19301da177e4SLinus Torvalds restart: 19311da177e4SLinus Torvalds spin_lock(&sp->lock); 19321da177e4SLinus Torvalds n = sp_lookup(sp, start, end); 19331da177e4SLinus Torvalds /* Take care of old policies in the same range. */ 19341da177e4SLinus Torvalds while (n && n->start < end) { 19351da177e4SLinus Torvalds struct rb_node *next = rb_next(&n->nd); 19361da177e4SLinus Torvalds if (n->start >= start) { 19371da177e4SLinus Torvalds if (n->end <= end) 19381da177e4SLinus Torvalds sp_delete(sp, n); 19391da177e4SLinus Torvalds else 19401da177e4SLinus Torvalds n->start = end; 19411da177e4SLinus Torvalds } else { 19421da177e4SLinus Torvalds /* Old policy spanning whole new range.
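   Hedged example (illustrative numbers): installing [2,4) inside an
   existing [0,8) node truncates the old node to [0,2) and inserts a
   new2 copy of its policy for [4,8).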
*/ 19431da177e4SLinus Torvalds if (n->end > end) { 19441da177e4SLinus Torvalds if (!new2) { 19451da177e4SLinus Torvalds spin_unlock(&sp->lock); 19461da177e4SLinus Torvalds new2 = sp_alloc(end, n->end, n->policy); 19471da177e4SLinus Torvalds if (!new2) 19481da177e4SLinus Torvalds return -ENOMEM; 19491da177e4SLinus Torvalds goto restart; 19501da177e4SLinus Torvalds } 19511da177e4SLinus Torvalds n->end = start; 19521da177e4SLinus Torvalds sp_insert(sp, new2); 19531da177e4SLinus Torvalds new2 = NULL; 19541da177e4SLinus Torvalds break; 19551da177e4SLinus Torvalds } else 19561da177e4SLinus Torvalds n->end = start; 19571da177e4SLinus Torvalds } 19581da177e4SLinus Torvalds if (!next) 19591da177e4SLinus Torvalds break; 19601da177e4SLinus Torvalds n = rb_entry(next, struct sp_node, nd); 19611da177e4SLinus Torvalds } 19621da177e4SLinus Torvalds if (new) 19631da177e4SLinus Torvalds sp_insert(sp, new); 19641da177e4SLinus Torvalds spin_unlock(&sp->lock); 19651da177e4SLinus Torvalds if (new2) { 1966f0be3d32SLee Schermerhorn mpol_put(new2->policy); 19671da177e4SLinus Torvalds kmem_cache_free(sn_cache, new2); 19681da177e4SLinus Torvalds } 19691da177e4SLinus Torvalds return 0; 19701da177e4SLinus Torvalds } 19711da177e4SLinus Torvalds 197271fe804bSLee Schermerhorn /** 197371fe804bSLee Schermerhorn * mpol_shared_policy_init - initialize shared policy for inode 197471fe804bSLee Schermerhorn * @sp: pointer to inode shared policy 197571fe804bSLee Schermerhorn * @mpol: struct mempolicy to install 197671fe804bSLee Schermerhorn * 197771fe804bSLee Schermerhorn * Install non-NULL @mpol in inode's shared policy rb-tree. 197871fe804bSLee Schermerhorn * On entry, the current task has a reference on a non-NULL @mpol. 197971fe804bSLee Schermerhorn * This must be released on exit. 19804bfc4495SKAMEZAWA Hiroyuki * This is called during get_inode(), so we can use GFP_KERNEL.
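 *
 * Hedged usage sketch (tmpfs inode setup is the expected caller;
 * shmem_get_sbmpol() lives in mm/shmem.c and is assumed here):
 *
 *	mpol_shared_policy_init(&info->policy, shmem_get_sbmpol(sbinfo));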
198171fe804bSLee Schermerhorn */ 198271fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) 19837339ff83SRobin Holt { 198458568d2aSMiao Xie int ret; 198558568d2aSMiao Xie 198671fe804bSLee Schermerhorn sp->root = RB_ROOT; /* empty tree == default mempolicy */ 198771fe804bSLee Schermerhorn spin_lock_init(&sp->lock); 19887339ff83SRobin Holt 198971fe804bSLee Schermerhorn if (mpol) { 19907339ff83SRobin Holt struct vm_area_struct pvma; 199171fe804bSLee Schermerhorn struct mempolicy *new; 19924bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 19937339ff83SRobin Holt 19944bfc4495SKAMEZAWA Hiroyuki if (!scratch) 19954bfc4495SKAMEZAWA Hiroyuki return; 199671fe804bSLee Schermerhorn /* contextualize the tmpfs mount point mempolicy */ 199771fe804bSLee Schermerhorn new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask); 199858568d2aSMiao Xie if (IS_ERR(new)) { 199971fe804bSLee Schermerhorn mpol_put(mpol); /* drop our ref on sb mpol */ 20004bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 200171fe804bSLee Schermerhorn return; /* no valid nodemask intersection */ 200258568d2aSMiao Xie } 200358568d2aSMiao Xie 200458568d2aSMiao Xie task_lock(current); 20054bfc4495SKAMEZAWA Hiroyuki ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch); 200658568d2aSMiao Xie task_unlock(current); 200758568d2aSMiao Xie mpol_put(mpol); /* drop our ref on sb mpol */ 200858568d2aSMiao Xie if (ret) { 20094bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 201058568d2aSMiao Xie mpol_put(new); 201158568d2aSMiao Xie return; 201258568d2aSMiao Xie } 201371fe804bSLee Schermerhorn 201471fe804bSLee Schermerhorn /* Create pseudo-vma that contains just the policy */ 20157339ff83SRobin Holt memset(&pvma, 0, sizeof(struct vm_area_struct)); 201671fe804bSLee Schermerhorn pvma.vm_end = TASK_SIZE; /* policy covers entire file */ 201771fe804bSLee Schermerhorn mpol_set_shared_policy(sp, &pvma, new); /* adds ref */ 201871fe804bSLee Schermerhorn mpol_put(new); /* drop initial ref */ 20194bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 20207339ff83SRobin Holt } 20217339ff83SRobin Holt } 20227339ff83SRobin Holt 20231da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info, 20241da177e4SLinus Torvalds struct vm_area_struct *vma, struct mempolicy *npol) 20251da177e4SLinus Torvalds { 20261da177e4SLinus Torvalds int err; 20271da177e4SLinus Torvalds struct sp_node *new = NULL; 20281da177e4SLinus Torvalds unsigned long sz = vma_pages(vma); 20291da177e4SLinus Torvalds 2030028fec41SDavid Rientjes pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n", 20311da177e4SLinus Torvalds vma->vm_pgoff, 203245c4745aSLee Schermerhorn sz, npol ? npol->mode : -1, 2033028fec41SDavid Rientjes npol ? npol->flags : -1, 2034dfcd3c0dSAndi Kleen npol ? nodes_addr(npol->v.nodes)[0] : -1); 20351da177e4SLinus Torvalds 20361da177e4SLinus Torvalds if (npol) { 20371da177e4SLinus Torvalds new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol); 20381da177e4SLinus Torvalds if (!new) 20391da177e4SLinus Torvalds return -ENOMEM; 20401da177e4SLinus Torvalds } 20411da177e4SLinus Torvalds err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new); 20421da177e4SLinus Torvalds if (err && new) 20431da177e4SLinus Torvalds kmem_cache_free(sn_cache, new); 20441da177e4SLinus Torvalds return err; 20451da177e4SLinus Torvalds } 20461da177e4SLinus Torvalds 20471da177e4SLinus Torvalds /* Free a backing policy store on inode delete. 
*/ 20481da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p) 20491da177e4SLinus Torvalds { 20501da177e4SLinus Torvalds struct sp_node *n; 20511da177e4SLinus Torvalds struct rb_node *next; 20521da177e4SLinus Torvalds 20531da177e4SLinus Torvalds if (!p->root.rb_node) 20541da177e4SLinus Torvalds return; 20551da177e4SLinus Torvalds spin_lock(&p->lock); 20561da177e4SLinus Torvalds next = rb_first(&p->root); 20571da177e4SLinus Torvalds while (next) { 20581da177e4SLinus Torvalds n = rb_entry(next, struct sp_node, nd); 20591da177e4SLinus Torvalds next = rb_next(&n->nd); 206090c5029eSAndi Kleen rb_erase(&n->nd, &p->root); 2061f0be3d32SLee Schermerhorn mpol_put(n->policy); 20621da177e4SLinus Torvalds kmem_cache_free(sn_cache, n); 20631da177e4SLinus Torvalds } 20641da177e4SLinus Torvalds spin_unlock(&p->lock); 20651da177e4SLinus Torvalds } 20661da177e4SLinus Torvalds 20671da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */ 20681da177e4SLinus Torvalds void __init numa_policy_init(void) 20691da177e4SLinus Torvalds { 2070b71636e2SPaul Mundt nodemask_t interleave_nodes; 2071b71636e2SPaul Mundt unsigned long largest = 0; 2072b71636e2SPaul Mundt int nid, prefer = 0; 2073b71636e2SPaul Mundt 20741da177e4SLinus Torvalds policy_cache = kmem_cache_create("numa_policy", 20751da177e4SLinus Torvalds sizeof(struct mempolicy), 207620c2df83SPaul Mundt 0, SLAB_PANIC, NULL); 20771da177e4SLinus Torvalds 20781da177e4SLinus Torvalds sn_cache = kmem_cache_create("shared_policy_node", 20791da177e4SLinus Torvalds sizeof(struct sp_node), 208020c2df83SPaul Mundt 0, SLAB_PANIC, NULL); 20811da177e4SLinus Torvalds 2082b71636e2SPaul Mundt /* 2083b71636e2SPaul Mundt * Set interleaving policy for system init. Interleaving is only 2084b71636e2SPaul Mundt * enabled across suitably sized nodes (default is >= 16MB), 2085b71636e2SPaul Mundt * falling back to the largest node if they're all smaller. 2086b71636e2SPaul Mundt */ 2087b71636e2SPaul Mundt nodes_clear(interleave_nodes); 208856bbd65dSChristoph Lameter for_each_node_state(nid, N_HIGH_MEMORY) { 2089b71636e2SPaul Mundt unsigned long total_pages = node_present_pages(nid); 20901da177e4SLinus Torvalds 2091b71636e2SPaul Mundt /* Preserve the largest node */ 2092b71636e2SPaul Mundt if (largest < total_pages) { 2093b71636e2SPaul Mundt largest = total_pages; 2094b71636e2SPaul Mundt prefer = nid; 2095b71636e2SPaul Mundt } 2096b71636e2SPaul Mundt 2097b71636e2SPaul Mundt /* Interleave this node?
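   With 4 KB pages, the 16 MB cutoff below works out to a node needing
   at least 4096 present pages to join the interleave set (illustrative
   arithmetic; PAGE_SHIFT varies by architecture).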
*/ 2098b71636e2SPaul Mundt if ((total_pages << PAGE_SHIFT) >= (16 << 20)) 2099b71636e2SPaul Mundt node_set(nid, interleave_nodes); 2100b71636e2SPaul Mundt } 2101b71636e2SPaul Mundt 2102b71636e2SPaul Mundt /* All too small, use the largest */ 2103b71636e2SPaul Mundt if (unlikely(nodes_empty(interleave_nodes))) 2104b71636e2SPaul Mundt node_set(prefer, interleave_nodes); 2105b71636e2SPaul Mundt 2106028fec41SDavid Rientjes if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes)) 21071da177e4SLinus Torvalds printk("numa_policy_init: interleaving failed\n"); 21081da177e4SLinus Torvalds } 21091da177e4SLinus Torvalds 21108bccd85fSChristoph Lameter /* Reset policy of current process to default */ 21111da177e4SLinus Torvalds void numa_default_policy(void) 21121da177e4SLinus Torvalds { 2113028fec41SDavid Rientjes do_set_mempolicy(MPOL_DEFAULT, 0, NULL); 21141da177e4SLinus Torvalds } 211568860ec1SPaul Jackson 21164225399aSPaul Jackson /* 2117095f1fc4SLee Schermerhorn * Parse and format mempolicy from/to strings 2118095f1fc4SLee Schermerhorn */ 2119095f1fc4SLee Schermerhorn 2120095f1fc4SLee Schermerhorn /* 2121fc36b8d3SLee Schermerhorn * "local" is pseudo-policy: MPOL_PREFERRED with MPOL_F_LOCAL flag 21223f226aa1SLee Schermerhorn * Used only for mpol_parse_str() and mpol_to_str() 21231a75a6c8SChristoph Lameter */ 212453f2556bSLee Schermerhorn #define MPOL_LOCAL (MPOL_INTERLEAVE + 1) 212515ad7cdcSHelge Deller static const char * const policy_types[] = 212653f2556bSLee Schermerhorn { "default", "prefer", "bind", "interleave", "local" }; 21271a75a6c8SChristoph Lameter 2128095f1fc4SLee Schermerhorn 2129095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS 2130095f1fc4SLee Schermerhorn /** 2131095f1fc4SLee Schermerhorn * mpol_parse_str - parse string to mempolicy 2132095f1fc4SLee Schermerhorn * @str: string containing mempolicy to parse 213371fe804bSLee Schermerhorn * @mpol: pointer to struct mempolicy pointer, returned on success. 213471fe804bSLee Schermerhorn * @no_context: flag whether to "contextualize" the mempolicy 2135095f1fc4SLee Schermerhorn * 2136095f1fc4SLee Schermerhorn * Format of input: 2137095f1fc4SLee Schermerhorn * <mode>[=<flags>][:<nodelist>] 2138095f1fc4SLee Schermerhorn * 213971fe804bSLee Schermerhorn * if @no_context is true, save the input nodemask in w.user_nodemask in 214071fe804bSLee Schermerhorn * the returned mempolicy. This will be used to "clone" the mempolicy in 214171fe804bSLee Schermerhorn * a specific context [cpuset] at a later time. Used to parse tmpfs mpol 214271fe804bSLee Schermerhorn * mount option. Note that if 'static' or 'relative' mode flags were 214371fe804bSLee Schermerhorn * specified, the input nodemask will already have been saved. Saving 214471fe804bSLee Schermerhorn * it again is redundant, but safe. 
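 *
 * Hedged examples of input this parser accepts (assuming the named
 * nodes exist and have memory):
 *	"interleave:0-3,8"
 *	"prefer=static:1"
 *	"bind=relative:0,2"
 *	"local"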
214471fe804bSLee Schermerhorn * 214671fe804bSLee Schermerhorn * On success, returns 0, else 1 2147095f1fc4SLee Schermerhorn */ 214871fe804bSLee Schermerhorn int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context) 2149095f1fc4SLee Schermerhorn { 215071fe804bSLee Schermerhorn struct mempolicy *new = NULL; 215171fe804bSLee Schermerhorn unsigned short uninitialized_var(mode); 215271fe804bSLee Schermerhorn unsigned short uninitialized_var(mode_flags); 215371fe804bSLee Schermerhorn nodemask_t nodes; 2154095f1fc4SLee Schermerhorn char *nodelist = strchr(str, ':'); 2155095f1fc4SLee Schermerhorn char *flags = strchr(str, '='); 2156095f1fc4SLee Schermerhorn int i; 2157095f1fc4SLee Schermerhorn int err = 1; 2158095f1fc4SLee Schermerhorn 2159095f1fc4SLee Schermerhorn if (nodelist) { 2160095f1fc4SLee Schermerhorn /* NUL-terminate mode or flags string */ 2161095f1fc4SLee Schermerhorn *nodelist++ = '\0'; 216271fe804bSLee Schermerhorn if (nodelist_parse(nodelist, nodes)) 2163095f1fc4SLee Schermerhorn goto out; 216471fe804bSLee Schermerhorn if (!nodes_subset(nodes, node_states[N_HIGH_MEMORY])) 2165095f1fc4SLee Schermerhorn goto out; 216671fe804bSLee Schermerhorn } else 216771fe804bSLee Schermerhorn nodes_clear(nodes); 216871fe804bSLee Schermerhorn 2169095f1fc4SLee Schermerhorn if (flags) 2170095f1fc4SLee Schermerhorn *flags++ = '\0'; /* terminate mode string */ 2171095f1fc4SLee Schermerhorn 21723f226aa1SLee Schermerhorn for (i = 0; i <= MPOL_LOCAL; i++) { 2173095f1fc4SLee Schermerhorn if (!strcmp(str, policy_types[i])) { 217471fe804bSLee Schermerhorn mode = i; 2175095f1fc4SLee Schermerhorn break; 2176095f1fc4SLee Schermerhorn } 2177095f1fc4SLee Schermerhorn } 21783f226aa1SLee Schermerhorn if (i > MPOL_LOCAL) 2179095f1fc4SLee Schermerhorn goto out; 2180095f1fc4SLee Schermerhorn 218171fe804bSLee Schermerhorn switch (mode) { 2182095f1fc4SLee Schermerhorn case MPOL_PREFERRED: 218371fe804bSLee Schermerhorn /* 218471fe804bSLee Schermerhorn * Insist on a nodelist of one node only 218571fe804bSLee Schermerhorn */ 2186095f1fc4SLee Schermerhorn if (nodelist) { 2187095f1fc4SLee Schermerhorn char *rest = nodelist; 2188095f1fc4SLee Schermerhorn while (isdigit(*rest)) 2189095f1fc4SLee Schermerhorn rest++; 2190926f2ae0SKOSAKI Motohiro if (*rest) 2191926f2ae0SKOSAKI Motohiro goto out; 2192095f1fc4SLee Schermerhorn } 2193095f1fc4SLee Schermerhorn break; 2194095f1fc4SLee Schermerhorn case MPOL_INTERLEAVE: 2195095f1fc4SLee Schermerhorn /* 2196095f1fc4SLee Schermerhorn * Default to online nodes with memory if no nodelist 2197095f1fc4SLee Schermerhorn */ 2198095f1fc4SLee Schermerhorn if (!nodelist) 219971fe804bSLee Schermerhorn nodes = node_states[N_HIGH_MEMORY]; 22003f226aa1SLee Schermerhorn break; 220171fe804bSLee Schermerhorn case MPOL_LOCAL: 22023f226aa1SLee Schermerhorn /* 220371fe804bSLee Schermerhorn * Don't allow a nodelist; mpol_new() checks flags 22043f226aa1SLee Schermerhorn */ 220571fe804bSLee Schermerhorn if (nodelist) 22063f226aa1SLee Schermerhorn goto out; 220771fe804bSLee Schermerhorn mode = MPOL_PREFERRED; 22083f226aa1SLee Schermerhorn break; 2209413b43deSRavikiran G Thirumalai case MPOL_DEFAULT: 2210413b43deSRavikiran G Thirumalai /* 2211413b43deSRavikiran G Thirumalai * Insist on an empty nodelist 2212413b43deSRavikiran G Thirumalai */ 2213413b43deSRavikiran G Thirumalai if (!nodelist) 2214413b43deSRavikiran G Thirumalai err = 0; 2215413b43deSRavikiran G Thirumalai goto out; 2216d69b2e63SKOSAKI Motohiro case MPOL_BIND: 221771fe804bSLee Schermerhorn /* 2218d69b2e63SKOSAKI Motohiro * Insist on a
218171fe804bSLee Schermerhorn 	switch (mode) {
2182095f1fc4SLee Schermerhorn 	case MPOL_PREFERRED:
218371fe804bSLee Schermerhorn 		/*
218471fe804bSLee Schermerhorn 		 * Insist on a nodelist of one node only
218571fe804bSLee Schermerhorn 		 */
2186095f1fc4SLee Schermerhorn 		if (nodelist) {
2187095f1fc4SLee Schermerhorn 			char *rest = nodelist;
2188095f1fc4SLee Schermerhorn 			while (isdigit(*rest))
2189095f1fc4SLee Schermerhorn 				rest++;
2190926f2ae0SKOSAKI Motohiro 			if (*rest)
2191926f2ae0SKOSAKI Motohiro 				goto out;
2192095f1fc4SLee Schermerhorn 		}
2193095f1fc4SLee Schermerhorn 		break;
2194095f1fc4SLee Schermerhorn 	case MPOL_INTERLEAVE:
2195095f1fc4SLee Schermerhorn 		/*
2196095f1fc4SLee Schermerhorn 		 * Default to online nodes with memory if no nodelist
2197095f1fc4SLee Schermerhorn 		 */
2198095f1fc4SLee Schermerhorn 		if (!nodelist)
219971fe804bSLee Schermerhorn 			nodes = node_states[N_HIGH_MEMORY];
22003f226aa1SLee Schermerhorn 		break;
220171fe804bSLee Schermerhorn 	case MPOL_LOCAL:
22023f226aa1SLee Schermerhorn 		/*
220371fe804bSLee Schermerhorn 		 * Don't allow a nodelist; mpol_new() checks flags
22043f226aa1SLee Schermerhorn 		 */
220571fe804bSLee Schermerhorn 		if (nodelist)
22063f226aa1SLee Schermerhorn 			goto out;
220771fe804bSLee Schermerhorn 		mode = MPOL_PREFERRED;
22083f226aa1SLee Schermerhorn 		break;
2209413b43deSRavikiran G Thirumalai 	case MPOL_DEFAULT:
2210413b43deSRavikiran G Thirumalai 		/*
2211413b43deSRavikiran G Thirumalai 		 * Insist on an empty nodelist
2212413b43deSRavikiran G Thirumalai 		 */
2213413b43deSRavikiran G Thirumalai 		if (!nodelist)
2214413b43deSRavikiran G Thirumalai 			err = 0;
2215413b43deSRavikiran G Thirumalai 		goto out;
2216d69b2e63SKOSAKI Motohiro 	case MPOL_BIND:
221771fe804bSLee Schermerhorn 		/*
2218d69b2e63SKOSAKI Motohiro 		 * Insist on a nodelist
221971fe804bSLee Schermerhorn 		 */
2220d69b2e63SKOSAKI Motohiro 		if (!nodelist)
2221d69b2e63SKOSAKI Motohiro 			goto out;
2222095f1fc4SLee Schermerhorn 	}
2223095f1fc4SLee Schermerhorn 
222471fe804bSLee Schermerhorn 	mode_flags = 0;
2225095f1fc4SLee Schermerhorn 	if (flags) {
2226095f1fc4SLee Schermerhorn 		/*
2227095f1fc4SLee Schermerhorn 		 * Currently, we only support two mutually exclusive
2228095f1fc4SLee Schermerhorn 		 * mode flags.
2229095f1fc4SLee Schermerhorn 		 */
2230095f1fc4SLee Schermerhorn 		if (!strcmp(flags, "static"))
223171fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_STATIC_NODES;
2232095f1fc4SLee Schermerhorn 		else if (!strcmp(flags, "relative"))
223371fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_RELATIVE_NODES;
2234095f1fc4SLee Schermerhorn 		else
2235926f2ae0SKOSAKI Motohiro 			goto out;
2236095f1fc4SLee Schermerhorn 	}
223771fe804bSLee Schermerhorn 
223871fe804bSLee Schermerhorn 	new = mpol_new(mode, mode_flags, &nodes);
223971fe804bSLee Schermerhorn 	if (IS_ERR(new))
2240926f2ae0SKOSAKI Motohiro 		goto out;
2241926f2ae0SKOSAKI Motohiro 
2242*e17f74afSLee Schermerhorn 	if (no_context) {
2243*e17f74afSLee Schermerhorn 		/* save for contextualization */
2244*e17f74afSLee Schermerhorn 		new->w.user_nodemask = nodes;
2245*e17f74afSLee Schermerhorn 	} else {
224658568d2aSMiao Xie 		int ret;
22474bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
22484bfc4495SKAMEZAWA Hiroyuki 		if (scratch) {
224958568d2aSMiao Xie 			task_lock(current);
22504bfc4495SKAMEZAWA Hiroyuki 			ret = mpol_set_nodemask(new, &nodes, scratch);
225158568d2aSMiao Xie 			task_unlock(current);
22524bfc4495SKAMEZAWA Hiroyuki 		} else
22534bfc4495SKAMEZAWA Hiroyuki 			ret = -ENOMEM;
22544bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
22554bfc4495SKAMEZAWA Hiroyuki 		if (ret) {
22564bfc4495SKAMEZAWA Hiroyuki 			mpol_put(new);
2257926f2ae0SKOSAKI Motohiro 			goto out;
2258926f2ae0SKOSAKI Motohiro 		}
2259926f2ae0SKOSAKI Motohiro 	}
2260926f2ae0SKOSAKI Motohiro 	err = 0;
226171fe804bSLee Schermerhorn 
2262095f1fc4SLee Schermerhorn out:
2263095f1fc4SLee Schermerhorn 	/* Restore string for error message */
2264095f1fc4SLee Schermerhorn 	if (nodelist)
2265095f1fc4SLee Schermerhorn 		*--nodelist = ':';
2266095f1fc4SLee Schermerhorn 	if (flags)
2267095f1fc4SLee Schermerhorn 		*--flags = '=';
226871fe804bSLee Schermerhorn 	if (!err)
226971fe804bSLee Schermerhorn 		*mpol = new;
2270095f1fc4SLee Schermerhorn 	return err;
2271095f1fc4SLee Schermerhorn }
2272095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */
2273095f1fc4SLee Schermerhorn 
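/*
 * Illustrative sketch (not part of the kernel proper): a caller parsing a
 * tmpfs-style "mpol=" mount option might use mpol_parse_str() like this.
 * The string value and variable names here are hypothetical.  Passing
 * no_context == 1 saves the user nodemask for later contextualization:
 *
 *	struct mempolicy *mpol;
 *	char str[] = "interleave=relative:0-3";
 *
 *	if (mpol_parse_str(str, &mpol, 1))
 *		return -EINVAL;
 *	(then install mpol, e.g. as the shared policy of a mapping)
 */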
227471fe804bSLee Schermerhorn /**
227571fe804bSLee Schermerhorn  * mpol_to_str - format a mempolicy structure for printing
227671fe804bSLee Schermerhorn  * @buffer:  to contain formatted mempolicy string
227771fe804bSLee Schermerhorn  * @maxlen:  length of @buffer
227871fe804bSLee Schermerhorn  * @pol:  pointer to mempolicy to be formatted
227971fe804bSLee Schermerhorn  * @no_context:  "context free" mempolicy - use nodemask in w.user_nodemask
228071fe804bSLee Schermerhorn  *
22811a75a6c8SChristoph Lameter  * Convert a mempolicy into a string.
22821a75a6c8SChristoph Lameter  * Returns the number of characters in buffer (if positive)
22831a75a6c8SChristoph Lameter  * or an error (negative)
22841a75a6c8SChristoph Lameter  */
228571fe804bSLee Schermerhorn int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
22861a75a6c8SChristoph Lameter {
22871a75a6c8SChristoph Lameter 	char *p = buffer;
22881a75a6c8SChristoph Lameter 	int l;
22891a75a6c8SChristoph Lameter 	nodemask_t nodes;
2290bea904d5SLee Schermerhorn 	unsigned short mode;
2291f5b087b5SDavid Rientjes 	unsigned short flags = pol ? pol->flags : 0;
22921a75a6c8SChristoph Lameter 
22932291990aSLee Schermerhorn 	/*
22942291990aSLee Schermerhorn 	 * Sanity check:  room for longest mode, flag and some nodes
22952291990aSLee Schermerhorn 	 */
22962291990aSLee Schermerhorn 	VM_BUG_ON(maxlen < strlen("interleave") + strlen("relative") + 16);
22972291990aSLee Schermerhorn 
2298bea904d5SLee Schermerhorn 	if (!pol || pol == &default_policy)
2299bea904d5SLee Schermerhorn 		mode = MPOL_DEFAULT;
2300bea904d5SLee Schermerhorn 	else
2301bea904d5SLee Schermerhorn 		mode = pol->mode;
2302bea904d5SLee Schermerhorn 
23031a75a6c8SChristoph Lameter 	switch (mode) {
23041a75a6c8SChristoph Lameter 	case MPOL_DEFAULT:
23051a75a6c8SChristoph Lameter 		nodes_clear(nodes);
23061a75a6c8SChristoph Lameter 		break;
23071a75a6c8SChristoph Lameter 
23081a75a6c8SChristoph Lameter 	case MPOL_PREFERRED:
23091a75a6c8SChristoph Lameter 		nodes_clear(nodes);
2310fc36b8d3SLee Schermerhorn 		if (flags & MPOL_F_LOCAL)
231153f2556bSLee Schermerhorn 			mode = MPOL_LOCAL;	/* pseudo-policy */
231253f2556bSLee Schermerhorn 		else
2313fc36b8d3SLee Schermerhorn 			node_set(pol->v.preferred_node, nodes);
23141a75a6c8SChristoph Lameter 		break;
23151a75a6c8SChristoph Lameter 
23161a75a6c8SChristoph Lameter 	case MPOL_BIND:
231719770b32SMel Gorman 		/* Fall through */
23181a75a6c8SChristoph Lameter 	case MPOL_INTERLEAVE:
231971fe804bSLee Schermerhorn 		if (no_context)
232071fe804bSLee Schermerhorn 			nodes = pol->w.user_nodemask;
232171fe804bSLee Schermerhorn 		else
23221a75a6c8SChristoph Lameter 			nodes = pol->v.nodes;
23231a75a6c8SChristoph Lameter 		break;
23241a75a6c8SChristoph Lameter 
23251a75a6c8SChristoph Lameter 	default:
23261a75a6c8SChristoph Lameter 		BUG();
23271a75a6c8SChristoph Lameter 	}
23281a75a6c8SChristoph Lameter 
23291a75a6c8SChristoph Lameter 	l = strlen(policy_types[mode]);
23301a75a6c8SChristoph Lameter 	if (buffer + maxlen < p + l + 1)
23311a75a6c8SChristoph Lameter 		return -ENOSPC;
23321a75a6c8SChristoph Lameter 
23331a75a6c8SChristoph Lameter 	strcpy(p, policy_types[mode]);
23341a75a6c8SChristoph Lameter 	p += l;
23351a75a6c8SChristoph Lameter 
2336fc36b8d3SLee Schermerhorn 	if (flags & MPOL_MODE_FLAGS) {
2337f5b087b5SDavid Rientjes 		if (buffer + maxlen < p + 2)
2338f5b087b5SDavid Rientjes 			return -ENOSPC;
2339f5b087b5SDavid Rientjes 		*p++ = '=';
2340f5b087b5SDavid Rientjes 
23412291990aSLee Schermerhorn 		/*
23422291990aSLee Schermerhorn 		 * Currently, the only defined flags are mutually exclusive
23432291990aSLee Schermerhorn 		 */
2344f5b087b5SDavid Rientjes 		if (flags & MPOL_F_STATIC_NODES)
23452291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "static");
23462291990aSLee Schermerhorn 		else if (flags & MPOL_F_RELATIVE_NODES)
23472291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "relative");
2348f5b087b5SDavid Rientjes 	}
2349f5b087b5SDavid Rientjes 
23501a75a6c8SChristoph Lameter 	if (!nodes_empty(nodes)) {
23511a75a6c8SChristoph Lameter 		if (buffer + maxlen < p + 2)
23521a75a6c8SChristoph Lameter 			return -ENOSPC;
2353095f1fc4SLee Schermerhorn 		*p++ = ':';
23541a75a6c8SChristoph Lameter 		p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
23551a75a6c8SChristoph Lameter 	}
23561a75a6c8SChristoph Lameter 	return p - buffer;
23571a75a6c8SChristoph Lameter }
23581a75a6c8SChristoph Lameter 
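/*
 * Illustrative sketch (the buffer name is hypothetical): mpol_to_str()
 * emits the same "mode[=flags][:nodelist]" format that mpol_parse_str()
 * accepts above, so the two round-trip.  For an MPOL_INTERLEAVE policy
 * with MPOL_F_STATIC_NODES over nodes 0-3 the result is
 * "interleave=static:0-3"; a preferred-local policy prints as "local":
 *
 *	char buf[64];
 *
 *	if (mpol_to_str(buf, sizeof(buf), pol, 0) > 0)
 *		printk(KERN_DEBUG "mempolicy: %s\n", buf);
 */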
23591a75a6c8SChristoph Lameter struct numa_maps {
23601a75a6c8SChristoph Lameter 	unsigned long pages;
23611a75a6c8SChristoph Lameter 	unsigned long anon;
2362397874dfSChristoph Lameter 	unsigned long active;
2363397874dfSChristoph Lameter 	unsigned long writeback;
23641a75a6c8SChristoph Lameter 	unsigned long mapcount_max;
2365397874dfSChristoph Lameter 	unsigned long dirty;
2366397874dfSChristoph Lameter 	unsigned long swapcache;
23671a75a6c8SChristoph Lameter 	unsigned long node[MAX_NUMNODES];
23681a75a6c8SChristoph Lameter };
23691a75a6c8SChristoph Lameter 
2370397874dfSChristoph Lameter static void gather_stats(struct page *page, void *private, int pte_dirty)
23711a75a6c8SChristoph Lameter {
23721a75a6c8SChristoph Lameter 	struct numa_maps *md = private;
23731a75a6c8SChristoph Lameter 	int count = page_mapcount(page);
23741a75a6c8SChristoph Lameter 
23751a75a6c8SChristoph Lameter 	md->pages++;
2376397874dfSChristoph Lameter 	if (pte_dirty || PageDirty(page))
2377397874dfSChristoph Lameter 		md->dirty++;
2378397874dfSChristoph Lameter 
2379397874dfSChristoph Lameter 	if (PageSwapCache(page))
2380397874dfSChristoph Lameter 		md->swapcache++;
2381397874dfSChristoph Lameter 
2382894bc310SLee Schermerhorn 	if (PageActive(page) || PageUnevictable(page))
2383397874dfSChristoph Lameter 		md->active++;
2384397874dfSChristoph Lameter 
2385397874dfSChristoph Lameter 	if (PageWriteback(page))
2386397874dfSChristoph Lameter 		md->writeback++;
23871a75a6c8SChristoph Lameter 
23881a75a6c8SChristoph Lameter 	if (PageAnon(page))
23891a75a6c8SChristoph Lameter 		md->anon++;
23901a75a6c8SChristoph Lameter 
2391397874dfSChristoph Lameter 	if (count > md->mapcount_max)
2392397874dfSChristoph Lameter 		md->mapcount_max = count;
2393397874dfSChristoph Lameter 
23941a75a6c8SChristoph Lameter 	md->node[page_to_nid(page)]++;
23951a75a6c8SChristoph Lameter }
23961a75a6c8SChristoph Lameter 
23977f709ed0SAndrew Morton #ifdef CONFIG_HUGETLB_PAGE
2398397874dfSChristoph Lameter static void check_huge_range(struct vm_area_struct *vma,
2399397874dfSChristoph Lameter 		unsigned long start, unsigned long end,
2400397874dfSChristoph Lameter 		struct numa_maps *md)
2401397874dfSChristoph Lameter {
2402397874dfSChristoph Lameter 	unsigned long addr;
2403397874dfSChristoph Lameter 	struct page *page;
2404a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
2405a5516438SAndi Kleen 	unsigned long sz = huge_page_size(h);
2406397874dfSChristoph Lameter 
2407a5516438SAndi Kleen 	for (addr = start; addr < end; addr += sz) {
2408a5516438SAndi Kleen 		pte_t *ptep = huge_pte_offset(vma->vm_mm,
2409a5516438SAndi Kleen 						addr & huge_page_mask(h));
2410397874dfSChristoph Lameter 		pte_t pte;
2411397874dfSChristoph Lameter 
2412397874dfSChristoph Lameter 		if (!ptep)
2413397874dfSChristoph Lameter 			continue;
2414397874dfSChristoph Lameter 
2415397874dfSChristoph Lameter 		pte = *ptep;
2416397874dfSChristoph Lameter 		if (pte_none(pte))
2417397874dfSChristoph Lameter 			continue;
2418397874dfSChristoph Lameter 
2419397874dfSChristoph Lameter 		page = pte_page(pte);
2420397874dfSChristoph Lameter 		if (!page)
2421397874dfSChristoph Lameter 			continue;
2422397874dfSChristoph Lameter 
2423397874dfSChristoph Lameter 		gather_stats(page, md, pte_dirty(*ptep));
2424397874dfSChristoph Lameter 	}
2425397874dfSChristoph Lameter }
24267f709ed0SAndrew Morton #else
24277f709ed0SAndrew Morton static inline void check_huge_range(struct vm_area_struct *vma,
24287f709ed0SAndrew Morton 		unsigned long start, unsigned long end,
24297f709ed0SAndrew Morton 		struct numa_maps *md)
24307f709ed0SAndrew Morton {
24317f709ed0SAndrew Morton }
24327f709ed0SAndrew Morton #endif
2433397874dfSChristoph Lameter 
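/*
 * Illustrative (hypothetical values): a line produced by show_numa_map()
 * below looks like
 *
 *	2aaaaac00000 interleave=static:0-3 anon=16 dirty=16 N0=4 N1=4 N2=4 N3=4
 *
 * i.e. the vma start address, the policy string formatted by
 * mpol_to_str(), then only those counters that are non-zero for the vma.
 */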
243453f2556bSLee Schermerhorn /*
243553f2556bSLee Schermerhorn  * Display pages allocated per node and memory policy via /proc.
243653f2556bSLee Schermerhorn  */
24371a75a6c8SChristoph Lameter int show_numa_map(struct seq_file *m, void *v)
24381a75a6c8SChristoph Lameter {
243999f89551SEric W. Biederman 	struct proc_maps_private *priv = m->private;
24401a75a6c8SChristoph Lameter 	struct vm_area_struct *vma = v;
24411a75a6c8SChristoph Lameter 	struct numa_maps *md;
2442397874dfSChristoph Lameter 	struct file *file = vma->vm_file;
2443397874dfSChristoph Lameter 	struct mm_struct *mm = vma->vm_mm;
2444480eccf9SLee Schermerhorn 	struct mempolicy *pol;
24451a75a6c8SChristoph Lameter 	int n;
24461a75a6c8SChristoph Lameter 	char buffer[50];
24471a75a6c8SChristoph Lameter 
2448397874dfSChristoph Lameter 	if (!mm)
24491a75a6c8SChristoph Lameter 		return 0;
24501a75a6c8SChristoph Lameter 
24511a75a6c8SChristoph Lameter 	md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
24521a75a6c8SChristoph Lameter 	if (!md)
24531a75a6c8SChristoph Lameter 		return 0;
24541a75a6c8SChristoph Lameter 
2455480eccf9SLee Schermerhorn 	pol = get_vma_policy(priv->task, vma, vma->vm_start);
245671fe804bSLee Schermerhorn 	mpol_to_str(buffer, sizeof(buffer), pol, 0);
245752cd3b07SLee Schermerhorn 	mpol_cond_put(pol);
24581a75a6c8SChristoph Lameter 
2459397874dfSChristoph Lameter 	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
2460397874dfSChristoph Lameter 
2461397874dfSChristoph Lameter 	if (file) {
2462397874dfSChristoph Lameter 		seq_printf(m, " file=");
2463c32c2f63SJan Blunck 		seq_path(m, &file->f_path, "\n\t= ");
2464397874dfSChristoph Lameter 	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
2465397874dfSChristoph Lameter 		seq_printf(m, " heap");
2466397874dfSChristoph Lameter 	} else if (vma->vm_start <= mm->start_stack &&
2467397874dfSChristoph Lameter 			vma->vm_end >= mm->start_stack) {
2468397874dfSChristoph Lameter 		seq_printf(m, " stack");
2469397874dfSChristoph Lameter 	}
2470397874dfSChristoph Lameter 
2471397874dfSChristoph Lameter 	if (is_vm_hugetlb_page(vma)) {
2472397874dfSChristoph Lameter 		check_huge_range(vma, vma->vm_start, vma->vm_end, md);
2473397874dfSChristoph Lameter 		seq_printf(m, " huge");
2474397874dfSChristoph Lameter 	} else {
2475397874dfSChristoph Lameter 		check_pgd_range(vma, vma->vm_start, vma->vm_end,
247656bbd65dSChristoph Lameter 			&node_states[N_HIGH_MEMORY], MPOL_MF_STATS, md);
2477397874dfSChristoph Lameter 	}
2478397874dfSChristoph Lameter 
2479397874dfSChristoph Lameter 	if (!md->pages)
2480397874dfSChristoph Lameter 		goto out;
24811a75a6c8SChristoph Lameter 
24821a75a6c8SChristoph Lameter 	if (md->anon)
24831a75a6c8SChristoph Lameter 		seq_printf(m, " anon=%lu", md->anon);
24841a75a6c8SChristoph Lameter 
2485397874dfSChristoph Lameter 	if (md->dirty)
2486397874dfSChristoph Lameter 		seq_printf(m, " dirty=%lu", md->dirty);
2487397874dfSChristoph Lameter 
2488397874dfSChristoph Lameter 	if (md->pages != md->anon && md->pages != md->dirty)
2489397874dfSChristoph Lameter 		seq_printf(m, " mapped=%lu", md->pages);
2490397874dfSChristoph Lameter 
2491397874dfSChristoph Lameter 	if (md->mapcount_max > 1)
2492397874dfSChristoph Lameter 		seq_printf(m, " mapmax=%lu", md->mapcount_max);
2493397874dfSChristoph Lameter 
2494397874dfSChristoph Lameter 	if (md->swapcache)
2495397874dfSChristoph Lameter 		seq_printf(m, " swapcache=%lu", md->swapcache);
2496397874dfSChristoph Lameter 
2497397874dfSChristoph Lameter 	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
2498397874dfSChristoph Lameter 		seq_printf(m, " active=%lu", md->active);
2499397874dfSChristoph Lameter 
2500397874dfSChristoph Lameter 	if (md->writeback)
2501397874dfSChristoph Lameter 		seq_printf(m, " writeback=%lu", md->writeback);
2502397874dfSChristoph Lameter 
250356bbd65dSChristoph Lameter 	for_each_node_state(n, N_HIGH_MEMORY)
25041a75a6c8SChristoph Lameter 		if (md->node[n])
25051a75a6c8SChristoph Lameter 			seq_printf(m, " N%d=%lu", n, md->node[n]);
2506397874dfSChristoph Lameter out:
25071a75a6c8SChristoph Lameter 	seq_putc(m, '\n');
25081a75a6c8SChristoph Lameter 	kfree(md);
25091a75a6c8SChristoph Lameter 
25101a75a6c8SChristoph Lameter 	if (m->count < m->size)
251199f89551SEric W. Biederman 		m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
25121a75a6c8SChristoph Lameter 	return 0;
25131a75a6c8SChristoph Lameter }
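/*
 * Illustrative (hypothetical) userspace consumer of the above, sketched
 * here for context only: the per-vma lines are read straight out of
 * /proc/<pid>/numa_maps, one line per vma:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char line[1024];
 *		FILE *f = fopen("/proc/self/numa_maps", "r");
 *
 *		if (!f)
 *			return 1;
 *		while (fgets(line, sizeof(line), f))
 *			fputs(line, stdout);
 *		fclose(f);
 *		return 0;
 *	}
 */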