/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave	Allocate memory interleaved over a set of nodes,
 *		with normal fallback if it fails.
 *		For VMA based allocations this interleaves based on the
 *		offset into the backing object or offset into the mapping
 *		for anonymous memory. For process policy a process counter
 *		is used.
 *
 * bind		Only allocate memory on a specific set of nodes,
 *		no fallback.
 *		FIXME: memory is allocated starting with the first node
 *		to the last. It would be better if bind would truly restrict
 *		the allocation to memory nodes instead.
 *
 * preferred	Try a specific node first before normal fallback.
 *		As a special case node -1 here means do the allocation
 *		on the local CPU. This is normally identical to default,
 *		but useful to set in a VMA when you have a non-default
 *		process policy.
 *
 * default	Allocate on the local node first, or when on a VMA
 *		use the process policy. This is what Linux always did
 *		in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non-interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem, kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
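 *
 * Example (illustrative, not part of the description above): a task
 * that wants its anonymous memory spread over nodes 0 and 1 can call
 * set_mempolicy(MPOL_INTERLEAVE, &mask, maxnode) from user space with
 * bits 0 and 1 set in mask; later page faults in that task then
 * allocate alternately from the two nodes.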
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always graceful with that.
*/

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
#define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2)		/* Gather statistics */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
} mpol_ops[MPOL_MAX];

/* Check that the nodemask contains at least one populated zone */
static int is_valid_nodemask(const nodemask_t *nodemask)
{
	int nd, k;

	/* Check that there is something useful in this mask */
	for_each_node_mask(nd, *nodemask) {
		struct zone *z;

		for (k = 0; k <= policy_zone; k++) {
			z = &NODE_DATA(nd)->node_zones[k];
			if (z->present_pages > 0)
				return 1;
		}
	}

	return 0;
}

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & (MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES);
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}
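
/*
 * Worked example for mpol_relative_nodemask() above (values made up):
 * with a user mask of {0,2,5} and rel = mems_allowed = {4,5,6}
 * (weight 3), nodes_fold() wraps the mask modulo 3 into {0,2}, and
 * nodes_onto() then maps bit n onto the nth node of rel, giving {4,6}.
 */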

static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!nodes)
		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
	else if (nodes_empty(*nodes))
		return -EINVAL;			/* no allowed nodes */
	else
		pol->v.preferred_node = first_node(*nodes);
	return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!is_valid_nodemask(nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

/* Create a new policy */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;
	nodemask_t cpuset_context_nmask;
	int ret;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : -1);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;	/* simply delete any existing policy */
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);
			nodes = NULL;	/* flag local alloc */
		}
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	if (nodes) {
		/*
		 * cpuset related setup doesn't apply to local allocation
		 */
		cpuset_update_task_memory_state();
		if (flags & MPOL_F_RELATIVE_NODES)
			mpol_relative_nodemask(&cpuset_context_nmask, nodes,
					       &cpuset_current_mems_allowed);
		else
			nodes_and(cpuset_context_nmask, *nodes,
				  cpuset_current_mems_allowed);
		if (mpol_store_user_nodemask(policy))
			policy->w.user_nodemask = *nodes;
		else
			policy->w.cpuset_mems_allowed =
						cpuset_mems_allowed(current);
	}

	ret = mpol_ops[mode].create(policy,
				nodes ? &cpuset_context_nmask : NULL);
	if (ret < 0) {
		kmem_cache_free(policy_cache, policy);
		return ERR_PTR(ret);
	}
	return policy;
}
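
/*
 * Example of the cpuset contextualization in mpol_new() (nodemasks
 * made up): requesting MPOL_INTERLEAVE over {2,3} from a task whose
 * cpuset allows only {0,1} leaves cpuset_context_nmask empty, so
 * mpol_new_interleave() rejects the call with -EINVAL; with
 * MPOL_F_RELATIVE_NODES the mask would instead be remapped into {0,1}.
 */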

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
{
}

static void mpol_rebind_nodemask(struct mempolicy *pol,
				 const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed,
			    *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}

	pol->v.nodes = tmp;
	if (!node_isset(current->il_next, tmp)) {
		current->il_next = next_node(current->il_next, tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = first_node(tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = numa_node_id();
	}
}

static void mpol_rebind_preferred(struct mempolicy *pol,
				  const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES) {
		int node = first_node(pol->w.user_nodemask);

		if (node_isset(node, *nodes)) {
			pol->v.preferred_node = node;
			pol->flags &= ~MPOL_F_LOCAL;
		} else
			pol->flags |= MPOL_F_LOCAL;
	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
		pol->v.preferred_node = first_node(tmp);
	} else if (!(pol->flags & MPOL_F_LOCAL)) {
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						   pol->w.cpuset_mems_allowed,
						   *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}
}
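
/*
 * Example of the rebind rules above (node numbers made up): a
 * MPOL_PREFERRED policy created with MPOL_F_STATIC_NODES on node 2
 * sticks to node 2 across cpuset moves, falling back to local
 * allocation (MPOL_F_LOCAL) while node 2 is outside the new
 * mems_allowed.  Without user flags the node is remapped by position
 * instead: preferred node 2 of {0-3} becomes node 6 of {4-7}.
 */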

/* Migrate a policy to a different set of nodes */
static void mpol_rebind_policy(struct mempolicy *pol,
			       const nodemask_t *newmask)
{
	if (!pol)
		return;
	if (!mpol_store_user_nodemask(pol) &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;
	mpol_ops[pol->mode].rebind(pol, newmask);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 */

void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
{
	mpol_rebind_policy(tsk->mempolicy, new);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
 */

void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new);
	up_write(&mm->mmap_sem);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_interleave,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,
	},
};

static void gather_stats(struct page *, void *, int pte_dirty);
static void migrate_page_add(struct page *page, struct list_head *pagelist,
			     unsigned long flags);

/*
 * Scan the pte level of a range, checking each present page against
 * @nodes and, depending on @flags, gathering statistics or queueing
 * pages for migration.
 */
static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pte_t *orig_pte;
	pte_t *pte;
	spinlock_t *ptl;

	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		struct page *page;
		int nid;

		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * The check for PageReserved here is important to avoid
		 * handling zero pages and other pages that may have been
		 * marked special by the system.
		 *
		 * If PageReserved were not checked here then e.g.
		 * the location of the zero page could have an influence
		 * on MPOL_MF_STRICT, zero pages would be counted for
		 * the per node stats, and there would be useless attempts
		 * to put zero pages on the migration list.
		 */
		if (PageReserved(page))
			continue;
		nid = page_to_nid(page);
		if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
			continue;

		if (flags & MPOL_MF_STATS)
			gather_stats(page, private, pte_dirty(*pte));
		else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
			migrate_page_add(page, private, flags);
		else
			break;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return addr != end;
}

static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		if (check_pte_range(vma, pmd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pmd++, addr = next, addr != end);
	return 0;
}
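
/*
 * The same walk pattern repeats one level up in each helper below:
 * check_pud_range() and check_pgd_range() iterate their tables and
 * recurse downward, so check_pgd_range() ends up visiting every
 * present pte in [addr, end).
 */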

static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		if (check_pmd_range(vma, pud, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static inline int check_pgd_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		if (check_pud_range(vma, pgd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pgd++, addr = next, addr != end);
	return 0;
}

/*
 * Check if all pages in a range are on a set of nodes.
 * If pagelist != NULL then isolate pages from the LRU and
 * put them on the pagelist.
 */
static struct vm_area_struct *
check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		const nodemask_t *nodes, unsigned long flags, void *private)
{
	int err;
	struct vm_area_struct *first, *vma, *prev;

	first = find_vma(mm, start);
	if (!first)
		return ERR_PTR(-EFAULT);
	prev = NULL;
	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
			if (!vma->vm_next && vma->vm_end < end)
				return ERR_PTR(-EFAULT);
			if (prev && prev->vm_end < vma->vm_start)
				return ERR_PTR(-EFAULT);
		}
		if (!is_vm_hugetlb_page(vma) &&
		    ((flags & MPOL_MF_STRICT) ||
		     ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
		      vma_migratable(vma)))) {
			unsigned long endvma = vma->vm_end;

			if (endvma > end)
				endvma = end;
			if (vma->vm_start > start)
				start = vma->vm_start;
			err = check_pgd_range(vma, start, endvma, nodes,
					      flags, private);
			if (err) {
				first = ERR_PTR(err);
				break;
			}
		}
		prev = vma;
	}
	return first;
}

/* Apply policy to a single VMA */
static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
{
	int err = 0;
	struct mempolicy *old = vma->vm_policy;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	if (vma->vm_ops && vma->vm_ops->set_policy)
		err = vma->vm_ops->set_policy(vma, new);
	if (!err) {
		mpol_get(new);
		vma->vm_policy = new;
		mpol_put(old);
	}
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, struct mempolicy *new)
{
	struct vm_area_struct *next;
	int err;

	err = 0;
	for (; vma && vma->vm_start < end; vma = next) {
		next = vma->vm_next;
		if (vma->vm_start < start)
			err = split_vma(vma->vm_mm, vma, start, 1);
		if (!err && vma->vm_end > end)
			err = split_vma(vma->vm_mm, vma, end, 0);
		if (!err)
			err = policy_vma(vma, new);
		if (err)
			break;
	}
	return err;
}
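
/*
 * Worked example for mbind_range() above (addresses hypothetical):
 * applying a new policy to a range covering only the middle of one
 * VMA first splits it at @start, then at @end, leaving three VMAs
 * with policy_vma() applied just to the middle one.
 */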

/*
 * Update task->flags PF_MEMPOLICY bit: set iff non-default
 * mempolicy.  Allows more rapid checking of this (combined perhaps
 * with other PF_* flag bits) on memory allocation hot code paths.
 *
 * If called from outside this file, the task 'p' should -only- be
 * a newly forked child not yet visible on the task list, because
 * manipulating the task flags of a visible task is not safe.
 *
 * The above limitation is why this routine has the funny name
 * mpol_fix_fork_child_flag().
 *
 * It is also safe to call this with a task pointer of current,
 * which the static wrapper mpol_set_task_struct_flag() does,
 * for use within this file.
 */

void mpol_fix_fork_child_flag(struct task_struct *p)
{
	if (p->mempolicy)
		p->flags |= PF_MEMPOLICY;
	else
		p->flags &= ~PF_MEMPOLICY;
}

static void mpol_set_task_struct_flag(void)
{
	mpol_fix_fork_child_flag(current);
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new;
	struct mm_struct *mm = current->mm;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new))
		return PTR_ERR(new);

	/*
	 * prevent changing our mempolicy while show_numa_maps()
	 * is using it.
	 * Note:  do_set_mempolicy() can be called at init time
	 * with no 'mm'.
	 */
	if (mm)
		down_write(&mm->mmap_sem);
	mpol_put(current->mempolicy);
	current->mempolicy = new;
	mpol_set_task_struct_flag();
	if (new && new->mode == MPOL_INTERLEAVE &&
	    nodes_weight(new->v.nodes))
		current->il_next = first_node(new->v.nodes);
	if (mm)
		up_write(&mm->mmap_sem);

	return 0;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (p == &default_policy)
		return;

	switch (p->mode) {
	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		if (!(p->flags & MPOL_F_LOCAL))
			node_set(p->v.preferred_node, *nodes);
		/* else return empty node mask for local allocation */
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p;
	int err;

	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	return err;
}
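
/*
 * Example of the flag combinations do_get_mempolicy() handles below
 * (illustrative): MPOL_F_ADDR alone reports the policy mode in force
 * at @addr; MPOL_F_ADDR|MPOL_F_NODE instead reports the node the page
 * at @addr currently resides on, via lookup_node() above.
 */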

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	cpuset_update_task_memory_state();
	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		*nmask  = cpuset_current_mems_allowed;
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL.  We
		 * want to return MPOL_DEFAULT in this case.
		 */
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = current->il_next;
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	if (vma) {
		up_read(&current->mm->mmap_sem);
		vma = NULL;
	}

	err = 0;
	if (nmask)
		get_policy_nodemask(pol, nmask);

out:
	mpol_cond_put(pol);
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}

#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
			     unsigned long flags)
{
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
		if (!isolate_lru_page(page)) {
			list_add_tail(&page->lru, pagelist);
		}
	}
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
	return alloc_pages_node(node, GFP_HIGHUSER_MOVABLE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;

	nodes_clear(nmask);
	node_set(source, nmask);

	check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nmask,
		    flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist))
		err = migrate_pages(&pagelist, new_node_page, dest);

	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	int busy = 0;
	int err;
	nodemask_t tmp;

	err = migrate_prep();
	if (err)
		return err;

	down_read(&mm->mmap_sem);

	err = migrate_vmas(mm, from_nodes, to_nodes, flags);
	if (err)
		goto out;

	/*
	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
	 * bit in 'tmp', and return that <source, dest> pair for migration.
	 * The pair of nodemasks 'to' and 'from' define the map.
	 *
	 * If no pair of bits is found that way, fallback to picking some
	 * pair of 'source' and 'dest' bits that are not the same.  If the
	 * 'source' and 'dest' bits are the same, this represents a node
	 * that will be migrating to itself, so no pages need move.
	 *
	 * If no bits are left in 'tmp', or if all remaining bits left
	 * in 'tmp' correspond to the same bit in 'to', return false
	 * (nothing left to migrate).
	 *
	 * This lets us pick a pair of nodes to migrate between, such that
	 * if possible the dest node is not already occupied by some other
	 * source node, minimizing the risk of overloading the memory on a
	 * node that would happen if we migrated incoming memory to a node
	 * before migrating outgoing memory source from that same node.
	 *
	 * A single scan of tmp is sufficient.  As we go, we remember the
	 * most recent <s, d> pair that moved (s != d).  If we find a pair
	 * that not only moved, but what's better, moved to an empty slot
	 * (d is not set in tmp), then we break out then, with that pair.
	 * Otherwise when we finish scanning tmp, we at least have the
	 * most recent <s, d> pair that moved.  If we get all the way through
	 * the scan of tmp without finding any node that moved, much less
	 * moved to an empty node, then there is nothing left worth migrating.
	 */
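
	/*
	 * Example (made-up masks): from = {0,1}, to = {1,2}.  The first
	 * scan picks <1,2> because dest 2 lies outside tmp; after node
	 * 1 is migrated and cleared from tmp, the next scan picks
	 * <0,1>.  The physical layout is preserved: old node 1 pages
	 * end up on node 2 and old node 0 pages on node 1.
	 */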

	tmp = *from_nodes;
	while (!nodes_empty(tmp)) {
		int s, d;
		int source = -1;
		int dest = 0;

		for_each_node_mask(s, tmp) {
			d = node_remap(s, *from_nodes, *to_nodes);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
				break;
		}
		if (source == -1)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;
		if (err < 0)
			break;
	}
out:
	up_read(&mm->mmap_sem);
	if (err < 0)
		return err;
	return busy;

}

/*
 * Allocate a new page for page migration based on vma policy.
 * Start assuming that page is mapped by vma pointed to by @private.
 * Search forward from there, if not.  N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
 */
static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	struct vm_area_struct *vma = (struct vm_area_struct *)private;
	unsigned long uninitialized_var(address);

	while (vma) {
		address = page_address_in_vma(page, vma);
		if (address != -EFAULT)
			break;
		vma = vma->vm_next;
	}

	/*
	 * if !vma, alloc_page_vma() will use task or system default policy
	 */
	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
}
#else

static void migrate_page_add(struct page *page, struct list_head *pagelist,
			     unsigned long flags)
{
}

int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	return -ENOSYS;
}

static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	return NULL;
}
#endif
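
/*
 * do_mbind() is the common implementation behind sys_mbind(): it
 * contextualizes the new policy, applies it over [start, start+len)
 * and, when MPOL_MF_MOVE/MPOL_MF_MOVE_ALL are set, migrates pages
 * found on the wrong nodes.  Note that check_range() is called with
 * MPOL_MF_INVERT to collect exactly the pages *not* in @nmask.
 */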
static long do_mbind(unsigned long start, unsigned long len,
		     unsigned short mode, unsigned short mode_flags,
		     nodemask_t *nmask, unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	struct mempolicy *new;
	unsigned long end;
	int err;
	LIST_HEAD(pagelist);

	if (flags & ~(unsigned long)(MPOL_MF_STRICT |
				     MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
		return -EINVAL;
	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	if (start & ~PAGE_MASK)
		return -EINVAL;

	if (mode == MPOL_DEFAULT)
		flags &= ~MPOL_MF_STRICT;

	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
	end = start + len;

	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;

	new = mpol_new(mode, mode_flags, nmask);
	if (IS_ERR(new))
		return PTR_ERR(new);

	/*
	 * If we are using the default policy then operation
	 * on discontinuous address spaces is okay after all
	 */
	if (!new)
		flags |= MPOL_MF_DISCONTIG_OK;

	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
		 start, start + len, mode, mode_flags,
		 nmask ? nodes_addr(*nmask)[0] : -1);

	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {

		err = migrate_prep();
		if (err)
			return err;
	}
	down_write(&mm->mmap_sem);
	vma = check_range(mm, start, end, nmask,
			  flags | MPOL_MF_INVERT, &pagelist);

	err = PTR_ERR(vma);
	if (!IS_ERR(vma)) {
		int nr_failed = 0;

		err = mbind_range(vma, start, end, new);

		if (!list_empty(&pagelist))
			nr_failed = migrate_pages(&pagelist, new_vma_page,
						  (unsigned long)vma);

		if (!err && nr_failed && (flags & MPOL_MF_STRICT))
			err = -EIO;
	}

	up_write(&mm->mmap_sem);
	mpol_put(new);
	return err;
}

/*
 * User space interface with variable sized bitmaps for nodelists.
 */

/*
 * Copy a node mask from user space.
 */
static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
		     unsigned long maxnode)
{
	unsigned long k;
	unsigned long nlongs;
	unsigned long endmask;

	--maxnode;
	nodes_clear(*nodes);
	if (maxnode == 0 || !nmask)
		return 0;
	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
		return -EINVAL;

	nlongs = BITS_TO_LONGS(maxnode);
	if ((maxnode % BITS_PER_LONG) == 0)
		endmask = ~0UL;
	else
		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;

	/* When the user specifies more nodes than supported, just check
	   that the unsupported part is all zero. */
	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
		if (nlongs > PAGE_SIZE/sizeof(long))
			return -EINVAL;
		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
			unsigned long t;
			if (get_user(t, nmask + k))
				return -EFAULT;
			if (k == nlongs - 1) {
				if (t & endmask)
					return -EINVAL;
			} else if (t)
				return -EINVAL;
		}
		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
		endmask = ~0UL;
	}

	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
		return -EFAULT;
	nodes_addr(*nodes)[nlongs-1] &= endmask;
	return 0;
}

/* Copy a kernel node mask to user space */
static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
			      nodemask_t *nodes)
{
	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);

	if (copy > nbytes) {
		if (copy > PAGE_SIZE)
			return -EINVAL;
		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
			return -EFAULT;
		copy = nbytes;
	}
	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
}

SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
		unsigned long, mode, unsigned long __user *, nmask,
		unsigned long, maxnode, unsigned, flags)
{
	nodemask_t nodes;
	int err;
	unsigned short mode_flags;

	mode_flags = mode & MPOL_MODE_FLAGS;
	mode &= ~MPOL_MODE_FLAGS;
	if (mode >= MPOL_MAX)
		return -EINVAL;
	if ((mode_flags & MPOL_F_STATIC_NODES) &&
	    (mode_flags & MPOL_F_RELATIVE_NODES))
		return -EINVAL;
	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_mbind(start, len, mode, mode_flags, &nodes, flags);
}

/* Set the process memory policy */
SYSCALL_DEFINE3(set_mempolicy, int, mode, unsigned long __user *, nmask,
		unsigned long, maxnode)
{
	int err;
	nodemask_t nodes;
	unsigned short flags;

	flags = mode & MPOL_MODE_FLAGS;
	mode &= ~MPOL_MODE_FLAGS;
	if ((unsigned int)mode >= MPOL_MAX)
		return -EINVAL;
	if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
		return -EINVAL;
	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_set_mempolicy(mode, flags, &nodes);
}

SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
		const unsigned long __user *, old_nodes,
		const unsigned long __user *, new_nodes)
{
	const struct cred *cred = current_cred(), *tcred;
	struct mm_struct *mm;
	struct task_struct *task;
	nodemask_t old;
	nodemask_t new;
	nodemask_t task_nodes;
	int err;

	err = get_nodes(&old, old_nodes, maxnode);
	if (err)
		return err;

	err = get_nodes(&new, new_nodes, maxnode);
	if (err)
		return err;

	/* Find the mm_struct */
	read_lock(&tasklist_lock);
find_task_by_vpid(pid) : current; 113539743889SChristoph Lameter if (!task) { 113639743889SChristoph Lameter read_unlock(&tasklist_lock); 113739743889SChristoph Lameter return -ESRCH; 113839743889SChristoph Lameter } 113939743889SChristoph Lameter mm = get_task_mm(task); 114039743889SChristoph Lameter read_unlock(&tasklist_lock); 114139743889SChristoph Lameter 114239743889SChristoph Lameter if (!mm) 114339743889SChristoph Lameter return -EINVAL; 114439743889SChristoph Lameter 114539743889SChristoph Lameter /* 114639743889SChristoph Lameter * Check if this process has the right to modify the specified 114739743889SChristoph Lameter * process. The right exists if the process has administrative 11487f927fccSAlexey Dobriyan * capabilities, superuser privileges or the same 114939743889SChristoph Lameter * userid as the target process. 115039743889SChristoph Lameter */ 1151c69e8d9cSDavid Howells rcu_read_lock(); 1152c69e8d9cSDavid Howells tcred = __task_cred(task); 1153b6dff3ecSDavid Howells if (cred->euid != tcred->suid && cred->euid != tcred->uid && 1154b6dff3ecSDavid Howells cred->uid != tcred->suid && cred->uid != tcred->uid && 115574c00241SChristoph Lameter !capable(CAP_SYS_NICE)) { 1156c69e8d9cSDavid Howells rcu_read_unlock(); 115739743889SChristoph Lameter err = -EPERM; 115839743889SChristoph Lameter goto out; 115939743889SChristoph Lameter } 1160c69e8d9cSDavid Howells rcu_read_unlock(); 116139743889SChristoph Lameter 116239743889SChristoph Lameter task_nodes = cpuset_mems_allowed(task); 116339743889SChristoph Lameter /* Is the user allowed to access the target nodes? */ 116474c00241SChristoph Lameter if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_NICE)) { 116539743889SChristoph Lameter err = -EPERM; 116639743889SChristoph Lameter goto out; 116739743889SChristoph Lameter } 116839743889SChristoph Lameter 116937b07e41SLee Schermerhorn if (!nodes_subset(new, node_states[N_HIGH_MEMORY])) { 11703b42d28bSChristoph Lameter err = -EINVAL; 11713b42d28bSChristoph Lameter goto out; 11723b42d28bSChristoph Lameter } 11733b42d28bSChristoph Lameter 117486c3a764SDavid Quigley err = security_task_movememory(task); 117586c3a764SDavid Quigley if (err) 117686c3a764SDavid Quigley goto out; 117786c3a764SDavid Quigley 1178511030bcSChristoph Lameter err = do_migrate_pages(mm, &old, &new, 117974c00241SChristoph Lameter capable(CAP_SYS_NICE) ? 
MPOL_MF_MOVE_ALL : MPOL_MF_MOVE); 118039743889SChristoph Lameter out: 118139743889SChristoph Lameter mmput(mm); 118239743889SChristoph Lameter return err; 118339743889SChristoph Lameter } 118439743889SChristoph Lameter 118539743889SChristoph Lameter 11868bccd85fSChristoph Lameter /* Retrieve NUMA policy */ 1187*938bb9f5SHeiko Carstens SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, 1188*938bb9f5SHeiko Carstens unsigned long __user *, nmask, unsigned long, maxnode, 1189*938bb9f5SHeiko Carstens unsigned long, addr, unsigned long, flags) 11908bccd85fSChristoph Lameter { 1191dbcb0f19SAdrian Bunk int err; 1192dbcb0f19SAdrian Bunk int uninitialized_var(pval); 11938bccd85fSChristoph Lameter nodemask_t nodes; 11948bccd85fSChristoph Lameter 11958bccd85fSChristoph Lameter if (nmask != NULL && maxnode < MAX_NUMNODES) 11968bccd85fSChristoph Lameter return -EINVAL; 11978bccd85fSChristoph Lameter 11988bccd85fSChristoph Lameter err = do_get_mempolicy(&pval, &nodes, addr, flags); 11998bccd85fSChristoph Lameter 12008bccd85fSChristoph Lameter if (err) 12018bccd85fSChristoph Lameter return err; 12028bccd85fSChristoph Lameter 12038bccd85fSChristoph Lameter if (policy && put_user(pval, policy)) 12048bccd85fSChristoph Lameter return -EFAULT; 12058bccd85fSChristoph Lameter 12068bccd85fSChristoph Lameter if (nmask) 12078bccd85fSChristoph Lameter err = copy_nodes_to_user(nmask, maxnode, &nodes); 12088bccd85fSChristoph Lameter 12098bccd85fSChristoph Lameter return err; 12108bccd85fSChristoph Lameter } 12118bccd85fSChristoph Lameter 12121da177e4SLinus Torvalds #ifdef CONFIG_COMPAT 12131da177e4SLinus Torvalds 12141da177e4SLinus Torvalds asmlinkage long compat_sys_get_mempolicy(int __user *policy, 12151da177e4SLinus Torvalds compat_ulong_t __user *nmask, 12161da177e4SLinus Torvalds compat_ulong_t maxnode, 12171da177e4SLinus Torvalds compat_ulong_t addr, compat_ulong_t flags) 12181da177e4SLinus Torvalds { 12191da177e4SLinus Torvalds long err; 12201da177e4SLinus Torvalds unsigned long __user *nm = NULL; 12211da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 12221da177e4SLinus Torvalds DECLARE_BITMAP(bm, MAX_NUMNODES); 12231da177e4SLinus Torvalds 12241da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 12251da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 12261da177e4SLinus Torvalds 12271da177e4SLinus Torvalds if (nmask) 12281da177e4SLinus Torvalds nm = compat_alloc_user_space(alloc_size); 12291da177e4SLinus Torvalds 12301da177e4SLinus Torvalds err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags); 12311da177e4SLinus Torvalds 12321da177e4SLinus Torvalds if (!err && nmask) { 12331da177e4SLinus Torvalds err = copy_from_user(bm, nm, alloc_size); 12341da177e4SLinus Torvalds /* ensure entire bitmap is zeroed */ 12351da177e4SLinus Torvalds err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8); 12361da177e4SLinus Torvalds err |= compat_put_bitmap(nmask, bm, nr_bits); 12371da177e4SLinus Torvalds } 12381da177e4SLinus Torvalds 12391da177e4SLinus Torvalds return err; 12401da177e4SLinus Torvalds } 12411da177e4SLinus Torvalds 12421da177e4SLinus Torvalds asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask, 12431da177e4SLinus Torvalds compat_ulong_t maxnode) 12441da177e4SLinus Torvalds { 12451da177e4SLinus Torvalds long err = 0; 12461da177e4SLinus Torvalds unsigned long __user *nm = NULL; 12471da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 12481da177e4SLinus Torvalds DECLARE_BITMAP(bm, MAX_NUMNODES); 12491da177e4SLinus Torvalds 
12501da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 12511da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 12521da177e4SLinus Torvalds 12531da177e4SLinus Torvalds if (nmask) { 12541da177e4SLinus Torvalds err = compat_get_bitmap(bm, nmask, nr_bits); 12551da177e4SLinus Torvalds nm = compat_alloc_user_space(alloc_size); 12561da177e4SLinus Torvalds err |= copy_to_user(nm, bm, alloc_size); 12571da177e4SLinus Torvalds } 12581da177e4SLinus Torvalds 12591da177e4SLinus Torvalds if (err) 12601da177e4SLinus Torvalds return -EFAULT; 12611da177e4SLinus Torvalds 12621da177e4SLinus Torvalds return sys_set_mempolicy(mode, nm, nr_bits+1); 12631da177e4SLinus Torvalds } 12641da177e4SLinus Torvalds 12651da177e4SLinus Torvalds asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len, 12661da177e4SLinus Torvalds compat_ulong_t mode, compat_ulong_t __user *nmask, 12671da177e4SLinus Torvalds compat_ulong_t maxnode, compat_ulong_t flags) 12681da177e4SLinus Torvalds { 12691da177e4SLinus Torvalds long err = 0; 12701da177e4SLinus Torvalds unsigned long __user *nm = NULL; 12711da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 1272dfcd3c0dSAndi Kleen nodemask_t bm; 12731da177e4SLinus Torvalds 12741da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 12751da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 12761da177e4SLinus Torvalds 12771da177e4SLinus Torvalds if (nmask) { 1278dfcd3c0dSAndi Kleen err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits); 12791da177e4SLinus Torvalds nm = compat_alloc_user_space(alloc_size); 1280dfcd3c0dSAndi Kleen err |= copy_to_user(nm, nodes_addr(bm), alloc_size); 12811da177e4SLinus Torvalds } 12821da177e4SLinus Torvalds 12831da177e4SLinus Torvalds if (err) 12841da177e4SLinus Torvalds return -EFAULT; 12851da177e4SLinus Torvalds 12861da177e4SLinus Torvalds return sys_mbind(start, len, mode, nm, nr_bits+1, flags); 12871da177e4SLinus Torvalds } 12881da177e4SLinus Torvalds 12891da177e4SLinus Torvalds #endif 12901da177e4SLinus Torvalds 1291480eccf9SLee Schermerhorn /* 1292480eccf9SLee Schermerhorn * get_vma_policy(@task, @vma, @addr) 1293480eccf9SLee Schermerhorn * @task - task for fallback if vma policy == default 1294480eccf9SLee Schermerhorn * @vma - virtual memory area whose policy is sought 1295480eccf9SLee Schermerhorn * @addr - address in @vma for shared policy lookup 1296480eccf9SLee Schermerhorn * 1297480eccf9SLee Schermerhorn * Returns effective policy for a VMA at specified address. 1298480eccf9SLee Schermerhorn * Falls back to @task or system default policy, as necessary. 129952cd3b07SLee Schermerhorn * Current or other task's task mempolicy and non-shared vma policies 130052cd3b07SLee Schermerhorn * are protected by the task's mmap_sem, which must be held for read by 130152cd3b07SLee Schermerhorn * the caller. 130252cd3b07SLee Schermerhorn * Shared policies [those marked as MPOL_F_SHARED] require an extra reference 130352cd3b07SLee Schermerhorn * count--added by the get_policy() vm_op, as appropriate--to protect against 130452cd3b07SLee Schermerhorn * freeing by another task. It is the caller's responsibility to free the 130552cd3b07SLee Schermerhorn * extra reference for shared policies. 
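 *
 * A minimal sketch of the expected calling pattern (illustrative
 * only; alloc_page_vma() below is a real user):
 *
 *	struct mempolicy *pol = get_vma_policy(current, vma, addr);
 *	... allocate according to pol ...
 *	mpol_cond_put(pol);	(drops the ref only for MPOL_F_SHARED)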
1306480eccf9SLee Schermerhorn  */
130748fce342SChristoph Lameter static struct mempolicy *get_vma_policy(struct task_struct *task,
130848fce342SChristoph Lameter 		struct vm_area_struct *vma, unsigned long addr)
13091da177e4SLinus Torvalds {
13106e21c8f1SChristoph Lameter 	struct mempolicy *pol = task->mempolicy;
13111da177e4SLinus Torvalds 
13121da177e4SLinus Torvalds 	if (vma) {
1313480eccf9SLee Schermerhorn 		if (vma->vm_ops && vma->vm_ops->get_policy) {
1314ae4d8c16SLee Schermerhorn 			struct mempolicy *vpol = vma->vm_ops->get_policy(vma,
1315ae4d8c16SLee Schermerhorn 									addr);
1316ae4d8c16SLee Schermerhorn 			if (vpol)
1317ae4d8c16SLee Schermerhorn 				pol = vpol;
1318bea904d5SLee Schermerhorn 		} else if (vma->vm_policy)
13191da177e4SLinus Torvalds 			pol = vma->vm_policy;
13201da177e4SLinus Torvalds 	}
13211da177e4SLinus Torvalds 	if (!pol)
13221da177e4SLinus Torvalds 		pol = &default_policy;
13231da177e4SLinus Torvalds 	return pol;
13241da177e4SLinus Torvalds }
13251da177e4SLinus Torvalds 
132652cd3b07SLee Schermerhorn /*
132752cd3b07SLee Schermerhorn  * Return a nodemask representing a mempolicy for filtering nodes for
132852cd3b07SLee Schermerhorn  * page allocation
132952cd3b07SLee Schermerhorn  */
133052cd3b07SLee Schermerhorn static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
133119770b32SMel Gorman {
133219770b32SMel Gorman 	/* Lower zones don't get a nodemask applied for MPOL_BIND */
133345c4745aSLee Schermerhorn 	if (unlikely(policy->mode == MPOL_BIND) &&
133419770b32SMel Gorman 			gfp_zone(gfp) >= policy_zone &&
133519770b32SMel Gorman 			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
133619770b32SMel Gorman 		return &policy->v.nodes;
133719770b32SMel Gorman 
133819770b32SMel Gorman 	return NULL;
133919770b32SMel Gorman }
134019770b32SMel Gorman 
134152cd3b07SLee Schermerhorn /* Return a zonelist indicated by gfp for node representing a mempolicy */
134252cd3b07SLee Schermerhorn static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy)
13431da177e4SLinus Torvalds {
1344fc36b8d3SLee Schermerhorn 	int nd = numa_node_id();
13451da177e4SLinus Torvalds 
134645c4745aSLee Schermerhorn 	switch (policy->mode) {
13471da177e4SLinus Torvalds 	case MPOL_PREFERRED:
1348fc36b8d3SLee Schermerhorn 		if (!(policy->flags & MPOL_F_LOCAL))
13491da177e4SLinus Torvalds 			nd = policy->v.preferred_node;
13501da177e4SLinus Torvalds 		break;
13511da177e4SLinus Torvalds 	case MPOL_BIND:
135219770b32SMel Gorman 		/*
135352cd3b07SLee Schermerhorn 		 * Normally, MPOL_BIND allocations are node-local within the
135452cd3b07SLee Schermerhorn 		 * allowed nodemask.  However, if __GFP_THISNODE is set and the
135552cd3b07SLee Schermerhorn 		 * current node isn't part of the mask, we use the zonelist for
135652cd3b07SLee Schermerhorn 		 * the first node in the mask instead.
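		 *
		 * Worked example (illustrative): with v.nodes = {1,3} and the
		 * allocating CPU on node 2, a __GFP_THISNODE allocation gets
		 * node 1's zonelist rather than node 2's, so the bind mask is
		 * still honored.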
135719770b32SMel Gorman 		 */
135819770b32SMel Gorman 	if (unlikely(gfp & __GFP_THISNODE) &&
135919770b32SMel Gorman 			unlikely(!node_isset(nd, policy->v.nodes)))
136019770b32SMel Gorman 		nd = first_node(policy->v.nodes);
136119770b32SMel Gorman 		break;
13621da177e4SLinus Torvalds 	case MPOL_INTERLEAVE: /* should not happen */
13631da177e4SLinus Torvalds 		break;
13641da177e4SLinus Torvalds 	default:
13651da177e4SLinus Torvalds 		BUG();
13661da177e4SLinus Torvalds 	}
13670e88460dSMel Gorman 	return node_zonelist(nd, gfp);
13681da177e4SLinus Torvalds }
13691da177e4SLinus Torvalds 
13701da177e4SLinus Torvalds /* Do dynamic interleaving for a process */
13711da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy)
13721da177e4SLinus Torvalds {
13731da177e4SLinus Torvalds 	unsigned nid, next;
13741da177e4SLinus Torvalds 	struct task_struct *me = current;
13751da177e4SLinus Torvalds 
13761da177e4SLinus Torvalds 	nid = me->il_next;
1377dfcd3c0dSAndi Kleen 	next = next_node(nid, policy->v.nodes);
13781da177e4SLinus Torvalds 	if (next >= MAX_NUMNODES)
1379dfcd3c0dSAndi Kleen 		next = first_node(policy->v.nodes);
1380f5b087b5SDavid Rientjes 	if (next < MAX_NUMNODES)
13811da177e4SLinus Torvalds 		me->il_next = next;
13821da177e4SLinus Torvalds 	return nid;
13831da177e4SLinus Torvalds }
13841da177e4SLinus Torvalds 
1385dc85da15SChristoph Lameter /*
1386dc85da15SChristoph Lameter  * Depending on the memory policy provide a node from which to allocate the
1387dc85da15SChristoph Lameter  * next slab entry.
138852cd3b07SLee Schermerhorn  * @policy must be protected from freeing by the caller.  If @policy is
138952cd3b07SLee Schermerhorn  * the current task's mempolicy, this protection is implicit, as only the
139052cd3b07SLee Schermerhorn  * task can change its policy.  The system default policy requires no
139152cd3b07SLee Schermerhorn  * such protection.
1392dc85da15SChristoph Lameter  */
1393dc85da15SChristoph Lameter unsigned slab_node(struct mempolicy *policy)
1394dc85da15SChristoph Lameter {
1395fc36b8d3SLee Schermerhorn 	if (!policy || policy->flags & MPOL_F_LOCAL)
1396bea904d5SLee Schermerhorn 		return numa_node_id();
1397765c4507SChristoph Lameter 
1398bea904d5SLee Schermerhorn 	switch (policy->mode) {
1399bea904d5SLee Schermerhorn 	case MPOL_PREFERRED:
1400fc36b8d3SLee Schermerhorn 		/*
1401fc36b8d3SLee Schermerhorn 		 * handled MPOL_F_LOCAL above
1402fc36b8d3SLee Schermerhorn 		 */
1403bea904d5SLee Schermerhorn 		return policy->v.preferred_node;
1404bea904d5SLee Schermerhorn 
1405dc85da15SChristoph Lameter 	case MPOL_INTERLEAVE:
1406dc85da15SChristoph Lameter 		return interleave_nodes(policy);
1407dc85da15SChristoph Lameter 
1408dd1a239fSMel Gorman 	case MPOL_BIND: {
1409dc85da15SChristoph Lameter 		/*
1410dc85da15SChristoph Lameter 		 * Follow bind policy behavior and start allocation at the
1411dc85da15SChristoph Lameter 		 * first node.
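		 *
		 * Sketch of what the code below computes: walk the local
		 * node's GFP_KERNEL zonelist and return the node of the first
		 * zone that lies in policy->v.nodes, typically the allowed
		 * node nearest the allocating CPU.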
1412dc85da15SChristoph Lameter */ 141319770b32SMel Gorman struct zonelist *zonelist; 141419770b32SMel Gorman struct zone *zone; 141519770b32SMel Gorman enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL); 141619770b32SMel Gorman zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0]; 141719770b32SMel Gorman (void)first_zones_zonelist(zonelist, highest_zoneidx, 141819770b32SMel Gorman &policy->v.nodes, 141919770b32SMel Gorman &zone); 142019770b32SMel Gorman return zone->node; 1421dd1a239fSMel Gorman } 1422dc85da15SChristoph Lameter 1423dc85da15SChristoph Lameter default: 1424bea904d5SLee Schermerhorn BUG(); 1425dc85da15SChristoph Lameter } 1426dc85da15SChristoph Lameter } 1427dc85da15SChristoph Lameter 14281da177e4SLinus Torvalds /* Do static interleaving for a VMA with known offset. */ 14291da177e4SLinus Torvalds static unsigned offset_il_node(struct mempolicy *pol, 14301da177e4SLinus Torvalds struct vm_area_struct *vma, unsigned long off) 14311da177e4SLinus Torvalds { 1432dfcd3c0dSAndi Kleen unsigned nnodes = nodes_weight(pol->v.nodes); 1433f5b087b5SDavid Rientjes unsigned target; 14341da177e4SLinus Torvalds int c; 14351da177e4SLinus Torvalds int nid = -1; 14361da177e4SLinus Torvalds 1437f5b087b5SDavid Rientjes if (!nnodes) 1438f5b087b5SDavid Rientjes return numa_node_id(); 1439f5b087b5SDavid Rientjes target = (unsigned int)off % nnodes; 14401da177e4SLinus Torvalds c = 0; 14411da177e4SLinus Torvalds do { 1442dfcd3c0dSAndi Kleen nid = next_node(nid, pol->v.nodes); 14431da177e4SLinus Torvalds c++; 14441da177e4SLinus Torvalds } while (c <= target); 14451da177e4SLinus Torvalds return nid; 14461da177e4SLinus Torvalds } 14471da177e4SLinus Torvalds 14485da7ca86SChristoph Lameter /* Determine a node number for interleave */ 14495da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol, 14505da7ca86SChristoph Lameter struct vm_area_struct *vma, unsigned long addr, int shift) 14515da7ca86SChristoph Lameter { 14525da7ca86SChristoph Lameter if (vma) { 14535da7ca86SChristoph Lameter unsigned long off; 14545da7ca86SChristoph Lameter 14553b98b087SNishanth Aravamudan /* 14563b98b087SNishanth Aravamudan * for small pages, there is no difference between 14573b98b087SNishanth Aravamudan * shift and PAGE_SHIFT, so the bit-shift is safe. 14583b98b087SNishanth Aravamudan * for huge pages, since vm_pgoff is in units of small 14593b98b087SNishanth Aravamudan * pages, we need to shift off the always 0 bits to get 14603b98b087SNishanth Aravamudan * a useful offset. 
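	 *
	 * Worked example (assuming 4KB base pages, PAGE_SHIFT == 12):
	 * for a 2MB huge page VMA, shift == 21, so the code below computes
	 *
	 *	off = (vma->vm_pgoff >> 9) + ((addr - vma->vm_start) >> 21);
	 *
	 * i.e. the interleave offset is counted in whole huge pages.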
14613b98b087SNishanth Aravamudan 	 */
14623b98b087SNishanth Aravamudan 		BUG_ON(shift < PAGE_SHIFT);
14633b98b087SNishanth Aravamudan 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
14645da7ca86SChristoph Lameter 		off += (addr - vma->vm_start) >> shift;
14655da7ca86SChristoph Lameter 		return offset_il_node(pol, vma, off);
14665da7ca86SChristoph Lameter 	} else
14675da7ca86SChristoph Lameter 		return interleave_nodes(pol);
14685da7ca86SChristoph Lameter }
14695da7ca86SChristoph Lameter 
147000ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS
1471480eccf9SLee Schermerhorn /*
1472480eccf9SLee Schermerhorn  * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
1473480eccf9SLee Schermerhorn  * @vma = virtual memory area whose policy is sought
1474480eccf9SLee Schermerhorn  * @addr = address in @vma for shared policy lookup and interleave policy
1475480eccf9SLee Schermerhorn  * @gfp_flags = for requested zone
147619770b32SMel Gorman  * @mpol = pointer to mempolicy pointer for reference counted mempolicy
147719770b32SMel Gorman  * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
1478480eccf9SLee Schermerhorn  *
147952cd3b07SLee Schermerhorn  * Returns a zonelist suitable for a huge page allocation and a pointer
148052cd3b07SLee Schermerhorn  * to the struct mempolicy for conditional unref after allocation.
148152cd3b07SLee Schermerhorn  * If the effective policy is MPOL_BIND, returns a pointer to the mempolicy's
148252cd3b07SLee Schermerhorn  * @nodemask for filtering the zonelist.
1483480eccf9SLee Schermerhorn  */
1484396faf03SMel Gorman struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
148519770b32SMel Gorman 				gfp_t gfp_flags, struct mempolicy **mpol,
148619770b32SMel Gorman 				nodemask_t **nodemask)
14875da7ca86SChristoph Lameter {
1488480eccf9SLee Schermerhorn 	struct zonelist *zl;
14895da7ca86SChristoph Lameter 
149052cd3b07SLee Schermerhorn 	*mpol = get_vma_policy(current, vma, addr);
149119770b32SMel Gorman 	*nodemask = NULL;	/* assume !MPOL_BIND */
14925da7ca86SChristoph Lameter 
149352cd3b07SLee Schermerhorn 	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
149452cd3b07SLee Schermerhorn 		zl = node_zonelist(interleave_nid(*mpol, vma, addr,
1495a5516438SAndi Kleen 				huge_page_shift(hstate_vma(vma))), gfp_flags);
149652cd3b07SLee Schermerhorn 	} else {
149752cd3b07SLee Schermerhorn 		zl = policy_zonelist(gfp_flags, *mpol);
149852cd3b07SLee Schermerhorn 		if ((*mpol)->mode == MPOL_BIND)
149952cd3b07SLee Schermerhorn 			*nodemask = &(*mpol)->v.nodes;
1500480eccf9SLee Schermerhorn 	}
1501480eccf9SLee Schermerhorn 	return zl;
15025da7ca86SChristoph Lameter }
150300ac59adSChen, Kenneth W #endif
15045da7ca86SChristoph Lameter 
15051da177e4SLinus Torvalds /* Allocate a page in interleaved policy.
15061da177e4SLinus Torvalds    Own path because it needs to do special accounting.
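   The NUMA_INTERLEAVE_HIT event counted below is what is reported as
   interleave_hit in /sys/devices/system/node/nodeN/numastat (assuming
   the usual sysfs numastat layout).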
 */
1507662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1508662f3a0bSAndi Kleen 					unsigned nid)
15091da177e4SLinus Torvalds {
15101da177e4SLinus Torvalds 	struct zonelist *zl;
15111da177e4SLinus Torvalds 	struct page *page;
15121da177e4SLinus Torvalds 
15130e88460dSMel Gorman 	zl = node_zonelist(nid, gfp);
15141da177e4SLinus Torvalds 	page = __alloc_pages(gfp, order, zl);
1515dd1a239fSMel Gorman 	if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
1516ca889e6cSChristoph Lameter 		inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
15171da177e4SLinus Torvalds 	return page;
15181da177e4SLinus Torvalds }
15191da177e4SLinus Torvalds 
15201da177e4SLinus Torvalds /**
15211da177e4SLinus Torvalds  * alloc_page_vma - Allocate a page for a VMA.
15221da177e4SLinus Torvalds  *
15231da177e4SLinus Torvalds  * @gfp:
15241da177e4SLinus Torvalds  *	%GFP_USER    user allocation.
15251da177e4SLinus Torvalds  *	%GFP_KERNEL  kernel allocations,
15261da177e4SLinus Torvalds  *	%GFP_HIGHMEM highmem/user allocations,
15271da177e4SLinus Torvalds  *	%GFP_FS      allocation should not call back into a file system.
15281da177e4SLinus Torvalds  *	%GFP_ATOMIC  don't sleep.
15291da177e4SLinus Torvalds  *
15301da177e4SLinus Torvalds  * @vma:  Pointer to VMA or NULL if not available.
15311da177e4SLinus Torvalds  * @addr: Virtual Address of the allocation. Must be inside the VMA.
15321da177e4SLinus Torvalds  *
15331da177e4SLinus Torvalds  * This function allocates a page from the kernel page pool and applies
15341da177e4SLinus Torvalds  * a NUMA policy associated with the VMA or the current process.
15351da177e4SLinus Torvalds  * When VMA is not NULL caller must hold down_read on the mmap_sem of the
15361da177e4SLinus Torvalds  * mm_struct of the VMA to prevent it from going away. Should be used for
15371da177e4SLinus Torvalds  * all allocations for pages that will be mapped into
15381da177e4SLinus Torvalds  * user space. Returns NULL when no page can be allocated.
15391da177e4SLinus Torvalds  *
15401da177e4SLinus Torvalds  * Should be called with the mmap_sem of the vma held.
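 *
 * A usage sketch from a fault-handling context (names illustrative,
 * error handling elided):
 *
 *	page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
 *	if (!page)
 *		return VM_FAULT_OOM;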
15411da177e4SLinus Torvalds  */
15421da177e4SLinus Torvalds struct page *
1543dd0fc66fSAl Viro alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
15441da177e4SLinus Torvalds {
15456e21c8f1SChristoph Lameter 	struct mempolicy *pol = get_vma_policy(current, vma, addr);
1546480eccf9SLee Schermerhorn 	struct zonelist *zl;
15471da177e4SLinus Torvalds 
1548cf2a473cSPaul Jackson 	cpuset_update_task_memory_state();
15491da177e4SLinus Torvalds 
155045c4745aSLee Schermerhorn 	if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
15511da177e4SLinus Torvalds 		unsigned nid;
15525da7ca86SChristoph Lameter 
15535da7ca86SChristoph Lameter 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
155452cd3b07SLee Schermerhorn 		mpol_cond_put(pol);
15551da177e4SLinus Torvalds 		return alloc_page_interleave(gfp, 0, nid);
15561da177e4SLinus Torvalds 	}
155752cd3b07SLee Schermerhorn 	zl = policy_zonelist(gfp, pol);
155852cd3b07SLee Schermerhorn 	if (unlikely(mpol_needs_cond_ref(pol))) {
1559480eccf9SLee Schermerhorn 		/*
156052cd3b07SLee Schermerhorn 		 * slow path: ref counted shared policy
1561480eccf9SLee Schermerhorn 		 */
156219770b32SMel Gorman 		struct page *page = __alloc_pages_nodemask(gfp, 0,
156352cd3b07SLee Schermerhorn 					zl, policy_nodemask(gfp, pol));
1564f0be3d32SLee Schermerhorn 		__mpol_put(pol);
1565480eccf9SLee Schermerhorn 		return page;
1566480eccf9SLee Schermerhorn 	}
1567480eccf9SLee Schermerhorn 	/*
1568480eccf9SLee Schermerhorn 	 * fast path: default or task policy
1569480eccf9SLee Schermerhorn 	 */
157052cd3b07SLee Schermerhorn 	return __alloc_pages_nodemask(gfp, 0, zl, policy_nodemask(gfp, pol));
15711da177e4SLinus Torvalds }
15721da177e4SLinus Torvalds 
15731da177e4SLinus Torvalds /**
15741da177e4SLinus Torvalds  * alloc_pages_current - Allocate pages.
15751da177e4SLinus Torvalds  *
15761da177e4SLinus Torvalds  * @gfp:
15771da177e4SLinus Torvalds  *	%GFP_USER    user allocation,
15781da177e4SLinus Torvalds  *	%GFP_KERNEL  kernel allocation,
15791da177e4SLinus Torvalds  *	%GFP_HIGHMEM highmem allocation,
15801da177e4SLinus Torvalds  *	%GFP_FS      don't call back into a file system.
15811da177e4SLinus Torvalds  *	%GFP_ATOMIC  don't sleep.
15821da177e4SLinus Torvalds  * @order: log2 of the allocation size in pages. 0 is a single page.
15831da177e4SLinus Torvalds  *
15841da177e4SLinus Torvalds  * Allocate a page from the kernel page pool.  When not in
15851da177e4SLinus Torvalds  * interrupt context, apply the current process' NUMA policy.
15861da177e4SLinus Torvalds  * Returns NULL when no page can be allocated.
15871da177e4SLinus Torvalds  *
1588cf2a473cSPaul Jackson  * Don't call cpuset_update_task_memory_state() unless
15891da177e4SLinus Torvalds  * 1) it's ok to take cpuset_sem (can WAIT), and
15901da177e4SLinus Torvalds  * 2) allocating for current task (not interrupt).
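 *
 * On NUMA kernels plain alloc_pages()/alloc_page() resolve to this
 * function, so a sketch as simple as
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 0);
 *
 * already honours the calling task's mempolicy.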
15911da177e4SLinus Torvalds  */
1592dd0fc66fSAl Viro struct page *alloc_pages_current(gfp_t gfp, unsigned order)
15931da177e4SLinus Torvalds {
15941da177e4SLinus Torvalds 	struct mempolicy *pol = current->mempolicy;
15951da177e4SLinus Torvalds 
15961da177e4SLinus Torvalds 	if ((gfp & __GFP_WAIT) && !in_interrupt())
1597cf2a473cSPaul Jackson 		cpuset_update_task_memory_state();
15989b819d20SChristoph Lameter 	if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
15991da177e4SLinus Torvalds 		pol = &default_policy;
160052cd3b07SLee Schermerhorn 
160152cd3b07SLee Schermerhorn 	/*
160252cd3b07SLee Schermerhorn 	 * No reference counting needed for current->mempolicy
160352cd3b07SLee Schermerhorn 	 * nor system default_policy
160452cd3b07SLee Schermerhorn 	 */
160545c4745aSLee Schermerhorn 	if (pol->mode == MPOL_INTERLEAVE)
16061da177e4SLinus Torvalds 		return alloc_page_interleave(gfp, order, interleave_nodes(pol));
160719770b32SMel Gorman 	return __alloc_pages_nodemask(gfp, order,
160852cd3b07SLee Schermerhorn 			policy_zonelist(gfp, pol), policy_nodemask(gfp, pol));
16091da177e4SLinus Torvalds }
16101da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_pages_current);
16111da177e4SLinus Torvalds 
16124225399aSPaul Jackson /*
1613846a16bfSLee Schermerhorn  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
16144225399aSPaul Jackson  * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
16154225399aSPaul Jackson  * with the mems_allowed returned by cpuset_mems_allowed().  This
16164225399aSPaul Jackson  * keeps mempolicies cpuset relative after its cpuset moves.  See
16174225399aSPaul Jackson  * further kernel/cpuset.c update_nodemask().
16184225399aSPaul Jackson  */
16194225399aSPaul Jackson 
1620846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */
1621846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old)
16221da177e4SLinus Torvalds {
16231da177e4SLinus Torvalds 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
16241da177e4SLinus Torvalds 
16251da177e4SLinus Torvalds 	if (!new)
16261da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
16274225399aSPaul Jackson 	if (current_cpuset_is_being_rebound()) {
16284225399aSPaul Jackson 		nodemask_t mems = cpuset_mems_allowed(current);
16294225399aSPaul Jackson 		mpol_rebind_policy(old, &mems);
16304225399aSPaul Jackson 	}
16311da177e4SLinus Torvalds 	*new = *old;
16321da177e4SLinus Torvalds 	atomic_set(&new->refcnt, 1);
16331da177e4SLinus Torvalds 	return new;
16341da177e4SLinus Torvalds }
16351da177e4SLinus Torvalds 
163652cd3b07SLee Schermerhorn /*
163752cd3b07SLee Schermerhorn  * If *frompol needs [has] an extra ref, copy *frompol to *tompol,
163852cd3b07SLee Schermerhorn  * eliminate the MPOL_F_* flags that require conditional ref and
163952cd3b07SLee Schermerhorn  * [NOTE!!!] drop the extra ref.  Not safe to reference *frompol directly
164052cd3b07SLee Schermerhorn  * after return.  Use the returned value.
164152cd3b07SLee Schermerhorn  *
164252cd3b07SLee Schermerhorn  * Allows use of a mempolicy for, e.g., multiple allocations with a single
164352cd3b07SLee Schermerhorn  * policy lookup, even if the policy needs/has extra ref on lookup.
164452cd3b07SLee Schermerhorn  * shmem_readahead needs this.
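 *
 * Sketch of the intended pattern (names illustrative; the on-stack
 * copy is the point):
 *
 *	struct mempolicy mpol, *spol;
 *
 *	spol = __mpol_cond_copy(&mpol, get_vma_policy(current, vma, addr));
 *	... several allocations using spol, no unref required ...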
164552cd3b07SLee Schermerhorn */ 164652cd3b07SLee Schermerhorn struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol, 164752cd3b07SLee Schermerhorn struct mempolicy *frompol) 164852cd3b07SLee Schermerhorn { 164952cd3b07SLee Schermerhorn if (!mpol_needs_cond_ref(frompol)) 165052cd3b07SLee Schermerhorn return frompol; 165152cd3b07SLee Schermerhorn 165252cd3b07SLee Schermerhorn *tompol = *frompol; 165352cd3b07SLee Schermerhorn tompol->flags &= ~MPOL_F_SHARED; /* copy doesn't need unref */ 165452cd3b07SLee Schermerhorn __mpol_put(frompol); 165552cd3b07SLee Schermerhorn return tompol; 165652cd3b07SLee Schermerhorn } 165752cd3b07SLee Schermerhorn 1658f5b087b5SDavid Rientjes static int mpol_match_intent(const struct mempolicy *a, 1659f5b087b5SDavid Rientjes const struct mempolicy *b) 1660f5b087b5SDavid Rientjes { 1661f5b087b5SDavid Rientjes if (a->flags != b->flags) 1662f5b087b5SDavid Rientjes return 0; 1663f5b087b5SDavid Rientjes if (!mpol_store_user_nodemask(a)) 1664f5b087b5SDavid Rientjes return 1; 1665f5b087b5SDavid Rientjes return nodes_equal(a->w.user_nodemask, b->w.user_nodemask); 1666f5b087b5SDavid Rientjes } 1667f5b087b5SDavid Rientjes 16681da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */ 16691da177e4SLinus Torvalds int __mpol_equal(struct mempolicy *a, struct mempolicy *b) 16701da177e4SLinus Torvalds { 16711da177e4SLinus Torvalds if (!a || !b) 16721da177e4SLinus Torvalds return 0; 167345c4745aSLee Schermerhorn if (a->mode != b->mode) 16741da177e4SLinus Torvalds return 0; 167545c4745aSLee Schermerhorn if (a->mode != MPOL_DEFAULT && !mpol_match_intent(a, b)) 1676f5b087b5SDavid Rientjes return 0; 167745c4745aSLee Schermerhorn switch (a->mode) { 167819770b32SMel Gorman case MPOL_BIND: 167919770b32SMel Gorman /* Fall through */ 16801da177e4SLinus Torvalds case MPOL_INTERLEAVE: 1681dfcd3c0dSAndi Kleen return nodes_equal(a->v.nodes, b->v.nodes); 16821da177e4SLinus Torvalds case MPOL_PREFERRED: 1683fc36b8d3SLee Schermerhorn return a->v.preferred_node == b->v.preferred_node && 1684fc36b8d3SLee Schermerhorn a->flags == b->flags; 16851da177e4SLinus Torvalds default: 16861da177e4SLinus Torvalds BUG(); 16871da177e4SLinus Torvalds return 0; 16881da177e4SLinus Torvalds } 16891da177e4SLinus Torvalds } 16901da177e4SLinus Torvalds 16911da177e4SLinus Torvalds /* 16921da177e4SLinus Torvalds * Shared memory backing store policy support. 16931da177e4SLinus Torvalds * 16941da177e4SLinus Torvalds * Remember policies even when nobody has shared memory mapped. 16951da177e4SLinus Torvalds * The policies are kept in Red-Black tree linked from the inode. 16961da177e4SLinus Torvalds * They are protected by the sp->lock spinlock, which should be held 16971da177e4SLinus Torvalds * for any accesses to the tree. 
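 *
 * Example of the range semantics (illustrative): once a policy is
 * installed for file pages [2, 6), sp_lookup() finds its sp_node for
 * any index in that range, while mpol_shared_policy_lookup() returns
 * NULL (fall back to the default policy) for indices outside it.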
16981da177e4SLinus Torvalds */ 16991da177e4SLinus Torvalds 17001da177e4SLinus Torvalds /* lookup first element intersecting start-end */ 17011da177e4SLinus Torvalds /* Caller holds sp->lock */ 17021da177e4SLinus Torvalds static struct sp_node * 17031da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end) 17041da177e4SLinus Torvalds { 17051da177e4SLinus Torvalds struct rb_node *n = sp->root.rb_node; 17061da177e4SLinus Torvalds 17071da177e4SLinus Torvalds while (n) { 17081da177e4SLinus Torvalds struct sp_node *p = rb_entry(n, struct sp_node, nd); 17091da177e4SLinus Torvalds 17101da177e4SLinus Torvalds if (start >= p->end) 17111da177e4SLinus Torvalds n = n->rb_right; 17121da177e4SLinus Torvalds else if (end <= p->start) 17131da177e4SLinus Torvalds n = n->rb_left; 17141da177e4SLinus Torvalds else 17151da177e4SLinus Torvalds break; 17161da177e4SLinus Torvalds } 17171da177e4SLinus Torvalds if (!n) 17181da177e4SLinus Torvalds return NULL; 17191da177e4SLinus Torvalds for (;;) { 17201da177e4SLinus Torvalds struct sp_node *w = NULL; 17211da177e4SLinus Torvalds struct rb_node *prev = rb_prev(n); 17221da177e4SLinus Torvalds if (!prev) 17231da177e4SLinus Torvalds break; 17241da177e4SLinus Torvalds w = rb_entry(prev, struct sp_node, nd); 17251da177e4SLinus Torvalds if (w->end <= start) 17261da177e4SLinus Torvalds break; 17271da177e4SLinus Torvalds n = prev; 17281da177e4SLinus Torvalds } 17291da177e4SLinus Torvalds return rb_entry(n, struct sp_node, nd); 17301da177e4SLinus Torvalds } 17311da177e4SLinus Torvalds 17321da177e4SLinus Torvalds /* Insert a new shared policy into the list. */ 17331da177e4SLinus Torvalds /* Caller holds sp->lock */ 17341da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new) 17351da177e4SLinus Torvalds { 17361da177e4SLinus Torvalds struct rb_node **p = &sp->root.rb_node; 17371da177e4SLinus Torvalds struct rb_node *parent = NULL; 17381da177e4SLinus Torvalds struct sp_node *nd; 17391da177e4SLinus Torvalds 17401da177e4SLinus Torvalds while (*p) { 17411da177e4SLinus Torvalds parent = *p; 17421da177e4SLinus Torvalds nd = rb_entry(parent, struct sp_node, nd); 17431da177e4SLinus Torvalds if (new->start < nd->start) 17441da177e4SLinus Torvalds p = &(*p)->rb_left; 17451da177e4SLinus Torvalds else if (new->end > nd->end) 17461da177e4SLinus Torvalds p = &(*p)->rb_right; 17471da177e4SLinus Torvalds else 17481da177e4SLinus Torvalds BUG(); 17491da177e4SLinus Torvalds } 17501da177e4SLinus Torvalds rb_link_node(&new->nd, parent, p); 17511da177e4SLinus Torvalds rb_insert_color(&new->nd, &sp->root); 1752140d5a49SPaul Mundt pr_debug("inserting %lx-%lx: %d\n", new->start, new->end, 175345c4745aSLee Schermerhorn new->policy ? 
new->policy->mode : 0);
17541da177e4SLinus Torvalds }
17551da177e4SLinus Torvalds 
17561da177e4SLinus Torvalds /* Find shared policy intersecting idx */
17571da177e4SLinus Torvalds struct mempolicy *
17581da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
17591da177e4SLinus Torvalds {
17601da177e4SLinus Torvalds 	struct mempolicy *pol = NULL;
17611da177e4SLinus Torvalds 	struct sp_node *sn;
17621da177e4SLinus Torvalds 
17631da177e4SLinus Torvalds 	if (!sp->root.rb_node)
17641da177e4SLinus Torvalds 		return NULL;
17651da177e4SLinus Torvalds 	spin_lock(&sp->lock);
17661da177e4SLinus Torvalds 	sn = sp_lookup(sp, idx, idx+1);
17671da177e4SLinus Torvalds 	if (sn) {
17681da177e4SLinus Torvalds 		mpol_get(sn->policy);
17691da177e4SLinus Torvalds 		pol = sn->policy;
17701da177e4SLinus Torvalds 	}
17711da177e4SLinus Torvalds 	spin_unlock(&sp->lock);
17721da177e4SLinus Torvalds 	return pol;
17731da177e4SLinus Torvalds }
17741da177e4SLinus Torvalds 
17751da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n)
17761da177e4SLinus Torvalds {
1777140d5a49SPaul Mundt 	pr_debug("deleting %lx-%lx\n", n->start, n->end);
17781da177e4SLinus Torvalds 	rb_erase(&n->nd, &sp->root);
1779f0be3d32SLee Schermerhorn 	mpol_put(n->policy);
17801da177e4SLinus Torvalds 	kmem_cache_free(sn_cache, n);
17811da177e4SLinus Torvalds }
17821da177e4SLinus Torvalds 
1783dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
1784dbcb0f19SAdrian Bunk 				struct mempolicy *pol)
17851da177e4SLinus Torvalds {
17861da177e4SLinus Torvalds 	struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
17871da177e4SLinus Torvalds 
17881da177e4SLinus Torvalds 	if (!n)
17891da177e4SLinus Torvalds 		return NULL;
17901da177e4SLinus Torvalds 	n->start = start;
17911da177e4SLinus Torvalds 	n->end = end;
17921da177e4SLinus Torvalds 	mpol_get(pol);
1793aab0b102SLee Schermerhorn 	pol->flags |= MPOL_F_SHARED;	/* for unref */
17941da177e4SLinus Torvalds 	n->policy = pol;
17951da177e4SLinus Torvalds 	return n;
17961da177e4SLinus Torvalds }
17971da177e4SLinus Torvalds 
17981da177e4SLinus Torvalds /* Replace a policy range. */
17991da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
18001da177e4SLinus Torvalds 				 unsigned long end, struct sp_node *new)
18011da177e4SLinus Torvalds {
18021da177e4SLinus Torvalds 	struct sp_node *n, *new2 = NULL;
18031da177e4SLinus Torvalds 
18041da177e4SLinus Torvalds restart:
18051da177e4SLinus Torvalds 	spin_lock(&sp->lock);
18061da177e4SLinus Torvalds 	n = sp_lookup(sp, start, end);
18071da177e4SLinus Torvalds 	/* Take care of old policies in the same range. */
18081da177e4SLinus Torvalds 	while (n && n->start < end) {
18091da177e4SLinus Torvalds 		struct rb_node *next = rb_next(&n->nd);
18101da177e4SLinus Torvalds 		if (n->start >= start) {
18111da177e4SLinus Torvalds 			if (n->end <= end)
18121da177e4SLinus Torvalds 				sp_delete(sp, n);
18131da177e4SLinus Torvalds 			else
18141da177e4SLinus Torvalds 				n->start = end;
18151da177e4SLinus Torvalds 		} else {
18161da177e4SLinus Torvalds 			/* Old policy spanning whole new range.
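			   For example, installing [3, 7) over an old node
			   covering [0, 10) truncates the old node to [0, 3)
			   and inserts a second node (new2) for [7, 10) that
			   keeps the old policy.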
*/ 18171da177e4SLinus Torvalds if (n->end > end) { 18181da177e4SLinus Torvalds if (!new2) { 18191da177e4SLinus Torvalds spin_unlock(&sp->lock); 18201da177e4SLinus Torvalds new2 = sp_alloc(end, n->end, n->policy); 18211da177e4SLinus Torvalds if (!new2) 18221da177e4SLinus Torvalds return -ENOMEM; 18231da177e4SLinus Torvalds goto restart; 18241da177e4SLinus Torvalds } 18251da177e4SLinus Torvalds n->end = start; 18261da177e4SLinus Torvalds sp_insert(sp, new2); 18271da177e4SLinus Torvalds new2 = NULL; 18281da177e4SLinus Torvalds break; 18291da177e4SLinus Torvalds } else 18301da177e4SLinus Torvalds n->end = start; 18311da177e4SLinus Torvalds } 18321da177e4SLinus Torvalds if (!next) 18331da177e4SLinus Torvalds break; 18341da177e4SLinus Torvalds n = rb_entry(next, struct sp_node, nd); 18351da177e4SLinus Torvalds } 18361da177e4SLinus Torvalds if (new) 18371da177e4SLinus Torvalds sp_insert(sp, new); 18381da177e4SLinus Torvalds spin_unlock(&sp->lock); 18391da177e4SLinus Torvalds if (new2) { 1840f0be3d32SLee Schermerhorn mpol_put(new2->policy); 18411da177e4SLinus Torvalds kmem_cache_free(sn_cache, new2); 18421da177e4SLinus Torvalds } 18431da177e4SLinus Torvalds return 0; 18441da177e4SLinus Torvalds } 18451da177e4SLinus Torvalds 184671fe804bSLee Schermerhorn /** 184771fe804bSLee Schermerhorn * mpol_shared_policy_init - initialize shared policy for inode 184871fe804bSLee Schermerhorn * @sp: pointer to inode shared policy 184971fe804bSLee Schermerhorn * @mpol: struct mempolicy to install 185071fe804bSLee Schermerhorn * 185171fe804bSLee Schermerhorn * Install non-NULL @mpol in inode's shared policy rb-tree. 185271fe804bSLee Schermerhorn * On entry, the current task has a reference on a non-NULL @mpol. 185371fe804bSLee Schermerhorn * This must be released on exit. 185471fe804bSLee Schermerhorn */ 185571fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) 18567339ff83SRobin Holt { 185771fe804bSLee Schermerhorn sp->root = RB_ROOT; /* empty tree == default mempolicy */ 185871fe804bSLee Schermerhorn spin_lock_init(&sp->lock); 18597339ff83SRobin Holt 186071fe804bSLee Schermerhorn if (mpol) { 18617339ff83SRobin Holt struct vm_area_struct pvma; 186271fe804bSLee Schermerhorn struct mempolicy *new; 18637339ff83SRobin Holt 186471fe804bSLee Schermerhorn /* contextualize the tmpfs mount point mempolicy */ 186571fe804bSLee Schermerhorn new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask); 186671fe804bSLee Schermerhorn mpol_put(mpol); /* drop our ref on sb mpol */ 186771fe804bSLee Schermerhorn if (IS_ERR(new)) 186871fe804bSLee Schermerhorn return; /* no valid nodemask intersection */ 186971fe804bSLee Schermerhorn 187071fe804bSLee Schermerhorn /* Create pseudo-vma that contains just the policy */ 18717339ff83SRobin Holt memset(&pvma, 0, sizeof(struct vm_area_struct)); 187271fe804bSLee Schermerhorn pvma.vm_end = TASK_SIZE; /* policy covers entire file */ 187371fe804bSLee Schermerhorn mpol_set_shared_policy(sp, &pvma, new); /* adds ref */ 187471fe804bSLee Schermerhorn mpol_put(new); /* drop initial ref */ 18757339ff83SRobin Holt } 18767339ff83SRobin Holt } 18777339ff83SRobin Holt 18781da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info, 18791da177e4SLinus Torvalds struct vm_area_struct *vma, struct mempolicy *npol) 18801da177e4SLinus Torvalds { 18811da177e4SLinus Torvalds int err; 18821da177e4SLinus Torvalds struct sp_node *new = NULL; 18831da177e4SLinus Torvalds unsigned long sz = vma_pages(vma); 18841da177e4SLinus Torvalds 
1885028fec41SDavid Rientjes pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n", 18861da177e4SLinus Torvalds vma->vm_pgoff, 188745c4745aSLee Schermerhorn sz, npol ? npol->mode : -1, 1888028fec41SDavid Rientjes npol ? npol->flags : -1, 1889dfcd3c0dSAndi Kleen npol ? nodes_addr(npol->v.nodes)[0] : -1); 18901da177e4SLinus Torvalds 18911da177e4SLinus Torvalds if (npol) { 18921da177e4SLinus Torvalds new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol); 18931da177e4SLinus Torvalds if (!new) 18941da177e4SLinus Torvalds return -ENOMEM; 18951da177e4SLinus Torvalds } 18961da177e4SLinus Torvalds err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new); 18971da177e4SLinus Torvalds if (err && new) 18981da177e4SLinus Torvalds kmem_cache_free(sn_cache, new); 18991da177e4SLinus Torvalds return err; 19001da177e4SLinus Torvalds } 19011da177e4SLinus Torvalds 19021da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */ 19031da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p) 19041da177e4SLinus Torvalds { 19051da177e4SLinus Torvalds struct sp_node *n; 19061da177e4SLinus Torvalds struct rb_node *next; 19071da177e4SLinus Torvalds 19081da177e4SLinus Torvalds if (!p->root.rb_node) 19091da177e4SLinus Torvalds return; 19101da177e4SLinus Torvalds spin_lock(&p->lock); 19111da177e4SLinus Torvalds next = rb_first(&p->root); 19121da177e4SLinus Torvalds while (next) { 19131da177e4SLinus Torvalds n = rb_entry(next, struct sp_node, nd); 19141da177e4SLinus Torvalds next = rb_next(&n->nd); 191590c5029eSAndi Kleen rb_erase(&n->nd, &p->root); 1916f0be3d32SLee Schermerhorn mpol_put(n->policy); 19171da177e4SLinus Torvalds kmem_cache_free(sn_cache, n); 19181da177e4SLinus Torvalds } 19191da177e4SLinus Torvalds spin_unlock(&p->lock); 19201da177e4SLinus Torvalds } 19211da177e4SLinus Torvalds 19221da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */ 19231da177e4SLinus Torvalds void __init numa_policy_init(void) 19241da177e4SLinus Torvalds { 1925b71636e2SPaul Mundt nodemask_t interleave_nodes; 1926b71636e2SPaul Mundt unsigned long largest = 0; 1927b71636e2SPaul Mundt int nid, prefer = 0; 1928b71636e2SPaul Mundt 19291da177e4SLinus Torvalds policy_cache = kmem_cache_create("numa_policy", 19301da177e4SLinus Torvalds sizeof(struct mempolicy), 193120c2df83SPaul Mundt 0, SLAB_PANIC, NULL); 19321da177e4SLinus Torvalds 19331da177e4SLinus Torvalds sn_cache = kmem_cache_create("shared_policy_node", 19341da177e4SLinus Torvalds sizeof(struct sp_node), 193520c2df83SPaul Mundt 0, SLAB_PANIC, NULL); 19361da177e4SLinus Torvalds 1937b71636e2SPaul Mundt /* 1938b71636e2SPaul Mundt * Set interleaving policy for system init. Interleaving is only 1939b71636e2SPaul Mundt * enabled across suitably sized nodes (default is >= 16MB), or 1940b71636e2SPaul Mundt * fall back to the largest node if they're all smaller. 1941b71636e2SPaul Mundt */ 1942b71636e2SPaul Mundt nodes_clear(interleave_nodes); 194356bbd65dSChristoph Lameter for_each_node_state(nid, N_HIGH_MEMORY) { 1944b71636e2SPaul Mundt unsigned long total_pages = node_present_pages(nid); 19451da177e4SLinus Torvalds 1946b71636e2SPaul Mundt /* Preserve the largest node */ 1947b71636e2SPaul Mundt if (largest < total_pages) { 1948b71636e2SPaul Mundt largest = total_pages; 1949b71636e2SPaul Mundt prefer = nid; 1950b71636e2SPaul Mundt } 1951b71636e2SPaul Mundt 1952b71636e2SPaul Mundt /* Interleave this node? 
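	   With 4KB base pages the test below requires at least
	   16MB (4096 pages) of present memory for a node to take
	   part in the boot-time interleave.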
*/ 1953b71636e2SPaul Mundt if ((total_pages << PAGE_SHIFT) >= (16 << 20)) 1954b71636e2SPaul Mundt node_set(nid, interleave_nodes); 1955b71636e2SPaul Mundt } 1956b71636e2SPaul Mundt 1957b71636e2SPaul Mundt /* All too small, use the largest */ 1958b71636e2SPaul Mundt if (unlikely(nodes_empty(interleave_nodes))) 1959b71636e2SPaul Mundt node_set(prefer, interleave_nodes); 1960b71636e2SPaul Mundt 1961028fec41SDavid Rientjes if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes)) 19621da177e4SLinus Torvalds printk("numa_policy_init: interleaving failed\n"); 19631da177e4SLinus Torvalds } 19641da177e4SLinus Torvalds 19658bccd85fSChristoph Lameter /* Reset policy of current process to default */ 19661da177e4SLinus Torvalds void numa_default_policy(void) 19671da177e4SLinus Torvalds { 1968028fec41SDavid Rientjes do_set_mempolicy(MPOL_DEFAULT, 0, NULL); 19691da177e4SLinus Torvalds } 197068860ec1SPaul Jackson 19714225399aSPaul Jackson /* 1972095f1fc4SLee Schermerhorn * Parse and format mempolicy from/to strings 1973095f1fc4SLee Schermerhorn */ 1974095f1fc4SLee Schermerhorn 1975095f1fc4SLee Schermerhorn /* 1976fc36b8d3SLee Schermerhorn * "local" is pseudo-policy: MPOL_PREFERRED with MPOL_F_LOCAL flag 19773f226aa1SLee Schermerhorn * Used only for mpol_parse_str() and mpol_to_str() 19781a75a6c8SChristoph Lameter */ 197953f2556bSLee Schermerhorn #define MPOL_LOCAL (MPOL_INTERLEAVE + 1) 198015ad7cdcSHelge Deller static const char * const policy_types[] = 198153f2556bSLee Schermerhorn { "default", "prefer", "bind", "interleave", "local" }; 19821a75a6c8SChristoph Lameter 1983095f1fc4SLee Schermerhorn 1984095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS 1985095f1fc4SLee Schermerhorn /** 1986095f1fc4SLee Schermerhorn * mpol_parse_str - parse string to mempolicy 1987095f1fc4SLee Schermerhorn * @str: string containing mempolicy to parse 198871fe804bSLee Schermerhorn * @mpol: pointer to struct mempolicy pointer, returned on success. 198971fe804bSLee Schermerhorn * @no_context: flag whether to "contextualize" the mempolicy 1990095f1fc4SLee Schermerhorn * 1991095f1fc4SLee Schermerhorn * Format of input: 1992095f1fc4SLee Schermerhorn * <mode>[=<flags>][:<nodelist>] 1993095f1fc4SLee Schermerhorn * 199471fe804bSLee Schermerhorn * if @no_context is true, save the input nodemask in w.user_nodemask in 199571fe804bSLee Schermerhorn * the returned mempolicy. This will be used to "clone" the mempolicy in 199671fe804bSLee Schermerhorn * a specific context [cpuset] at a later time. Used to parse tmpfs mpol 199771fe804bSLee Schermerhorn * mount option. Note that if 'static' or 'relative' mode flags were 199871fe804bSLee Schermerhorn * specified, the input nodemask will already have been saved. Saving 199971fe804bSLee Schermerhorn * it again is redundant, but safe. 
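 *
 * Example inputs in this format (illustrative; node numbers must
 * name nodes that actually have memory):
 *	"interleave:0-3,8"	MPOL_INTERLEAVE over nodes 0-3 and 8
 *	"prefer=static:1"	MPOL_PREFERRED + MPOL_F_STATIC_NODES, node 1
 *	"local"			pseudo-policy, parsed as MPOL_PREFERRED
 *				with the internal MPOL_F_LOCAL flag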
200071fe804bSLee Schermerhorn * 200171fe804bSLee Schermerhorn * On success, returns 0, else 1 2002095f1fc4SLee Schermerhorn */ 200371fe804bSLee Schermerhorn int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context) 2004095f1fc4SLee Schermerhorn { 200571fe804bSLee Schermerhorn struct mempolicy *new = NULL; 200671fe804bSLee Schermerhorn unsigned short uninitialized_var(mode); 200771fe804bSLee Schermerhorn unsigned short uninitialized_var(mode_flags); 200871fe804bSLee Schermerhorn nodemask_t nodes; 2009095f1fc4SLee Schermerhorn char *nodelist = strchr(str, ':'); 2010095f1fc4SLee Schermerhorn char *flags = strchr(str, '='); 2011095f1fc4SLee Schermerhorn int i; 2012095f1fc4SLee Schermerhorn int err = 1; 2013095f1fc4SLee Schermerhorn 2014095f1fc4SLee Schermerhorn if (nodelist) { 2015095f1fc4SLee Schermerhorn /* NUL-terminate mode or flags string */ 2016095f1fc4SLee Schermerhorn *nodelist++ = '\0'; 201771fe804bSLee Schermerhorn if (nodelist_parse(nodelist, nodes)) 2018095f1fc4SLee Schermerhorn goto out; 201971fe804bSLee Schermerhorn if (!nodes_subset(nodes, node_states[N_HIGH_MEMORY])) 2020095f1fc4SLee Schermerhorn goto out; 202171fe804bSLee Schermerhorn } else 202271fe804bSLee Schermerhorn nodes_clear(nodes); 202371fe804bSLee Schermerhorn 2024095f1fc4SLee Schermerhorn if (flags) 2025095f1fc4SLee Schermerhorn *flags++ = '\0'; /* terminate mode string */ 2026095f1fc4SLee Schermerhorn 20273f226aa1SLee Schermerhorn for (i = 0; i <= MPOL_LOCAL; i++) { 2028095f1fc4SLee Schermerhorn if (!strcmp(str, policy_types[i])) { 202971fe804bSLee Schermerhorn mode = i; 2030095f1fc4SLee Schermerhorn break; 2031095f1fc4SLee Schermerhorn } 2032095f1fc4SLee Schermerhorn } 20333f226aa1SLee Schermerhorn if (i > MPOL_LOCAL) 2034095f1fc4SLee Schermerhorn goto out; 2035095f1fc4SLee Schermerhorn 203671fe804bSLee Schermerhorn switch (mode) { 2037095f1fc4SLee Schermerhorn case MPOL_PREFERRED: 203871fe804bSLee Schermerhorn /* 203971fe804bSLee Schermerhorn * Insist on a nodelist of one node only 204071fe804bSLee Schermerhorn */ 2041095f1fc4SLee Schermerhorn if (nodelist) { 2042095f1fc4SLee Schermerhorn char *rest = nodelist; 2043095f1fc4SLee Schermerhorn while (isdigit(*rest)) 2044095f1fc4SLee Schermerhorn rest++; 2045095f1fc4SLee Schermerhorn if (!*rest) 2046095f1fc4SLee Schermerhorn err = 0; 2047095f1fc4SLee Schermerhorn } 2048095f1fc4SLee Schermerhorn break; 2049095f1fc4SLee Schermerhorn case MPOL_INTERLEAVE: 2050095f1fc4SLee Schermerhorn /* 2051095f1fc4SLee Schermerhorn * Default to online nodes with memory if no nodelist 2052095f1fc4SLee Schermerhorn */ 2053095f1fc4SLee Schermerhorn if (!nodelist) 205471fe804bSLee Schermerhorn nodes = node_states[N_HIGH_MEMORY]; 2055095f1fc4SLee Schermerhorn err = 0; 20563f226aa1SLee Schermerhorn break; 205771fe804bSLee Schermerhorn case MPOL_LOCAL: 20583f226aa1SLee Schermerhorn /* 205971fe804bSLee Schermerhorn * Don't allow a nodelist; mpol_new() checks flags 20603f226aa1SLee Schermerhorn */ 206171fe804bSLee Schermerhorn if (nodelist) 20623f226aa1SLee Schermerhorn goto out; 206371fe804bSLee Schermerhorn mode = MPOL_PREFERRED; 20643f226aa1SLee Schermerhorn break; 206571fe804bSLee Schermerhorn 206671fe804bSLee Schermerhorn /* 206771fe804bSLee Schermerhorn * case MPOL_BIND: mpol_new() enforces non-empty nodemask. 206871fe804bSLee Schermerhorn * case MPOL_DEFAULT: mpol_new() enforces empty nodemask, ignores flags. 
206971fe804bSLee Schermerhorn */ 2070095f1fc4SLee Schermerhorn } 2071095f1fc4SLee Schermerhorn 207271fe804bSLee Schermerhorn mode_flags = 0; 2073095f1fc4SLee Schermerhorn if (flags) { 2074095f1fc4SLee Schermerhorn /* 2075095f1fc4SLee Schermerhorn * Currently, we only support two mutually exclusive 2076095f1fc4SLee Schermerhorn * mode flags. 2077095f1fc4SLee Schermerhorn */ 2078095f1fc4SLee Schermerhorn if (!strcmp(flags, "static")) 207971fe804bSLee Schermerhorn mode_flags |= MPOL_F_STATIC_NODES; 2080095f1fc4SLee Schermerhorn else if (!strcmp(flags, "relative")) 208171fe804bSLee Schermerhorn mode_flags |= MPOL_F_RELATIVE_NODES; 2082095f1fc4SLee Schermerhorn else 2083095f1fc4SLee Schermerhorn err = 1; 2084095f1fc4SLee Schermerhorn } 208571fe804bSLee Schermerhorn 208671fe804bSLee Schermerhorn new = mpol_new(mode, mode_flags, &nodes); 208771fe804bSLee Schermerhorn if (IS_ERR(new)) 208871fe804bSLee Schermerhorn err = 1; 208971fe804bSLee Schermerhorn else if (no_context) 209071fe804bSLee Schermerhorn new->w.user_nodemask = nodes; /* save for contextualization */ 209171fe804bSLee Schermerhorn 2092095f1fc4SLee Schermerhorn out: 2093095f1fc4SLee Schermerhorn /* Restore string for error message */ 2094095f1fc4SLee Schermerhorn if (nodelist) 2095095f1fc4SLee Schermerhorn *--nodelist = ':'; 2096095f1fc4SLee Schermerhorn if (flags) 2097095f1fc4SLee Schermerhorn *--flags = '='; 209871fe804bSLee Schermerhorn if (!err) 209971fe804bSLee Schermerhorn *mpol = new; 2100095f1fc4SLee Schermerhorn return err; 2101095f1fc4SLee Schermerhorn } 2102095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */ 2103095f1fc4SLee Schermerhorn 210471fe804bSLee Schermerhorn /** 210571fe804bSLee Schermerhorn * mpol_to_str - format a mempolicy structure for printing 210671fe804bSLee Schermerhorn * @buffer: to contain formatted mempolicy string 210771fe804bSLee Schermerhorn * @maxlen: length of @buffer 210871fe804bSLee Schermerhorn * @pol: pointer to mempolicy to be formatted 210971fe804bSLee Schermerhorn * @no_context: "context free" mempolicy - use nodemask in w.user_nodemask 211071fe804bSLee Schermerhorn * 21111a75a6c8SChristoph Lameter * Convert a mempolicy into a string. 21121a75a6c8SChristoph Lameter * Returns the number of characters in buffer (if positive) 21131a75a6c8SChristoph Lameter * or an error (negative) 21141a75a6c8SChristoph Lameter */ 211571fe804bSLee Schermerhorn int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context) 21161a75a6c8SChristoph Lameter { 21171a75a6c8SChristoph Lameter char *p = buffer; 21181a75a6c8SChristoph Lameter int l; 21191a75a6c8SChristoph Lameter nodemask_t nodes; 2120bea904d5SLee Schermerhorn unsigned short mode; 2121f5b087b5SDavid Rientjes unsigned short flags = pol ? 
pol->flags : 0; 21221a75a6c8SChristoph Lameter 21232291990aSLee Schermerhorn /* 21242291990aSLee Schermerhorn * Sanity check: room for longest mode, flag and some nodes 21252291990aSLee Schermerhorn */ 21262291990aSLee Schermerhorn VM_BUG_ON(maxlen < strlen("interleave") + strlen("relative") + 16); 21272291990aSLee Schermerhorn 2128bea904d5SLee Schermerhorn if (!pol || pol == &default_policy) 2129bea904d5SLee Schermerhorn mode = MPOL_DEFAULT; 2130bea904d5SLee Schermerhorn else 2131bea904d5SLee Schermerhorn mode = pol->mode; 2132bea904d5SLee Schermerhorn 21331a75a6c8SChristoph Lameter switch (mode) { 21341a75a6c8SChristoph Lameter case MPOL_DEFAULT: 21351a75a6c8SChristoph Lameter nodes_clear(nodes); 21361a75a6c8SChristoph Lameter break; 21371a75a6c8SChristoph Lameter 21381a75a6c8SChristoph Lameter case MPOL_PREFERRED: 21391a75a6c8SChristoph Lameter nodes_clear(nodes); 2140fc36b8d3SLee Schermerhorn if (flags & MPOL_F_LOCAL) 214153f2556bSLee Schermerhorn mode = MPOL_LOCAL; /* pseudo-policy */ 214253f2556bSLee Schermerhorn else 2143fc36b8d3SLee Schermerhorn node_set(pol->v.preferred_node, nodes); 21441a75a6c8SChristoph Lameter break; 21451a75a6c8SChristoph Lameter 21461a75a6c8SChristoph Lameter case MPOL_BIND: 214719770b32SMel Gorman /* Fall through */ 21481a75a6c8SChristoph Lameter case MPOL_INTERLEAVE: 214971fe804bSLee Schermerhorn if (no_context) 215071fe804bSLee Schermerhorn nodes = pol->w.user_nodemask; 215171fe804bSLee Schermerhorn else 21521a75a6c8SChristoph Lameter nodes = pol->v.nodes; 21531a75a6c8SChristoph Lameter break; 21541a75a6c8SChristoph Lameter 21551a75a6c8SChristoph Lameter default: 21561a75a6c8SChristoph Lameter BUG(); 21571a75a6c8SChristoph Lameter } 21581a75a6c8SChristoph Lameter 21591a75a6c8SChristoph Lameter l = strlen(policy_types[mode]); 21601a75a6c8SChristoph Lameter if (buffer + maxlen < p + l + 1) 21611a75a6c8SChristoph Lameter return -ENOSPC; 21621a75a6c8SChristoph Lameter 21631a75a6c8SChristoph Lameter strcpy(p, policy_types[mode]); 21641a75a6c8SChristoph Lameter p += l; 21651a75a6c8SChristoph Lameter 2166fc36b8d3SLee Schermerhorn if (flags & MPOL_MODE_FLAGS) { 2167f5b087b5SDavid Rientjes if (buffer + maxlen < p + 2) 2168f5b087b5SDavid Rientjes return -ENOSPC; 2169f5b087b5SDavid Rientjes *p++ = '='; 2170f5b087b5SDavid Rientjes 21712291990aSLee Schermerhorn /* 21722291990aSLee Schermerhorn * Currently, the only defined flags are mutually exclusive 21732291990aSLee Schermerhorn */ 2174f5b087b5SDavid Rientjes if (flags & MPOL_F_STATIC_NODES) 21752291990aSLee Schermerhorn p += snprintf(p, buffer + maxlen - p, "static"); 21762291990aSLee Schermerhorn else if (flags & MPOL_F_RELATIVE_NODES) 21772291990aSLee Schermerhorn p += snprintf(p, buffer + maxlen - p, "relative"); 2178f5b087b5SDavid Rientjes } 2179f5b087b5SDavid Rientjes 21801a75a6c8SChristoph Lameter if (!nodes_empty(nodes)) { 21811a75a6c8SChristoph Lameter if (buffer + maxlen < p + 2) 21821a75a6c8SChristoph Lameter return -ENOSPC; 2183095f1fc4SLee Schermerhorn *p++ = ':'; 21841a75a6c8SChristoph Lameter p += nodelist_scnprintf(p, buffer + maxlen - p, nodes); 21851a75a6c8SChristoph Lameter } 21861a75a6c8SChristoph Lameter return p - buffer; 21871a75a6c8SChristoph Lameter } 21881a75a6c8SChristoph Lameter 21891a75a6c8SChristoph Lameter struct numa_maps { 21901a75a6c8SChristoph Lameter unsigned long pages; 21911a75a6c8SChristoph Lameter unsigned long anon; 2192397874dfSChristoph Lameter unsigned long active; 2193397874dfSChristoph Lameter unsigned long writeback; 21941a75a6c8SChristoph Lameter unsigned long 
mapcount_max; 2195397874dfSChristoph Lameter unsigned long dirty; 2196397874dfSChristoph Lameter unsigned long swapcache; 21971a75a6c8SChristoph Lameter unsigned long node[MAX_NUMNODES]; 21981a75a6c8SChristoph Lameter }; 21991a75a6c8SChristoph Lameter 2200397874dfSChristoph Lameter static void gather_stats(struct page *page, void *private, int pte_dirty) 22011a75a6c8SChristoph Lameter { 22021a75a6c8SChristoph Lameter struct numa_maps *md = private; 22031a75a6c8SChristoph Lameter int count = page_mapcount(page); 22041a75a6c8SChristoph Lameter 22051a75a6c8SChristoph Lameter md->pages++; 2206397874dfSChristoph Lameter if (pte_dirty || PageDirty(page)) 2207397874dfSChristoph Lameter md->dirty++; 2208397874dfSChristoph Lameter 2209397874dfSChristoph Lameter if (PageSwapCache(page)) 2210397874dfSChristoph Lameter md->swapcache++; 2211397874dfSChristoph Lameter 2212894bc310SLee Schermerhorn if (PageActive(page) || PageUnevictable(page)) 2213397874dfSChristoph Lameter md->active++; 2214397874dfSChristoph Lameter 2215397874dfSChristoph Lameter if (PageWriteback(page)) 2216397874dfSChristoph Lameter md->writeback++; 22171a75a6c8SChristoph Lameter 22181a75a6c8SChristoph Lameter if (PageAnon(page)) 22191a75a6c8SChristoph Lameter md->anon++; 22201a75a6c8SChristoph Lameter 2221397874dfSChristoph Lameter if (count > md->mapcount_max) 2222397874dfSChristoph Lameter md->mapcount_max = count; 2223397874dfSChristoph Lameter 22241a75a6c8SChristoph Lameter md->node[page_to_nid(page)]++; 22251a75a6c8SChristoph Lameter } 22261a75a6c8SChristoph Lameter 22277f709ed0SAndrew Morton #ifdef CONFIG_HUGETLB_PAGE 2228397874dfSChristoph Lameter static void check_huge_range(struct vm_area_struct *vma, 2229397874dfSChristoph Lameter unsigned long start, unsigned long end, 2230397874dfSChristoph Lameter struct numa_maps *md) 2231397874dfSChristoph Lameter { 2232397874dfSChristoph Lameter unsigned long addr; 2233397874dfSChristoph Lameter struct page *page; 2234a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 2235a5516438SAndi Kleen unsigned long sz = huge_page_size(h); 2236397874dfSChristoph Lameter 2237a5516438SAndi Kleen for (addr = start; addr < end; addr += sz) { 2238a5516438SAndi Kleen pte_t *ptep = huge_pte_offset(vma->vm_mm, 2239a5516438SAndi Kleen addr & huge_page_mask(h)); 2240397874dfSChristoph Lameter pte_t pte; 2241397874dfSChristoph Lameter 2242397874dfSChristoph Lameter if (!ptep) 2243397874dfSChristoph Lameter continue; 2244397874dfSChristoph Lameter 2245397874dfSChristoph Lameter pte = *ptep; 2246397874dfSChristoph Lameter if (pte_none(pte)) 2247397874dfSChristoph Lameter continue; 2248397874dfSChristoph Lameter 2249397874dfSChristoph Lameter page = pte_page(pte); 2250397874dfSChristoph Lameter if (!page) 2251397874dfSChristoph Lameter continue; 2252397874dfSChristoph Lameter 2253397874dfSChristoph Lameter gather_stats(page, md, pte_dirty(*ptep)); 2254397874dfSChristoph Lameter } 2255397874dfSChristoph Lameter } 22567f709ed0SAndrew Morton #else 22577f709ed0SAndrew Morton static inline void check_huge_range(struct vm_area_struct *vma, 22587f709ed0SAndrew Morton unsigned long start, unsigned long end, 22597f709ed0SAndrew Morton struct numa_maps *md) 22607f709ed0SAndrew Morton { 22617f709ed0SAndrew Morton } 22627f709ed0SAndrew Morton #endif 2263397874dfSChristoph Lameter 226453f2556bSLee Schermerhorn /* 226553f2556bSLee Schermerhorn * Display pages allocated per node and memory policy via /proc. 
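 *
 * A line built below looks roughly like (values illustrative):
 *
 *	00400000 default file=/bin/cat mapped=2 mapmax=3 N0=1 N1=1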
226453f2556bSLee Schermerhorn /*
226553f2556bSLee Schermerhorn  * Display pages allocated per node and memory policy via /proc.
226653f2556bSLee Schermerhorn  */
22671a75a6c8SChristoph Lameter int show_numa_map(struct seq_file *m, void *v)
22681a75a6c8SChristoph Lameter {
226999f89551SEric W. Biederman 	struct proc_maps_private *priv = m->private;
22701a75a6c8SChristoph Lameter 	struct vm_area_struct *vma = v;
22711a75a6c8SChristoph Lameter 	struct numa_maps *md;
2272397874dfSChristoph Lameter 	struct file *file = vma->vm_file;
2273397874dfSChristoph Lameter 	struct mm_struct *mm = vma->vm_mm;
2274480eccf9SLee Schermerhorn 	struct mempolicy *pol;
22751a75a6c8SChristoph Lameter 	int n;
22761a75a6c8SChristoph Lameter 	char buffer[50];
22771a75a6c8SChristoph Lameter 
2278397874dfSChristoph Lameter 	if (!mm)
22791a75a6c8SChristoph Lameter 		return 0;
22801a75a6c8SChristoph Lameter 
22811a75a6c8SChristoph Lameter 	md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
22821a75a6c8SChristoph Lameter 	if (!md)
22831a75a6c8SChristoph Lameter 		return 0;
22841a75a6c8SChristoph Lameter 
2285480eccf9SLee Schermerhorn 	pol = get_vma_policy(priv->task, vma, vma->vm_start);
228671fe804bSLee Schermerhorn 	mpol_to_str(buffer, sizeof(buffer), pol, 0);
228752cd3b07SLee Schermerhorn 	mpol_cond_put(pol);
22881a75a6c8SChristoph Lameter 
2289397874dfSChristoph Lameter 	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
2290397874dfSChristoph Lameter 
2291397874dfSChristoph Lameter 	if (file) {
2292397874dfSChristoph Lameter 		seq_printf(m, " file=");
2293c32c2f63SJan Blunck 		seq_path(m, &file->f_path, "\n\t= ");
2294397874dfSChristoph Lameter 	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
2295397874dfSChristoph Lameter 		seq_printf(m, " heap");
2296397874dfSChristoph Lameter 	} else if (vma->vm_start <= mm->start_stack &&
2297397874dfSChristoph Lameter 			vma->vm_end >= mm->start_stack) {
2298397874dfSChristoph Lameter 		seq_printf(m, " stack");
2299397874dfSChristoph Lameter 	}
2300397874dfSChristoph Lameter 
2301397874dfSChristoph Lameter 	if (is_vm_hugetlb_page(vma)) {
2302397874dfSChristoph Lameter 		check_huge_range(vma, vma->vm_start, vma->vm_end, md);
2303397874dfSChristoph Lameter 		seq_printf(m, " huge");
2304397874dfSChristoph Lameter 	} else {
2305397874dfSChristoph Lameter 		check_pgd_range(vma, vma->vm_start, vma->vm_end,
230656bbd65dSChristoph Lameter 			&node_states[N_HIGH_MEMORY], MPOL_MF_STATS, md);
2307397874dfSChristoph Lameter 	}
2308397874dfSChristoph Lameter 
2309397874dfSChristoph Lameter 	if (!md->pages)
2310397874dfSChristoph Lameter 		goto out;
23111a75a6c8SChristoph Lameter 
23121a75a6c8SChristoph Lameter 	if (md->anon)
23131a75a6c8SChristoph Lameter 		seq_printf(m, " anon=%lu", md->anon);
23141a75a6c8SChristoph Lameter 
2315397874dfSChristoph Lameter 	if (md->dirty)
2316397874dfSChristoph Lameter 		seq_printf(m, " dirty=%lu", md->dirty);
2317397874dfSChristoph Lameter 
2318397874dfSChristoph Lameter 	if (md->pages != md->anon && md->pages != md->dirty)
2319397874dfSChristoph Lameter 		seq_printf(m, " mapped=%lu", md->pages);
2320397874dfSChristoph Lameter 
2321397874dfSChristoph Lameter 	if (md->mapcount_max > 1)
2322397874dfSChristoph Lameter 		seq_printf(m, " mapmax=%lu", md->mapcount_max);
2323397874dfSChristoph Lameter 
2324397874dfSChristoph Lameter 	if (md->swapcache)
2325397874dfSChristoph Lameter 		seq_printf(m, " swapcache=%lu", md->swapcache);
2326397874dfSChristoph Lameter 
2327397874dfSChristoph Lameter 	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
2328397874dfSChristoph Lameter 		seq_printf(m, " active=%lu", md->active);
2329397874dfSChristoph Lameter 
2330397874dfSChristoph Lameter 	if (md->writeback)
2331397874dfSChristoph Lameter 		seq_printf(m, " writeback=%lu", md->writeback);
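	/*
	 * What follows is the per-node breakdown accumulated in md->node[];
	 * nodes that hold no pages of this VMA are omitted from the line.
	 */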
2332397874dfSChristoph Lameter 
233356bbd65dSChristoph Lameter 	for_each_node_state(n, N_HIGH_MEMORY)
23341a75a6c8SChristoph Lameter 		if (md->node[n])
23351a75a6c8SChristoph Lameter 			seq_printf(m, " N%d=%lu", n, md->node[n]);
2336397874dfSChristoph Lameter out:
23371a75a6c8SChristoph Lameter 	seq_putc(m, '\n');
23381a75a6c8SChristoph Lameter 	kfree(md);
23391a75a6c8SChristoph Lameter 
23401a75a6c8SChristoph Lameter 	if (m->count < m->size)
234199f89551SEric W. Biederman 		m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
23421a75a6c8SChristoph Lameter 	return 0;
23431a75a6c8SChristoph Lameter }
2344
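/*
 * Illustrative example with hypothetical values: a numa_maps line emitted
 * by show_numa_map() for an anonymous VMA could look like
 *
 *	2aaaaac08000 default anon=258 dirty=258 active=128 N0=128 N1=130
 *
 * i.e. the VMA start address and the mempolicy string, followed only by
 * the counters that are non-zero and a per-node page count for each node
 * that holds pages of the mapping.
 */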