/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints about the node(s) from which
 * memory should be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a per-process
 *                counter is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind truly restricted
 *                the allocation to the given memory nodes instead.
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case, node -1 here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non-interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem, kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
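/*
 * Illustrative sketch (not part of the original source): the modes above
 * are typically selected from userspace via the set_mempolicy()/mbind()
 * syscalls, e.g. through libnuma's <numaif.h> wrappers:
 *
 *      unsigned long mask = (1UL << 0) | (1UL << 1);
 *      // Interleave all future allocations of this task over nodes 0 and 1.
 *      // maxnode counts bits; pass at least the highest node number + 1.
 *      set_mempolicy(MPOL_INTERLEAVE, &mask, 2 + 1);
 *
 * MPOL_BIND and MPOL_PREFERRED take the same arguments; MPOL_DEFAULT
 * ignores the nodemask.
 */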
/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always graceful with that.
*/

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)    /* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)          /* Invert check for nodemask */
#define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2)           /* Gather statistics */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
struct mempolicy default_policy = {
        .refcnt = ATOMIC_INIT(1), /* never free it */
        .mode = MPOL_PREFERRED,
        .flags = MPOL_F_LOCAL,
};

static const struct mempolicy_operations {
        int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
        /*
         * If a read-side task has no lock to protect task->mempolicy, a
         * write-side task will rebind task->mempolicy in two steps. The
         * first step is setting all the newly allowed nodes, and the
         * second step is clearing all the disallowed nodes. This way we
         * avoid a window in which no node is allowed and no page can be
         * allocated.
         * If we have a lock to protect task->mempolicy on the read side,
         * we rebind directly.
         *
         * step:
         *      MPOL_REBIND_ONCE  - do rebind work at once
         *      MPOL_REBIND_STEP1 - set all the newly allowed nodes
         *      MPOL_REBIND_STEP2 - clear all the disallowed nodes
         */
        void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
                        enum mpol_rebind_step step);
} mpol_ops[MPOL_MAX];

/* Check that the nodemask contains at least one populated zone */
static int is_valid_nodemask(const nodemask_t *nodemask)
{
        int nd, k;

        for_each_node_mask(nd, *nodemask) {
                struct zone *z;

                for (k = 0; k <= policy_zone; k++) {
                        z = &NODE_DATA(nd)->node_zones[k];
                        if (z->present_pages > 0)
                                return 1;
                }
        }

        return 0;
}

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
        return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
                                   const nodemask_t *rel)
{
        nodemask_t tmp;
        nodes_fold(tmp, *orig, nodes_weight(*rel));
        nodes_onto(*ret, tmp, *rel);
}
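/*
 * Worked example: with MPOL_F_RELATIVE_NODES the user's mask is interpreted
 * relative to the currently allowed nodes. Roughly, nodes_fold() folds each
 * node number n in *orig down to n % nodes_weight(*rel), and nodes_onto()
 * then maps bit i of that result onto the i-th set bit of *rel. So for
 * orig = {1,3} and rel = {5,6,7}: folding gives {0,1}, and mapping onto
 * rel yields {5,6}.
 */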
static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
        if (nodes_empty(*nodes))
                return -EINVAL;
        pol->v.nodes = *nodes;
        return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
        if (!nodes)
                pol->flags |= MPOL_F_LOCAL;     /* local allocation */
        else if (nodes_empty(*nodes))
                return -EINVAL;                 /* no allowed nodes */
        else
                pol->v.preferred_node = first_node(*nodes);
        return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
        if (!is_valid_nodemask(nodes))
                return -EINVAL;
        pol->v.nodes = *nodes;
        return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy. mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags. But, we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy. May also be called holding the mmap_sem for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
                     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
        int ret;

        /* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
        if (pol == NULL)
                return 0;
        /* Check N_HIGH_MEMORY */
        nodes_and(nsc->mask1,
                  cpuset_current_mems_allowed, node_states[N_HIGH_MEMORY]);

        VM_BUG_ON(!nodes);
        if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
                nodes = NULL;   /* explicit local allocation */
        else {
                if (pol->flags & MPOL_F_RELATIVE_NODES)
                        mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
                else
                        nodes_and(nsc->mask2, *nodes, nsc->mask1);

                if (mpol_store_user_nodemask(pol))
                        pol->w.user_nodemask = *nodes;
                else
                        pol->w.cpuset_mems_allowed =
                                                cpuset_current_mems_allowed;
        }

        if (nodes)
                ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
        else
                ret = mpol_ops[pol->mode].create(pol, NULL);
        return ret;
}
/*
 * This function just creates a new policy, does some checks and simple
 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
                                  nodemask_t *nodes)
{
        struct mempolicy *policy;

        pr_debug("setting mode %d flags %d nodes[0] %lx\n",
                 mode, flags, nodes ? nodes_addr(*nodes)[0] : -1);

        if (mode == MPOL_DEFAULT) {
                if (nodes && !nodes_empty(*nodes))
                        return ERR_PTR(-EINVAL);
                return NULL;    /* simply delete any existing policy */
        }
        VM_BUG_ON(!nodes);

        /*
         * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
         * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
         * All other modes require a valid pointer to a non-empty nodemask.
         */
        if (mode == MPOL_PREFERRED) {
                if (nodes_empty(*nodes)) {
                        if (((flags & MPOL_F_STATIC_NODES) ||
                             (flags & MPOL_F_RELATIVE_NODES)))
                                return ERR_PTR(-EINVAL);
                }
        } else if (nodes_empty(*nodes))
                return ERR_PTR(-EINVAL);
        policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
        if (!policy)
                return ERR_PTR(-ENOMEM);
        atomic_set(&policy->refcnt, 1);
        policy->mode = mode;
        policy->flags = flags;

        return policy;
}
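/*
 * Typical call sequence (as used by do_set_mempolicy() and do_mbind()
 * below), sketched here for reference:
 *
 *      NODEMASK_SCRATCH(scratch);              // may allocate
 *      new = mpol_new(mode, flags, &nodes);    // validate mode/flags
 *      task_lock(current);
 *      err = mpol_set_nodemask(new, &nodes, scratch);
 *      task_unlock(current);
 *      ...
 *      mpol_put(new);                          // drop the reference
 *      NODEMASK_SCRATCH_FREE(scratch);
 */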
/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
        if (!atomic_dec_and_test(&p->refcnt))
                return;
        kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
                                enum mpol_rebind_step step)
{
}

/*
 * step:
 *      MPOL_REBIND_ONCE  - do rebind work at once
 *      MPOL_REBIND_STEP1 - set all the newly allowed nodes
 *      MPOL_REBIND_STEP2 - clear all the disallowed nodes
 */
static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
                                 enum mpol_rebind_step step)
{
        nodemask_t tmp;

        if (pol->flags & MPOL_F_STATIC_NODES)
                nodes_and(tmp, pol->w.user_nodemask, *nodes);
        else if (pol->flags & MPOL_F_RELATIVE_NODES)
                mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
        else {
                /*
                 * if step == 1, we use ->w.cpuset_mems_allowed to cache the
                 * result
                 */
                if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
                        nodes_remap(tmp, pol->v.nodes,
                                        pol->w.cpuset_mems_allowed, *nodes);
                        pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
                } else if (step == MPOL_REBIND_STEP2) {
                        tmp = pol->w.cpuset_mems_allowed;
                        pol->w.cpuset_mems_allowed = *nodes;
                } else
                        BUG();
        }

        if (nodes_empty(tmp))
                tmp = *nodes;

        if (step == MPOL_REBIND_STEP1)
                nodes_or(pol->v.nodes, pol->v.nodes, tmp);
        else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
                pol->v.nodes = tmp;
        else
                BUG();

        if (!node_isset(current->il_next, tmp)) {
                current->il_next = next_node(current->il_next, tmp);
                if (current->il_next >= MAX_NUMNODES)
                        current->il_next = first_node(tmp);
                if (current->il_next >= MAX_NUMNODES)
                        current->il_next = numa_node_id();
        }
}

static void mpol_rebind_preferred(struct mempolicy *pol,
                                  const nodemask_t *nodes,
                                  enum mpol_rebind_step step)
{
        nodemask_t tmp;

        if (pol->flags & MPOL_F_STATIC_NODES) {
                int node = first_node(pol->w.user_nodemask);

                if (node_isset(node, *nodes)) {
                        pol->v.preferred_node = node;
                        pol->flags &= ~MPOL_F_LOCAL;
                } else
                        pol->flags |= MPOL_F_LOCAL;
        } else if (pol->flags & MPOL_F_RELATIVE_NODES) {
                mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
                pol->v.preferred_node = first_node(tmp);
        } else if (!(pol->flags & MPOL_F_LOCAL)) {
                pol->v.preferred_node = node_remap(pol->v.preferred_node,
                                                   pol->w.cpuset_mems_allowed,
                                                   *nodes);
                pol->w.cpuset_mems_allowed = *nodes;
        }
}
/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * If a read-side task has no lock to protect task->mempolicy, a write-side
 * task will rebind task->mempolicy in two steps. The first step is setting
 * all the newly allowed nodes, and the second step is clearing all the
 * disallowed nodes. This way we avoid a window in which no node is allowed
 * and no page can be allocated.
 * If we have a lock to protect task->mempolicy on the read side, we rebind
 * directly.
 *
 * step:
 *      MPOL_REBIND_ONCE  - do rebind work at once
 *      MPOL_REBIND_STEP1 - set all the newly allowed nodes
 *      MPOL_REBIND_STEP2 - clear all the disallowed nodes
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
                               enum mpol_rebind_step step)
{
        if (!pol)
                return;
        if (!mpol_store_user_nodemask(pol) && step == 0 &&
            nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
                return;

        if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
                return;

        if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
                BUG();

        if (step == MPOL_REBIND_STEP1)
                pol->flags |= MPOL_F_REBINDING;
        else if (step == MPOL_REBIND_STEP2)
                pol->flags &= ~MPOL_F_REBINDING;
        else if (step >= MPOL_REBIND_NSTEP)
                BUG();

        mpol_ops[pol->mode].rebind(pol, newmask, step);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */
void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
                      enum mpol_rebind_step step)
{
        mpol_rebind_policy(tsk->mempolicy, new, step);
}
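/*
 * Worked example of the two-step rebind: suppose an MPOL_BIND policy is
 * bound to nodes {0,1} and the cpuset's mems_allowed changes to {2,3}.
 * STEP1 remaps {0,1} -> {2,3} and ORs the result into pol->v.nodes,
 * leaving {0,1,2,3}, so lock-free readers always see a non-empty mask.
 * STEP2 then drops the no-longer-allowed nodes, leaving {2,3}.
 */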
/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm. Takes mm->mmap_sem during call.
 */
void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
        struct vm_area_struct *vma;

        down_write(&mm->mmap_sem);
        for (vma = mm->mmap; vma; vma = vma->vm_next)
                mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
        up_write(&mm->mmap_sem);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
        [MPOL_DEFAULT] = {
                .rebind = mpol_rebind_default,
        },
        [MPOL_INTERLEAVE] = {
                .create = mpol_new_interleave,
                .rebind = mpol_rebind_nodemask,
        },
        [MPOL_PREFERRED] = {
                .create = mpol_new_preferred,
                .rebind = mpol_rebind_preferred,
        },
        [MPOL_BIND] = {
                .create = mpol_new_bind,
                .rebind = mpol_rebind_nodemask,
        },
};

static void gather_stats(struct page *, void *, int pte_dirty);
static void migrate_page_add(struct page *page, struct list_head *pagelist,
                             unsigned long flags);
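/*
 * The four check_*_range() helpers below walk the page tables in the usual
 * pgd -> pud -> pmd -> pte hierarchy: each level iterates its entries over
 * [addr, end), skips empty entries, and recurses one level down, so only
 * check_pte_range() looks at actual pages.
 */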
/* Scan through pages checking if pages follow certain conditions. */
static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                           unsigned long addr, unsigned long end,
                           const nodemask_t *nodes, unsigned long flags,
                           void *private)
{
        pte_t *orig_pte;
        pte_t *pte;
        spinlock_t *ptl;

        orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        do {
                struct page *page;
                int nid;

                if (!pte_present(*pte))
                        continue;
                page = vm_normal_page(vma, addr, *pte);
                if (!page)
                        continue;
                /*
                 * vm_normal_page() filters out zero pages, but there might
                 * still be PageReserved pages to skip, perhaps in a VDSO.
                 * And we cannot move PageKsm pages sensibly or safely yet.
                 */
                if (PageReserved(page) || PageKsm(page))
                        continue;
                nid = page_to_nid(page);
                if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
                        continue;

                if (flags & MPOL_MF_STATS)
                        gather_stats(page, private, pte_dirty(*pte));
                else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
                        migrate_page_add(page, private, flags);
                else
                        break;
        } while (pte++, addr += PAGE_SIZE, addr != end);
        pte_unmap_unlock(orig_pte, ptl);
        return addr != end;
}

static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
                                  unsigned long addr, unsigned long end,
                                  const nodemask_t *nodes, unsigned long flags,
                                  void *private)
{
        pmd_t *pmd;
        unsigned long next;

        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                if (pmd_none_or_clear_bad(pmd))
                        continue;
                if (check_pte_range(vma, pmd, addr, next, nodes,
                                    flags, private))
                        return -EIO;
        } while (pmd++, addr = next, addr != end);
        return 0;
}

static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
                                  unsigned long addr, unsigned long end,
                                  const nodemask_t *nodes, unsigned long flags,
                                  void *private)
{
        pud_t *pud;
        unsigned long next;

        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud))
                        continue;
                if (check_pmd_range(vma, pud, addr, next, nodes,
                                    flags, private))
                        return -EIO;
        } while (pud++, addr = next, addr != end);
        return 0;
}
static inline int check_pgd_range(struct vm_area_struct *vma,
                                  unsigned long addr, unsigned long end,
                                  const nodemask_t *nodes, unsigned long flags,
                                  void *private)
{
        pgd_t *pgd;
        unsigned long next;

        pgd = pgd_offset(vma->vm_mm, addr);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
                if (check_pud_range(vma, pgd, addr, next, nodes,
                                    flags, private))
                        return -EIO;
        } while (pgd++, addr = next, addr != end);
        return 0;
}

/*
 * Check if all pages in a range are on a set of nodes.
 * If pagelist != NULL then isolate pages from the LRU and
 * put them on the pagelist.
 */
static struct vm_area_struct *
check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
            const nodemask_t *nodes, unsigned long flags, void *private)
{
        int err;
        struct vm_area_struct *first, *vma, *prev;


        first = find_vma(mm, start);
        if (!first)
                return ERR_PTR(-EFAULT);
        prev = NULL;
        for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
                if (!(flags & MPOL_MF_DISCONTIG_OK)) {
                        if (!vma->vm_next && vma->vm_end < end)
                                return ERR_PTR(-EFAULT);
                        if (prev && prev->vm_end < vma->vm_start)
                                return ERR_PTR(-EFAULT);
                }
                if (!is_vm_hugetlb_page(vma) &&
                    ((flags & MPOL_MF_STRICT) ||
                     ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
                      vma_migratable(vma)))) {
                        unsigned long endvma = vma->vm_end;

                        if (endvma > end)
                                endvma = end;
                        if (vma->vm_start > start)
                                start = vma->vm_start;
                        err = check_pgd_range(vma, start, endvma, nodes,
                                              flags, private);
                        if (err) {
                                first = ERR_PTR(err);
                                break;
                        }
                }
                prev = vma;
        }
        return first;
}
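/*
 * Note on usage: do_mbind() below calls check_range() with MPOL_MF_INVERT,
 * so check_pte_range() acts on the pages that are NOT on the requested
 * nodes -- exactly the ones that need migrating -- while migrate_to_node()
 * calls it without MPOL_MF_INVERT to collect the pages that ARE on the
 * source node.
 */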
/* Apply policy to a single VMA */
static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
{
        int err = 0;
        struct mempolicy *old = vma->vm_policy;

        pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
                 vma->vm_start, vma->vm_end, vma->vm_pgoff,
                 vma->vm_ops, vma->vm_file,
                 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

        if (vma->vm_ops && vma->vm_ops->set_policy)
                err = vma->vm_ops->set_policy(vma, new);
        if (!err) {
                mpol_get(new);
                vma->vm_policy = new;
                mpol_put(old);
        }
        return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
                       unsigned long end, struct mempolicy *new_pol)
{
        struct vm_area_struct *next;
        struct vm_area_struct *prev;
        struct vm_area_struct *vma;
        int err = 0;
        pgoff_t pgoff;
        unsigned long vmstart;
        unsigned long vmend;

        vma = find_vma_prev(mm, start, &prev);
        if (!vma || vma->vm_start > start)
                return -EFAULT;

        for (; vma && vma->vm_start < end; prev = vma, vma = next) {
                next = vma->vm_next;
                vmstart = max(start, vma->vm_start);
                vmend   = min(end, vma->vm_end);

                pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
                prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
                                 vma->anon_vma, vma->vm_file, pgoff, new_pol);
                if (prev) {
                        vma = prev;
                        next = vma->vm_next;
                        continue;
                }
                if (vma->vm_start != vmstart) {
                        err = split_vma(vma->vm_mm, vma, vmstart, 1);
                        if (err)
                                goto out;
                }
                if (vma->vm_end != vmend) {
                        err = split_vma(vma->vm_mm, vma, vmend, 0);
                        if (err)
                                goto out;
                }
                err = policy_vma(vma, new_pol);
                if (err)
                        goto out;
        }

 out:
        return err;
}
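/*
 * Worked example: with a single VMA covering [0x1000, 0x9000) and a policy
 * applied to [0x3000, 0x6000), mbind_range() first tries to merge with a
 * neighbour; failing that, split_vma() carves off [0x1000, 0x3000) and
 * [0x6000, 0x9000), and policy_vma() installs the new policy on the middle
 * VMA only.
 */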
/*
 * Update task->flags PF_MEMPOLICY bit: set iff non-default
 * mempolicy. Allows more rapid checking of this (combined perhaps
 * with other PF_* flag bits) on memory allocation hot code paths.
 *
 * If called from outside this file, the task 'p' should -only- be
 * a newly forked child not yet visible on the task list, because
 * manipulating the task flags of a visible task is not safe.
 *
 * The above limitation is why this routine has the funny name
 * mpol_fix_fork_child_flag().
 *
 * It is also safe to call this with a task pointer of current,
 * which the static wrapper mpol_set_task_struct_flag() does,
 * for use within this file.
 */

void mpol_fix_fork_child_flag(struct task_struct *p)
{
        if (p->mempolicy)
                p->flags |= PF_MEMPOLICY;
        else
                p->flags &= ~PF_MEMPOLICY;
}

static void mpol_set_task_struct_flag(void)
{
        mpol_fix_fork_child_flag(current);
}
/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
                             nodemask_t *nodes)
{
        struct mempolicy *new, *old;
        struct mm_struct *mm = current->mm;
        NODEMASK_SCRATCH(scratch);
        int ret;

        if (!scratch)
                return -ENOMEM;

        new = mpol_new(mode, flags, nodes);
        if (IS_ERR(new)) {
                ret = PTR_ERR(new);
                goto out;
        }
        /*
         * prevent changing our mempolicy while show_numa_maps()
         * is using it.
         * Note: do_set_mempolicy() can be called at init time
         * with no 'mm'.
         */
        if (mm)
                down_write(&mm->mmap_sem);
        task_lock(current);
        ret = mpol_set_nodemask(new, nodes, scratch);
        if (ret) {
                task_unlock(current);
                if (mm)
                        up_write(&mm->mmap_sem);
                mpol_put(new);
                goto out;
        }
        old = current->mempolicy;
        current->mempolicy = new;
        mpol_set_task_struct_flag();
        if (new && new->mode == MPOL_INTERLEAVE &&
            nodes_weight(new->v.nodes))
                current->il_next = first_node(new->v.nodes);
        task_unlock(current);
        if (mm)
                up_write(&mm->mmap_sem);

        mpol_put(old);
        ret = 0;
out:
        NODEMASK_SCRATCH_FREE(scratch);
        return ret;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
        nodes_clear(*nodes);
        if (p == &default_policy)
                return;

        switch (p->mode) {
        case MPOL_BIND:
                /* Fall through */
        case MPOL_INTERLEAVE:
                *nodes = p->v.nodes;
                break;
        case MPOL_PREFERRED:
                if (!(p->flags & MPOL_F_LOCAL))
                        node_set(p->v.preferred_node, *nodes);
                /* else return empty node mask for local allocation */
                break;
        default:
                BUG();
        }
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
        struct page *p;
        int err;

        err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
        if (err >= 0) {
                err = page_to_nid(p);
                put_page(p);
        }
        return err;
}
/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
                             unsigned long addr, unsigned long flags)
{
        int err;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma = NULL;
        struct mempolicy *pol = current->mempolicy;

        if (flags &
                ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
                return -EINVAL;

        if (flags & MPOL_F_MEMS_ALLOWED) {
                if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
                        return -EINVAL;
                *policy = 0;    /* just so it's initialized */
                task_lock(current);
                *nmask = cpuset_current_mems_allowed;
                task_unlock(current);
                return 0;
        }

        if (flags & MPOL_F_ADDR) {
                /*
                 * Do NOT fall back to task policy if the
                 * vma/shared policy at addr is NULL.  We
                 * want to return MPOL_DEFAULT in this case.
                 */
                down_read(&mm->mmap_sem);
                vma = find_vma_intersection(mm, addr, addr+1);
                if (!vma) {
                        up_read(&mm->mmap_sem);
                        return -EFAULT;
                }
                if (vma->vm_ops && vma->vm_ops->get_policy)
                        pol = vma->vm_ops->get_policy(vma, addr);
                else
                        pol = vma->vm_policy;
        } else if (addr)
                return -EINVAL;

        if (!pol)
                pol = &default_policy;  /* indicates default behavior */

        if (flags & MPOL_F_NODE) {
                if (flags & MPOL_F_ADDR) {
                        err = lookup_node(mm, addr);
                        if (err < 0)
                                goto out;
                        *policy = err;
                } else if (pol == current->mempolicy &&
                                pol->mode == MPOL_INTERLEAVE) {
                        *policy = current->il_next;
                } else {
                        err = -EINVAL;
                        goto out;
                }
        } else {
                *policy = pol == &default_policy ? MPOL_DEFAULT :
                                                   pol->mode;
                /*
                 * Internal mempolicy flags must be masked off before exposing
                 * the policy to userspace.
                 */
                *policy |= (pol->flags & MPOL_MODE_FLAGS);
        }

        if (vma) {
                up_read(&current->mm->mmap_sem);
                vma = NULL;
        }

        err = 0;
        if (nmask) {
                if (mpol_store_user_nodemask(pol)) {
                        *nmask = pol->w.user_nodemask;
                } else {
                        task_lock(current);
                        get_policy_nodemask(pol, nmask);
                        task_unlock(current);
                }
        }

out:
        mpol_cond_put(pol);
        if (vma)
                up_read(&current->mm->mmap_sem);
        return err;
}
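/*
 * Userspace view (illustrative): the get_mempolicy() syscall backed by
 * this helper can, for example, report which node holds the page at a
 * given address:
 *
 *      int node;
 *      // MPOL_F_NODE|MPOL_F_ADDR: return the node of the page at 'addr'
 *      get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR);
 *
 * which reaches lookup_node() above via the MPOL_F_ADDR branch.
 */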
#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
                             unsigned long flags)
{
        /*
         * Avoid migrating a page that is shared with others.
         */
        if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
                if (!isolate_lru_page(page)) {
                        list_add_tail(&page->lru, pagelist);
                        inc_zone_page_state(page, NR_ISOLATED_ANON +
                                            page_is_file_cache(page));
                }
        }
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
        return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
}
/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
                           int flags)
{
        nodemask_t nmask;
        LIST_HEAD(pagelist);
        int err = 0;

        nodes_clear(nmask);
        node_set(source, nmask);

        check_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
                    flags | MPOL_MF_DISCONTIG_OK, &pagelist);

        if (!list_empty(&pagelist))
                err = migrate_pages(&pagelist, new_node_page, dest, 0);

        return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm,
                     const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
        int busy = 0;
        int err;
        nodemask_t tmp;

        err = migrate_prep();
        if (err)
                return err;

        down_read(&mm->mmap_sem);

        err = migrate_vmas(mm, from_nodes, to_nodes, flags);
        if (err)
                goto out;

        /*
         * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
         * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
         * bit in 'tmp', and return that <source, dest> pair for migration.
         * The pair of nodemasks 'to' and 'from' define the map.
         *
         * If no pair of bits is found that way, fall back to picking some
         * pair of 'source' and 'dest' bits that are not the same.  If the
         * 'source' and 'dest' bits are the same, this represents a node
         * that will be migrating to itself, so no pages need move.
         *
         * If no bits are left in 'tmp', or if all remaining bits left
         * in 'tmp' correspond to the same bit in 'to', return false
         * (nothing left to migrate).
         *
         * This lets us pick a pair of nodes to migrate between, such that
         * if possible the dest node is not already occupied by some other
         * source node, minimizing the risk of overloading the memory on a
         * node that would happen if we migrated incoming memory to a node
         * before migrating outgoing memory off that same node.
         *
         * A single scan of tmp is sufficient.  As we go, we remember the
         * most recent <s, d> pair that moved (s != d).  If we find a pair
         * that not only moved, but what's better, moved to an empty slot
         * (d is not set in tmp), then we break out then, with that pair.
         * Otherwise when we finish scanning tmp, we at least have the
         * most recent <s, d> pair that moved.  If we get all the way through
         * the scan of tmp without finding any node that moved, much less
         * moved to an empty node, then there is nothing left worth migrating.
         */

        tmp = *from_nodes;
        while (!nodes_empty(tmp)) {
                int s, d;
                int source = -1;
                int dest = 0;

                for_each_node_mask(s, tmp) {
                        d = node_remap(s, *from_nodes, *to_nodes);
                        if (s == d)
                                continue;

                        source = s;     /* Node moved. Memorize */
                        dest = d;

                        /* dest not in remaining from nodes? */
                        if (!node_isset(dest, tmp))
                                break;
                }
                if (source == -1)
                        break;

                node_clear(source, tmp);
                err = migrate_to_node(mm, source, dest, flags);
                if (err > 0)
                        busy += err;
                if (err < 0)
                        break;
        }
out:
        up_read(&mm->mmap_sem);
        if (err < 0)
                return err;
        return busy;

}
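/*
 * Worked example: from_nodes = {0,1}, to_nodes = {1,2}.  First pass over
 * tmp = {0,1}: s=0 maps to d=1, but node 1 is still a pending source, so
 * keep scanning; s=1 maps to d=2, which is not in tmp, so migrate 1 -> 2
 * first and clear node 1.  Second pass over tmp = {0}: migrate 0 -> 1,
 * which has now been vacated.  This ordering avoids piling node 0's pages
 * onto node 1 before node 1's pages have moved out.
 */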
/*
 * Allocate a new page for page migration based on vma policy.
 * Start assuming that page is mapped by vma pointed to by @private.
 * Search forward from there, if not.  N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
 */
static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
        struct vm_area_struct *vma = (struct vm_area_struct *)private;
        unsigned long uninitialized_var(address);

        while (vma) {
                address = page_address_in_vma(page, vma);
                if (address != -EFAULT)
                        break;
                vma = vma->vm_next;
        }

        /*
         * if !vma, alloc_page_vma() will use task or system default policy
         */
        return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
}
#else

static void migrate_page_add(struct page *page, struct list_head *pagelist,
                             unsigned long flags)
{
}

int do_migrate_pages(struct mm_struct *mm,
                     const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
        return -ENOSYS;
}

static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
        return NULL;
}
#endif

static long do_mbind(unsigned long start, unsigned long len,
                     unsigned short mode, unsigned short mode_flags,
                     nodemask_t *nmask, unsigned long flags)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        struct mempolicy *new;
        unsigned long end;
        int err;
        LIST_HEAD(pagelist);

        if (flags & ~(unsigned long)(MPOL_MF_STRICT |
                      MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
                return -EINVAL;
        if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
                return -EPERM;

        if (start & ~PAGE_MASK)
                return -EINVAL;

        if (mode == MPOL_DEFAULT)
                flags &= ~MPOL_MF_STRICT;

        len = (len + PAGE_SIZE - 1) & PAGE_MASK;
        end = start + len;

        if (end < start)
                return -EINVAL;
        if (end == start)
                return 0;
new = mpol_new(mode, mode_flags, nmask); 11056ce3c4c0SChristoph Lameter if (IS_ERR(new)) 11066ce3c4c0SChristoph Lameter return PTR_ERR(new); 11076ce3c4c0SChristoph Lameter 11086ce3c4c0SChristoph Lameter /* 11096ce3c4c0SChristoph Lameter * If we are using the default policy then operation 11106ce3c4c0SChristoph Lameter * on discontinuous address spaces is okay after all 11116ce3c4c0SChristoph Lameter */ 11126ce3c4c0SChristoph Lameter if (!new) 11136ce3c4c0SChristoph Lameter flags |= MPOL_MF_DISCONTIG_OK; 11146ce3c4c0SChristoph Lameter 1115028fec41SDavid Rientjes pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n", 1116028fec41SDavid Rientjes start, start + len, mode, mode_flags, 1117028fec41SDavid Rientjes nmask ? nodes_addr(*nmask)[0] : -1); 11186ce3c4c0SChristoph Lameter 11190aedadf9SChristoph Lameter if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 11200aedadf9SChristoph Lameter 11210aedadf9SChristoph Lameter err = migrate_prep(); 11220aedadf9SChristoph Lameter if (err) 1123b05ca738SKOSAKI Motohiro goto mpol_out; 11240aedadf9SChristoph Lameter } 11254bfc4495SKAMEZAWA Hiroyuki { 11264bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 11274bfc4495SKAMEZAWA Hiroyuki if (scratch) { 11286ce3c4c0SChristoph Lameter down_write(&mm->mmap_sem); 112958568d2aSMiao Xie task_lock(current); 11304bfc4495SKAMEZAWA Hiroyuki err = mpol_set_nodemask(new, nmask, scratch); 113158568d2aSMiao Xie task_unlock(current); 11324bfc4495SKAMEZAWA Hiroyuki if (err) 113358568d2aSMiao Xie up_write(&mm->mmap_sem); 11344bfc4495SKAMEZAWA Hiroyuki } else 11354bfc4495SKAMEZAWA Hiroyuki err = -ENOMEM; 11364bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 11374bfc4495SKAMEZAWA Hiroyuki } 1138b05ca738SKOSAKI Motohiro if (err) 1139b05ca738SKOSAKI Motohiro goto mpol_out; 1140b05ca738SKOSAKI Motohiro 11416ce3c4c0SChristoph Lameter vma = check_range(mm, start, end, nmask, 11426ce3c4c0SChristoph Lameter flags | MPOL_MF_INVERT, &pagelist); 11436ce3c4c0SChristoph Lameter 11446ce3c4c0SChristoph Lameter err = PTR_ERR(vma); 11456ce3c4c0SChristoph Lameter if (!IS_ERR(vma)) { 11466ce3c4c0SChristoph Lameter int nr_failed = 0; 11476ce3c4c0SChristoph Lameter 11489d8cebd4SKOSAKI Motohiro err = mbind_range(mm, start, end, new); 11497e2ab150SChristoph Lameter 11506ce3c4c0SChristoph Lameter if (!list_empty(&pagelist)) 115195a402c3SChristoph Lameter nr_failed = migrate_pages(&pagelist, new_vma_page, 115262b61f61SHugh Dickins (unsigned long)vma, 0); 11536ce3c4c0SChristoph Lameter 11546ce3c4c0SChristoph Lameter if (!err && nr_failed && (flags & MPOL_MF_STRICT)) 11556ce3c4c0SChristoph Lameter err = -EIO; 1156ab8a3e14SKOSAKI Motohiro } else 1157ab8a3e14SKOSAKI Motohiro putback_lru_pages(&pagelist); 1158b20a3503SChristoph Lameter 11596ce3c4c0SChristoph Lameter up_write(&mm->mmap_sem); 1160b05ca738SKOSAKI Motohiro mpol_out: 1161f0be3d32SLee Schermerhorn mpol_put(new); 11626ce3c4c0SChristoph Lameter return err; 11636ce3c4c0SChristoph Lameter } 11646ce3c4c0SChristoph Lameter 116539743889SChristoph Lameter /* 11668bccd85fSChristoph Lameter * User space interface with variable sized bitmaps for nodelists. 11678bccd85fSChristoph Lameter */ 11688bccd85fSChristoph Lameter 11698bccd85fSChristoph Lameter /* Copy a node mask from user space. 
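   An illustrative walk-through of the endmask logic below (values assumed,
   64-bit kernel): a caller passing maxnode = 17 covers node bits 0..15 once
   --maxnode has been applied, so nlongs = BITS_TO_LONGS(16) = 1 and
   endmask = (1UL << 16) - 1 = 0xffff; the final "&= endmask" keeps only
   those low 16 bits of the last copied word.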
*/ 117039743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask, 11718bccd85fSChristoph Lameter unsigned long maxnode) 11728bccd85fSChristoph Lameter { 11738bccd85fSChristoph Lameter unsigned long k; 11748bccd85fSChristoph Lameter unsigned long nlongs; 11758bccd85fSChristoph Lameter unsigned long endmask; 11768bccd85fSChristoph Lameter 11778bccd85fSChristoph Lameter --maxnode; 11788bccd85fSChristoph Lameter nodes_clear(*nodes); 11798bccd85fSChristoph Lameter if (maxnode == 0 || !nmask) 11808bccd85fSChristoph Lameter return 0; 1181a9c930baSAndi Kleen if (maxnode > PAGE_SIZE*BITS_PER_BYTE) 1182636f13c1SChris Wright return -EINVAL; 11838bccd85fSChristoph Lameter 11848bccd85fSChristoph Lameter nlongs = BITS_TO_LONGS(maxnode); 11858bccd85fSChristoph Lameter if ((maxnode % BITS_PER_LONG) == 0) 11868bccd85fSChristoph Lameter endmask = ~0UL; 11878bccd85fSChristoph Lameter else 11888bccd85fSChristoph Lameter endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1; 11898bccd85fSChristoph Lameter 11908bccd85fSChristoph Lameter /* When the user specified more nodes than supported, just check 11918bccd85fSChristoph Lameter if the unsupported part is all zero. */ 11928bccd85fSChristoph Lameter if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) { 11938bccd85fSChristoph Lameter if (nlongs > PAGE_SIZE/sizeof(long)) 11948bccd85fSChristoph Lameter return -EINVAL; 11958bccd85fSChristoph Lameter for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) { 11968bccd85fSChristoph Lameter unsigned long t; 11978bccd85fSChristoph Lameter if (get_user(t, nmask + k)) 11988bccd85fSChristoph Lameter return -EFAULT; 11998bccd85fSChristoph Lameter if (k == nlongs - 1) { 12008bccd85fSChristoph Lameter if (t & endmask) 12018bccd85fSChristoph Lameter return -EINVAL; 12028bccd85fSChristoph Lameter } else if (t) 12038bccd85fSChristoph Lameter return -EINVAL; 12048bccd85fSChristoph Lameter } 12058bccd85fSChristoph Lameter nlongs = BITS_TO_LONGS(MAX_NUMNODES); 12068bccd85fSChristoph Lameter endmask = ~0UL; 12078bccd85fSChristoph Lameter } 12088bccd85fSChristoph Lameter 12098bccd85fSChristoph Lameter if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long))) 12108bccd85fSChristoph Lameter return -EFAULT; 12118bccd85fSChristoph Lameter nodes_addr(*nodes)[nlongs-1] &= endmask; 12128bccd85fSChristoph Lameter return 0; 12138bccd85fSChristoph Lameter } 12148bccd85fSChristoph Lameter 12158bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */ 12168bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode, 12178bccd85fSChristoph Lameter nodemask_t *nodes) 12188bccd85fSChristoph Lameter { 12198bccd85fSChristoph Lameter unsigned long copy = ALIGN(maxnode-1, 64) / 8; 12208bccd85fSChristoph Lameter const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long); 12218bccd85fSChristoph Lameter 12228bccd85fSChristoph Lameter if (copy > nbytes) { 12238bccd85fSChristoph Lameter if (copy > PAGE_SIZE) 12248bccd85fSChristoph Lameter return -EINVAL; 12258bccd85fSChristoph Lameter if (clear_user((char __user *)mask + nbytes, copy - nbytes)) 12268bccd85fSChristoph Lameter return -EFAULT; 12278bccd85fSChristoph Lameter copy = nbytes; 12288bccd85fSChristoph Lameter } 12298bccd85fSChristoph Lameter return copy_to_user(mask, nodes_addr(*nodes), copy) ?
-EFAULT : 0; 12308bccd85fSChristoph Lameter } 12318bccd85fSChristoph Lameter 1232938bb9f5SHeiko Carstens SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len, 1233938bb9f5SHeiko Carstens unsigned long, mode, unsigned long __user *, nmask, 1234938bb9f5SHeiko Carstens unsigned long, maxnode, unsigned, flags) 12358bccd85fSChristoph Lameter { 12368bccd85fSChristoph Lameter nodemask_t nodes; 12378bccd85fSChristoph Lameter int err; 1238028fec41SDavid Rientjes unsigned short mode_flags; 12398bccd85fSChristoph Lameter 1240028fec41SDavid Rientjes mode_flags = mode & MPOL_MODE_FLAGS; 1241028fec41SDavid Rientjes mode &= ~MPOL_MODE_FLAGS; 1242a3b51e01SDavid Rientjes if (mode >= MPOL_MAX) 1243a3b51e01SDavid Rientjes return -EINVAL; 12444c50bc01SDavid Rientjes if ((mode_flags & MPOL_F_STATIC_NODES) && 12454c50bc01SDavid Rientjes (mode_flags & MPOL_F_RELATIVE_NODES)) 12464c50bc01SDavid Rientjes return -EINVAL; 12478bccd85fSChristoph Lameter err = get_nodes(&nodes, nmask, maxnode); 12488bccd85fSChristoph Lameter if (err) 12498bccd85fSChristoph Lameter return err; 1250028fec41SDavid Rientjes return do_mbind(start, len, mode, mode_flags, &nodes, flags); 12518bccd85fSChristoph Lameter } 12528bccd85fSChristoph Lameter 12538bccd85fSChristoph Lameter /* Set the process memory policy */ 1254938bb9f5SHeiko Carstens SYSCALL_DEFINE3(set_mempolicy, int, mode, unsigned long __user *, nmask, 1255938bb9f5SHeiko Carstens unsigned long, maxnode) 12568bccd85fSChristoph Lameter { 12578bccd85fSChristoph Lameter int err; 12588bccd85fSChristoph Lameter nodemask_t nodes; 1259028fec41SDavid Rientjes unsigned short flags; 12608bccd85fSChristoph Lameter 1261028fec41SDavid Rientjes flags = mode & MPOL_MODE_FLAGS; 1262028fec41SDavid Rientjes mode &= ~MPOL_MODE_FLAGS; 1263028fec41SDavid Rientjes if ((unsigned int)mode >= MPOL_MAX) 12648bccd85fSChristoph Lameter return -EINVAL; 12654c50bc01SDavid Rientjes if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES)) 12664c50bc01SDavid Rientjes return -EINVAL; 12678bccd85fSChristoph Lameter err = get_nodes(&nodes, nmask, maxnode); 12688bccd85fSChristoph Lameter if (err) 12698bccd85fSChristoph Lameter return err; 1270028fec41SDavid Rientjes return do_set_mempolicy(mode, flags, &nodes); 12718bccd85fSChristoph Lameter } 12728bccd85fSChristoph Lameter 1273938bb9f5SHeiko Carstens SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode, 1274938bb9f5SHeiko Carstens const unsigned long __user *, old_nodes, 1275938bb9f5SHeiko Carstens const unsigned long __user *, new_nodes) 127639743889SChristoph Lameter { 1277c69e8d9cSDavid Howells const struct cred *cred = current_cred(), *tcred; 1278*596d7cfaSKOSAKI Motohiro struct mm_struct *mm = NULL; 127939743889SChristoph Lameter struct task_struct *task; 128039743889SChristoph Lameter nodemask_t task_nodes; 128139743889SChristoph Lameter int err; 1282*596d7cfaSKOSAKI Motohiro nodemask_t *old; 1283*596d7cfaSKOSAKI Motohiro nodemask_t *new; 1284*596d7cfaSKOSAKI Motohiro NODEMASK_SCRATCH(scratch); 128539743889SChristoph Lameter 1286*596d7cfaSKOSAKI Motohiro if (!scratch) 1287*596d7cfaSKOSAKI Motohiro return -ENOMEM; 128839743889SChristoph Lameter 1289*596d7cfaSKOSAKI Motohiro old = &scratch->mask1; 1290*596d7cfaSKOSAKI Motohiro new = &scratch->mask2; 1291*596d7cfaSKOSAKI Motohiro 1292*596d7cfaSKOSAKI Motohiro err = get_nodes(old, old_nodes, maxnode); 129339743889SChristoph Lameter if (err) 1294*596d7cfaSKOSAKI Motohiro goto out; 1295*596d7cfaSKOSAKI Motohiro 1296*596d7cfaSKOSAKI Motohiro err = get_nodes(new, new_nodes, 
maxnode); 1297*596d7cfaSKOSAKI Motohiro if (err) 1298*596d7cfaSKOSAKI Motohiro goto out; 129939743889SChristoph Lameter 130039743889SChristoph Lameter /* Find the mm_struct */ 130139743889SChristoph Lameter read_lock(&tasklist_lock); 1302228ebcbeSPavel Emelyanov task = pid ? find_task_by_vpid(pid) : current; 130339743889SChristoph Lameter if (!task) { 130439743889SChristoph Lameter read_unlock(&tasklist_lock); 1305*596d7cfaSKOSAKI Motohiro err = -ESRCH; 1306*596d7cfaSKOSAKI Motohiro goto out; 130739743889SChristoph Lameter } 130839743889SChristoph Lameter mm = get_task_mm(task); 130939743889SChristoph Lameter read_unlock(&tasklist_lock); 131039743889SChristoph Lameter 1311*596d7cfaSKOSAKI Motohiro err = -EINVAL; 131239743889SChristoph Lameter if (!mm) 1313*596d7cfaSKOSAKI Motohiro goto out; 131439743889SChristoph Lameter 131539743889SChristoph Lameter /* 131639743889SChristoph Lameter * Check if this process has the right to modify the specified 131739743889SChristoph Lameter * process. The right exists if the process has administrative 13187f927fccSAlexey Dobriyan * capabilities, superuser privileges or the same 131939743889SChristoph Lameter * userid as the target process. 132039743889SChristoph Lameter */ 1321c69e8d9cSDavid Howells rcu_read_lock(); 1322c69e8d9cSDavid Howells tcred = __task_cred(task); 1323b6dff3ecSDavid Howells if (cred->euid != tcred->suid && cred->euid != tcred->uid && 1324b6dff3ecSDavid Howells cred->uid != tcred->suid && cred->uid != tcred->uid && 132574c00241SChristoph Lameter !capable(CAP_SYS_NICE)) { 1326c69e8d9cSDavid Howells rcu_read_unlock(); 132739743889SChristoph Lameter err = -EPERM; 132839743889SChristoph Lameter goto out; 132939743889SChristoph Lameter } 1330c69e8d9cSDavid Howells rcu_read_unlock(); 133139743889SChristoph Lameter 133239743889SChristoph Lameter task_nodes = cpuset_mems_allowed(task); 133339743889SChristoph Lameter /* Is the user allowed to access the target nodes? */ 1334*596d7cfaSKOSAKI Motohiro if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) { 133539743889SChristoph Lameter err = -EPERM; 133639743889SChristoph Lameter goto out; 133739743889SChristoph Lameter } 133839743889SChristoph Lameter 1339*596d7cfaSKOSAKI Motohiro if (!nodes_subset(*new, node_states[N_HIGH_MEMORY])) { 13403b42d28bSChristoph Lameter err = -EINVAL; 13413b42d28bSChristoph Lameter goto out; 13423b42d28bSChristoph Lameter } 13433b42d28bSChristoph Lameter 134486c3a764SDavid Quigley err = security_task_movememory(task); 134586c3a764SDavid Quigley if (err) 134686c3a764SDavid Quigley goto out; 134786c3a764SDavid Quigley 1348*596d7cfaSKOSAKI Motohiro err = do_migrate_pages(mm, old, new, 134974c00241SChristoph Lameter capable(CAP_SYS_NICE) ? 
MPOL_MF_MOVE_ALL : MPOL_MF_MOVE); 135039743889SChristoph Lameter out: 1351*596d7cfaSKOSAKI Motohiro if (mm) 135239743889SChristoph Lameter mmput(mm); 1353*596d7cfaSKOSAKI Motohiro NODEMASK_SCRATCH_FREE(scratch); 1354*596d7cfaSKOSAKI Motohiro 135539743889SChristoph Lameter return err; 135639743889SChristoph Lameter } 135739743889SChristoph Lameter 135839743889SChristoph Lameter 13598bccd85fSChristoph Lameter /* Retrieve NUMA policy */ 1360938bb9f5SHeiko Carstens SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, 1361938bb9f5SHeiko Carstens unsigned long __user *, nmask, unsigned long, maxnode, 1362938bb9f5SHeiko Carstens unsigned long, addr, unsigned long, flags) 13638bccd85fSChristoph Lameter { 1364dbcb0f19SAdrian Bunk int err; 1365dbcb0f19SAdrian Bunk int uninitialized_var(pval); 13668bccd85fSChristoph Lameter nodemask_t nodes; 13678bccd85fSChristoph Lameter 13688bccd85fSChristoph Lameter if (nmask != NULL && maxnode < MAX_NUMNODES) 13698bccd85fSChristoph Lameter return -EINVAL; 13708bccd85fSChristoph Lameter 13718bccd85fSChristoph Lameter err = do_get_mempolicy(&pval, &nodes, addr, flags); 13728bccd85fSChristoph Lameter 13738bccd85fSChristoph Lameter if (err) 13748bccd85fSChristoph Lameter return err; 13758bccd85fSChristoph Lameter 13768bccd85fSChristoph Lameter if (policy && put_user(pval, policy)) 13778bccd85fSChristoph Lameter return -EFAULT; 13788bccd85fSChristoph Lameter 13798bccd85fSChristoph Lameter if (nmask) 13808bccd85fSChristoph Lameter err = copy_nodes_to_user(nmask, maxnode, &nodes); 13818bccd85fSChristoph Lameter 13828bccd85fSChristoph Lameter return err; 13838bccd85fSChristoph Lameter } 13848bccd85fSChristoph Lameter 13851da177e4SLinus Torvalds #ifdef CONFIG_COMPAT 13861da177e4SLinus Torvalds 13871da177e4SLinus Torvalds asmlinkage long compat_sys_get_mempolicy(int __user *policy, 13881da177e4SLinus Torvalds compat_ulong_t __user *nmask, 13891da177e4SLinus Torvalds compat_ulong_t maxnode, 13901da177e4SLinus Torvalds compat_ulong_t addr, compat_ulong_t flags) 13911da177e4SLinus Torvalds { 13921da177e4SLinus Torvalds long err; 13931da177e4SLinus Torvalds unsigned long __user *nm = NULL; 13941da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 13951da177e4SLinus Torvalds DECLARE_BITMAP(bm, MAX_NUMNODES); 13961da177e4SLinus Torvalds 13971da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 13981da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 13991da177e4SLinus Torvalds 14001da177e4SLinus Torvalds if (nmask) 14011da177e4SLinus Torvalds nm = compat_alloc_user_space(alloc_size); 14021da177e4SLinus Torvalds 14031da177e4SLinus Torvalds err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags); 14041da177e4SLinus Torvalds 14051da177e4SLinus Torvalds if (!err && nmask) { 14061da177e4SLinus Torvalds err = copy_from_user(bm, nm, alloc_size); 14071da177e4SLinus Torvalds /* ensure entire bitmap is zeroed */ 14081da177e4SLinus Torvalds err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8); 14091da177e4SLinus Torvalds err |= compat_put_bitmap(nmask, bm, nr_bits); 14101da177e4SLinus Torvalds } 14111da177e4SLinus Torvalds 14121da177e4SLinus Torvalds return err; 14131da177e4SLinus Torvalds } 14141da177e4SLinus Torvalds 14151da177e4SLinus Torvalds asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask, 14161da177e4SLinus Torvalds compat_ulong_t maxnode) 14171da177e4SLinus Torvalds { 14181da177e4SLinus Torvalds long err = 0; 14191da177e4SLinus Torvalds unsigned long __user *nm = NULL; 14201da177e4SLinus Torvalds 
unsigned long nr_bits, alloc_size; 14211da177e4SLinus Torvalds DECLARE_BITMAP(bm, MAX_NUMNODES); 14221da177e4SLinus Torvalds 14231da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 14241da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 14251da177e4SLinus Torvalds 14261da177e4SLinus Torvalds if (nmask) { 14271da177e4SLinus Torvalds err = compat_get_bitmap(bm, nmask, nr_bits); 14281da177e4SLinus Torvalds nm = compat_alloc_user_space(alloc_size); 14291da177e4SLinus Torvalds err |= copy_to_user(nm, bm, alloc_size); 14301da177e4SLinus Torvalds } 14311da177e4SLinus Torvalds 14321da177e4SLinus Torvalds if (err) 14331da177e4SLinus Torvalds return -EFAULT; 14341da177e4SLinus Torvalds 14351da177e4SLinus Torvalds return sys_set_mempolicy(mode, nm, nr_bits+1); 14361da177e4SLinus Torvalds } 14371da177e4SLinus Torvalds 14381da177e4SLinus Torvalds asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len, 14391da177e4SLinus Torvalds compat_ulong_t mode, compat_ulong_t __user *nmask, 14401da177e4SLinus Torvalds compat_ulong_t maxnode, compat_ulong_t flags) 14411da177e4SLinus Torvalds { 14421da177e4SLinus Torvalds long err = 0; 14431da177e4SLinus Torvalds unsigned long __user *nm = NULL; 14441da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 1445dfcd3c0dSAndi Kleen nodemask_t bm; 14461da177e4SLinus Torvalds 14471da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 14481da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 14491da177e4SLinus Torvalds 14501da177e4SLinus Torvalds if (nmask) { 1451dfcd3c0dSAndi Kleen err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits); 14521da177e4SLinus Torvalds nm = compat_alloc_user_space(alloc_size); 1453dfcd3c0dSAndi Kleen err |= copy_to_user(nm, nodes_addr(bm), alloc_size); 14541da177e4SLinus Torvalds } 14551da177e4SLinus Torvalds 14561da177e4SLinus Torvalds if (err) 14571da177e4SLinus Torvalds return -EFAULT; 14581da177e4SLinus Torvalds 14591da177e4SLinus Torvalds return sys_mbind(start, len, mode, nm, nr_bits+1, flags); 14601da177e4SLinus Torvalds } 14611da177e4SLinus Torvalds 14621da177e4SLinus Torvalds #endif 14631da177e4SLinus Torvalds 1464480eccf9SLee Schermerhorn /* 1465480eccf9SLee Schermerhorn * get_vma_policy(@task, @vma, @addr) 1466480eccf9SLee Schermerhorn * @task - task for fallback if vma policy == default 1467480eccf9SLee Schermerhorn * @vma - virtual memory area whose policy is sought 1468480eccf9SLee Schermerhorn * @addr - address in @vma for shared policy lookup 1469480eccf9SLee Schermerhorn * 1470480eccf9SLee Schermerhorn * Returns effective policy for a VMA at specified address. 1471480eccf9SLee Schermerhorn * Falls back to @task or system default policy, as necessary. 147252cd3b07SLee Schermerhorn * Current or other task's task mempolicy and non-shared vma policies 147352cd3b07SLee Schermerhorn * are protected by the task's mmap_sem, which must be held for read by 147452cd3b07SLee Schermerhorn * the caller. 147552cd3b07SLee Schermerhorn * Shared policies [those marked as MPOL_F_SHARED] require an extra reference 147652cd3b07SLee Schermerhorn * count--added by the get_policy() vm_op, as appropriate--to protect against 147752cd3b07SLee Schermerhorn * freeing by another task. It is the caller's responsibility to free the 147852cd3b07SLee Schermerhorn * extra reference for shared policies. 
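 *
 * A minimal caller sketch (illustrative only, not part of the original
 * source), showing the conditional-unref contract described above; the
 * same pattern appears in alloc_page_vma() below:
 *
 *	pol = get_vma_policy(current, vma, addr);
 *	... allocate according to pol ...
 *	mpol_cond_put(pol);
 *
 * where mpol_cond_put() drops the extra reference only if the policy is
 * marked MPOL_F_SHARED.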
1479480eccf9SLee Schermerhorn */ 148048fce342SChristoph Lameter static struct mempolicy *get_vma_policy(struct task_struct *task, 148148fce342SChristoph Lameter struct vm_area_struct *vma, unsigned long addr) 14821da177e4SLinus Torvalds { 14836e21c8f1SChristoph Lameter struct mempolicy *pol = task->mempolicy; 14841da177e4SLinus Torvalds 14851da177e4SLinus Torvalds if (vma) { 1486480eccf9SLee Schermerhorn if (vma->vm_ops && vma->vm_ops->get_policy) { 1487ae4d8c16SLee Schermerhorn struct mempolicy *vpol = vma->vm_ops->get_policy(vma, 1488ae4d8c16SLee Schermerhorn addr); 1489ae4d8c16SLee Schermerhorn if (vpol) 1490ae4d8c16SLee Schermerhorn pol = vpol; 1491bea904d5SLee Schermerhorn } else if (vma->vm_policy) 14921da177e4SLinus Torvalds pol = vma->vm_policy; 14931da177e4SLinus Torvalds } 14941da177e4SLinus Torvalds if (!pol) 14951da177e4SLinus Torvalds pol = &default_policy; 14961da177e4SLinus Torvalds return pol; 14971da177e4SLinus Torvalds } 14981da177e4SLinus Torvalds 149952cd3b07SLee Schermerhorn /* 150052cd3b07SLee Schermerhorn * Return a nodemask representing a mempolicy for filtering nodes for 150152cd3b07SLee Schermerhorn * page allocation 150252cd3b07SLee Schermerhorn */ 150352cd3b07SLee Schermerhorn static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy) 150419770b32SMel Gorman { 150519770b32SMel Gorman /* Lower zones don't get a nodemask applied for MPOL_BIND */ 150645c4745aSLee Schermerhorn if (unlikely(policy->mode == MPOL_BIND) && 150719770b32SMel Gorman gfp_zone(gfp) >= policy_zone && 150819770b32SMel Gorman cpuset_nodemask_valid_mems_allowed(&policy->v.nodes)) 150919770b32SMel Gorman return &policy->v.nodes; 151019770b32SMel Gorman 151119770b32SMel Gorman return NULL; 151219770b32SMel Gorman } 151319770b32SMel Gorman 151452cd3b07SLee Schermerhorn /* Return a zonelist indicated by gfp for node representing a mempolicy */ 151552cd3b07SLee Schermerhorn static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy) 15161da177e4SLinus Torvalds { 1517fc36b8d3SLee Schermerhorn int nd = numa_node_id(); 15181da177e4SLinus Torvalds 151945c4745aSLee Schermerhorn switch (policy->mode) { 15201da177e4SLinus Torvalds case MPOL_PREFERRED: 1521fc36b8d3SLee Schermerhorn if (!(policy->flags & MPOL_F_LOCAL)) 15221da177e4SLinus Torvalds nd = policy->v.preferred_node; 15231da177e4SLinus Torvalds break; 15241da177e4SLinus Torvalds case MPOL_BIND: 152519770b32SMel Gorman /* 152652cd3b07SLee Schermerhorn * Normally, MPOL_BIND allocations are node-local within the 152752cd3b07SLee Schermerhorn * allowed nodemask. However, if __GFP_THISNODE is set and the 15286eb27e1fSBob Liu * current node isn't part of the mask, we use the zonelist for 152952cd3b07SLee Schermerhorn * the first node in the mask instead. 
153019770b32SMel Gorman */ 153119770b32SMel Gorman if (unlikely(gfp & __GFP_THISNODE) && 153219770b32SMel Gorman unlikely(!node_isset(nd, policy->v.nodes))) 153319770b32SMel Gorman nd = first_node(policy->v.nodes); 153419770b32SMel Gorman break; 15351da177e4SLinus Torvalds default: 15361da177e4SLinus Torvalds BUG(); 15371da177e4SLinus Torvalds } 15380e88460dSMel Gorman return node_zonelist(nd, gfp); 15391da177e4SLinus Torvalds } 15401da177e4SLinus Torvalds 15411da177e4SLinus Torvalds /* Do dynamic interleaving for a process */ 15421da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy) 15431da177e4SLinus Torvalds { 15441da177e4SLinus Torvalds unsigned nid, next; 15451da177e4SLinus Torvalds struct task_struct *me = current; 15461da177e4SLinus Torvalds 15471da177e4SLinus Torvalds nid = me->il_next; 1548dfcd3c0dSAndi Kleen next = next_node(nid, policy->v.nodes); 15491da177e4SLinus Torvalds if (next >= MAX_NUMNODES) 1550dfcd3c0dSAndi Kleen next = first_node(policy->v.nodes); 1551f5b087b5SDavid Rientjes if (next < MAX_NUMNODES) 15521da177e4SLinus Torvalds me->il_next = next; 15531da177e4SLinus Torvalds return nid; 15541da177e4SLinus Torvalds } 15551da177e4SLinus Torvalds 1556dc85da15SChristoph Lameter /* 1557dc85da15SChristoph Lameter * Depending on the memory policy provide a node from which to allocate the 1558dc85da15SChristoph Lameter * next slab entry. 155952cd3b07SLee Schermerhorn * @policy must be protected from freeing by the caller. If @policy is 156052cd3b07SLee Schermerhorn * the current task's mempolicy, this protection is implicit, as only the 156152cd3b07SLee Schermerhorn * task can change its policy. The system default policy requires no 156252cd3b07SLee Schermerhorn * such protection. 1563dc85da15SChristoph Lameter */ 1564dc85da15SChristoph Lameter unsigned slab_node(struct mempolicy *policy) 1565dc85da15SChristoph Lameter { 1566fc36b8d3SLee Schermerhorn if (!policy || policy->flags & MPOL_F_LOCAL) 1567bea904d5SLee Schermerhorn return numa_node_id(); 1568765c4507SChristoph Lameter 1569bea904d5SLee Schermerhorn switch (policy->mode) { 1570bea904d5SLee Schermerhorn case MPOL_PREFERRED: 1571fc36b8d3SLee Schermerhorn /* 1572fc36b8d3SLee Schermerhorn * handled MPOL_F_LOCAL above 1573fc36b8d3SLee Schermerhorn */ 1574bea904d5SLee Schermerhorn return policy->v.preferred_node; 1575bea904d5SLee Schermerhorn 1576dc85da15SChristoph Lameter case MPOL_INTERLEAVE: 1577dc85da15SChristoph Lameter return interleave_nodes(policy); 1578dc85da15SChristoph Lameter 1579dd1a239fSMel Gorman case MPOL_BIND: { 1580dc85da15SChristoph Lameter /* 1581dc85da15SChristoph Lameter * Follow bind policy behavior and start allocation at the 1582dc85da15SChristoph Lameter * first node. 1583dc85da15SChristoph Lameter */ 158419770b32SMel Gorman struct zonelist *zonelist; 158519770b32SMel Gorman struct zone *zone; 158619770b32SMel Gorman enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL); 158719770b32SMel Gorman zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0]; 158819770b32SMel Gorman (void)first_zones_zonelist(zonelist, highest_zoneidx, 158919770b32SMel Gorman &policy->v.nodes, 159019770b32SMel Gorman &zone); 159119770b32SMel Gorman return zone->node; 1592dd1a239fSMel Gorman } 1593dc85da15SChristoph Lameter 1594dc85da15SChristoph Lameter default: 1595bea904d5SLee Schermerhorn BUG(); 1596dc85da15SChristoph Lameter } 1597dc85da15SChristoph Lameter } 1598dc85da15SChristoph Lameter 15991da177e4SLinus Torvalds /* Do static interleaving for a VMA with known offset.
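   Worked example (values assumed purely for illustration): with
   pol->v.nodes = {0,2,5} and off = 7, nnodes = 3 and target = 7 % 3 = 1,
   so the do/while below steps past node 0 and returns node 2, the
   second node of the mask.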
*/ 16001da177e4SLinus Torvalds static unsigned offset_il_node(struct mempolicy *pol, 16011da177e4SLinus Torvalds struct vm_area_struct *vma, unsigned long off) 16021da177e4SLinus Torvalds { 1603dfcd3c0dSAndi Kleen unsigned nnodes = nodes_weight(pol->v.nodes); 1604f5b087b5SDavid Rientjes unsigned target; 16051da177e4SLinus Torvalds int c; 16061da177e4SLinus Torvalds int nid = -1; 16071da177e4SLinus Torvalds 1608f5b087b5SDavid Rientjes if (!nnodes) 1609f5b087b5SDavid Rientjes return numa_node_id(); 1610f5b087b5SDavid Rientjes target = (unsigned int)off % nnodes; 16111da177e4SLinus Torvalds c = 0; 16121da177e4SLinus Torvalds do { 1613dfcd3c0dSAndi Kleen nid = next_node(nid, pol->v.nodes); 16141da177e4SLinus Torvalds c++; 16151da177e4SLinus Torvalds } while (c <= target); 16161da177e4SLinus Torvalds return nid; 16171da177e4SLinus Torvalds } 16181da177e4SLinus Torvalds 16195da7ca86SChristoph Lameter /* Determine a node number for interleave */ 16205da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol, 16215da7ca86SChristoph Lameter struct vm_area_struct *vma, unsigned long addr, int shift) 16225da7ca86SChristoph Lameter { 16235da7ca86SChristoph Lameter if (vma) { 16245da7ca86SChristoph Lameter unsigned long off; 16255da7ca86SChristoph Lameter 16263b98b087SNishanth Aravamudan /* 16273b98b087SNishanth Aravamudan * for small pages, there is no difference between 16283b98b087SNishanth Aravamudan * shift and PAGE_SHIFT, so the bit-shift is safe. 16293b98b087SNishanth Aravamudan * for huge pages, since vm_pgoff is in units of small 16303b98b087SNishanth Aravamudan * pages, we need to shift off the always 0 bits to get 16313b98b087SNishanth Aravamudan * a useful offset. 16323b98b087SNishanth Aravamudan */ 16333b98b087SNishanth Aravamudan BUG_ON(shift < PAGE_SHIFT); 16343b98b087SNishanth Aravamudan off = vma->vm_pgoff >> (shift - PAGE_SHIFT); 16355da7ca86SChristoph Lameter off += (addr - vma->vm_start) >> shift; 16365da7ca86SChristoph Lameter return offset_il_node(pol, vma, off); 16375da7ca86SChristoph Lameter } else 16385da7ca86SChristoph Lameter return interleave_nodes(pol); 16395da7ca86SChristoph Lameter } 16405da7ca86SChristoph Lameter 164100ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS 1642480eccf9SLee Schermerhorn /* 1643480eccf9SLee Schermerhorn * huge_zonelist(@vma, @addr, @gfp_flags, @mpol) 1644480eccf9SLee Schermerhorn * @vma = virtual memory area whose policy is sought 1645480eccf9SLee Schermerhorn * @addr = address in @vma for shared policy lookup and interleave policy 1646480eccf9SLee Schermerhorn * @gfp_flags = for requested zone 164719770b32SMel Gorman * @mpol = pointer to mempolicy pointer for reference counted mempolicy 164819770b32SMel Gorman * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask 1649480eccf9SLee Schermerhorn * 165052cd3b07SLee Schermerhorn * Returns a zonelist suitable for a huge page allocation and a pointer 165152cd3b07SLee Schermerhorn * to the struct mempolicy for conditional unref after allocation. 165252cd3b07SLee Schermerhorn * If the effective policy is 'bind', returns a pointer to the mempolicy's 165352cd3b07SLee Schermerhorn * @nodemask for filtering the zonelist.
1654c0ff7453SMiao Xie * 1655c0ff7453SMiao Xie * Must be protected by get_mems_allowed() 1656480eccf9SLee Schermerhorn */ 1657396faf03SMel Gorman struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr, 165819770b32SMel Gorman gfp_t gfp_flags, struct mempolicy **mpol, 165919770b32SMel Gorman nodemask_t **nodemask) 16605da7ca86SChristoph Lameter { 1661480eccf9SLee Schermerhorn struct zonelist *zl; 16625da7ca86SChristoph Lameter 166352cd3b07SLee Schermerhorn *mpol = get_vma_policy(current, vma, addr); 166419770b32SMel Gorman *nodemask = NULL; /* assume !MPOL_BIND */ 16655da7ca86SChristoph Lameter 166652cd3b07SLee Schermerhorn if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) { 166752cd3b07SLee Schermerhorn zl = node_zonelist(interleave_nid(*mpol, vma, addr, 1668a5516438SAndi Kleen huge_page_shift(hstate_vma(vma))), gfp_flags); 166952cd3b07SLee Schermerhorn } else { 167052cd3b07SLee Schermerhorn zl = policy_zonelist(gfp_flags, *mpol); 167152cd3b07SLee Schermerhorn if ((*mpol)->mode == MPOL_BIND) 167252cd3b07SLee Schermerhorn *nodemask = &(*mpol)->v.nodes; 1673480eccf9SLee Schermerhorn } 1674480eccf9SLee Schermerhorn return zl; 16755da7ca86SChristoph Lameter } 167606808b08SLee Schermerhorn 167706808b08SLee Schermerhorn /* 167806808b08SLee Schermerhorn * init_nodemask_of_mempolicy 167906808b08SLee Schermerhorn * 168006808b08SLee Schermerhorn * If the current task's mempolicy is "default" [NULL], return 'false' 168106808b08SLee Schermerhorn * to indicate default policy. Otherwise, extract the policy nodemask 168206808b08SLee Schermerhorn * for 'bind' or 'interleave' policy into the argument nodemask, or 168306808b08SLee Schermerhorn * initialize the argument nodemask to contain the single node for 168406808b08SLee Schermerhorn * 'preferred' or 'local' policy and return 'true' to indicate presence 168506808b08SLee Schermerhorn * of non-default mempolicy. 168606808b08SLee Schermerhorn * 168706808b08SLee Schermerhorn * We don't bother with reference counting the mempolicy [mpol_get/put] 168806808b08SLee Schermerhorn * because the current task is examining its own mempolicy and a task's 168906808b08SLee Schermerhorn * mempolicy is only ever changed by the task itself. 169006808b08SLee Schermerhorn * 169106808b08SLee Schermerhorn * N.B., it is the caller's responsibility to free a returned nodemask.
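 *
 * Illustrative caller pattern (assumed, not from this file; hugetlb is
 * one such user):
 *
 *	NODEMASK_ALLOC(nodemask_t, mask, GFP_KERNEL);
 *	if (mask && init_nodemask_of_mempolicy(mask))
 *		... iterate over the nodes set in *mask ...
 *	NODEMASK_FREE(mask);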
169206808b08SLee Schermerhorn */ 169306808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask) 169406808b08SLee Schermerhorn { 169506808b08SLee Schermerhorn struct mempolicy *mempolicy; 169606808b08SLee Schermerhorn int nid; 169706808b08SLee Schermerhorn 169806808b08SLee Schermerhorn if (!(mask && current->mempolicy)) 169906808b08SLee Schermerhorn return false; 170006808b08SLee Schermerhorn 1701c0ff7453SMiao Xie task_lock(current); 170206808b08SLee Schermerhorn mempolicy = current->mempolicy; 170306808b08SLee Schermerhorn switch (mempolicy->mode) { 170406808b08SLee Schermerhorn case MPOL_PREFERRED: 170506808b08SLee Schermerhorn if (mempolicy->flags & MPOL_F_LOCAL) 170606808b08SLee Schermerhorn nid = numa_node_id(); 170706808b08SLee Schermerhorn else 170806808b08SLee Schermerhorn nid = mempolicy->v.preferred_node; 170906808b08SLee Schermerhorn init_nodemask_of_node(mask, nid); 171006808b08SLee Schermerhorn break; 171106808b08SLee Schermerhorn 171206808b08SLee Schermerhorn case MPOL_BIND: 171306808b08SLee Schermerhorn /* Fall through */ 171406808b08SLee Schermerhorn case MPOL_INTERLEAVE: 171506808b08SLee Schermerhorn *mask = mempolicy->v.nodes; 171606808b08SLee Schermerhorn break; 171706808b08SLee Schermerhorn 171806808b08SLee Schermerhorn default: 171906808b08SLee Schermerhorn BUG(); 172006808b08SLee Schermerhorn } 1721c0ff7453SMiao Xie task_unlock(current); 172206808b08SLee Schermerhorn 172306808b08SLee Schermerhorn return true; 172406808b08SLee Schermerhorn } 172500ac59adSChen, Kenneth W #endif 17265da7ca86SChristoph Lameter 17276f48d0ebSDavid Rientjes /* 17286f48d0ebSDavid Rientjes * mempolicy_nodemask_intersects 17296f48d0ebSDavid Rientjes * 17306f48d0ebSDavid Rientjes * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default 17316f48d0ebSDavid Rientjes * policy. Otherwise, check for intersection between mask and the policy 17326f48d0ebSDavid Rientjes * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local' 17336f48d0ebSDavid Rientjes * policy, always return true since it may allocate elsewhere on fallback. 17346f48d0ebSDavid Rientjes * 17356f48d0ebSDavid Rientjes * Takes task_lock(tsk) to prevent freeing of its mempolicy. 17366f48d0ebSDavid Rientjes */ 17376f48d0ebSDavid Rientjes bool mempolicy_nodemask_intersects(struct task_struct *tsk, 17386f48d0ebSDavid Rientjes const nodemask_t *mask) 17396f48d0ebSDavid Rientjes { 17406f48d0ebSDavid Rientjes struct mempolicy *mempolicy; 17416f48d0ebSDavid Rientjes bool ret = true; 17426f48d0ebSDavid Rientjes 17436f48d0ebSDavid Rientjes if (!mask) 17446f48d0ebSDavid Rientjes return ret; 17456f48d0ebSDavid Rientjes task_lock(tsk); 17466f48d0ebSDavid Rientjes mempolicy = tsk->mempolicy; 17476f48d0ebSDavid Rientjes if (!mempolicy) 17486f48d0ebSDavid Rientjes goto out; 17496f48d0ebSDavid Rientjes 17506f48d0ebSDavid Rientjes switch (mempolicy->mode) { 17516f48d0ebSDavid Rientjes case MPOL_PREFERRED: 17526f48d0ebSDavid Rientjes /* 17536f48d0ebSDavid Rientjes * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to 17546f48d0ebSDavid Rientjes * allocate from, they may fall back to other nodes when oom. 17556f48d0ebSDavid Rientjes * Thus, it's possible for tsk to have allocated memory from 17566f48d0ebSDavid Rientjes * nodes in mask.
17576f48d0ebSDavid Rientjes */ 17586f48d0ebSDavid Rientjes break; 17596f48d0ebSDavid Rientjes case MPOL_BIND: 17606f48d0ebSDavid Rientjes case MPOL_INTERLEAVE: 17616f48d0ebSDavid Rientjes ret = nodes_intersects(mempolicy->v.nodes, *mask); 17626f48d0ebSDavid Rientjes break; 17636f48d0ebSDavid Rientjes default: 17646f48d0ebSDavid Rientjes BUG(); 17656f48d0ebSDavid Rientjes } 17666f48d0ebSDavid Rientjes out: 17676f48d0ebSDavid Rientjes task_unlock(tsk); 17686f48d0ebSDavid Rientjes return ret; 17696f48d0ebSDavid Rientjes } 17706f48d0ebSDavid Rientjes 17711da177e4SLinus Torvalds /* Allocate a page in interleaved policy. 17721da177e4SLinus Torvalds Own path because it needs to do special accounting. */ 1773662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, 1774662f3a0bSAndi Kleen unsigned nid) 17751da177e4SLinus Torvalds { 17761da177e4SLinus Torvalds struct zonelist *zl; 17771da177e4SLinus Torvalds struct page *page; 17781da177e4SLinus Torvalds 17790e88460dSMel Gorman zl = node_zonelist(nid, gfp); 17801da177e4SLinus Torvalds page = __alloc_pages(gfp, order, zl); 1781dd1a239fSMel Gorman if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0])) 1782ca889e6cSChristoph Lameter inc_zone_page_state(page, NUMA_INTERLEAVE_HIT); 17831da177e4SLinus Torvalds return page; 17841da177e4SLinus Torvalds } 17851da177e4SLinus Torvalds 17861da177e4SLinus Torvalds /** 17871da177e4SLinus Torvalds * alloc_page_vma - Allocate a page for a VMA. 17881da177e4SLinus Torvalds * 17891da177e4SLinus Torvalds * @gfp: 17901da177e4SLinus Torvalds * %GFP_USER user allocation. 17911da177e4SLinus Torvalds * %GFP_KERNEL kernel allocations, 17921da177e4SLinus Torvalds * %GFP_HIGHMEM highmem/user allocations, 17931da177e4SLinus Torvalds * %GFP_FS allocation should not call back into a file system. 17941da177e4SLinus Torvalds * %GFP_ATOMIC don't sleep. 17951da177e4SLinus Torvalds * 17961da177e4SLinus Torvalds * @vma: Pointer to VMA or NULL if not available. 17971da177e4SLinus Torvalds * @addr: Virtual Address of the allocation. Must be inside the VMA. 17981da177e4SLinus Torvalds * 17991da177e4SLinus Torvalds * This function allocates a page from the kernel page pool and applies 18001da177e4SLinus Torvalds * a NUMA policy associated with the VMA or the current process. 18011da177e4SLinus Torvalds * When VMA is not NULL caller must hold down_read on the mmap_sem of the 18021da177e4SLinus Torvalds * mm_struct of the VMA to prevent it from going away. Should be used for 18031da177e4SLinus Torvalds * all allocations for pages that will be mapped into 18041da177e4SLinus Torvalds * user space. Returns NULL when no page can be allocated. 18051da177e4SLinus Torvalds * 18061da177e4SLinus Torvalds * Should be called with the mmap_sem of the vma held.
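 *
 * A minimal usage sketch (illustrative, not from the original source),
 * e.g. in a fault path that already holds the semaphore as required
 * above:
 *
 *	page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
 *	if (!page)
 *		return VM_FAULT_OOM;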
18071da177e4SLinus Torvalds */ 18081da177e4SLinus Torvalds struct page * 1809dd0fc66fSAl Viro alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr) 18101da177e4SLinus Torvalds { 18116e21c8f1SChristoph Lameter struct mempolicy *pol = get_vma_policy(current, vma, addr); 1812480eccf9SLee Schermerhorn struct zonelist *zl; 1813c0ff7453SMiao Xie struct page *page; 18141da177e4SLinus Torvalds 1815c0ff7453SMiao Xie get_mems_allowed(); 181645c4745aSLee Schermerhorn if (unlikely(pol->mode == MPOL_INTERLEAVE)) { 18171da177e4SLinus Torvalds unsigned nid; 18185da7ca86SChristoph Lameter 18195da7ca86SChristoph Lameter nid = interleave_nid(pol, vma, addr, PAGE_SHIFT); 182052cd3b07SLee Schermerhorn mpol_cond_put(pol); 1821c0ff7453SMiao Xie page = alloc_page_interleave(gfp, 0, nid); 1822c0ff7453SMiao Xie put_mems_allowed(); 1823c0ff7453SMiao Xie return page; 18241da177e4SLinus Torvalds } 182552cd3b07SLee Schermerhorn zl = policy_zonelist(gfp, pol); 182652cd3b07SLee Schermerhorn if (unlikely(mpol_needs_cond_ref(pol))) { 1827480eccf9SLee Schermerhorn /* 182852cd3b07SLee Schermerhorn * slow path: ref counted shared policy 1829480eccf9SLee Schermerhorn */ 183019770b32SMel Gorman struct page *page = __alloc_pages_nodemask(gfp, 0, 183152cd3b07SLee Schermerhorn zl, policy_nodemask(gfp, pol)); 1832f0be3d32SLee Schermerhorn __mpol_put(pol); 1833c0ff7453SMiao Xie put_mems_allowed(); 1834480eccf9SLee Schermerhorn return page; 1835480eccf9SLee Schermerhorn } 1836480eccf9SLee Schermerhorn /* 1837480eccf9SLee Schermerhorn * fast path: default or task policy 1838480eccf9SLee Schermerhorn */ 1839c0ff7453SMiao Xie page = __alloc_pages_nodemask(gfp, 0, zl, policy_nodemask(gfp, pol)); 1840c0ff7453SMiao Xie put_mems_allowed(); 1841c0ff7453SMiao Xie return page; 18421da177e4SLinus Torvalds } 18431da177e4SLinus Torvalds 18441da177e4SLinus Torvalds /** 18451da177e4SLinus Torvalds * alloc_pages_current - Allocate pages. 18461da177e4SLinus Torvalds * 18471da177e4SLinus Torvalds * @gfp: 18481da177e4SLinus Torvalds * %GFP_USER user allocation, 18491da177e4SLinus Torvalds * %GFP_KERNEL kernel allocation, 18501da177e4SLinus Torvalds * %GFP_HIGHMEM highmem allocation, 18511da177e4SLinus Torvalds * %GFP_FS don't call back into a file system. 18521da177e4SLinus Torvalds * %GFP_ATOMIC don't sleep. 18531da177e4SLinus Torvalds * @order: Power of two of allocation size in pages. 0 is a single page. 18541da177e4SLinus Torvalds * 18551da177e4SLinus Torvalds * Allocate a page from the kernel page pool. When not in 18561da177e4SLinus Torvalds * interrupt context, apply the current process' NUMA policy. 18571da177e4SLinus Torvalds * Returns NULL when no page can be allocated. 18581da177e4SLinus Torvalds * 1859cf2a473cSPaul Jackson * Don't call cpuset_update_task_memory_state() unless 18601da177e4SLinus Torvalds * 1) it's ok to take cpuset_sem (can WAIT), and 18611da177e4SLinus Torvalds * 2) allocating for current task (not interrupt).
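 *
 * Note: under CONFIG_NUMA the generic alloc_pages() wrapper resolves to
 * this function, so an ordinary call (illustrative sketch) is already
 * policy-aware:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 0);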
18621da177e4SLinus Torvalds */ 1863dd0fc66fSAl Viro struct page *alloc_pages_current(gfp_t gfp, unsigned order) 18641da177e4SLinus Torvalds { 18651da177e4SLinus Torvalds struct mempolicy *pol = current->mempolicy; 1866c0ff7453SMiao Xie struct page *page; 18671da177e4SLinus Torvalds 18689b819d20SChristoph Lameter if (!pol || in_interrupt() || (gfp & __GFP_THISNODE)) 18691da177e4SLinus Torvalds pol = &default_policy; 187052cd3b07SLee Schermerhorn 1871c0ff7453SMiao Xie get_mems_allowed(); 187252cd3b07SLee Schermerhorn /* 187352cd3b07SLee Schermerhorn * No reference counting needed for current->mempolicy 187452cd3b07SLee Schermerhorn * nor system default_policy 187552cd3b07SLee Schermerhorn */ 187645c4745aSLee Schermerhorn if (pol->mode == MPOL_INTERLEAVE) 1877c0ff7453SMiao Xie page = alloc_page_interleave(gfp, order, interleave_nodes(pol)); 1878c0ff7453SMiao Xie else 1879c0ff7453SMiao Xie page = __alloc_pages_nodemask(gfp, order, 188052cd3b07SLee Schermerhorn policy_zonelist(gfp, pol), policy_nodemask(gfp, pol)); 1881c0ff7453SMiao Xie put_mems_allowed(); 1882c0ff7453SMiao Xie return page; 18831da177e4SLinus Torvalds } 18841da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_pages_current); 18851da177e4SLinus Torvalds 18864225399aSPaul Jackson /* 1887846a16bfSLee Schermerhorn * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it 18884225399aSPaul Jackson * rebinds the mempolicy it is copying by calling mpol_rebind_policy() 18894225399aSPaul Jackson * with the mems_allowed returned by cpuset_mems_allowed(). This 18904225399aSPaul Jackson * keeps mempolicies cpuset relative after its cpuset moves. See 18914225399aSPaul Jackson * further kernel/cpuset.c update_nodemask(). 1892708c1bbcSMiao Xie * 1893708c1bbcSMiao Xie * current's mempolicy may be rebound by another task (the task that changes 1894708c1bbcSMiao Xie * the cpuset's mems), so we needn't do rebind work for the current task. 18954225399aSPaul Jackson */ 18964225399aSPaul Jackson 1897846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */ 1898846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old) 18991da177e4SLinus Torvalds { 19001da177e4SLinus Torvalds struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL); 19011da177e4SLinus Torvalds 19021da177e4SLinus Torvalds if (!new) 19031da177e4SLinus Torvalds return ERR_PTR(-ENOMEM); 1904708c1bbcSMiao Xie 1905708c1bbcSMiao Xie /* task's mempolicy is protected by alloc_lock */ 1906708c1bbcSMiao Xie if (old == current->mempolicy) { 1907708c1bbcSMiao Xie task_lock(current); 1908708c1bbcSMiao Xie *new = *old; 1909708c1bbcSMiao Xie task_unlock(current); 1910708c1bbcSMiao Xie } else 1911708c1bbcSMiao Xie *new = *old; 1912708c1bbcSMiao Xie 191399ee4ca7SPaul E. McKenney rcu_read_lock(); 19144225399aSPaul Jackson if (current_cpuset_is_being_rebound()) { 19154225399aSPaul Jackson nodemask_t mems = cpuset_mems_allowed(current); 1916708c1bbcSMiao Xie if (new->flags & MPOL_F_REBINDING) 1917708c1bbcSMiao Xie mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2); 1918708c1bbcSMiao Xie else 1919708c1bbcSMiao Xie mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE); 19204225399aSPaul Jackson } 192199ee4ca7SPaul E.
McKenney rcu_read_unlock(); 19221da177e4SLinus Torvalds atomic_set(&new->refcnt, 1); 19231da177e4SLinus Torvalds return new; 19241da177e4SLinus Torvalds } 19251da177e4SLinus Torvalds 192652cd3b07SLee Schermerhorn /* 192752cd3b07SLee Schermerhorn * If *frompol needs [has] an extra ref, copy *frompol to *tompol , 192852cd3b07SLee Schermerhorn * eliminate the * MPOL_F_* flags that require conditional ref and 192952cd3b07SLee Schermerhorn * [NOTE!!!] drop the extra ref. Not safe to reference *frompol directly 193052cd3b07SLee Schermerhorn * after return. Use the returned value. 193152cd3b07SLee Schermerhorn * 193252cd3b07SLee Schermerhorn * Allows use of a mempolicy for, e.g., multiple allocations with a single 193352cd3b07SLee Schermerhorn * policy lookup, even if the policy needs/has extra ref on lookup. 193452cd3b07SLee Schermerhorn * shmem_readahead needs this. 193552cd3b07SLee Schermerhorn */ 193652cd3b07SLee Schermerhorn struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol, 193752cd3b07SLee Schermerhorn struct mempolicy *frompol) 193852cd3b07SLee Schermerhorn { 193952cd3b07SLee Schermerhorn if (!mpol_needs_cond_ref(frompol)) 194052cd3b07SLee Schermerhorn return frompol; 194152cd3b07SLee Schermerhorn 194252cd3b07SLee Schermerhorn *tompol = *frompol; 194352cd3b07SLee Schermerhorn tompol->flags &= ~MPOL_F_SHARED; /* copy doesn't need unref */ 194452cd3b07SLee Schermerhorn __mpol_put(frompol); 194552cd3b07SLee Schermerhorn return tompol; 194652cd3b07SLee Schermerhorn } 194752cd3b07SLee Schermerhorn 19481da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */ 19491da177e4SLinus Torvalds int __mpol_equal(struct mempolicy *a, struct mempolicy *b) 19501da177e4SLinus Torvalds { 19511da177e4SLinus Torvalds if (!a || !b) 19521da177e4SLinus Torvalds return 0; 195345c4745aSLee Schermerhorn if (a->mode != b->mode) 19541da177e4SLinus Torvalds return 0; 195519800502SBob Liu if (a->flags != b->flags) 1956f5b087b5SDavid Rientjes return 0; 195719800502SBob Liu if (mpol_store_user_nodemask(a)) 195819800502SBob Liu if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask)) 195919800502SBob Liu return 0; 196019800502SBob Liu 196145c4745aSLee Schermerhorn switch (a->mode) { 196219770b32SMel Gorman case MPOL_BIND: 196319770b32SMel Gorman /* Fall through */ 19641da177e4SLinus Torvalds case MPOL_INTERLEAVE: 1965dfcd3c0dSAndi Kleen return nodes_equal(a->v.nodes, b->v.nodes); 19661da177e4SLinus Torvalds case MPOL_PREFERRED: 1967fc36b8d3SLee Schermerhorn return a->v.preferred_node == b->v.preferred_node && 1968fc36b8d3SLee Schermerhorn a->flags == b->flags; 19691da177e4SLinus Torvalds default: 19701da177e4SLinus Torvalds BUG(); 19711da177e4SLinus Torvalds return 0; 19721da177e4SLinus Torvalds } 19731da177e4SLinus Torvalds } 19741da177e4SLinus Torvalds 19751da177e4SLinus Torvalds /* 19761da177e4SLinus Torvalds * Shared memory backing store policy support. 19771da177e4SLinus Torvalds * 19781da177e4SLinus Torvalds * Remember policies even when nobody has shared memory mapped. 19791da177e4SLinus Torvalds * The policies are kept in Red-Black tree linked from the inode. 19801da177e4SLinus Torvalds * They are protected by the sp->lock spinlock, which should be held 19811da177e4SLinus Torvalds * for any accesses to the tree. 
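 *
 * For illustration (offsets assumed): two mbind() calls on a shmem/tmpfs
 * mapping, covering page offsets [0,4) and [8,16), leave two sp_nodes in
 * the tree; a later lookup for offset 9 finds the second node via
 * sp_lookup() below.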
19821da177e4SLinus Torvalds */ 19831da177e4SLinus Torvalds 19841da177e4SLinus Torvalds /* lookup first element intersecting start-end */ 19851da177e4SLinus Torvalds /* Caller holds sp->lock */ 19861da177e4SLinus Torvalds static struct sp_node * 19871da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end) 19881da177e4SLinus Torvalds { 19891da177e4SLinus Torvalds struct rb_node *n = sp->root.rb_node; 19901da177e4SLinus Torvalds 19911da177e4SLinus Torvalds while (n) { 19921da177e4SLinus Torvalds struct sp_node *p = rb_entry(n, struct sp_node, nd); 19931da177e4SLinus Torvalds 19941da177e4SLinus Torvalds if (start >= p->end) 19951da177e4SLinus Torvalds n = n->rb_right; 19961da177e4SLinus Torvalds else if (end <= p->start) 19971da177e4SLinus Torvalds n = n->rb_left; 19981da177e4SLinus Torvalds else 19991da177e4SLinus Torvalds break; 20001da177e4SLinus Torvalds } 20011da177e4SLinus Torvalds if (!n) 20021da177e4SLinus Torvalds return NULL; 20031da177e4SLinus Torvalds for (;;) { 20041da177e4SLinus Torvalds struct sp_node *w = NULL; 20051da177e4SLinus Torvalds struct rb_node *prev = rb_prev(n); 20061da177e4SLinus Torvalds if (!prev) 20071da177e4SLinus Torvalds break; 20081da177e4SLinus Torvalds w = rb_entry(prev, struct sp_node, nd); 20091da177e4SLinus Torvalds if (w->end <= start) 20101da177e4SLinus Torvalds break; 20111da177e4SLinus Torvalds n = prev; 20121da177e4SLinus Torvalds } 20131da177e4SLinus Torvalds return rb_entry(n, struct sp_node, nd); 20141da177e4SLinus Torvalds } 20151da177e4SLinus Torvalds 20161da177e4SLinus Torvalds /* Insert a new shared policy into the list. */ 20171da177e4SLinus Torvalds /* Caller holds sp->lock */ 20181da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new) 20191da177e4SLinus Torvalds { 20201da177e4SLinus Torvalds struct rb_node **p = &sp->root.rb_node; 20211da177e4SLinus Torvalds struct rb_node *parent = NULL; 20221da177e4SLinus Torvalds struct sp_node *nd; 20231da177e4SLinus Torvalds 20241da177e4SLinus Torvalds while (*p) { 20251da177e4SLinus Torvalds parent = *p; 20261da177e4SLinus Torvalds nd = rb_entry(parent, struct sp_node, nd); 20271da177e4SLinus Torvalds if (new->start < nd->start) 20281da177e4SLinus Torvalds p = &(*p)->rb_left; 20291da177e4SLinus Torvalds else if (new->end > nd->end) 20301da177e4SLinus Torvalds p = &(*p)->rb_right; 20311da177e4SLinus Torvalds else 20321da177e4SLinus Torvalds BUG(); 20331da177e4SLinus Torvalds } 20341da177e4SLinus Torvalds rb_link_node(&new->nd, parent, p); 20351da177e4SLinus Torvalds rb_insert_color(&new->nd, &sp->root); 2036140d5a49SPaul Mundt pr_debug("inserting %lx-%lx: %d\n", new->start, new->end, 203745c4745aSLee Schermerhorn new->policy ? 
new->policy->mode : 0); 20381da177e4SLinus Torvalds } 20391da177e4SLinus Torvalds 20401da177e4SLinus Torvalds /* Find shared policy intersecting idx */ 20411da177e4SLinus Torvalds struct mempolicy * 20421da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx) 20431da177e4SLinus Torvalds { 20441da177e4SLinus Torvalds struct mempolicy *pol = NULL; 20451da177e4SLinus Torvalds struct sp_node *sn; 20461da177e4SLinus Torvalds 20471da177e4SLinus Torvalds if (!sp->root.rb_node) 20481da177e4SLinus Torvalds return NULL; 20491da177e4SLinus Torvalds spin_lock(&sp->lock); 20501da177e4SLinus Torvalds sn = sp_lookup(sp, idx, idx+1); 20511da177e4SLinus Torvalds if (sn) { 20521da177e4SLinus Torvalds mpol_get(sn->policy); 20531da177e4SLinus Torvalds pol = sn->policy; 20541da177e4SLinus Torvalds } 20551da177e4SLinus Torvalds spin_unlock(&sp->lock); 20561da177e4SLinus Torvalds return pol; 20571da177e4SLinus Torvalds } 20581da177e4SLinus Torvalds 20591da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n) 20601da177e4SLinus Torvalds { 2061140d5a49SPaul Mundt pr_debug("deleting %lx-%lx\n", n->start, n->end); 20621da177e4SLinus Torvalds rb_erase(&n->nd, &sp->root); 2063f0be3d32SLee Schermerhorn mpol_put(n->policy); 20641da177e4SLinus Torvalds kmem_cache_free(sn_cache, n); 20651da177e4SLinus Torvalds } 20661da177e4SLinus Torvalds 2067dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end, 2068dbcb0f19SAdrian Bunk struct mempolicy *pol) 20691da177e4SLinus Torvalds { 20701da177e4SLinus Torvalds struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL); 20711da177e4SLinus Torvalds 20721da177e4SLinus Torvalds if (!n) 20731da177e4SLinus Torvalds return NULL; 20741da177e4SLinus Torvalds n->start = start; 20751da177e4SLinus Torvalds n->end = end; 20761da177e4SLinus Torvalds mpol_get(pol); 2077aab0b102SLee Schermerhorn pol->flags |= MPOL_F_SHARED; /* for unref */ 20781da177e4SLinus Torvalds n->policy = pol; 20791da177e4SLinus Torvalds return n; 20801da177e4SLinus Torvalds } 20811da177e4SLinus Torvalds 20821da177e4SLinus Torvalds /* Replace a policy range. */ 20831da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start, 20841da177e4SLinus Torvalds unsigned long end, struct sp_node *new) 20851da177e4SLinus Torvalds { 20861da177e4SLinus Torvalds struct sp_node *n, *new2 = NULL; 20871da177e4SLinus Torvalds 20881da177e4SLinus Torvalds restart: 20891da177e4SLinus Torvalds spin_lock(&sp->lock); 20901da177e4SLinus Torvalds n = sp_lookup(sp, start, end); 20911da177e4SLinus Torvalds /* Take care of old policies in the same range. */ 20921da177e4SLinus Torvalds while (n && n->start < end) { 20931da177e4SLinus Torvalds struct rb_node *next = rb_next(&n->nd); 20941da177e4SLinus Torvalds if (n->start >= start) { 20951da177e4SLinus Torvalds if (n->end <= end) 20961da177e4SLinus Torvalds sp_delete(sp, n); 20971da177e4SLinus Torvalds else 20981da177e4SLinus Torvalds n->start = end; 20991da177e4SLinus Torvalds } else { 21001da177e4SLinus Torvalds /* Old policy spanning whole new range.
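   Worked example (ranges assumed for illustration): an existing node
   covering [2,10) being replaced over [4,6) is trimmed to [2,4) below,
   while new2 is inserted so that the old policy survives over [6,10).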
*/ 21011da177e4SLinus Torvalds if (n->end > end) { 21021da177e4SLinus Torvalds if (!new2) { 21031da177e4SLinus Torvalds spin_unlock(&sp->lock); 21041da177e4SLinus Torvalds new2 = sp_alloc(end, n->end, n->policy); 21051da177e4SLinus Torvalds if (!new2) 21061da177e4SLinus Torvalds return -ENOMEM; 21071da177e4SLinus Torvalds goto restart; 21081da177e4SLinus Torvalds } 21091da177e4SLinus Torvalds n->end = start; 21101da177e4SLinus Torvalds sp_insert(sp, new2); 21111da177e4SLinus Torvalds new2 = NULL; 21121da177e4SLinus Torvalds break; 21131da177e4SLinus Torvalds } else 21141da177e4SLinus Torvalds n->end = start; 21151da177e4SLinus Torvalds } 21161da177e4SLinus Torvalds if (!next) 21171da177e4SLinus Torvalds break; 21181da177e4SLinus Torvalds n = rb_entry(next, struct sp_node, nd); 21191da177e4SLinus Torvalds } 21201da177e4SLinus Torvalds if (new) 21211da177e4SLinus Torvalds sp_insert(sp, new); 21221da177e4SLinus Torvalds spin_unlock(&sp->lock); 21231da177e4SLinus Torvalds if (new2) { 2124f0be3d32SLee Schermerhorn mpol_put(new2->policy); 21251da177e4SLinus Torvalds kmem_cache_free(sn_cache, new2); 21261da177e4SLinus Torvalds } 21271da177e4SLinus Torvalds return 0; 21281da177e4SLinus Torvalds } 21291da177e4SLinus Torvalds 213071fe804bSLee Schermerhorn /** 213171fe804bSLee Schermerhorn * mpol_shared_policy_init - initialize shared policy for inode 213271fe804bSLee Schermerhorn * @sp: pointer to inode shared policy 213371fe804bSLee Schermerhorn * @mpol: struct mempolicy to install 213471fe804bSLee Schermerhorn * 213571fe804bSLee Schermerhorn * Install non-NULL @mpol in inode's shared policy rb-tree. 213671fe804bSLee Schermerhorn * On entry, the current task has a reference on a non-NULL @mpol. 213771fe804bSLee Schermerhorn * This must be released on exit. 21384bfc4495SKAMEZAWA Hiroyuki * This is called during get_inode(), so we can use GFP_KERNEL.
213971fe804bSLee Schermerhorn */ 214071fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) 21417339ff83SRobin Holt { 214258568d2aSMiao Xie int ret; 214358568d2aSMiao Xie 214471fe804bSLee Schermerhorn sp->root = RB_ROOT; /* empty tree == default mempolicy */ 214571fe804bSLee Schermerhorn spin_lock_init(&sp->lock); 21467339ff83SRobin Holt 214771fe804bSLee Schermerhorn if (mpol) { 21487339ff83SRobin Holt struct vm_area_struct pvma; 214971fe804bSLee Schermerhorn struct mempolicy *new; 21504bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 21517339ff83SRobin Holt 21524bfc4495SKAMEZAWA Hiroyuki if (!scratch) 21535c0c1654SLee Schermerhorn goto put_mpol; 215471fe804bSLee Schermerhorn /* contextualize the tmpfs mount point mempolicy */ 215571fe804bSLee Schermerhorn new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask); 215615d77835SLee Schermerhorn if (IS_ERR(new)) 21570cae3457SDan Carpenter goto free_scratch; /* no valid nodemask intersection */ 215858568d2aSMiao Xie 215958568d2aSMiao Xie task_lock(current); 21604bfc4495SKAMEZAWA Hiroyuki ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch); 216158568d2aSMiao Xie task_unlock(current); 216215d77835SLee Schermerhorn if (ret) 21635c0c1654SLee Schermerhorn goto put_new; 216471fe804bSLee Schermerhorn 216571fe804bSLee Schermerhorn /* Create pseudo-vma that contains just the policy */ 21667339ff83SRobin Holt memset(&pvma, 0, sizeof(struct vm_area_struct)); 216771fe804bSLee Schermerhorn pvma.vm_end = TASK_SIZE; /* policy covers entire file */ 216871fe804bSLee Schermerhorn mpol_set_shared_policy(sp, &pvma, new); /* adds ref */ 216915d77835SLee Schermerhorn 21705c0c1654SLee Schermerhorn put_new: 217171fe804bSLee Schermerhorn mpol_put(new); /* drop initial ref */ 21720cae3457SDan Carpenter free_scratch: 21734bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 21745c0c1654SLee Schermerhorn put_mpol: 21755c0c1654SLee Schermerhorn mpol_put(mpol); /* drop our incoming ref on sb mpol */ 21767339ff83SRobin Holt } 21777339ff83SRobin Holt } 21787339ff83SRobin Holt 21791da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info, 21801da177e4SLinus Torvalds struct vm_area_struct *vma, struct mempolicy *npol) 21811da177e4SLinus Torvalds { 21821da177e4SLinus Torvalds int err; 21831da177e4SLinus Torvalds struct sp_node *new = NULL; 21841da177e4SLinus Torvalds unsigned long sz = vma_pages(vma); 21851da177e4SLinus Torvalds 2186028fec41SDavid Rientjes pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n", 21871da177e4SLinus Torvalds vma->vm_pgoff, 218845c4745aSLee Schermerhorn sz, npol ? npol->mode : -1, 2189028fec41SDavid Rientjes npol ? npol->flags : -1, 2190dfcd3c0dSAndi Kleen npol ? nodes_addr(npol->v.nodes)[0] : -1); 21911da177e4SLinus Torvalds 21921da177e4SLinus Torvalds if (npol) { 21931da177e4SLinus Torvalds new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol); 21941da177e4SLinus Torvalds if (!new) 21951da177e4SLinus Torvalds return -ENOMEM; 21961da177e4SLinus Torvalds } 21971da177e4SLinus Torvalds err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new); 21981da177e4SLinus Torvalds if (err && new) 21991da177e4SLinus Torvalds kmem_cache_free(sn_cache, new); 22001da177e4SLinus Torvalds return err; 22011da177e4SLinus Torvalds } 22021da177e4SLinus Torvalds 22031da177e4SLinus Torvalds /* Free a backing policy store on inode delete. 
 */
22041da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p)
22051da177e4SLinus Torvalds {
22061da177e4SLinus Torvalds         struct sp_node *n;
22071da177e4SLinus Torvalds         struct rb_node *next;
22081da177e4SLinus Torvalds
22091da177e4SLinus Torvalds         if (!p->root.rb_node)
22101da177e4SLinus Torvalds                 return;
22111da177e4SLinus Torvalds         spin_lock(&p->lock);
22121da177e4SLinus Torvalds         next = rb_first(&p->root);
22131da177e4SLinus Torvalds         while (next) {
22141da177e4SLinus Torvalds                 n = rb_entry(next, struct sp_node, nd);
22151da177e4SLinus Torvalds                 next = rb_next(&n->nd);
221690c5029eSAndi Kleen                 rb_erase(&n->nd, &p->root);
2217f0be3d32SLee Schermerhorn                 mpol_put(n->policy);
22181da177e4SLinus Torvalds                 kmem_cache_free(sn_cache, n);
22191da177e4SLinus Torvalds         }
22201da177e4SLinus Torvalds         spin_unlock(&p->lock);
22211da177e4SLinus Torvalds }
22221da177e4SLinus Torvalds
22231da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */
22241da177e4SLinus Torvalds void __init numa_policy_init(void)
22251da177e4SLinus Torvalds {
2226b71636e2SPaul Mundt         nodemask_t interleave_nodes;
2227b71636e2SPaul Mundt         unsigned long largest = 0;
2228b71636e2SPaul Mundt         int nid, prefer = 0;
2229b71636e2SPaul Mundt
22301da177e4SLinus Torvalds         policy_cache = kmem_cache_create("numa_policy",
22311da177e4SLinus Torvalds                                          sizeof(struct mempolicy),
223220c2df83SPaul Mundt                                          0, SLAB_PANIC, NULL);
22331da177e4SLinus Torvalds
22341da177e4SLinus Torvalds         sn_cache = kmem_cache_create("shared_policy_node",
22351da177e4SLinus Torvalds                                      sizeof(struct sp_node),
223620c2df83SPaul Mundt                                      0, SLAB_PANIC, NULL);
22371da177e4SLinus Torvalds
2238b71636e2SPaul Mundt         /*
2239b71636e2SPaul Mundt          * Set interleaving policy for system init. Interleaving is only
2240b71636e2SPaul Mundt          * enabled across suitably sized nodes (default is >= 16MB), or
2241b71636e2SPaul Mundt          * fall back to the largest node if they're all smaller.
2242b71636e2SPaul Mundt          */
2243b71636e2SPaul Mundt         nodes_clear(interleave_nodes);
224456bbd65dSChristoph Lameter         for_each_node_state(nid, N_HIGH_MEMORY) {
2245b71636e2SPaul Mundt                 unsigned long total_pages = node_present_pages(nid);
22461da177e4SLinus Torvalds
2247b71636e2SPaul Mundt                 /* Preserve the largest node */
2248b71636e2SPaul Mundt                 if (largest < total_pages) {
2249b71636e2SPaul Mundt                         largest = total_pages;
2250b71636e2SPaul Mundt                         prefer = nid;
2251b71636e2SPaul Mundt                 }
2252b71636e2SPaul Mundt
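                /*
                 * Worked example for the size test below (assuming 4KB
                 * pages, i.e. PAGE_SHIFT == 12): a node qualifies when it
                 * has at least 4096 present pages, since
                 * 4096 << 12 == 16MB == 16 << 20.
                 */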
2253b71636e2SPaul Mundt                 /* Interleave this node? */
2254b71636e2SPaul Mundt                 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2255b71636e2SPaul Mundt                         node_set(nid, interleave_nodes);
2256b71636e2SPaul Mundt         }
2257b71636e2SPaul Mundt
2258b71636e2SPaul Mundt         /* All too small, use the largest */
2259b71636e2SPaul Mundt         if (unlikely(nodes_empty(interleave_nodes)))
2260b71636e2SPaul Mundt                 node_set(prefer, interleave_nodes);
2261b71636e2SPaul Mundt
2262028fec41SDavid Rientjes         if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
22631da177e4SLinus Torvalds                 printk("numa_policy_init: interleaving failed\n");
22641da177e4SLinus Torvalds }
22651da177e4SLinus Torvalds
22668bccd85fSChristoph Lameter /* Reset policy of current process to default */
22671da177e4SLinus Torvalds void numa_default_policy(void)
22681da177e4SLinus Torvalds {
2269028fec41SDavid Rientjes         do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
22701da177e4SLinus Torvalds }
227168860ec1SPaul Jackson
22724225399aSPaul Jackson /*
2273095f1fc4SLee Schermerhorn  * Parse and format mempolicy from/to strings
2274095f1fc4SLee Schermerhorn  */
2275095f1fc4SLee Schermerhorn
2276095f1fc4SLee Schermerhorn /*
2277fc36b8d3SLee Schermerhorn  * "local" is a pseudo-policy: MPOL_PREFERRED with the MPOL_F_LOCAL flag
22783f226aa1SLee Schermerhorn  * Used only for mpol_parse_str() and mpol_to_str()
22791a75a6c8SChristoph Lameter  */
2280345ace9cSLee Schermerhorn #define MPOL_LOCAL MPOL_MAX
2281345ace9cSLee Schermerhorn static const char * const policy_modes[] =
2282345ace9cSLee Schermerhorn {
2283345ace9cSLee Schermerhorn         [MPOL_DEFAULT]    = "default",
2284345ace9cSLee Schermerhorn         [MPOL_PREFERRED]  = "prefer",
2285345ace9cSLee Schermerhorn         [MPOL_BIND]       = "bind",
2286345ace9cSLee Schermerhorn         [MPOL_INTERLEAVE] = "interleave",
2287345ace9cSLee Schermerhorn         [MPOL_LOCAL]      = "local"
2288345ace9cSLee Schermerhorn };
22891a75a6c8SChristoph Lameter
2290095f1fc4SLee Schermerhorn
2291095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS
2292095f1fc4SLee Schermerhorn /**
2293095f1fc4SLee Schermerhorn  * mpol_parse_str - parse string to mempolicy
2294095f1fc4SLee Schermerhorn  * @str: string containing mempolicy to parse
229571fe804bSLee Schermerhorn  * @mpol: pointer to struct mempolicy pointer, returned on success.
229671fe804bSLee Schermerhorn  * @no_context: flag whether to "contextualize" the mempolicy
2297095f1fc4SLee Schermerhorn  *
2298095f1fc4SLee Schermerhorn  * Format of input:
2299095f1fc4SLee Schermerhorn  *	<mode>[=<flags>][:<nodelist>]
2300095f1fc4SLee Schermerhorn  *
230171fe804bSLee Schermerhorn  * If @no_context is true, save the input nodemask in w.user_nodemask in
230271fe804bSLee Schermerhorn  * the returned mempolicy. This will be used to "clone" the mempolicy in
230371fe804bSLee Schermerhorn  * a specific context [cpuset] at a later time. Used to parse tmpfs mpol
230471fe804bSLee Schermerhorn  * mount option. Note that if 'static' or 'relative' mode flags were
230571fe804bSLee Schermerhorn  * specified, the input nodemask will already have been saved. Saving
230671fe804bSLee Schermerhorn  * it again is redundant, but safe.
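 *
 * Example strings accepted here (illustrative, following the format above
 * and the per-mode rules enforced below):
 *	"interleave:0-3"	MPOL_INTERLEAVE over nodes 0-3
 *	"prefer=static:1"	MPOL_PREFERRED | MPOL_F_STATIC_NODES, node 1
 *	"bind=relative:0,2"	MPOL_BIND | MPOL_F_RELATIVE_NODES, nodes 0,2
 *	"local"			MPOL_PREFERRED with MPOL_F_LOCAL (no nodelist)
 *	"default"		MPOL_DEFAULT (nodelist not permitted)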
230771fe804bSLee Schermerhorn  *
230871fe804bSLee Schermerhorn  * On success, returns 0, else 1
2309095f1fc4SLee Schermerhorn  */
231071fe804bSLee Schermerhorn int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
2311095f1fc4SLee Schermerhorn {
231271fe804bSLee Schermerhorn         struct mempolicy *new = NULL;
2313b4652e84SLee Schermerhorn         unsigned short mode;
231471fe804bSLee Schermerhorn         unsigned short uninitialized_var(mode_flags);
231571fe804bSLee Schermerhorn         nodemask_t nodes;
2316095f1fc4SLee Schermerhorn         char *nodelist = strchr(str, ':');
2317095f1fc4SLee Schermerhorn         char *flags = strchr(str, '=');
2318095f1fc4SLee Schermerhorn         int err = 1;
2319095f1fc4SLee Schermerhorn
2320095f1fc4SLee Schermerhorn         if (nodelist) {
2321095f1fc4SLee Schermerhorn                 /* NUL-terminate mode or flags string */
2322095f1fc4SLee Schermerhorn                 *nodelist++ = '\0';
232371fe804bSLee Schermerhorn                 if (nodelist_parse(nodelist, nodes))
2324095f1fc4SLee Schermerhorn                         goto out;
232571fe804bSLee Schermerhorn                 if (!nodes_subset(nodes, node_states[N_HIGH_MEMORY]))
2326095f1fc4SLee Schermerhorn                         goto out;
232771fe804bSLee Schermerhorn         } else
232871fe804bSLee Schermerhorn                 nodes_clear(nodes);
232971fe804bSLee Schermerhorn
2330095f1fc4SLee Schermerhorn         if (flags)
2331095f1fc4SLee Schermerhorn                 *flags++ = '\0'; /* terminate mode string */
2332095f1fc4SLee Schermerhorn
2333b4652e84SLee Schermerhorn         for (mode = 0; mode <= MPOL_LOCAL; mode++) {
2334345ace9cSLee Schermerhorn                 if (!strcmp(str, policy_modes[mode])) {
2335095f1fc4SLee Schermerhorn                         break;
2336095f1fc4SLee Schermerhorn                 }
2337095f1fc4SLee Schermerhorn         }
2338b4652e84SLee Schermerhorn         if (mode > MPOL_LOCAL)
2339095f1fc4SLee Schermerhorn                 goto out;
2340095f1fc4SLee Schermerhorn
234171fe804bSLee Schermerhorn         switch (mode) {
2342095f1fc4SLee Schermerhorn         case MPOL_PREFERRED:
234371fe804bSLee Schermerhorn                 /*
234471fe804bSLee Schermerhorn                  * Insist on a nodelist of one node only
234571fe804bSLee Schermerhorn                  */
2346095f1fc4SLee Schermerhorn                 if (nodelist) {
2347095f1fc4SLee Schermerhorn                         char *rest = nodelist;
2348095f1fc4SLee Schermerhorn                         while (isdigit(*rest))
2349095f1fc4SLee Schermerhorn                                 rest++;
2350926f2ae0SKOSAKI Motohiro                         if (*rest)
2351926f2ae0SKOSAKI Motohiro                                 goto out;
2352095f1fc4SLee Schermerhorn                 }
2353095f1fc4SLee Schermerhorn                 break;
2354095f1fc4SLee Schermerhorn         case MPOL_INTERLEAVE:
2355095f1fc4SLee Schermerhorn                 /*
2356095f1fc4SLee Schermerhorn                  * Default to online nodes with memory if no nodelist
2357095f1fc4SLee Schermerhorn                  */
2358095f1fc4SLee Schermerhorn                 if (!nodelist)
235971fe804bSLee Schermerhorn                         nodes = node_states[N_HIGH_MEMORY];
23603f226aa1SLee Schermerhorn                 break;
236171fe804bSLee Schermerhorn         case MPOL_LOCAL:
23623f226aa1SLee Schermerhorn                 /*
236371fe804bSLee Schermerhorn                  * Don't allow a nodelist; mpol_new() checks flags
23643f226aa1SLee Schermerhorn                  */
236571fe804bSLee Schermerhorn                 if (nodelist)
23663f226aa1SLee Schermerhorn                         goto out;
236771fe804bSLee Schermerhorn                 mode = MPOL_PREFERRED;
23683f226aa1SLee Schermerhorn                 break;
2369413b43deSRavikiran G Thirumalai         case MPOL_DEFAULT:
2370413b43deSRavikiran G Thirumalai                 /*
2371413b43deSRavikiran G Thirumalai                  * Insist on an empty nodelist
2372413b43deSRavikiran G Thirumalai                  */
2373413b43deSRavikiran G Thirumalai                 if (!nodelist)
2374413b43deSRavikiran G Thirumalai                         err = 0;
2375413b43deSRavikiran G Thirumalai                 goto out;
2376d69b2e63SKOSAKI Motohiro         case MPOL_BIND:
237771fe804bSLee Schermerhorn                 /*
2378d69b2e63SKOSAKI Motohiro                  * Insist on a nodelist
237971fe804bSLee Schermerhorn                  */
2380d69b2e63SKOSAKI Motohiro                 if (!nodelist)
2381d69b2e63SKOSAKI Motohiro                         goto out;
2382095f1fc4SLee Schermerhorn         }
2383095f1fc4SLee Schermerhorn
238471fe804bSLee Schermerhorn         mode_flags = 0;
2385095f1fc4SLee Schermerhorn         if (flags) {
2386095f1fc4SLee Schermerhorn                 /*
2387095f1fc4SLee Schermerhorn                  * Currently, we only support two mutually exclusive
2388095f1fc4SLee Schermerhorn                  * mode flags.
2389095f1fc4SLee Schermerhorn                  */
2390095f1fc4SLee Schermerhorn                 if (!strcmp(flags, "static"))
239171fe804bSLee Schermerhorn                         mode_flags |= MPOL_F_STATIC_NODES;
2392095f1fc4SLee Schermerhorn                 else if (!strcmp(flags, "relative"))
239371fe804bSLee Schermerhorn                         mode_flags |= MPOL_F_RELATIVE_NODES;
2394095f1fc4SLee Schermerhorn                 else
2395926f2ae0SKOSAKI Motohiro                         goto out;
2396095f1fc4SLee Schermerhorn         }
239771fe804bSLee Schermerhorn
239871fe804bSLee Schermerhorn         new = mpol_new(mode, mode_flags, &nodes);
239971fe804bSLee Schermerhorn         if (IS_ERR(new))
2400926f2ae0SKOSAKI Motohiro                 goto out;
2401926f2ae0SKOSAKI Motohiro
2402e17f74afSLee Schermerhorn         if (no_context) {
2403e17f74afSLee Schermerhorn                 /* save for contextualization */
2404e17f74afSLee Schermerhorn                 new->w.user_nodemask = nodes;
2405e17f74afSLee Schermerhorn         } else {
240658568d2aSMiao Xie                 int ret;
24074bfc4495SKAMEZAWA Hiroyuki                 NODEMASK_SCRATCH(scratch);
24084bfc4495SKAMEZAWA Hiroyuki                 if (scratch) {
240958568d2aSMiao Xie                         task_lock(current);
24104bfc4495SKAMEZAWA Hiroyuki                         ret = mpol_set_nodemask(new, &nodes, scratch);
241158568d2aSMiao Xie                         task_unlock(current);
24124bfc4495SKAMEZAWA Hiroyuki                 } else
24134bfc4495SKAMEZAWA Hiroyuki                         ret = -ENOMEM;
24144bfc4495SKAMEZAWA Hiroyuki                 NODEMASK_SCRATCH_FREE(scratch);
24154bfc4495SKAMEZAWA Hiroyuki                 if (ret) {
24164bfc4495SKAMEZAWA Hiroyuki                         mpol_put(new);
2417926f2ae0SKOSAKI Motohiro                         goto out;
2418926f2ae0SKOSAKI Motohiro                 }
2419926f2ae0SKOSAKI Motohiro         }
2420926f2ae0SKOSAKI Motohiro         err = 0;
242171fe804bSLee Schermerhorn
2422095f1fc4SLee Schermerhorn out:
2423095f1fc4SLee Schermerhorn         /* Restore string for error message */
2424095f1fc4SLee Schermerhorn         if (nodelist)
2425095f1fc4SLee Schermerhorn                 *--nodelist = ':';
2426095f1fc4SLee Schermerhorn         if (flags)
2427095f1fc4SLee Schermerhorn                 *--flags = '=';
242871fe804bSLee Schermerhorn         if (!err)
242971fe804bSLee Schermerhorn                 *mpol = new;
2430095f1fc4SLee Schermerhorn         return err;
2431095f1fc4SLee Schermerhorn }
2432095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */
2433095f1fc4SLee Schermerhorn
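/*
 * Illustrative path into mpol_parse_str() (a sketch; the actual call site
 * lives in the tmpfs mount option handling):
 *
 *	mount -t tmpfs -o mpol=interleave:0-3 tmpfs /mnt
 *
 * should arrive here with str = "interleave:0-3" and no_context = 1, so
 * the parsed nodemask is saved in w.user_nodemask for later
 * "contextualization" against the allocating task's cpuset.
 */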
243471fe804bSLee Schermerhorn /**
243571fe804bSLee Schermerhorn  * mpol_to_str - format a mempolicy structure for printing
243671fe804bSLee Schermerhorn  * @buffer: to contain formatted mempolicy string
243771fe804bSLee Schermerhorn  * @maxlen: length of @buffer
243871fe804bSLee Schermerhorn  * @pol: pointer to mempolicy to be formatted
243971fe804bSLee Schermerhorn  * @no_context: "context free" mempolicy - use nodemask in w.user_nodemask
244071fe804bSLee Schermerhorn  *
24411a75a6c8SChristoph Lameter  * Convert a mempolicy into a string.
24421a75a6c8SChristoph Lameter  * Returns the number of characters in buffer (if positive)
24431a75a6c8SChristoph Lameter  * or an error (negative)
24441a75a6c8SChristoph Lameter  */
244571fe804bSLee Schermerhorn int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
24461a75a6c8SChristoph Lameter {
24471a75a6c8SChristoph Lameter         char *p = buffer;
24481a75a6c8SChristoph Lameter         int l;
24491a75a6c8SChristoph Lameter         nodemask_t nodes;
2450bea904d5SLee Schermerhorn         unsigned short mode;
2451f5b087b5SDavid Rientjes         unsigned short flags = pol ? pol->flags : 0;
24521a75a6c8SChristoph Lameter
24532291990aSLee Schermerhorn         /*
24542291990aSLee Schermerhorn          * Sanity check: room for longest mode, flag and some nodes
24552291990aSLee Schermerhorn          */
24562291990aSLee Schermerhorn         VM_BUG_ON(maxlen < strlen("interleave") + strlen("relative") + 16);
24572291990aSLee Schermerhorn
2458bea904d5SLee Schermerhorn         if (!pol || pol == &default_policy)
2459bea904d5SLee Schermerhorn                 mode = MPOL_DEFAULT;
2460bea904d5SLee Schermerhorn         else
2461bea904d5SLee Schermerhorn                 mode = pol->mode;
2462bea904d5SLee Schermerhorn
24631a75a6c8SChristoph Lameter         switch (mode) {
24641a75a6c8SChristoph Lameter         case MPOL_DEFAULT:
24651a75a6c8SChristoph Lameter                 nodes_clear(nodes);
24661a75a6c8SChristoph Lameter                 break;
24671a75a6c8SChristoph Lameter
24681a75a6c8SChristoph Lameter         case MPOL_PREFERRED:
24691a75a6c8SChristoph Lameter                 nodes_clear(nodes);
2470fc36b8d3SLee Schermerhorn                 if (flags & MPOL_F_LOCAL)
247153f2556bSLee Schermerhorn                         mode = MPOL_LOCAL; /* pseudo-policy */
247253f2556bSLee Schermerhorn                 else
2473fc36b8d3SLee Schermerhorn                         node_set(pol->v.preferred_node, nodes);
24741a75a6c8SChristoph Lameter                 break;
24751a75a6c8SChristoph Lameter
24761a75a6c8SChristoph Lameter         case MPOL_BIND:
247719770b32SMel Gorman                 /* Fall through */
24781a75a6c8SChristoph Lameter         case MPOL_INTERLEAVE:
247971fe804bSLee Schermerhorn                 if (no_context)
248071fe804bSLee Schermerhorn                         nodes = pol->w.user_nodemask;
248171fe804bSLee Schermerhorn                 else
24821a75a6c8SChristoph Lameter                         nodes = pol->v.nodes;
24831a75a6c8SChristoph Lameter                 break;
24841a75a6c8SChristoph Lameter
24851a75a6c8SChristoph Lameter         default:
24861a75a6c8SChristoph Lameter                 BUG();
24871a75a6c8SChristoph Lameter         }
24881a75a6c8SChristoph Lameter
2489345ace9cSLee Schermerhorn         l = strlen(policy_modes[mode]);
24901a75a6c8SChristoph Lameter         if (buffer + maxlen < p + l + 1)
24911a75a6c8SChristoph Lameter                 return -ENOSPC;
24921a75a6c8SChristoph Lameter
2493345ace9cSLee Schermerhorn         strcpy(p, policy_modes[mode]);
24941a75a6c8SChristoph Lameter         p += l;
24951a75a6c8SChristoph Lameter
2496fc36b8d3SLee Schermerhorn         if (flags & MPOL_MODE_FLAGS) {
2497f5b087b5SDavid Rientjes                 if (buffer + maxlen < p + 2)
2498f5b087b5SDavid Rientjes                         return -ENOSPC;
2499f5b087b5SDavid Rientjes                 *p++ = '=';
2500f5b087b5SDavid Rientjes
25012291990aSLee Schermerhorn                 /*
25022291990aSLee Schermerhorn                  * Currently, the only defined flags are mutually exclusive
25032291990aSLee Schermerhorn                  */
2504f5b087b5SDavid Rientjes                 if (flags & MPOL_F_STATIC_NODES)
25052291990aSLee Schermerhorn                         p += snprintf(p, buffer + maxlen - p, "static");
25062291990aSLee Schermerhorn                 else if (flags & MPOL_F_RELATIVE_NODES)
25072291990aSLee Schermerhorn                         p += snprintf(p, buffer + maxlen - p, "relative");
2508f5b087b5SDavid Rientjes         }
2509f5b087b5SDavid Rientjes
25101a75a6c8SChristoph Lameter         if (!nodes_empty(nodes)) {
25111a75a6c8SChristoph Lameter                 if (buffer + maxlen < p + 2)
25121a75a6c8SChristoph Lameter                         return -ENOSPC;
2513095f1fc4SLee Schermerhorn                 *p++ = ':';
25141a75a6c8SChristoph Lameter                 p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
25151a75a6c8SChristoph Lameter         }
25161a75a6c8SChristoph Lameter         return p - buffer;
25171a75a6c8SChristoph Lameter }
25181a75a6c8SChristoph Lameter
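/*
 * Example mpol_to_str() results (illustrative; these follow directly from
 * the formatting logic above):
 *
 *	MPOL_INTERLEAVE over nodes 0-3		->  "interleave:0-3"
 *	MPOL_PREFERRED with MPOL_F_LOCAL	->  "local"
 *	MPOL_BIND | MPOL_F_STATIC_NODES, 0,2	->  "bind=static:0,2"
 */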
25191a75a6c8SChristoph Lameter struct numa_maps {
25201a75a6c8SChristoph Lameter         unsigned long pages;
25211a75a6c8SChristoph Lameter         unsigned long anon;
2522397874dfSChristoph Lameter         unsigned long active;
2523397874dfSChristoph Lameter         unsigned long writeback;
25241a75a6c8SChristoph Lameter         unsigned long mapcount_max;
2525397874dfSChristoph Lameter         unsigned long dirty;
2526397874dfSChristoph Lameter         unsigned long swapcache;
25271a75a6c8SChristoph Lameter         unsigned long node[MAX_NUMNODES];
25281a75a6c8SChristoph Lameter };
25291a75a6c8SChristoph Lameter
2530397874dfSChristoph Lameter static void gather_stats(struct page *page, void *private, int pte_dirty)
25311a75a6c8SChristoph Lameter {
25321a75a6c8SChristoph Lameter         struct numa_maps *md = private;
25331a75a6c8SChristoph Lameter         int count = page_mapcount(page);
25341a75a6c8SChristoph Lameter
25351a75a6c8SChristoph Lameter         md->pages++;
2536397874dfSChristoph Lameter         if (pte_dirty || PageDirty(page))
2537397874dfSChristoph Lameter                 md->dirty++;
2538397874dfSChristoph Lameter
2539397874dfSChristoph Lameter         if (PageSwapCache(page))
2540397874dfSChristoph Lameter                 md->swapcache++;
2541397874dfSChristoph Lameter
2542894bc310SLee Schermerhorn         if (PageActive(page) || PageUnevictable(page))
2543397874dfSChristoph Lameter                 md->active++;
2544397874dfSChristoph Lameter
2545397874dfSChristoph Lameter         if (PageWriteback(page))
2546397874dfSChristoph Lameter                 md->writeback++;
25471a75a6c8SChristoph Lameter
25481a75a6c8SChristoph Lameter         if (PageAnon(page))
25491a75a6c8SChristoph Lameter                 md->anon++;
25501a75a6c8SChristoph Lameter
2551397874dfSChristoph Lameter         if (count > md->mapcount_max)
2552397874dfSChristoph Lameter                 md->mapcount_max = count;
2553397874dfSChristoph Lameter
25541a75a6c8SChristoph Lameter         md->node[page_to_nid(page)]++;
25551a75a6c8SChristoph Lameter }
25561a75a6c8SChristoph Lameter
25577f709ed0SAndrew Morton #ifdef CONFIG_HUGETLB_PAGE
2558397874dfSChristoph Lameter static void check_huge_range(struct vm_area_struct *vma,
2559397874dfSChristoph Lameter                 unsigned long start, unsigned long end,
2560397874dfSChristoph Lameter                 struct numa_maps *md)
2561397874dfSChristoph Lameter {
2562397874dfSChristoph Lameter         unsigned long addr;
2563397874dfSChristoph Lameter         struct page *page;
2564a5516438SAndi Kleen         struct hstate *h = hstate_vma(vma);
2565a5516438SAndi Kleen         unsigned long sz = huge_page_size(h);
2566397874dfSChristoph Lameter
2567a5516438SAndi Kleen         for (addr = start; addr < end; addr += sz) {
2568a5516438SAndi Kleen                 pte_t *ptep = huge_pte_offset(vma->vm_mm,
2569a5516438SAndi Kleen                                 addr & huge_page_mask(h));
2570397874dfSChristoph Lameter                 pte_t pte;
2571397874dfSChristoph Lameter
2572397874dfSChristoph Lameter                 if (!ptep)
2573397874dfSChristoph Lameter                         continue;
2574397874dfSChristoph Lameter
2575397874dfSChristoph Lameter                 pte = *ptep;
2576397874dfSChristoph Lameter                 if (pte_none(pte))
2577397874dfSChristoph Lameter                         continue;
2578397874dfSChristoph Lameter
2579397874dfSChristoph Lameter                 page = pte_page(pte);
2580397874dfSChristoph Lameter                 if (!page)
2581397874dfSChristoph Lameter                         continue;
2582397874dfSChristoph Lameter
2583397874dfSChristoph Lameter                 gather_stats(page, md, pte_dirty(*ptep));
2584397874dfSChristoph Lameter         }
2585397874dfSChristoph Lameter }
25867f709ed0SAndrew Morton #else
25877f709ed0SAndrew Morton static inline void check_huge_range(struct vm_area_struct *vma,
25887f709ed0SAndrew Morton                 unsigned long start, unsigned long end,
25897f709ed0SAndrew Morton                 struct numa_maps *md)
25907f709ed0SAndrew Morton {
25917f709ed0SAndrew Morton }
25927f709ed0SAndrew Morton #endif
2593397874dfSChristoph Lameter
259453f2556bSLee Schermerhorn /*
259553f2556bSLee Schermerhorn  * Display pages allocated per node and memory policy via /proc.
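 *
 * Each VMA yields one line; given the fields emitted below, a line looks
 * roughly like this (values purely illustrative):
 *
 *	00400000 default file=/bin/cat mapped=2 mapmax=4 N0=2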
259653f2556bSLee Schermerhorn  */
25971a75a6c8SChristoph Lameter int show_numa_map(struct seq_file *m, void *v)
25981a75a6c8SChristoph Lameter {
259999f89551SEric W. Biederman         struct proc_maps_private *priv = m->private;
26001a75a6c8SChristoph Lameter         struct vm_area_struct *vma = v;
26011a75a6c8SChristoph Lameter         struct numa_maps *md;
2602397874dfSChristoph Lameter         struct file *file = vma->vm_file;
2603397874dfSChristoph Lameter         struct mm_struct *mm = vma->vm_mm;
2604480eccf9SLee Schermerhorn         struct mempolicy *pol;
26051a75a6c8SChristoph Lameter         int n;
26061a75a6c8SChristoph Lameter         char buffer[50];
26071a75a6c8SChristoph Lameter
2608397874dfSChristoph Lameter         if (!mm)
26091a75a6c8SChristoph Lameter                 return 0;
26101a75a6c8SChristoph Lameter
26111a75a6c8SChristoph Lameter         md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
26121a75a6c8SChristoph Lameter         if (!md)
26131a75a6c8SChristoph Lameter                 return 0;
26141a75a6c8SChristoph Lameter
2615480eccf9SLee Schermerhorn         pol = get_vma_policy(priv->task, vma, vma->vm_start);
261671fe804bSLee Schermerhorn         mpol_to_str(buffer, sizeof(buffer), pol, 0);
261752cd3b07SLee Schermerhorn         mpol_cond_put(pol);
26181a75a6c8SChristoph Lameter
2619397874dfSChristoph Lameter         seq_printf(m, "%08lx %s", vma->vm_start, buffer);
2620397874dfSChristoph Lameter
2621397874dfSChristoph Lameter         if (file) {
2622397874dfSChristoph Lameter                 seq_printf(m, " file=");
2623c32c2f63SJan Blunck                 seq_path(m, &file->f_path, "\n\t= ");
2624397874dfSChristoph Lameter         } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
2625397874dfSChristoph Lameter                 seq_printf(m, " heap");
2626397874dfSChristoph Lameter         } else if (vma->vm_start <= mm->start_stack &&
2627397874dfSChristoph Lameter                         vma->vm_end >= mm->start_stack) {
2628397874dfSChristoph Lameter                 seq_printf(m, " stack");
2629397874dfSChristoph Lameter         }
2630397874dfSChristoph Lameter
2631397874dfSChristoph Lameter         if (is_vm_hugetlb_page(vma)) {
2632397874dfSChristoph Lameter                 check_huge_range(vma, vma->vm_start, vma->vm_end, md);
2633397874dfSChristoph Lameter                 seq_printf(m, " huge");
2634397874dfSChristoph Lameter         } else {
2635397874dfSChristoph Lameter                 check_pgd_range(vma, vma->vm_start, vma->vm_end,
263656bbd65dSChristoph Lameter                         &node_states[N_HIGH_MEMORY], MPOL_MF_STATS, md);
2637397874dfSChristoph Lameter         }
2638397874dfSChristoph Lameter
2639397874dfSChristoph Lameter         if (!md->pages)
2640397874dfSChristoph Lameter                 goto out;
26411a75a6c8SChristoph Lameter
26421a75a6c8SChristoph Lameter         if (md->anon)
26431a75a6c8SChristoph Lameter                 seq_printf(m," anon=%lu",md->anon);
26441a75a6c8SChristoph Lameter
2645397874dfSChristoph Lameter         if (md->dirty)
2646397874dfSChristoph Lameter                 seq_printf(m," dirty=%lu",md->dirty);
2647397874dfSChristoph Lameter
2648397874dfSChristoph Lameter         if (md->pages != md->anon && md->pages != md->dirty)
2649397874dfSChristoph Lameter                 seq_printf(m, " mapped=%lu", md->pages);
2650397874dfSChristoph Lameter
2651397874dfSChristoph Lameter         if (md->mapcount_max > 1)
2652397874dfSChristoph Lameter                 seq_printf(m, " mapmax=%lu", md->mapcount_max);
2653397874dfSChristoph Lameter
2654397874dfSChristoph Lameter         if (md->swapcache)
2655397874dfSChristoph Lameter                 seq_printf(m," swapcache=%lu", md->swapcache);
2656397874dfSChristoph Lameter
2657397874dfSChristoph Lameter         if (md->active < md->pages && !is_vm_hugetlb_page(vma))
2658397874dfSChristoph Lameter                 seq_printf(m," active=%lu", md->active);
2659397874dfSChristoph Lameter
2660397874dfSChristoph Lameter         if (md->writeback)
2661397874dfSChristoph Lameter                 seq_printf(m," writeback=%lu", md->writeback);
2662397874dfSChristoph Lameter
266356bbd65dSChristoph Lameter         for_each_node_state(n, N_HIGH_MEMORY)
26641a75a6c8SChristoph Lameter                 if (md->node[n])
26651a75a6c8SChristoph Lameter                         seq_printf(m, " N%d=%lu", n, md->node[n]);
2666397874dfSChristoph Lameter out:
26671a75a6c8SChristoph Lameter         seq_putc(m, '\n');
26681a75a6c8SChristoph Lameter         kfree(md);
26691a75a6c8SChristoph Lameter
26701a75a6c8SChristoph Lameter         if (m->count < m->size)
267199f89551SEric W. Biederman                 m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
26721a75a6c8SChristoph Lameter         return 0;
26731a75a6c8SChristoph Lameter }
2674