/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave	Allocate memory interleaved over a set of nodes,
 *		with normal fallback if it fails.
 *		For VMA based allocations this interleaves based on the
 *		offset into the backing object or offset into the mapping
 *		for anonymous memory. For process policy a process counter
 *		is used.
 *
 * bind		Only allocate memory on a specific set of nodes,
 *		no fallback.
 *		FIXME: memory is allocated starting with the first node
 *		to the last. It would be better if bind would truly restrict
 *		the allocation to memory nodes instead
 *
 * preferred	Try a specific node first before normal fallback.
 *		As a special case NUMA_NO_NODE here means do the allocation
 *		on the local CPU. This is normally identical to default,
 *		but useful to set in a VMA when you have a non default
 *		process policy.
 *
 * default	Allocate on the local node first, or when on a VMA
 *		use the process policy. This is what Linux always did
 *		in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
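
/*
 * Illustrative sketch (userspace side, not part of this file): the modes
 * above are selected with the set_mempolicy() and mbind() syscalls, e.g.
 * through the libnuma <numaif.h> wrappers.  The nodemask/maxnode handling
 * below is simplified and the address/length are assumed placeholders:
 *
 *	#include <numaif.h>
 *
 *	unsigned long nodes = (1UL << 0) | (1UL << 1);	(nodes 0 and 1)
 *
 *	(process policy: interleave future allocations over nodes 0 and 1)
 *	set_mempolicy(MPOL_INTERLEAVE, &nodes, sizeof(nodes) * 8);
 *
 *	(VMA policy: bind an existing mapping to the same nodes)
 *	mbind(addr, len, MPOL_BIND, &nodes, sizeof(nodes) * 8, MPOL_MF_MOVE);
 */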

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel does not always handle that gracefully.
*/

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <linux/random.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

static struct mempolicy *get_task_policy(struct task_struct *p)
{
	struct mempolicy *pol = p->mempolicy;

	if (!pol) {
		int node = numa_node_id();

		if (node != NUMA_NO_NODE) {
			pol = &preferred_node_policy[node];
			/*
			 * preferred_node_policy is not initialised early in
			 * boot
			 */
			if (!pol->mode)
				pol = NULL;
		}
	}

	return pol;
}

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	/*
	 * If the read-side task has no lock to protect task->mempolicy, the
	 * write-side task rebinds task->mempolicy in two steps. The first
	 * step sets all the new nodes, and the second step clears all the
	 * disallowed nodes. In this way, readers never see a policy with no
	 * node to allocate pages from.
	 * If the read side holds a lock protecting task->mempolicy, we
	 * rebind directly.
	 *
	 * step:
	 *	MPOL_REBIND_ONCE  - do rebind work at once
	 *	MPOL_REBIND_STEP1 - set all the new nodes
	 *	MPOL_REBIND_STEP2 - clean all the disallowed nodes
	 */
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
			enum mpol_rebind_step step);
} mpol_ops[MPOL_MAX];
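
/*
 * Worked example (illustrative, node numbers assumed): a task interleaves
 * over nodes {0,1} and its cpuset's mems_allowed changes to {2,3}.  With
 * no read-side lock, the writer rebinds in two steps so that concurrent
 * readers never observe an empty nodemask:
 *
 *	start:             pol->v.nodes = {0,1}
 *	MPOL_REBIND_STEP1: remap {0,1} -> {2,3} and OR into the old mask,
 *	                   so pol->v.nodes = {0,1,2,3}
 *	MPOL_REBIND_STEP2: drop the now-disallowed nodes,
 *	                   so pol->v.nodes = {2,3}
 *
 * A reader sampling pol->v.nodes at any point sees {0,1}, {0,1,2,3} or
 * {2,3}, each of which contains at least one usable node.  Callers that
 * do hold a lock use MPOL_REBIND_ONCE and remap in a single step.
 */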

/* Check that the nodemask contains at least one populated zone */
static int is_valid_nodemask(const nodemask_t *nodemask)
{
	return nodes_intersects(*nodemask, node_states[N_MEMORY]);
}

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}

static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!nodes)
		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
	else if (nodes_empty(*nodes))
		return -EINVAL;			/* no allowed nodes */
	else
		pol->v.preferred_node = first_node(*nodes);
	return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!is_valid_nodemask(nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy.  mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.  But, we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy.  May also be called holding the mmap_semaphore for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
	if (pol == NULL)
		return 0;
	/* Check N_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_MEMORY]);

	VM_BUG_ON(!nodes);
	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
		nodes = NULL;	/* explicit local allocation */
	else {
		if (pol->flags & MPOL_F_RELATIVE_NODES)
			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
		else
			nodes_and(nsc->mask2, *nodes, nsc->mask1);

		if (mpol_store_user_nodemask(pol))
			pol->w.user_nodemask = *nodes;
		else
			pol->w.cpuset_mems_allowed =
						cpuset_current_mems_allowed;
	}

	if (nodes)
		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	else
		ret = mpol_ops[pol->mode].create(pol, NULL);
	return ret;
}
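
/*
 * Worked example (illustrative, node numbers assumed): the user passes
 * nodemask {0,1} while the current cpuset allows {0,4,6}.
 *
 *	no mode flag:          effective mask = {0,1} & {0,4,6} = {0};
 *	                       the cpuset context is saved for later remaps
 *	MPOL_F_STATIC_NODES:   effective mask is also {0}, but the literal
 *	                       user mask {0,1} is saved and is only ever
 *	                       re-intersected, never remapped, on rebind
 *	MPOL_F_RELATIVE_NODES: {0,1} is read as "the first and second
 *	                       allowed nodes", so the effective mask is
 *	                       {0,4} via mpol_relative_nodemask()
 *
 * An empty effective mask (e.g. asking for {0,1} in a cpuset confined to
 * {4,6} without MPOL_F_RELATIVE_NODES) is rejected by ->create().
 */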

/*
 * This function just creates a new policy, does some checks and simple
 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);
		}
	} else if (mode == MPOL_LOCAL) {
		if (!nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		mode = MPOL_PREFERRED;
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	return policy;
}

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
				enum mpol_rebind_step step)
{
}

/*
 * step:
 *	MPOL_REBIND_ONCE  - do rebind work at once
 *	MPOL_REBIND_STEP1 - set all the new nodes
 *	MPOL_REBIND_STEP2 - clean all the disallowed nodes
 */
static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
				 enum mpol_rebind_step step)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		/*
		 * if step == 1, we use ->w.cpuset_mems_allowed to cache the
		 * result
		 */
		if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
			nodes_remap(tmp, pol->v.nodes,
					pol->w.cpuset_mems_allowed, *nodes);
			pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
		} else if (step == MPOL_REBIND_STEP2) {
			tmp = pol->w.cpuset_mems_allowed;
			pol->w.cpuset_mems_allowed = *nodes;
		} else
			BUG();
	}

	if (nodes_empty(tmp))
		tmp = *nodes;

	if (step == MPOL_REBIND_STEP1)
		nodes_or(pol->v.nodes, pol->v.nodes, tmp);
	else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
		pol->v.nodes = tmp;
	else
		BUG();

	if (!node_isset(current->il_next, tmp)) {
		current->il_next = next_node(current->il_next, tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = first_node(tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = numa_node_id();
	}
}

static void mpol_rebind_preferred(struct mempolicy *pol,
				  const nodemask_t *nodes,
				  enum mpol_rebind_step step)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES) {
		int node = first_node(pol->w.user_nodemask);

		if (node_isset(node, *nodes)) {
			pol->v.preferred_node = node;
			pol->flags &= ~MPOL_F_LOCAL;
		} else
			pol->flags |= MPOL_F_LOCAL;
	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
		pol->v.preferred_node = first_node(tmp);
	} else if (!(pol->flags & MPOL_F_LOCAL)) {
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						   pol->w.cpuset_mems_allowed,
						   *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}
}

/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * If the read-side task has no lock to protect task->mempolicy, the
 * write-side task rebinds task->mempolicy in two steps. The first step
 * sets all the new nodes, and the second step clears all the disallowed
 * nodes. In this way, readers never see a policy with no node to
 * allocate pages from.
 * If the read side holds a lock protecting task->mempolicy, we rebind
 * directly.
 *
 * step:
 *	MPOL_REBIND_ONCE  - do rebind work at once
 *	MPOL_REBIND_STEP1 - set all the new nodes
 *	MPOL_REBIND_STEP2 - clean all the disallowed nodes
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
				enum mpol_rebind_step step)
{
	if (!pol)
		return;
	if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;

	if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
		return;

	if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
		BUG();

	if (step == MPOL_REBIND_STEP1)
		pol->flags |= MPOL_F_REBINDING;
	else if (step == MPOL_REBIND_STEP2)
		pol->flags &= ~MPOL_F_REBINDING;
	else if (step >= MPOL_REBIND_NSTEP)
		BUG();

	mpol_ops[pol->mode].rebind(pol, newmask, step);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */

void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
			enum mpol_rebind_step step)
{
	mpol_rebind_policy(tsk->mempolicy, new, step);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
 */

void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
	up_write(&mm->mmap_sem);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_interleave,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,
	},
};

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

/*
 * Scan through pages, checking whether they match the given conditions,
 * and move them to the pagelist if they do.
 */
static int queue_pages_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pte_t *orig_pte;
	pte_t *pte;
	spinlock_t *ptl;

	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		struct page *page;
		int nid;

		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * vm_normal_page() filters out zero pages, but there might
		 * still be PageReserved pages to skip, perhaps in a VDSO.
		 */
		if (PageReserved(page))
			continue;
		nid = page_to_nid(page);
		if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
			continue;

		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
			migrate_page_add(page, private, flags);
		else
			break;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return addr != end;
}

static void queue_pages_hugetlb_pmd_range(struct vm_area_struct *vma,
		pmd_t *pmd, const nodemask_t *nodes, unsigned long flags,
		void *private)
{
#ifdef CONFIG_HUGETLB_PAGE
	int nid;
	struct page *page;

	spin_lock(&vma->vm_mm->page_table_lock);
	page = pte_page(huge_ptep_get((pte_t *)pmd));
	nid = page_to_nid(page);
	if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
		goto unlock;
	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
	if (flags & (MPOL_MF_MOVE_ALL) ||
	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
		isolate_huge_page(page, private);
unlock:
	spin_unlock(&vma->vm_mm->page_table_lock);
#else
	BUG();
#endif
}

static inline int queue_pages_pmd_range(struct vm_area_struct *vma, pud_t *pud,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_present(*pmd))
			continue;
		if (pmd_huge(*pmd) && is_vm_hugetlb_page(vma)) {
			queue_pages_hugetlb_pmd_range(vma, pmd, nodes,
						flags, private);
			continue;
		}
		split_huge_page_pmd(vma, addr, pmd);
		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
			continue;
		if (queue_pages_pte_range(vma, pmd, addr, next, nodes,
					  flags, private))
			return -EIO;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int queue_pages_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_huge(*pud) && is_vm_hugetlb_page(vma))
			continue;
		if (pud_none_or_clear_bad(pud))
			continue;
		if (queue_pages_pmd_range(vma, pud, addr, next, nodes,
					  flags, private))
			return -EIO;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static inline int queue_pages_pgd_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		if (queue_pages_pud_range(vma, pgd, addr, next, nodes,
					  flags, private))
			return -EIO;
	} while (pgd++, addr = next, addr != end);
	return 0;
}

#ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
/*
 * This is used to mark a range of virtual addresses to be inaccessible.
 * These are later cleared by a NUMA hinting fault. Depending on these
 * faults, pages may be migrated for better NUMA placement.
 *
 * This is assuming that NUMA faults are handled using PROT_NONE. If
 * an architecture makes a different choice, it will need further
 * changes to the core.
 */
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	int nr_updated;
	BUILD_BUG_ON(_PAGE_NUMA != _PAGE_PROTNONE);

	nr_updated = change_protection(vma, addr, end, vma->vm_page_prot, 0, 1);
	if (nr_updated)
		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);

	return nr_updated;
}
#else
static unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	return 0;
}
#endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */

/*
 * Walk through page tables and collect pages to be migrated.
 *
 * If pages found in a given range are on a set of nodes (determined by
 * @nodes and @flags), they are isolated and queued onto the pagelist,
 * which is passed via @private.
 */
static struct vm_area_struct *
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		const nodemask_t *nodes, unsigned long flags, void *private)
{
	int err;
	struct vm_area_struct *first, *vma, *prev;


	first = find_vma(mm, start);
	if (!first)
		return ERR_PTR(-EFAULT);
	prev = NULL;
	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
		unsigned long endvma = vma->vm_end;

		if (endvma > end)
			endvma = end;
		if (vma->vm_start > start)
			start = vma->vm_start;

		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
			if (!vma->vm_next && vma->vm_end < end)
				return ERR_PTR(-EFAULT);
			if (prev && prev->vm_end < vma->vm_start)
				return ERR_PTR(-EFAULT);
		}

		if (flags & MPOL_MF_LAZY) {
			change_prot_numa(vma, start, endvma);
			goto next;
		}

		if ((flags & MPOL_MF_STRICT) ||
		    ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
		      vma_migratable(vma))) {

			err = queue_pages_pgd_range(vma, start, endvma, nodes,
						flags, private);
			if (err) {
				first = ERR_PTR(err);
				break;
			}
		}
next:
		prev = vma;
	}
	return first;
}
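
/*
 * Usage note (summary of the callers below, not new behaviour): when the
 * caller passes the entire address space together with MPOL_MF_DISCONTIG_OK,
 * as migrate_to_node() does with
 *
 *	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
 *			  flags | MPOL_MF_DISCONTIG_OK, &pagelist);
 *
 * the walk cannot fail, whereas do_mbind() passes a user-supplied range
 * with MPOL_MF_STRICT and/or the MPOL_MF_MOVE* flags and must handle the
 * -EFAULT/-EIO returns above.
 */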

/*
 * Apply policy to a single VMA
 * This must be called with the mmap_sem held for writing.
 */
static int vma_replace_policy(struct vm_area_struct *vma,
						struct mempolicy *pol)
{
	int err;
	struct mempolicy *old;
	struct mempolicy *new;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	new = mpol_dup(pol);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (vma->vm_ops && vma->vm_ops->set_policy) {
		err = vma->vm_ops->set_policy(vma, new);
		if (err)
			goto err_out;
	}

	old = vma->vm_policy;
	vma->vm_policy = new; /* protected by mmap_sem */
	mpol_put(old);

	return 0;
 err_out:
	mpol_put(new);
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end, struct mempolicy *new_pol)
{
	struct vm_area_struct *next;
	struct vm_area_struct *prev;
	struct vm_area_struct *vma;
	int err = 0;
	pgoff_t pgoff;
	unsigned long vmstart;
	unsigned long vmend;

	vma = find_vma(mm, start);
	if (!vma || vma->vm_start > start)
		return -EFAULT;

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
		next = vma->vm_next;
		vmstart = max(start, vma->vm_start);
		vmend = min(end, vma->vm_end);

		if (mpol_equal(vma_policy(vma), new_pol))
			continue;

		pgoff = vma->vm_pgoff +
			((vmstart - vma->vm_start) >> PAGE_SHIFT);
		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
				  vma->anon_vma, vma->vm_file, pgoff,
				  new_pol);
		if (prev) {
			vma = prev;
			next = vma->vm_next;
			if (mpol_equal(vma_policy(vma), new_pol))
				continue;
			/* vma_merge() joined vma && vma->next, case 8 */
			goto replace;
		}
		if (vma->vm_start != vmstart) {
			err = split_vma(vma->vm_mm, vma, vmstart, 1);
			if (err)
				goto out;
		}
		if (vma->vm_end != vmend) {
			err = split_vma(vma->vm_mm, vma, vmend, 0);
			if (err)
				goto out;
		}
 replace:
		err = vma_replace_policy(vma, new_pol);
		if (err)
			goto out;
	}

 out:
	return err;
}

/*
 * Update task->flags PF_MEMPOLICY bit: set iff non-default
 * mempolicy. Allows more rapid checking of this (combined perhaps
 * with other PF_* flag bits) on memory allocation hot code paths.
 *
 * If called from outside this file, the task 'p' should -only- be
 * a newly forked child not yet visible on the task list, because
 * manipulating the task flags of a visible task is not safe.
 *
 * The above limitation is why this routine has the funny name
 * mpol_fix_fork_child_flag().
 *
 * It is also safe to call this with a task pointer of current,
 * which the static wrapper mpol_set_task_struct_flag() does,
 * for use within this file.
 */

void mpol_fix_fork_child_flag(struct task_struct *p)
{
	if (p->mempolicy)
		p->flags |= PF_MEMPOLICY;
	else
		p->flags &= ~PF_MEMPOLICY;
}

static void mpol_set_task_struct_flag(void)
{
	mpol_fix_fork_child_flag(current);
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new, *old;
	struct mm_struct *mm = current->mm;
	NODEMASK_SCRATCH(scratch);
	int ret;

	if (!scratch)
		return -ENOMEM;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new)) {
		ret = PTR_ERR(new);
		goto out;
	}
	/*
	 * prevent changing our mempolicy while show_numa_maps()
	 * is using it.
	 * Note: do_set_mempolicy() can be called at init time
	 * with no 'mm'.
	 */
	if (mm)
		down_write(&mm->mmap_sem);
	task_lock(current);
	ret = mpol_set_nodemask(new, nodes, scratch);
	if (ret) {
		task_unlock(current);
		if (mm)
			up_write(&mm->mmap_sem);
		mpol_put(new);
		goto out;
	}
	old = current->mempolicy;
	current->mempolicy = new;
	mpol_set_task_struct_flag();
	if (new && new->mode == MPOL_INTERLEAVE &&
	    nodes_weight(new->v.nodes))
		current->il_next = first_node(new->v.nodes);
	task_unlock(current);
	if (mm)
		up_write(&mm->mmap_sem);

	mpol_put(old);
	ret = 0;
out:
	NODEMASK_SCRATCH_FREE(scratch);
	return ret;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (p == &default_policy)
		return;

	switch (p->mode) {
	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		if (!(p->flags & MPOL_F_LOCAL))
			node_set(p->v.preferred_node, *nodes);
		/* else return empty node mask for local allocation */
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p;
	int err;

	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL.  We
		 * want to return MPOL_DEFAULT in this case.
		 */
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = current->il_next;
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	if (vma) {
		up_read(&current->mm->mmap_sem);
		vma = NULL;
	}

	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {
			*nmask = pol->w.user_nodemask;
		} else {
			task_lock(current);
			get_policy_nodemask(pol, nmask);
			task_unlock(current);
		}
	}

 out:
	mpol_cond_put(pol);
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}
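
/*
 * Illustrative sketch (userspace side, not kernel code): the MPOL_F_*
 * query flags handled above map onto get_mempolicy() calls such as
 *
 *	int mode, node;
 *	unsigned long nodes;
 *
 *	get_mempolicy(&mode, &nodes, sizeof(nodes) * 8, NULL, 0);
 *	get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR);
 *
 * The first call returns the calling task's policy mode and nodemask;
 * the second returns, in 'node', the node backing the page at 'addr'
 * (via lookup_node() above).  'addr' is assumed to be a valid mapped
 * address in the caller.
 */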

#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
		if (!isolate_lru_page(page)) {
			list_add_tail(&page->lru, pagelist);
			inc_zone_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));
		}
	}
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
	if (PageHuge(page))
		return alloc_huge_page_node(page_hstate(compound_head(page)),
					node);
	else
		return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;

	nodes_clear(nmask);
	node_set(source, nmask);

	/*
	 * This does not "check" the range but isolates all pages that
	 * need migration.  Between passing in the full user address
	 * space range and MPOL_MF_DISCONTIG_OK, this call cannot fail.
	 */
	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, new_node_page, dest,
					MIGRATE_SYNC, MR_SYSCALL);
		if (err)
			putback_movable_pages(&pagelist);
	}

	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	int busy = 0;
	int err;
	nodemask_t tmp;

	err = migrate_prep();
	if (err)
		return err;

	down_read(&mm->mmap_sem);

	err = migrate_vmas(mm, from, to, flags);
	if (err)
		goto out;

	/*
	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
	 * bit in 'tmp', and return that <source, dest> pair for migration.
	 * The pair of nodemasks 'to' and 'from' define the map.
	 *
	 * If no pair of bits is found that way, fallback to picking some
	 * pair of 'source' and 'dest' bits that are not the same.  If the
	 * 'source' and 'dest' bits are the same, this represents a node
	 * that will be migrating to itself, so no pages need move.
	 *
	 * If no bits are left in 'tmp', or if all remaining bits left
	 * in 'tmp' correspond to the same bit in 'to', return false
	 * (nothing left to migrate).
	 *
	 * This lets us pick a pair of nodes to migrate between, such that
	 * if possible the dest node is not already occupied by some other
	 * source node, minimizing the risk of overloading the memory on a
	 * node that would happen if we migrated incoming memory to a node
	 * before migrating outgoing memory source that same node.
11147e2ab150SChristoph Lameter * 11157e2ab150SChristoph Lameter * A single scan of tmp is sufficient. As we go, we remember the 11167e2ab150SChristoph Lameter * most recent <s, d> pair that moved (s != d). If we find a pair 11177e2ab150SChristoph Lameter * that not only moved, but what's better, moved to an empty slot 11187e2ab150SChristoph Lameter * (d is not set in tmp), then we break out then, with that pair. 1119ae0e47f0SJustin P. Mattock * Otherwise when we finish scanning from_tmp, we at least have the 11207e2ab150SChristoph Lameter * most recent <s, d> pair that moved. If we get all the way through 11217e2ab150SChristoph Lameter * the scan of tmp without finding any node that moved, much less 11227e2ab150SChristoph Lameter * moved to an empty node, then there is nothing left worth migrating. 11237e2ab150SChristoph Lameter */ 11247e2ab150SChristoph Lameter 11250ce72d4fSAndrew Morton tmp = *from; 11267e2ab150SChristoph Lameter while (!nodes_empty(tmp)) { 11277e2ab150SChristoph Lameter int s,d; 11287e2ab150SChristoph Lameter int source = -1; 11297e2ab150SChristoph Lameter int dest = 0; 11307e2ab150SChristoph Lameter 11317e2ab150SChristoph Lameter for_each_node_mask(s, tmp) { 11324a5b18ccSLarry Woodman 11334a5b18ccSLarry Woodman /* 11344a5b18ccSLarry Woodman * do_migrate_pages() tries to maintain the relative 11354a5b18ccSLarry Woodman * node relationship of the pages established between 11364a5b18ccSLarry Woodman * threads and memory areas. 11374a5b18ccSLarry Woodman * 11384a5b18ccSLarry Woodman * However if the number of source nodes is not equal to 11394a5b18ccSLarry Woodman * the number of destination nodes we can not preserve 11404a5b18ccSLarry Woodman * this node relative relationship. In that case, skip 11414a5b18ccSLarry Woodman * copying memory from a node that is in the destination 11424a5b18ccSLarry Woodman * mask. 11434a5b18ccSLarry Woodman * 11444a5b18ccSLarry Woodman * Example: [2,3,4] -> [3,4,5] moves everything. 11454a5b18ccSLarry Woodman * [0-7] - > [3,4,5] moves only 0,1,2,6,7. 11464a5b18ccSLarry Woodman */ 11474a5b18ccSLarry Woodman 11480ce72d4fSAndrew Morton if ((nodes_weight(*from) != nodes_weight(*to)) && 11490ce72d4fSAndrew Morton (node_isset(s, *to))) 11504a5b18ccSLarry Woodman continue; 11514a5b18ccSLarry Woodman 11520ce72d4fSAndrew Morton d = node_remap(s, *from, *to); 11537e2ab150SChristoph Lameter if (s == d) 11547e2ab150SChristoph Lameter continue; 11557e2ab150SChristoph Lameter 11567e2ab150SChristoph Lameter source = s; /* Node moved. Memorize */ 11577e2ab150SChristoph Lameter dest = d; 11587e2ab150SChristoph Lameter 11597e2ab150SChristoph Lameter /* dest not in remaining from nodes? 
*/ 11607e2ab150SChristoph Lameter if (!node_isset(dest, tmp)) 11617e2ab150SChristoph Lameter break; 11627e2ab150SChristoph Lameter } 11637e2ab150SChristoph Lameter if (source == -1) 11647e2ab150SChristoph Lameter break; 11657e2ab150SChristoph Lameter 11667e2ab150SChristoph Lameter node_clear(source, tmp); 11677e2ab150SChristoph Lameter err = migrate_to_node(mm, source, dest, flags); 11687e2ab150SChristoph Lameter if (err > 0) 11697e2ab150SChristoph Lameter busy += err; 11707e2ab150SChristoph Lameter if (err < 0) 11717e2ab150SChristoph Lameter break; 117239743889SChristoph Lameter } 11737b2259b3SChristoph Lameter out: 117439743889SChristoph Lameter up_read(&mm->mmap_sem); 11757e2ab150SChristoph Lameter if (err < 0) 11767e2ab150SChristoph Lameter return err; 11777e2ab150SChristoph Lameter return busy; 1178b20a3503SChristoph Lameter 117939743889SChristoph Lameter } 118039743889SChristoph Lameter 11813ad33b24SLee Schermerhorn /* 11823ad33b24SLee Schermerhorn * Allocate a new page for page migration based on vma policy. 11833ad33b24SLee Schermerhorn * Start assuming that page is mapped by vma pointed to by @private. 11843ad33b24SLee Schermerhorn * Search forward from there, if not. N.B., this assumes that the 11853ad33b24SLee Schermerhorn * list of pages handed to migrate_pages()--which is how we get here-- 11863ad33b24SLee Schermerhorn * is in virtual address order. 11873ad33b24SLee Schermerhorn */ 1188742755a1SChristoph Lameter static struct page *new_vma_page(struct page *page, unsigned long private, int **x) 118995a402c3SChristoph Lameter { 119095a402c3SChristoph Lameter struct vm_area_struct *vma = (struct vm_area_struct *)private; 11913ad33b24SLee Schermerhorn unsigned long uninitialized_var(address); 119295a402c3SChristoph Lameter 11933ad33b24SLee Schermerhorn while (vma) { 11943ad33b24SLee Schermerhorn address = page_address_in_vma(page, vma); 11953ad33b24SLee Schermerhorn if (address != -EFAULT) 11963ad33b24SLee Schermerhorn break; 11973ad33b24SLee Schermerhorn vma = vma->vm_next; 11983ad33b24SLee Schermerhorn } 11993ad33b24SLee Schermerhorn 120074060e4dSNaoya Horiguchi if (PageHuge(page)) 120174060e4dSNaoya Horiguchi return alloc_huge_page_noerr(vma, address, 1); 12023ad33b24SLee Schermerhorn /* 12033ad33b24SLee Schermerhorn * if !vma, alloc_page_vma() will use task or system default policy 12043ad33b24SLee Schermerhorn */ 12053ad33b24SLee Schermerhorn return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); 120695a402c3SChristoph Lameter } 1207b20a3503SChristoph Lameter #else 1208b20a3503SChristoph Lameter 1209b20a3503SChristoph Lameter static void migrate_page_add(struct page *page, struct list_head *pagelist, 1210b20a3503SChristoph Lameter unsigned long flags) 1211b20a3503SChristoph Lameter { 1212b20a3503SChristoph Lameter } 1213b20a3503SChristoph Lameter 12140ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, 12150ce72d4fSAndrew Morton const nodemask_t *to, int flags) 1216b20a3503SChristoph Lameter { 1217b20a3503SChristoph Lameter return -ENOSYS; 1218b20a3503SChristoph Lameter } 121995a402c3SChristoph Lameter 122069939749SKeith Owens static struct page *new_vma_page(struct page *page, unsigned long private, int **x) 122195a402c3SChristoph Lameter { 122295a402c3SChristoph Lameter return NULL; 122395a402c3SChristoph Lameter } 1224b20a3503SChristoph Lameter #endif 1225b20a3503SChristoph Lameter 1226dbcb0f19SAdrian Bunk static long do_mbind(unsigned long start, unsigned long len, 1227028fec41SDavid Rientjes unsigned short mode, unsigned short 
mode_flags, 1228028fec41SDavid Rientjes nodemask_t *nmask, unsigned long flags) 12296ce3c4c0SChristoph Lameter { 12306ce3c4c0SChristoph Lameter struct vm_area_struct *vma; 12316ce3c4c0SChristoph Lameter struct mm_struct *mm = current->mm; 12326ce3c4c0SChristoph Lameter struct mempolicy *new; 12336ce3c4c0SChristoph Lameter unsigned long end; 12346ce3c4c0SChristoph Lameter int err; 12356ce3c4c0SChristoph Lameter LIST_HEAD(pagelist); 12366ce3c4c0SChristoph Lameter 1237b24f53a0SLee Schermerhorn if (flags & ~(unsigned long)MPOL_MF_VALID) 12386ce3c4c0SChristoph Lameter return -EINVAL; 123974c00241SChristoph Lameter if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE)) 12406ce3c4c0SChristoph Lameter return -EPERM; 12416ce3c4c0SChristoph Lameter 12426ce3c4c0SChristoph Lameter if (start & ~PAGE_MASK) 12436ce3c4c0SChristoph Lameter return -EINVAL; 12446ce3c4c0SChristoph Lameter 12456ce3c4c0SChristoph Lameter if (mode == MPOL_DEFAULT) 12466ce3c4c0SChristoph Lameter flags &= ~MPOL_MF_STRICT; 12476ce3c4c0SChristoph Lameter 12486ce3c4c0SChristoph Lameter len = (len + PAGE_SIZE - 1) & PAGE_MASK; 12496ce3c4c0SChristoph Lameter end = start + len; 12506ce3c4c0SChristoph Lameter 12516ce3c4c0SChristoph Lameter if (end < start) 12526ce3c4c0SChristoph Lameter return -EINVAL; 12536ce3c4c0SChristoph Lameter if (end == start) 12546ce3c4c0SChristoph Lameter return 0; 12556ce3c4c0SChristoph Lameter 1256028fec41SDavid Rientjes new = mpol_new(mode, mode_flags, nmask); 12576ce3c4c0SChristoph Lameter if (IS_ERR(new)) 12586ce3c4c0SChristoph Lameter return PTR_ERR(new); 12596ce3c4c0SChristoph Lameter 1260b24f53a0SLee Schermerhorn if (flags & MPOL_MF_LAZY) 1261b24f53a0SLee Schermerhorn new->flags |= MPOL_F_MOF; 1262b24f53a0SLee Schermerhorn 12636ce3c4c0SChristoph Lameter /* 12646ce3c4c0SChristoph Lameter * If we are using the default policy then operation 12656ce3c4c0SChristoph Lameter * on discontinuous address spaces is okay after all 12666ce3c4c0SChristoph Lameter */ 12676ce3c4c0SChristoph Lameter if (!new) 12686ce3c4c0SChristoph Lameter flags |= MPOL_MF_DISCONTIG_OK; 12696ce3c4c0SChristoph Lameter 1270028fec41SDavid Rientjes pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n", 1271028fec41SDavid Rientjes start, start + len, mode, mode_flags, 127200ef2d2fSDavid Rientjes nmask ? 
nodes_addr(*nmask)[0] : NUMA_NO_NODE); 12736ce3c4c0SChristoph Lameter 12740aedadf9SChristoph Lameter if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 12750aedadf9SChristoph Lameter 12760aedadf9SChristoph Lameter err = migrate_prep(); 12770aedadf9SChristoph Lameter if (err) 1278b05ca738SKOSAKI Motohiro goto mpol_out; 12790aedadf9SChristoph Lameter } 12804bfc4495SKAMEZAWA Hiroyuki { 12814bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 12824bfc4495SKAMEZAWA Hiroyuki if (scratch) { 12836ce3c4c0SChristoph Lameter down_write(&mm->mmap_sem); 128458568d2aSMiao Xie task_lock(current); 12854bfc4495SKAMEZAWA Hiroyuki err = mpol_set_nodemask(new, nmask, scratch); 128658568d2aSMiao Xie task_unlock(current); 12874bfc4495SKAMEZAWA Hiroyuki if (err) 128858568d2aSMiao Xie up_write(&mm->mmap_sem); 12894bfc4495SKAMEZAWA Hiroyuki } else 12904bfc4495SKAMEZAWA Hiroyuki err = -ENOMEM; 12914bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 12924bfc4495SKAMEZAWA Hiroyuki } 1293b05ca738SKOSAKI Motohiro if (err) 1294b05ca738SKOSAKI Motohiro goto mpol_out; 1295b05ca738SKOSAKI Motohiro 1296*98094945SNaoya Horiguchi vma = queue_pages_range(mm, start, end, nmask, 12976ce3c4c0SChristoph Lameter flags | MPOL_MF_INVERT, &pagelist); 12986ce3c4c0SChristoph Lameter 1299b24f53a0SLee Schermerhorn err = PTR_ERR(vma); /* maybe ... */ 1300a720094dSMel Gorman if (!IS_ERR(vma)) 13019d8cebd4SKOSAKI Motohiro err = mbind_range(mm, start, end, new); 13027e2ab150SChristoph Lameter 1303b24f53a0SLee Schermerhorn if (!err) { 1304b24f53a0SLee Schermerhorn int nr_failed = 0; 1305b24f53a0SLee Schermerhorn 1306cf608ac1SMinchan Kim if (!list_empty(&pagelist)) { 1307b24f53a0SLee Schermerhorn WARN_ON_ONCE(flags & MPOL_MF_LAZY); 130895a402c3SChristoph Lameter nr_failed = migrate_pages(&pagelist, new_vma_page, 13097f0f2496SMel Gorman (unsigned long)vma, 13109c620e2bSHugh Dickins MIGRATE_SYNC, MR_MEMPOLICY_MBIND); 1311cf608ac1SMinchan Kim if (nr_failed) 131274060e4dSNaoya Horiguchi putback_movable_pages(&pagelist); 1313cf608ac1SMinchan Kim } 13146ce3c4c0SChristoph Lameter 1315b24f53a0SLee Schermerhorn if (nr_failed && (flags & MPOL_MF_STRICT)) 13166ce3c4c0SChristoph Lameter err = -EIO; 1317ab8a3e14SKOSAKI Motohiro } else 1318ab8a3e14SKOSAKI Motohiro putback_lru_pages(&pagelist); 1319b20a3503SChristoph Lameter 13206ce3c4c0SChristoph Lameter up_write(&mm->mmap_sem); 1321b05ca738SKOSAKI Motohiro mpol_out: 1322f0be3d32SLee Schermerhorn mpol_put(new); 13236ce3c4c0SChristoph Lameter return err; 13246ce3c4c0SChristoph Lameter } 13256ce3c4c0SChristoph Lameter 132639743889SChristoph Lameter /* 13278bccd85fSChristoph Lameter * User space interface with variable sized bitmaps for nodelists. 13288bccd85fSChristoph Lameter */ 13298bccd85fSChristoph Lameter 13308bccd85fSChristoph Lameter /* Copy a node mask from user space. 
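   For example (illustrative values): passing maxnode == 3 means only
   the two low bits of the user mask are honoured (nodes 0 and 1);
   higher bits in the copied word are masked off, and when the user
   mask is wider than the kernel supports the unsupported part must be
   all zero or the call fails.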
*/ 133139743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask, 13328bccd85fSChristoph Lameter unsigned long maxnode) 13338bccd85fSChristoph Lameter { 13348bccd85fSChristoph Lameter unsigned long k; 13358bccd85fSChristoph Lameter unsigned long nlongs; 13368bccd85fSChristoph Lameter unsigned long endmask; 13378bccd85fSChristoph Lameter 13388bccd85fSChristoph Lameter --maxnode; 13398bccd85fSChristoph Lameter nodes_clear(*nodes); 13408bccd85fSChristoph Lameter if (maxnode == 0 || !nmask) 13418bccd85fSChristoph Lameter return 0; 1342a9c930baSAndi Kleen if (maxnode > PAGE_SIZE*BITS_PER_BYTE) 1343636f13c1SChris Wright return -EINVAL; 13448bccd85fSChristoph Lameter 13458bccd85fSChristoph Lameter nlongs = BITS_TO_LONGS(maxnode); 13468bccd85fSChristoph Lameter if ((maxnode % BITS_PER_LONG) == 0) 13478bccd85fSChristoph Lameter endmask = ~0UL; 13488bccd85fSChristoph Lameter else 13498bccd85fSChristoph Lameter endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1; 13508bccd85fSChristoph Lameter 13518bccd85fSChristoph Lameter /* When the user specified more nodes than supported just check 13528bccd85fSChristoph Lameter if the non supported part is all zero. */ 13538bccd85fSChristoph Lameter if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) { 13548bccd85fSChristoph Lameter if (nlongs > PAGE_SIZE/sizeof(long)) 13558bccd85fSChristoph Lameter return -EINVAL; 13568bccd85fSChristoph Lameter for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) { 13578bccd85fSChristoph Lameter unsigned long t; 13588bccd85fSChristoph Lameter if (get_user(t, nmask + k)) 13598bccd85fSChristoph Lameter return -EFAULT; 13608bccd85fSChristoph Lameter if (k == nlongs - 1) { 13618bccd85fSChristoph Lameter if (t & endmask) 13628bccd85fSChristoph Lameter return -EINVAL; 13638bccd85fSChristoph Lameter } else if (t) 13648bccd85fSChristoph Lameter return -EINVAL; 13658bccd85fSChristoph Lameter } 13668bccd85fSChristoph Lameter nlongs = BITS_TO_LONGS(MAX_NUMNODES); 13678bccd85fSChristoph Lameter endmask = ~0UL; 13688bccd85fSChristoph Lameter } 13698bccd85fSChristoph Lameter 13708bccd85fSChristoph Lameter if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long))) 13718bccd85fSChristoph Lameter return -EFAULT; 13728bccd85fSChristoph Lameter nodes_addr(*nodes)[nlongs-1] &= endmask; 13738bccd85fSChristoph Lameter return 0; 13748bccd85fSChristoph Lameter } 13758bccd85fSChristoph Lameter 13768bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */ 13778bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode, 13788bccd85fSChristoph Lameter nodemask_t *nodes) 13798bccd85fSChristoph Lameter { 13808bccd85fSChristoph Lameter unsigned long copy = ALIGN(maxnode-1, 64) / 8; 13818bccd85fSChristoph Lameter const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long); 13828bccd85fSChristoph Lameter 13838bccd85fSChristoph Lameter if (copy > nbytes) { 13848bccd85fSChristoph Lameter if (copy > PAGE_SIZE) 13858bccd85fSChristoph Lameter return -EINVAL; 13868bccd85fSChristoph Lameter if (clear_user((char __user *)mask + nbytes, copy - nbytes)) 13878bccd85fSChristoph Lameter return -EFAULT; 13888bccd85fSChristoph Lameter copy = nbytes; 13898bccd85fSChristoph Lameter } 13908bccd85fSChristoph Lameter return copy_to_user(mask, nodes_addr(*nodes), copy) ? 
-EFAULT : 0; 13918bccd85fSChristoph Lameter } 13928bccd85fSChristoph Lameter 1393938bb9f5SHeiko Carstens SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len, 1394938bb9f5SHeiko Carstens unsigned long, mode, unsigned long __user *, nmask, 1395938bb9f5SHeiko Carstens unsigned long, maxnode, unsigned, flags) 13968bccd85fSChristoph Lameter { 13978bccd85fSChristoph Lameter nodemask_t nodes; 13988bccd85fSChristoph Lameter int err; 1399028fec41SDavid Rientjes unsigned short mode_flags; 14008bccd85fSChristoph Lameter 1401028fec41SDavid Rientjes mode_flags = mode & MPOL_MODE_FLAGS; 1402028fec41SDavid Rientjes mode &= ~MPOL_MODE_FLAGS; 1403a3b51e01SDavid Rientjes if (mode >= MPOL_MAX) 1404a3b51e01SDavid Rientjes return -EINVAL; 14054c50bc01SDavid Rientjes if ((mode_flags & MPOL_F_STATIC_NODES) && 14064c50bc01SDavid Rientjes (mode_flags & MPOL_F_RELATIVE_NODES)) 14074c50bc01SDavid Rientjes return -EINVAL; 14088bccd85fSChristoph Lameter err = get_nodes(&nodes, nmask, maxnode); 14098bccd85fSChristoph Lameter if (err) 14108bccd85fSChristoph Lameter return err; 1411028fec41SDavid Rientjes return do_mbind(start, len, mode, mode_flags, &nodes, flags); 14128bccd85fSChristoph Lameter } 14138bccd85fSChristoph Lameter 14148bccd85fSChristoph Lameter /* Set the process memory policy */ 1415938bb9f5SHeiko Carstens SYSCALL_DEFINE3(set_mempolicy, int, mode, unsigned long __user *, nmask, 1416938bb9f5SHeiko Carstens unsigned long, maxnode) 14178bccd85fSChristoph Lameter { 14188bccd85fSChristoph Lameter int err; 14198bccd85fSChristoph Lameter nodemask_t nodes; 1420028fec41SDavid Rientjes unsigned short flags; 14218bccd85fSChristoph Lameter 1422028fec41SDavid Rientjes flags = mode & MPOL_MODE_FLAGS; 1423028fec41SDavid Rientjes mode &= ~MPOL_MODE_FLAGS; 1424028fec41SDavid Rientjes if ((unsigned int)mode >= MPOL_MAX) 14258bccd85fSChristoph Lameter return -EINVAL; 14264c50bc01SDavid Rientjes if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES)) 14274c50bc01SDavid Rientjes return -EINVAL; 14288bccd85fSChristoph Lameter err = get_nodes(&nodes, nmask, maxnode); 14298bccd85fSChristoph Lameter if (err) 14308bccd85fSChristoph Lameter return err; 1431028fec41SDavid Rientjes return do_set_mempolicy(mode, flags, &nodes); 14328bccd85fSChristoph Lameter } 14338bccd85fSChristoph Lameter 1434938bb9f5SHeiko Carstens SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode, 1435938bb9f5SHeiko Carstens const unsigned long __user *, old_nodes, 1436938bb9f5SHeiko Carstens const unsigned long __user *, new_nodes) 143739743889SChristoph Lameter { 1438c69e8d9cSDavid Howells const struct cred *cred = current_cred(), *tcred; 1439596d7cfaSKOSAKI Motohiro struct mm_struct *mm = NULL; 144039743889SChristoph Lameter struct task_struct *task; 144139743889SChristoph Lameter nodemask_t task_nodes; 144239743889SChristoph Lameter int err; 1443596d7cfaSKOSAKI Motohiro nodemask_t *old; 1444596d7cfaSKOSAKI Motohiro nodemask_t *new; 1445596d7cfaSKOSAKI Motohiro NODEMASK_SCRATCH(scratch); 144639743889SChristoph Lameter 1447596d7cfaSKOSAKI Motohiro if (!scratch) 1448596d7cfaSKOSAKI Motohiro return -ENOMEM; 144939743889SChristoph Lameter 1450596d7cfaSKOSAKI Motohiro old = &scratch->mask1; 1451596d7cfaSKOSAKI Motohiro new = &scratch->mask2; 1452596d7cfaSKOSAKI Motohiro 1453596d7cfaSKOSAKI Motohiro err = get_nodes(old, old_nodes, maxnode); 145439743889SChristoph Lameter if (err) 1455596d7cfaSKOSAKI Motohiro goto out; 1456596d7cfaSKOSAKI Motohiro 1457596d7cfaSKOSAKI Motohiro err = get_nodes(new, new_nodes, maxnode); 
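	/*
	 * Sketch of a typical call from user space (illustrative only;
	 * it assumes the raw syscall(2) wrapper, a 64-bit word size,
	 * that nodes 0 and 1 exist, and that the caller passes the
	 * privilege checks below; target_pid is a stand-in name):
	 *
	 *	unsigned long old_mask = 1UL << 0, new_mask = 1UL << 1;
	 *	syscall(__NR_migrate_pages, target_pid,
	 *		8 * sizeof(unsigned long) + 1, &old_mask, &new_mask);
	 *
	 * would ask for target_pid's pages on node 0 to be moved to
	 * node 1.
	 */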
1458596d7cfaSKOSAKI Motohiro if (err) 1459596d7cfaSKOSAKI Motohiro goto out; 146039743889SChristoph Lameter 146139743889SChristoph Lameter /* Find the mm_struct */ 146255cfaa3cSZeng Zhaoming rcu_read_lock(); 1463228ebcbeSPavel Emelyanov task = pid ? find_task_by_vpid(pid) : current; 146439743889SChristoph Lameter if (!task) { 146555cfaa3cSZeng Zhaoming rcu_read_unlock(); 1466596d7cfaSKOSAKI Motohiro err = -ESRCH; 1467596d7cfaSKOSAKI Motohiro goto out; 146839743889SChristoph Lameter } 14693268c63eSChristoph Lameter get_task_struct(task); 147039743889SChristoph Lameter 1471596d7cfaSKOSAKI Motohiro err = -EINVAL; 147239743889SChristoph Lameter 147339743889SChristoph Lameter /* 147439743889SChristoph Lameter * Check if this process has the right to modify the specified 147539743889SChristoph Lameter * process. The right exists if the process has administrative 14767f927fccSAlexey Dobriyan * capabilities, superuser privileges or the same 147739743889SChristoph Lameter * userid as the target process. 147839743889SChristoph Lameter */ 1479c69e8d9cSDavid Howells tcred = __task_cred(task); 1480b38a86ebSEric W. Biederman if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) && 1481b38a86ebSEric W. Biederman !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) && 148274c00241SChristoph Lameter !capable(CAP_SYS_NICE)) { 1483c69e8d9cSDavid Howells rcu_read_unlock(); 148439743889SChristoph Lameter err = -EPERM; 14853268c63eSChristoph Lameter goto out_put; 148639743889SChristoph Lameter } 1487c69e8d9cSDavid Howells rcu_read_unlock(); 148839743889SChristoph Lameter 148939743889SChristoph Lameter task_nodes = cpuset_mems_allowed(task); 149039743889SChristoph Lameter /* Is the user allowed to access the target nodes? */ 1491596d7cfaSKOSAKI Motohiro if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) { 149239743889SChristoph Lameter err = -EPERM; 14933268c63eSChristoph Lameter goto out_put; 149439743889SChristoph Lameter } 149539743889SChristoph Lameter 149601f13bd6SLai Jiangshan if (!nodes_subset(*new, node_states[N_MEMORY])) { 14973b42d28bSChristoph Lameter err = -EINVAL; 14983268c63eSChristoph Lameter goto out_put; 14993b42d28bSChristoph Lameter } 15003b42d28bSChristoph Lameter 150186c3a764SDavid Quigley err = security_task_movememory(task); 150286c3a764SDavid Quigley if (err) 15033268c63eSChristoph Lameter goto out_put; 150486c3a764SDavid Quigley 15053268c63eSChristoph Lameter mm = get_task_mm(task); 15063268c63eSChristoph Lameter put_task_struct(task); 1507f2a9ef88SSasha Levin 1508f2a9ef88SSasha Levin if (!mm) { 1509f2a9ef88SSasha Levin err = -EINVAL; 1510f2a9ef88SSasha Levin goto out; 1511f2a9ef88SSasha Levin } 1512f2a9ef88SSasha Levin 1513596d7cfaSKOSAKI Motohiro err = do_migrate_pages(mm, old, new, 151474c00241SChristoph Lameter capable(CAP_SYS_NICE) ? 
MPOL_MF_MOVE_ALL : MPOL_MF_MOVE); 15153268c63eSChristoph Lameter 151639743889SChristoph Lameter mmput(mm); 15173268c63eSChristoph Lameter out: 1518596d7cfaSKOSAKI Motohiro NODEMASK_SCRATCH_FREE(scratch); 1519596d7cfaSKOSAKI Motohiro 152039743889SChristoph Lameter return err; 15213268c63eSChristoph Lameter 15223268c63eSChristoph Lameter out_put: 15233268c63eSChristoph Lameter put_task_struct(task); 15243268c63eSChristoph Lameter goto out; 15253268c63eSChristoph Lameter 152639743889SChristoph Lameter } 152739743889SChristoph Lameter 152839743889SChristoph Lameter 15298bccd85fSChristoph Lameter /* Retrieve NUMA policy */ 1530938bb9f5SHeiko Carstens SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, 1531938bb9f5SHeiko Carstens unsigned long __user *, nmask, unsigned long, maxnode, 1532938bb9f5SHeiko Carstens unsigned long, addr, unsigned long, flags) 15338bccd85fSChristoph Lameter { 1534dbcb0f19SAdrian Bunk int err; 1535dbcb0f19SAdrian Bunk int uninitialized_var(pval); 15368bccd85fSChristoph Lameter nodemask_t nodes; 15378bccd85fSChristoph Lameter 15388bccd85fSChristoph Lameter if (nmask != NULL && maxnode < MAX_NUMNODES) 15398bccd85fSChristoph Lameter return -EINVAL; 15408bccd85fSChristoph Lameter 15418bccd85fSChristoph Lameter err = do_get_mempolicy(&pval, &nodes, addr, flags); 15428bccd85fSChristoph Lameter 15438bccd85fSChristoph Lameter if (err) 15448bccd85fSChristoph Lameter return err; 15458bccd85fSChristoph Lameter 15468bccd85fSChristoph Lameter if (policy && put_user(pval, policy)) 15478bccd85fSChristoph Lameter return -EFAULT; 15488bccd85fSChristoph Lameter 15498bccd85fSChristoph Lameter if (nmask) 15508bccd85fSChristoph Lameter err = copy_nodes_to_user(nmask, maxnode, &nodes); 15518bccd85fSChristoph Lameter 15528bccd85fSChristoph Lameter return err; 15538bccd85fSChristoph Lameter } 15548bccd85fSChristoph Lameter 15551da177e4SLinus Torvalds #ifdef CONFIG_COMPAT 15561da177e4SLinus Torvalds 15571da177e4SLinus Torvalds asmlinkage long compat_sys_get_mempolicy(int __user *policy, 15581da177e4SLinus Torvalds compat_ulong_t __user *nmask, 15591da177e4SLinus Torvalds compat_ulong_t maxnode, 15601da177e4SLinus Torvalds compat_ulong_t addr, compat_ulong_t flags) 15611da177e4SLinus Torvalds { 15621da177e4SLinus Torvalds long err; 15631da177e4SLinus Torvalds unsigned long __user *nm = NULL; 15641da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 15651da177e4SLinus Torvalds DECLARE_BITMAP(bm, MAX_NUMNODES); 15661da177e4SLinus Torvalds 15671da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 15681da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 15691da177e4SLinus Torvalds 15701da177e4SLinus Torvalds if (nmask) 15711da177e4SLinus Torvalds nm = compat_alloc_user_space(alloc_size); 15721da177e4SLinus Torvalds 15731da177e4SLinus Torvalds err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags); 15741da177e4SLinus Torvalds 15751da177e4SLinus Torvalds if (!err && nmask) { 15762bbff6c7SKAMEZAWA Hiroyuki unsigned long copy_size; 15772bbff6c7SKAMEZAWA Hiroyuki copy_size = min_t(unsigned long, sizeof(bm), alloc_size); 15782bbff6c7SKAMEZAWA Hiroyuki err = copy_from_user(bm, nm, copy_size); 15791da177e4SLinus Torvalds /* ensure entire bitmap is zeroed */ 15801da177e4SLinus Torvalds err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8); 15811da177e4SLinus Torvalds err |= compat_put_bitmap(nmask, bm, nr_bits); 15821da177e4SLinus Torvalds } 15831da177e4SLinus Torvalds 15841da177e4SLinus Torvalds return err; 15851da177e4SLinus Torvalds } 15861da177e4SLinus 
Torvalds 15871da177e4SLinus Torvalds asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask, 15881da177e4SLinus Torvalds compat_ulong_t maxnode) 15891da177e4SLinus Torvalds { 15901da177e4SLinus Torvalds long err = 0; 15911da177e4SLinus Torvalds unsigned long __user *nm = NULL; 15921da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 15931da177e4SLinus Torvalds DECLARE_BITMAP(bm, MAX_NUMNODES); 15941da177e4SLinus Torvalds 15951da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 15961da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 15971da177e4SLinus Torvalds 15981da177e4SLinus Torvalds if (nmask) { 15991da177e4SLinus Torvalds err = compat_get_bitmap(bm, nmask, nr_bits); 16001da177e4SLinus Torvalds nm = compat_alloc_user_space(alloc_size); 16011da177e4SLinus Torvalds err |= copy_to_user(nm, bm, alloc_size); 16021da177e4SLinus Torvalds } 16031da177e4SLinus Torvalds 16041da177e4SLinus Torvalds if (err) 16051da177e4SLinus Torvalds return -EFAULT; 16061da177e4SLinus Torvalds 16071da177e4SLinus Torvalds return sys_set_mempolicy(mode, nm, nr_bits+1); 16081da177e4SLinus Torvalds } 16091da177e4SLinus Torvalds 16101da177e4SLinus Torvalds asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len, 16111da177e4SLinus Torvalds compat_ulong_t mode, compat_ulong_t __user *nmask, 16121da177e4SLinus Torvalds compat_ulong_t maxnode, compat_ulong_t flags) 16131da177e4SLinus Torvalds { 16141da177e4SLinus Torvalds long err = 0; 16151da177e4SLinus Torvalds unsigned long __user *nm = NULL; 16161da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 1617dfcd3c0dSAndi Kleen nodemask_t bm; 16181da177e4SLinus Torvalds 16191da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 16201da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 16211da177e4SLinus Torvalds 16221da177e4SLinus Torvalds if (nmask) { 1623dfcd3c0dSAndi Kleen err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits); 16241da177e4SLinus Torvalds nm = compat_alloc_user_space(alloc_size); 1625dfcd3c0dSAndi Kleen err |= copy_to_user(nm, nodes_addr(bm), alloc_size); 16261da177e4SLinus Torvalds } 16271da177e4SLinus Torvalds 16281da177e4SLinus Torvalds if (err) 16291da177e4SLinus Torvalds return -EFAULT; 16301da177e4SLinus Torvalds 16311da177e4SLinus Torvalds return sys_mbind(start, len, mode, nm, nr_bits+1, flags); 16321da177e4SLinus Torvalds } 16331da177e4SLinus Torvalds 16341da177e4SLinus Torvalds #endif 16351da177e4SLinus Torvalds 1636480eccf9SLee Schermerhorn /* 1637480eccf9SLee Schermerhorn * get_vma_policy(@task, @vma, @addr) 1638480eccf9SLee Schermerhorn * @task - task for fallback if vma policy == default 1639480eccf9SLee Schermerhorn * @vma - virtual memory area whose policy is sought 1640480eccf9SLee Schermerhorn * @addr - address in @vma for shared policy lookup 1641480eccf9SLee Schermerhorn * 1642480eccf9SLee Schermerhorn * Returns effective policy for a VMA at specified address. 1643480eccf9SLee Schermerhorn * Falls back to @task or system default policy, as necessary. 164432f8516aSDavid Rientjes * Current or other task's task mempolicy and non-shared vma policies must be 164532f8516aSDavid Rientjes * protected by task_lock(task) by the caller. 
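 * For example, a mapping of a tmpfs file with a per-file shared policy
 * returns that policy through the ->get_policy() vm_op, while a plain
 * anonymous VMA with no vma->vm_policy falls back to @task's mempolicy
 * and finally to default_policy.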
164652cd3b07SLee Schermerhorn * Shared policies [those marked as MPOL_F_SHARED] require an extra reference 164752cd3b07SLee Schermerhorn * count--added by the get_policy() vm_op, as appropriate--to protect against 164852cd3b07SLee Schermerhorn * freeing by another task. It is the caller's responsibility to free the 164952cd3b07SLee Schermerhorn * extra reference for shared policies. 1650480eccf9SLee Schermerhorn */ 1651d98f6cb6SStephen Wilson struct mempolicy *get_vma_policy(struct task_struct *task, 165248fce342SChristoph Lameter struct vm_area_struct *vma, unsigned long addr) 16531da177e4SLinus Torvalds { 16545606e387SMel Gorman struct mempolicy *pol = get_task_policy(task); 16551da177e4SLinus Torvalds 16561da177e4SLinus Torvalds if (vma) { 1657480eccf9SLee Schermerhorn if (vma->vm_ops && vma->vm_ops->get_policy) { 1658ae4d8c16SLee Schermerhorn struct mempolicy *vpol = vma->vm_ops->get_policy(vma, 1659ae4d8c16SLee Schermerhorn addr); 1660ae4d8c16SLee Schermerhorn if (vpol) 1661ae4d8c16SLee Schermerhorn pol = vpol; 166200442ad0SMel Gorman } else if (vma->vm_policy) { 16631da177e4SLinus Torvalds pol = vma->vm_policy; 166400442ad0SMel Gorman 166500442ad0SMel Gorman /* 166600442ad0SMel Gorman * shmem_alloc_page() passes MPOL_F_SHARED policy with 166700442ad0SMel Gorman * a pseudo vma whose vma->vm_ops=NULL. Take a reference 166800442ad0SMel Gorman * count on these policies which will be dropped by 166900442ad0SMel Gorman * mpol_cond_put() later 167000442ad0SMel Gorman */ 167100442ad0SMel Gorman if (mpol_needs_cond_ref(pol)) 167200442ad0SMel Gorman mpol_get(pol); 167300442ad0SMel Gorman } 16741da177e4SLinus Torvalds } 16751da177e4SLinus Torvalds if (!pol) 16761da177e4SLinus Torvalds pol = &default_policy; 16771da177e4SLinus Torvalds return pol; 16781da177e4SLinus Torvalds } 16791da177e4SLinus Torvalds 1680d3eb1570SLai Jiangshan static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone) 1681d3eb1570SLai Jiangshan { 1682d3eb1570SLai Jiangshan enum zone_type dynamic_policy_zone = policy_zone; 1683d3eb1570SLai Jiangshan 1684d3eb1570SLai Jiangshan BUG_ON(dynamic_policy_zone == ZONE_MOVABLE); 1685d3eb1570SLai Jiangshan 1686d3eb1570SLai Jiangshan /* 1687d3eb1570SLai Jiangshan * if policy->v.nodes has movable memory only, 1688d3eb1570SLai Jiangshan * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only. 1689d3eb1570SLai Jiangshan * 1690d3eb1570SLai Jiangshan * policy->v.nodes is intersect with node_states[N_MEMORY]. 1691d3eb1570SLai Jiangshan * so if the following test faile, it implies 1692d3eb1570SLai Jiangshan * policy->v.nodes has movable memory only. 
1693d3eb1570SLai Jiangshan */ 1694d3eb1570SLai Jiangshan if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY])) 1695d3eb1570SLai Jiangshan dynamic_policy_zone = ZONE_MOVABLE; 1696d3eb1570SLai Jiangshan 1697d3eb1570SLai Jiangshan return zone >= dynamic_policy_zone; 1698d3eb1570SLai Jiangshan } 1699d3eb1570SLai Jiangshan 170052cd3b07SLee Schermerhorn /* 170152cd3b07SLee Schermerhorn * Return a nodemask representing a mempolicy for filtering nodes for 170252cd3b07SLee Schermerhorn * page allocation 170352cd3b07SLee Schermerhorn */ 170452cd3b07SLee Schermerhorn static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy) 170519770b32SMel Gorman { 170619770b32SMel Gorman /* Lower zones don't get a nodemask applied for MPOL_BIND */ 170745c4745aSLee Schermerhorn if (unlikely(policy->mode == MPOL_BIND) && 1708d3eb1570SLai Jiangshan apply_policy_zone(policy, gfp_zone(gfp)) && 170919770b32SMel Gorman cpuset_nodemask_valid_mems_allowed(&policy->v.nodes)) 171019770b32SMel Gorman return &policy->v.nodes; 171119770b32SMel Gorman 171219770b32SMel Gorman return NULL; 171319770b32SMel Gorman } 171419770b32SMel Gorman 171552cd3b07SLee Schermerhorn /* Return a zonelist indicated by gfp for node representing a mempolicy */ 17162f5f9486SAndi Kleen static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy, 17172f5f9486SAndi Kleen int nd) 17181da177e4SLinus Torvalds { 171945c4745aSLee Schermerhorn switch (policy->mode) { 17201da177e4SLinus Torvalds case MPOL_PREFERRED: 1721fc36b8d3SLee Schermerhorn if (!(policy->flags & MPOL_F_LOCAL)) 17221da177e4SLinus Torvalds nd = policy->v.preferred_node; 17231da177e4SLinus Torvalds break; 17241da177e4SLinus Torvalds case MPOL_BIND: 172519770b32SMel Gorman /* 172652cd3b07SLee Schermerhorn * Normally, MPOL_BIND allocations are node-local within the 172752cd3b07SLee Schermerhorn * allowed nodemask. However, if __GFP_THISNODE is set and the 17286eb27e1fSBob Liu * current node isn't part of the mask, we use the zonelist for 172952cd3b07SLee Schermerhorn * the first node in the mask instead. 173019770b32SMel Gorman */ 173119770b32SMel Gorman if (unlikely(gfp & __GFP_THISNODE) && 173219770b32SMel Gorman unlikely(!node_isset(nd, policy->v.nodes))) 173319770b32SMel Gorman nd = first_node(policy->v.nodes); 173419770b32SMel Gorman break; 17351da177e4SLinus Torvalds default: 17361da177e4SLinus Torvalds BUG(); 17371da177e4SLinus Torvalds } 17380e88460dSMel Gorman return node_zonelist(nd, gfp); 17391da177e4SLinus Torvalds } 17401da177e4SLinus Torvalds 17411da177e4SLinus Torvalds /* Do dynamic interleaving for a process */ 17421da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy) 17431da177e4SLinus Torvalds { 17441da177e4SLinus Torvalds unsigned nid, next; 17451da177e4SLinus Torvalds struct task_struct *me = current; 17461da177e4SLinus Torvalds 17471da177e4SLinus Torvalds nid = me->il_next; 1748dfcd3c0dSAndi Kleen next = next_node(nid, policy->v.nodes); 17491da177e4SLinus Torvalds if (next >= MAX_NUMNODES) 1750dfcd3c0dSAndi Kleen next = first_node(policy->v.nodes); 1751f5b087b5SDavid Rientjes if (next < MAX_NUMNODES) 17521da177e4SLinus Torvalds me->il_next = next; 17531da177e4SLinus Torvalds return nid; 17541da177e4SLinus Torvalds } 17551da177e4SLinus Torvalds 1756dc85da15SChristoph Lameter /* 1757dc85da15SChristoph Lameter * Depending on the memory policy provide a node from which to allocate the 1758dc85da15SChristoph Lameter * next slab entry. 
175952cd3b07SLee Schermerhorn * @policy must be protected by freeing by the caller. If @policy is 176052cd3b07SLee Schermerhorn * the current task's mempolicy, this protection is implicit, as only the 176152cd3b07SLee Schermerhorn * task can change it's policy. The system default policy requires no 176252cd3b07SLee Schermerhorn * such protection. 1763dc85da15SChristoph Lameter */ 1764e7b691b0SAndi Kleen unsigned slab_node(void) 1765dc85da15SChristoph Lameter { 1766e7b691b0SAndi Kleen struct mempolicy *policy; 1767e7b691b0SAndi Kleen 1768e7b691b0SAndi Kleen if (in_interrupt()) 1769e7b691b0SAndi Kleen return numa_node_id(); 1770e7b691b0SAndi Kleen 1771e7b691b0SAndi Kleen policy = current->mempolicy; 1772fc36b8d3SLee Schermerhorn if (!policy || policy->flags & MPOL_F_LOCAL) 1773bea904d5SLee Schermerhorn return numa_node_id(); 1774765c4507SChristoph Lameter 1775bea904d5SLee Schermerhorn switch (policy->mode) { 1776bea904d5SLee Schermerhorn case MPOL_PREFERRED: 1777fc36b8d3SLee Schermerhorn /* 1778fc36b8d3SLee Schermerhorn * handled MPOL_F_LOCAL above 1779fc36b8d3SLee Schermerhorn */ 1780bea904d5SLee Schermerhorn return policy->v.preferred_node; 1781bea904d5SLee Schermerhorn 1782dc85da15SChristoph Lameter case MPOL_INTERLEAVE: 1783dc85da15SChristoph Lameter return interleave_nodes(policy); 1784dc85da15SChristoph Lameter 1785dd1a239fSMel Gorman case MPOL_BIND: { 1786dc85da15SChristoph Lameter /* 1787dc85da15SChristoph Lameter * Follow bind policy behavior and start allocation at the 1788dc85da15SChristoph Lameter * first node. 1789dc85da15SChristoph Lameter */ 179019770b32SMel Gorman struct zonelist *zonelist; 179119770b32SMel Gorman struct zone *zone; 179219770b32SMel Gorman enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL); 179319770b32SMel Gorman zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0]; 179419770b32SMel Gorman (void)first_zones_zonelist(zonelist, highest_zoneidx, 179519770b32SMel Gorman &policy->v.nodes, 179619770b32SMel Gorman &zone); 1797800416f7SEric Dumazet return zone ? zone->node : numa_node_id(); 1798dd1a239fSMel Gorman } 1799dc85da15SChristoph Lameter 1800dc85da15SChristoph Lameter default: 1801bea904d5SLee Schermerhorn BUG(); 1802dc85da15SChristoph Lameter } 1803dc85da15SChristoph Lameter } 1804dc85da15SChristoph Lameter 18051da177e4SLinus Torvalds /* Do static interleaving for a VMA with known offset. 
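   For example (illustrative values): with pol->v.nodes = {0,2,4} an
   offset of 7 gives target = 7 % 3 = 1, and the loop below returns
   node 2, the second node set in the mask.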
*/ 18061da177e4SLinus Torvalds static unsigned offset_il_node(struct mempolicy *pol, 18071da177e4SLinus Torvalds struct vm_area_struct *vma, unsigned long off) 18081da177e4SLinus Torvalds { 1809dfcd3c0dSAndi Kleen unsigned nnodes = nodes_weight(pol->v.nodes); 1810f5b087b5SDavid Rientjes unsigned target; 18111da177e4SLinus Torvalds int c; 18121da177e4SLinus Torvalds int nid = -1; 18131da177e4SLinus Torvalds 1814f5b087b5SDavid Rientjes if (!nnodes) 1815f5b087b5SDavid Rientjes return numa_node_id(); 1816f5b087b5SDavid Rientjes target = (unsigned int)off % nnodes; 18171da177e4SLinus Torvalds c = 0; 18181da177e4SLinus Torvalds do { 1819dfcd3c0dSAndi Kleen nid = next_node(nid, pol->v.nodes); 18201da177e4SLinus Torvalds c++; 18211da177e4SLinus Torvalds } while (c <= target); 18221da177e4SLinus Torvalds return nid; 18231da177e4SLinus Torvalds } 18241da177e4SLinus Torvalds 18255da7ca86SChristoph Lameter /* Determine a node number for interleave */ 18265da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol, 18275da7ca86SChristoph Lameter struct vm_area_struct *vma, unsigned long addr, int shift) 18285da7ca86SChristoph Lameter { 18295da7ca86SChristoph Lameter if (vma) { 18305da7ca86SChristoph Lameter unsigned long off; 18315da7ca86SChristoph Lameter 18323b98b087SNishanth Aravamudan /* 18333b98b087SNishanth Aravamudan * for small pages, there is no difference between 18343b98b087SNishanth Aravamudan * shift and PAGE_SHIFT, so the bit-shift is safe. 18353b98b087SNishanth Aravamudan * for huge pages, since vm_pgoff is in units of small 18363b98b087SNishanth Aravamudan * pages, we need to shift off the always 0 bits to get 18373b98b087SNishanth Aravamudan * a useful offset. 18383b98b087SNishanth Aravamudan */ 18393b98b087SNishanth Aravamudan BUG_ON(shift < PAGE_SHIFT); 18403b98b087SNishanth Aravamudan off = vma->vm_pgoff >> (shift - PAGE_SHIFT); 18415da7ca86SChristoph Lameter off += (addr - vma->vm_start) >> shift; 18425da7ca86SChristoph Lameter return offset_il_node(pol, vma, off); 18435da7ca86SChristoph Lameter } else 18445da7ca86SChristoph Lameter return interleave_nodes(pol); 18455da7ca86SChristoph Lameter } 18465da7ca86SChristoph Lameter 1847778d3b0fSMichal Hocko /* 1848778d3b0fSMichal Hocko * Return the bit number of a random bit set in the nodemask. 
1849778d3b0fSMichal Hocko * (returns -1 if nodemask is empty) 1850778d3b0fSMichal Hocko */ 1851778d3b0fSMichal Hocko int node_random(const nodemask_t *maskp) 1852778d3b0fSMichal Hocko { 1853778d3b0fSMichal Hocko int w, bit = -1; 1854778d3b0fSMichal Hocko 1855778d3b0fSMichal Hocko w = nodes_weight(*maskp); 1856778d3b0fSMichal Hocko if (w) 1857778d3b0fSMichal Hocko bit = bitmap_ord_to_pos(maskp->bits, 1858778d3b0fSMichal Hocko get_random_int() % w, MAX_NUMNODES); 1859778d3b0fSMichal Hocko return bit; 1860778d3b0fSMichal Hocko } 1861778d3b0fSMichal Hocko 186200ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS 1863480eccf9SLee Schermerhorn /* 1864480eccf9SLee Schermerhorn * huge_zonelist(@vma, @addr, @gfp_flags, @mpol) 1865480eccf9SLee Schermerhorn * @vma = virtual memory area whose policy is sought 1866480eccf9SLee Schermerhorn * @addr = address in @vma for shared policy lookup and interleave policy 1867480eccf9SLee Schermerhorn * @gfp_flags = for requested zone 186819770b32SMel Gorman * @mpol = pointer to mempolicy pointer for reference counted mempolicy 186919770b32SMel Gorman * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask 1870480eccf9SLee Schermerhorn * 187152cd3b07SLee Schermerhorn * Returns a zonelist suitable for a huge page allocation and a pointer 187252cd3b07SLee Schermerhorn * to the struct mempolicy for conditional unref after allocation. 187352cd3b07SLee Schermerhorn * If the effective policy is 'BIND, returns a pointer to the mempolicy's 187452cd3b07SLee Schermerhorn * @nodemask for filtering the zonelist. 1875c0ff7453SMiao Xie * 1876c0ff7453SMiao Xie * Must be protected by get_mems_allowed() 1877480eccf9SLee Schermerhorn */ 1878396faf03SMel Gorman struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr, 187919770b32SMel Gorman gfp_t gfp_flags, struct mempolicy **mpol, 188019770b32SMel Gorman nodemask_t **nodemask) 18815da7ca86SChristoph Lameter { 1882480eccf9SLee Schermerhorn struct zonelist *zl; 18835da7ca86SChristoph Lameter 188452cd3b07SLee Schermerhorn *mpol = get_vma_policy(current, vma, addr); 188519770b32SMel Gorman *nodemask = NULL; /* assume !MPOL_BIND */ 18865da7ca86SChristoph Lameter 188752cd3b07SLee Schermerhorn if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) { 188852cd3b07SLee Schermerhorn zl = node_zonelist(interleave_nid(*mpol, vma, addr, 1889a5516438SAndi Kleen huge_page_shift(hstate_vma(vma))), gfp_flags); 189052cd3b07SLee Schermerhorn } else { 18912f5f9486SAndi Kleen zl = policy_zonelist(gfp_flags, *mpol, numa_node_id()); 189252cd3b07SLee Schermerhorn if ((*mpol)->mode == MPOL_BIND) 189352cd3b07SLee Schermerhorn *nodemask = &(*mpol)->v.nodes; 1894480eccf9SLee Schermerhorn } 1895480eccf9SLee Schermerhorn return zl; 18965da7ca86SChristoph Lameter } 189706808b08SLee Schermerhorn 189806808b08SLee Schermerhorn /* 189906808b08SLee Schermerhorn * init_nodemask_of_mempolicy 190006808b08SLee Schermerhorn * 190106808b08SLee Schermerhorn * If the current task's mempolicy is "default" [NULL], return 'false' 190206808b08SLee Schermerhorn * to indicate default policy. Otherwise, extract the policy nodemask 190306808b08SLee Schermerhorn * for 'bind' or 'interleave' policy into the argument nodemask, or 190406808b08SLee Schermerhorn * initialize the argument nodemask to contain the single node for 190506808b08SLee Schermerhorn * 'preferred' or 'local' policy and return 'true' to indicate presence 190606808b08SLee Schermerhorn * of non-default mempolicy. 
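 *
 * For example, under an MPOL_INTERLEAVE policy over nodes 0-3 the
 * argument nodemask becomes {0,1,2,3} and 'true' is returned; under
 * MPOL_PREFERRED with MPOL_F_LOCAL it becomes just the local node.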
190706808b08SLee Schermerhorn * 190806808b08SLee Schermerhorn * We don't bother with reference counting the mempolicy [mpol_get/put] 190906808b08SLee Schermerhorn * because the current task is examining it's own mempolicy and a task's 191006808b08SLee Schermerhorn * mempolicy is only ever changed by the task itself. 191106808b08SLee Schermerhorn * 191206808b08SLee Schermerhorn * N.B., it is the caller's responsibility to free a returned nodemask. 191306808b08SLee Schermerhorn */ 191406808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask) 191506808b08SLee Schermerhorn { 191606808b08SLee Schermerhorn struct mempolicy *mempolicy; 191706808b08SLee Schermerhorn int nid; 191806808b08SLee Schermerhorn 191906808b08SLee Schermerhorn if (!(mask && current->mempolicy)) 192006808b08SLee Schermerhorn return false; 192106808b08SLee Schermerhorn 1922c0ff7453SMiao Xie task_lock(current); 192306808b08SLee Schermerhorn mempolicy = current->mempolicy; 192406808b08SLee Schermerhorn switch (mempolicy->mode) { 192506808b08SLee Schermerhorn case MPOL_PREFERRED: 192606808b08SLee Schermerhorn if (mempolicy->flags & MPOL_F_LOCAL) 192706808b08SLee Schermerhorn nid = numa_node_id(); 192806808b08SLee Schermerhorn else 192906808b08SLee Schermerhorn nid = mempolicy->v.preferred_node; 193006808b08SLee Schermerhorn init_nodemask_of_node(mask, nid); 193106808b08SLee Schermerhorn break; 193206808b08SLee Schermerhorn 193306808b08SLee Schermerhorn case MPOL_BIND: 193406808b08SLee Schermerhorn /* Fall through */ 193506808b08SLee Schermerhorn case MPOL_INTERLEAVE: 193606808b08SLee Schermerhorn *mask = mempolicy->v.nodes; 193706808b08SLee Schermerhorn break; 193806808b08SLee Schermerhorn 193906808b08SLee Schermerhorn default: 194006808b08SLee Schermerhorn BUG(); 194106808b08SLee Schermerhorn } 1942c0ff7453SMiao Xie task_unlock(current); 194306808b08SLee Schermerhorn 194406808b08SLee Schermerhorn return true; 194506808b08SLee Schermerhorn } 194600ac59adSChen, Kenneth W #endif 19475da7ca86SChristoph Lameter 19486f48d0ebSDavid Rientjes /* 19496f48d0ebSDavid Rientjes * mempolicy_nodemask_intersects 19506f48d0ebSDavid Rientjes * 19516f48d0ebSDavid Rientjes * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default 19526f48d0ebSDavid Rientjes * policy. Otherwise, check for intersection between mask and the policy 19536f48d0ebSDavid Rientjes * nodemask for 'bind' or 'interleave' policy. For 'perferred' or 'local' 19546f48d0ebSDavid Rientjes * policy, always return true since it may allocate elsewhere on fallback. 19556f48d0ebSDavid Rientjes * 19566f48d0ebSDavid Rientjes * Takes task_lock(tsk) to prevent freeing of its mempolicy. 
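 *
 * For example, an MPOL_BIND policy over nodes {0,1} intersects
 * mask {1,2} (returns true) but not mask {2,3} (returns false).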
19576f48d0ebSDavid Rientjes */ 19586f48d0ebSDavid Rientjes bool mempolicy_nodemask_intersects(struct task_struct *tsk, 19596f48d0ebSDavid Rientjes const nodemask_t *mask) 19606f48d0ebSDavid Rientjes { 19616f48d0ebSDavid Rientjes struct mempolicy *mempolicy; 19626f48d0ebSDavid Rientjes bool ret = true; 19636f48d0ebSDavid Rientjes 19646f48d0ebSDavid Rientjes if (!mask) 19656f48d0ebSDavid Rientjes return ret; 19666f48d0ebSDavid Rientjes task_lock(tsk); 19676f48d0ebSDavid Rientjes mempolicy = tsk->mempolicy; 19686f48d0ebSDavid Rientjes if (!mempolicy) 19696f48d0ebSDavid Rientjes goto out; 19706f48d0ebSDavid Rientjes 19716f48d0ebSDavid Rientjes switch (mempolicy->mode) { 19726f48d0ebSDavid Rientjes case MPOL_PREFERRED: 19736f48d0ebSDavid Rientjes /* 19746f48d0ebSDavid Rientjes * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to 19756f48d0ebSDavid Rientjes * allocate from, they may fallback to other nodes when oom. 19766f48d0ebSDavid Rientjes * Thus, it's possible for tsk to have allocated memory from 19776f48d0ebSDavid Rientjes * nodes in mask. 19786f48d0ebSDavid Rientjes */ 19796f48d0ebSDavid Rientjes break; 19806f48d0ebSDavid Rientjes case MPOL_BIND: 19816f48d0ebSDavid Rientjes case MPOL_INTERLEAVE: 19826f48d0ebSDavid Rientjes ret = nodes_intersects(mempolicy->v.nodes, *mask); 19836f48d0ebSDavid Rientjes break; 19846f48d0ebSDavid Rientjes default: 19856f48d0ebSDavid Rientjes BUG(); 19866f48d0ebSDavid Rientjes } 19876f48d0ebSDavid Rientjes out: 19886f48d0ebSDavid Rientjes task_unlock(tsk); 19896f48d0ebSDavid Rientjes return ret; 19906f48d0ebSDavid Rientjes } 19916f48d0ebSDavid Rientjes 19921da177e4SLinus Torvalds /* Allocate a page in interleaved policy. 19931da177e4SLinus Torvalds Own path because it needs to do special accounting. */ 1994662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, 1995662f3a0bSAndi Kleen unsigned nid) 19961da177e4SLinus Torvalds { 19971da177e4SLinus Torvalds struct zonelist *zl; 19981da177e4SLinus Torvalds struct page *page; 19991da177e4SLinus Torvalds 20000e88460dSMel Gorman zl = node_zonelist(nid, gfp); 20011da177e4SLinus Torvalds page = __alloc_pages(gfp, order, zl); 2002dd1a239fSMel Gorman if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0])) 2003ca889e6cSChristoph Lameter inc_zone_page_state(page, NUMA_INTERLEAVE_HIT); 20041da177e4SLinus Torvalds return page; 20051da177e4SLinus Torvalds } 20061da177e4SLinus Torvalds 20071da177e4SLinus Torvalds /** 20080bbbc0b3SAndrea Arcangeli * alloc_pages_vma - Allocate a page for a VMA. 20091da177e4SLinus Torvalds * 20101da177e4SLinus Torvalds * @gfp: 20111da177e4SLinus Torvalds * %GFP_USER user allocation. 20121da177e4SLinus Torvalds * %GFP_KERNEL kernel allocations, 20131da177e4SLinus Torvalds * %GFP_HIGHMEM highmem/user allocations, 20141da177e4SLinus Torvalds * %GFP_FS allocation should not call back into a file system. 20151da177e4SLinus Torvalds * %GFP_ATOMIC don't sleep. 20161da177e4SLinus Torvalds * 20170bbbc0b3SAndrea Arcangeli * @order:Order of the GFP allocation. 20181da177e4SLinus Torvalds * @vma: Pointer to VMA or NULL if not available. 20191da177e4SLinus Torvalds * @addr: Virtual Address of the allocation. Must be inside the VMA. 20201da177e4SLinus Torvalds * 20211da177e4SLinus Torvalds * This function allocates a page from the kernel page pool and applies 20221da177e4SLinus Torvalds * a NUMA policy associated with the VMA or the current process. 
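 * A caller in the fault path might use it roughly as (illustrative
 * call only, flags vary by caller):
 *	page = alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, address,
 *			       numa_node_id());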
20231da177e4SLinus Torvalds * When VMA is not NULL caller must hold down_read on the mmap_sem of the 20241da177e4SLinus Torvalds * mm_struct of the VMA to prevent it from going away. Should be used for 20251da177e4SLinus Torvalds * all allocations for pages that will be mapped into 20261da177e4SLinus Torvalds * user space. Returns NULL when no page can be allocated. 20271da177e4SLinus Torvalds * 20281da177e4SLinus Torvalds * Should be called with the mm_sem of the vma hold. 20291da177e4SLinus Torvalds */ 20301da177e4SLinus Torvalds struct page * 20310bbbc0b3SAndrea Arcangeli alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, 20322f5f9486SAndi Kleen unsigned long addr, int node) 20331da177e4SLinus Torvalds { 2034cc9a6c87SMel Gorman struct mempolicy *pol; 2035c0ff7453SMiao Xie struct page *page; 2036cc9a6c87SMel Gorman unsigned int cpuset_mems_cookie; 20371da177e4SLinus Torvalds 2038cc9a6c87SMel Gorman retry_cpuset: 2039cc9a6c87SMel Gorman pol = get_vma_policy(current, vma, addr); 2040cc9a6c87SMel Gorman cpuset_mems_cookie = get_mems_allowed(); 2041cc9a6c87SMel Gorman 204245c4745aSLee Schermerhorn if (unlikely(pol->mode == MPOL_INTERLEAVE)) { 20431da177e4SLinus Torvalds unsigned nid; 20445da7ca86SChristoph Lameter 20458eac563cSAndi Kleen nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order); 204652cd3b07SLee Schermerhorn mpol_cond_put(pol); 20470bbbc0b3SAndrea Arcangeli page = alloc_page_interleave(gfp, order, nid); 2048cc9a6c87SMel Gorman if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page)) 2049cc9a6c87SMel Gorman goto retry_cpuset; 2050cc9a6c87SMel Gorman 2051c0ff7453SMiao Xie return page; 20521da177e4SLinus Torvalds } 2053212a0a6fSDavid Rientjes page = __alloc_pages_nodemask(gfp, order, 2054212a0a6fSDavid Rientjes policy_zonelist(gfp, pol, node), 20550bbbc0b3SAndrea Arcangeli policy_nodemask(gfp, pol)); 2056212a0a6fSDavid Rientjes if (unlikely(mpol_needs_cond_ref(pol))) 2057212a0a6fSDavid Rientjes __mpol_put(pol); 2058cc9a6c87SMel Gorman if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page)) 2059cc9a6c87SMel Gorman goto retry_cpuset; 2060c0ff7453SMiao Xie return page; 20611da177e4SLinus Torvalds } 20621da177e4SLinus Torvalds 20631da177e4SLinus Torvalds /** 20641da177e4SLinus Torvalds * alloc_pages_current - Allocate pages. 20651da177e4SLinus Torvalds * 20661da177e4SLinus Torvalds * @gfp: 20671da177e4SLinus Torvalds * %GFP_USER user allocation, 20681da177e4SLinus Torvalds * %GFP_KERNEL kernel allocation, 20691da177e4SLinus Torvalds * %GFP_HIGHMEM highmem allocation, 20701da177e4SLinus Torvalds * %GFP_FS don't call back into a file system. 20711da177e4SLinus Torvalds * %GFP_ATOMIC don't sleep. 20721da177e4SLinus Torvalds * @order: Power of two of allocation size in pages. 0 is a single page. 20731da177e4SLinus Torvalds * 20741da177e4SLinus Torvalds * Allocate a page from the kernel page pool. When not in 20751da177e4SLinus Torvalds * interrupt context and apply the current process NUMA policy. 20761da177e4SLinus Torvalds * Returns NULL when no page can be allocated. 20771da177e4SLinus Torvalds * 2078cf2a473cSPaul Jackson * Don't call cpuset_update_task_memory_state() unless 20791da177e4SLinus Torvalds * 1) it's ok to take cpuset_sem (can WAIT), and 20801da177e4SLinus Torvalds * 2) allocating for current task (not interrupt). 
20811da177e4SLinus Torvalds */ 2082dd0fc66fSAl Viro struct page *alloc_pages_current(gfp_t gfp, unsigned order) 20831da177e4SLinus Torvalds { 20845606e387SMel Gorman struct mempolicy *pol = get_task_policy(current); 2085c0ff7453SMiao Xie struct page *page; 2086cc9a6c87SMel Gorman unsigned int cpuset_mems_cookie; 20871da177e4SLinus Torvalds 20889b819d20SChristoph Lameter if (!pol || in_interrupt() || (gfp & __GFP_THISNODE)) 20891da177e4SLinus Torvalds pol = &default_policy; 209052cd3b07SLee Schermerhorn 2091cc9a6c87SMel Gorman retry_cpuset: 2092cc9a6c87SMel Gorman cpuset_mems_cookie = get_mems_allowed(); 2093cc9a6c87SMel Gorman 209452cd3b07SLee Schermerhorn /* 209552cd3b07SLee Schermerhorn * No reference counting needed for current->mempolicy 209652cd3b07SLee Schermerhorn * nor system default_policy 209752cd3b07SLee Schermerhorn */ 209845c4745aSLee Schermerhorn if (pol->mode == MPOL_INTERLEAVE) 2099c0ff7453SMiao Xie page = alloc_page_interleave(gfp, order, interleave_nodes(pol)); 2100c0ff7453SMiao Xie else 2101c0ff7453SMiao Xie page = __alloc_pages_nodemask(gfp, order, 21025c4b4be3SAndi Kleen policy_zonelist(gfp, pol, numa_node_id()), 21035c4b4be3SAndi Kleen policy_nodemask(gfp, pol)); 2104cc9a6c87SMel Gorman 2105cc9a6c87SMel Gorman if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page)) 2106cc9a6c87SMel Gorman goto retry_cpuset; 2107cc9a6c87SMel Gorman 2108c0ff7453SMiao Xie return page; 21091da177e4SLinus Torvalds } 21101da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_pages_current); 21111da177e4SLinus Torvalds 2112ef0855d3SOleg Nesterov int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst) 2113ef0855d3SOleg Nesterov { 2114ef0855d3SOleg Nesterov struct mempolicy *pol = mpol_dup(vma_policy(src)); 2115ef0855d3SOleg Nesterov 2116ef0855d3SOleg Nesterov if (IS_ERR(pol)) 2117ef0855d3SOleg Nesterov return PTR_ERR(pol); 2118ef0855d3SOleg Nesterov dst->vm_policy = pol; 2119ef0855d3SOleg Nesterov return 0; 2120ef0855d3SOleg Nesterov } 2121ef0855d3SOleg Nesterov 21224225399aSPaul Jackson /* 2123846a16bfSLee Schermerhorn * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it 21244225399aSPaul Jackson * rebinds the mempolicy its copying by calling mpol_rebind_policy() 21254225399aSPaul Jackson * with the mems_allowed returned by cpuset_mems_allowed(). This 21264225399aSPaul Jackson * keeps mempolicies cpuset relative after its cpuset moves. See 21274225399aSPaul Jackson * further kernel/cpuset.c update_nodemask(). 2128708c1bbcSMiao Xie * 2129708c1bbcSMiao Xie * current's mempolicy may be rebinded by the other task(the task that changes 2130708c1bbcSMiao Xie * cpuset's mems), so we needn't do rebind work for current task. 21314225399aSPaul Jackson */ 21324225399aSPaul Jackson 2133846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */ 2134846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old) 21351da177e4SLinus Torvalds { 21361da177e4SLinus Torvalds struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL); 21371da177e4SLinus Torvalds 21381da177e4SLinus Torvalds if (!new) 21391da177e4SLinus Torvalds return ERR_PTR(-ENOMEM); 2140708c1bbcSMiao Xie 2141708c1bbcSMiao Xie /* task's mempolicy is protected by alloc_lock */ 2142708c1bbcSMiao Xie if (old == current->mempolicy) { 2143708c1bbcSMiao Xie task_lock(current); 2144708c1bbcSMiao Xie *new = *old; 2145708c1bbcSMiao Xie task_unlock(current); 2146708c1bbcSMiao Xie } else 2147708c1bbcSMiao Xie *new = *old; 2148708c1bbcSMiao Xie 214999ee4ca7SPaul E. 
McKenney rcu_read_lock(); 21504225399aSPaul Jackson if (current_cpuset_is_being_rebound()) { 21514225399aSPaul Jackson nodemask_t mems = cpuset_mems_allowed(current); 2152708c1bbcSMiao Xie if (new->flags & MPOL_F_REBINDING) 2153708c1bbcSMiao Xie mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2); 2154708c1bbcSMiao Xie else 2155708c1bbcSMiao Xie mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE); 21564225399aSPaul Jackson } 215799ee4ca7SPaul E. McKenney rcu_read_unlock(); 21581da177e4SLinus Torvalds atomic_set(&new->refcnt, 1); 21591da177e4SLinus Torvalds return new; 21601da177e4SLinus Torvalds } 21611da177e4SLinus Torvalds 21621da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */ 2163fcfb4dccSKOSAKI Motohiro bool __mpol_equal(struct mempolicy *a, struct mempolicy *b) 21641da177e4SLinus Torvalds { 21651da177e4SLinus Torvalds if (!a || !b) 2166fcfb4dccSKOSAKI Motohiro return false; 216745c4745aSLee Schermerhorn if (a->mode != b->mode) 2168fcfb4dccSKOSAKI Motohiro return false; 216919800502SBob Liu if (a->flags != b->flags) 2170fcfb4dccSKOSAKI Motohiro return false; 217119800502SBob Liu if (mpol_store_user_nodemask(a)) 217219800502SBob Liu if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask)) 2173fcfb4dccSKOSAKI Motohiro return false; 217419800502SBob Liu 217545c4745aSLee Schermerhorn switch (a->mode) { 217619770b32SMel Gorman case MPOL_BIND: 217719770b32SMel Gorman /* Fall through */ 21781da177e4SLinus Torvalds case MPOL_INTERLEAVE: 2179fcfb4dccSKOSAKI Motohiro return !!nodes_equal(a->v.nodes, b->v.nodes); 21801da177e4SLinus Torvalds case MPOL_PREFERRED: 218175719661SNamhyung Kim return a->v.preferred_node == b->v.preferred_node; 21821da177e4SLinus Torvalds default: 21831da177e4SLinus Torvalds BUG(); 2184fcfb4dccSKOSAKI Motohiro return false; 21851da177e4SLinus Torvalds } 21861da177e4SLinus Torvalds } 21871da177e4SLinus Torvalds 21881da177e4SLinus Torvalds /* 21891da177e4SLinus Torvalds * Shared memory backing store policy support. 21901da177e4SLinus Torvalds * 21911da177e4SLinus Torvalds * Remember policies even when nobody has shared memory mapped. 21921da177e4SLinus Torvalds * The policies are kept in Red-Black tree linked from the inode. 21931da177e4SLinus Torvalds * They are protected by the sp->lock spinlock, which should be held 21941da177e4SLinus Torvalds * for any accesses to the tree. 
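 *
 * Each node in the tree covers a [start, end) range of page offsets
 * into the object and carries one mempolicy; sp_lookup() below returns
 * the first node intersecting the range asked for.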
21951da177e4SLinus Torvalds */ 21961da177e4SLinus Torvalds 21971da177e4SLinus Torvalds /* lookup first element intersecting start-end */ 219842288fe3SMel Gorman /* Caller holds sp->lock */ 21991da177e4SLinus Torvalds static struct sp_node * 22001da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end) 22011da177e4SLinus Torvalds { 22021da177e4SLinus Torvalds struct rb_node *n = sp->root.rb_node; 22031da177e4SLinus Torvalds 22041da177e4SLinus Torvalds while (n) { 22051da177e4SLinus Torvalds struct sp_node *p = rb_entry(n, struct sp_node, nd); 22061da177e4SLinus Torvalds 22071da177e4SLinus Torvalds if (start >= p->end) 22081da177e4SLinus Torvalds n = n->rb_right; 22091da177e4SLinus Torvalds else if (end <= p->start) 22101da177e4SLinus Torvalds n = n->rb_left; 22111da177e4SLinus Torvalds else 22121da177e4SLinus Torvalds break; 22131da177e4SLinus Torvalds } 22141da177e4SLinus Torvalds if (!n) 22151da177e4SLinus Torvalds return NULL; 22161da177e4SLinus Torvalds for (;;) { 22171da177e4SLinus Torvalds struct sp_node *w = NULL; 22181da177e4SLinus Torvalds struct rb_node *prev = rb_prev(n); 22191da177e4SLinus Torvalds if (!prev) 22201da177e4SLinus Torvalds break; 22211da177e4SLinus Torvalds w = rb_entry(prev, struct sp_node, nd); 22221da177e4SLinus Torvalds if (w->end <= start) 22231da177e4SLinus Torvalds break; 22241da177e4SLinus Torvalds n = prev; 22251da177e4SLinus Torvalds } 22261da177e4SLinus Torvalds return rb_entry(n, struct sp_node, nd); 22271da177e4SLinus Torvalds } 22281da177e4SLinus Torvalds 22291da177e4SLinus Torvalds /* Insert a new shared policy into the list. */ 22301da177e4SLinus Torvalds /* Caller holds sp->lock */ 22311da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new) 22321da177e4SLinus Torvalds { 22331da177e4SLinus Torvalds struct rb_node **p = &sp->root.rb_node; 22341da177e4SLinus Torvalds struct rb_node *parent = NULL; 22351da177e4SLinus Torvalds struct sp_node *nd; 22361da177e4SLinus Torvalds 22371da177e4SLinus Torvalds while (*p) { 22381da177e4SLinus Torvalds parent = *p; 22391da177e4SLinus Torvalds nd = rb_entry(parent, struct sp_node, nd); 22401da177e4SLinus Torvalds if (new->start < nd->start) 22411da177e4SLinus Torvalds p = &(*p)->rb_left; 22421da177e4SLinus Torvalds else if (new->end > nd->end) 22431da177e4SLinus Torvalds p = &(*p)->rb_right; 22441da177e4SLinus Torvalds else 22451da177e4SLinus Torvalds BUG(); 22461da177e4SLinus Torvalds } 22471da177e4SLinus Torvalds rb_link_node(&new->nd, parent, p); 22481da177e4SLinus Torvalds rb_insert_color(&new->nd, &sp->root); 2249140d5a49SPaul Mundt pr_debug("inserting %lx-%lx: %d\n", new->start, new->end, 225045c4745aSLee Schermerhorn new->policy ? 
new->policy->mode : 0); 22511da177e4SLinus Torvalds } 22521da177e4SLinus Torvalds 22531da177e4SLinus Torvalds /* Find shared policy intersecting idx */ 22541da177e4SLinus Torvalds struct mempolicy * 22551da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx) 22561da177e4SLinus Torvalds { 22571da177e4SLinus Torvalds struct mempolicy *pol = NULL; 22581da177e4SLinus Torvalds struct sp_node *sn; 22591da177e4SLinus Torvalds 22601da177e4SLinus Torvalds if (!sp->root.rb_node) 22611da177e4SLinus Torvalds return NULL; 226242288fe3SMel Gorman spin_lock(&sp->lock); 22631da177e4SLinus Torvalds sn = sp_lookup(sp, idx, idx+1); 22641da177e4SLinus Torvalds if (sn) { 22651da177e4SLinus Torvalds mpol_get(sn->policy); 22661da177e4SLinus Torvalds pol = sn->policy; 22671da177e4SLinus Torvalds } 226842288fe3SMel Gorman spin_unlock(&sp->lock); 22691da177e4SLinus Torvalds return pol; 22701da177e4SLinus Torvalds } 22711da177e4SLinus Torvalds 227263f74ca2SKOSAKI Motohiro static void sp_free(struct sp_node *n) 227363f74ca2SKOSAKI Motohiro { 227463f74ca2SKOSAKI Motohiro mpol_put(n->policy); 227563f74ca2SKOSAKI Motohiro kmem_cache_free(sn_cache, n); 227663f74ca2SKOSAKI Motohiro } 227763f74ca2SKOSAKI Motohiro 2278771fb4d8SLee Schermerhorn /** 2279771fb4d8SLee Schermerhorn * mpol_misplaced - check whether current page node is valid in policy 2280771fb4d8SLee Schermerhorn * 2281771fb4d8SLee Schermerhorn * @page - page to be checked 2282771fb4d8SLee Schermerhorn * @vma - vm area where page mapped 2283771fb4d8SLee Schermerhorn * @addr - virtual address where page mapped 2284771fb4d8SLee Schermerhorn * 2285771fb4d8SLee Schermerhorn * Lookup current policy node id for vma,addr and "compare to" page's 2286771fb4d8SLee Schermerhorn * node id. 2287771fb4d8SLee Schermerhorn * 2288771fb4d8SLee Schermerhorn * Returns: 2289771fb4d8SLee Schermerhorn * -1 - not misplaced, page is in the right node 2290771fb4d8SLee Schermerhorn * node - node id where the page should be 2291771fb4d8SLee Schermerhorn * 2292771fb4d8SLee Schermerhorn * Policy determination "mimics" alloc_page_vma(). 2293771fb4d8SLee Schermerhorn * Called from fault path where we know the vma and faulting address. 
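 *
 * For example, with an MPOL_BIND policy over nodes 0-1, a page currently
 * resident on node 2 yields the nearest allowed node, while a page that
 * is already on node 0 yields -1 (not misplaced).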
2294771fb4d8SLee Schermerhorn */ 2295771fb4d8SLee Schermerhorn int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr) 2296771fb4d8SLee Schermerhorn { 2297771fb4d8SLee Schermerhorn struct mempolicy *pol; 2298771fb4d8SLee Schermerhorn struct zone *zone; 2299771fb4d8SLee Schermerhorn int curnid = page_to_nid(page); 2300771fb4d8SLee Schermerhorn unsigned long pgoff; 2301771fb4d8SLee Schermerhorn int polnid = -1; 2302771fb4d8SLee Schermerhorn int ret = -1; 2303771fb4d8SLee Schermerhorn 2304771fb4d8SLee Schermerhorn BUG_ON(!vma); 2305771fb4d8SLee Schermerhorn 2306771fb4d8SLee Schermerhorn pol = get_vma_policy(current, vma, addr); 2307771fb4d8SLee Schermerhorn if (!(pol->flags & MPOL_F_MOF)) 2308771fb4d8SLee Schermerhorn goto out; 2309771fb4d8SLee Schermerhorn 2310771fb4d8SLee Schermerhorn switch (pol->mode) { 2311771fb4d8SLee Schermerhorn case MPOL_INTERLEAVE: 2312771fb4d8SLee Schermerhorn BUG_ON(addr >= vma->vm_end); 2313771fb4d8SLee Schermerhorn BUG_ON(addr < vma->vm_start); 2314771fb4d8SLee Schermerhorn 2315771fb4d8SLee Schermerhorn pgoff = vma->vm_pgoff; 2316771fb4d8SLee Schermerhorn pgoff += (addr - vma->vm_start) >> PAGE_SHIFT; 2317771fb4d8SLee Schermerhorn polnid = offset_il_node(pol, vma, pgoff); 2318771fb4d8SLee Schermerhorn break; 2319771fb4d8SLee Schermerhorn 2320771fb4d8SLee Schermerhorn case MPOL_PREFERRED: 2321771fb4d8SLee Schermerhorn if (pol->flags & MPOL_F_LOCAL) 2322771fb4d8SLee Schermerhorn polnid = numa_node_id(); 2323771fb4d8SLee Schermerhorn else 2324771fb4d8SLee Schermerhorn polnid = pol->v.preferred_node; 2325771fb4d8SLee Schermerhorn break; 2326771fb4d8SLee Schermerhorn 2327771fb4d8SLee Schermerhorn case MPOL_BIND: 2328771fb4d8SLee Schermerhorn /* 2329771fb4d8SLee Schermerhorn * allows binding to multiple nodes. 2330771fb4d8SLee Schermerhorn * use current page if in policy nodemask, 2331771fb4d8SLee Schermerhorn * else select nearest allowed node, if any. 2332771fb4d8SLee Schermerhorn * If no allowed nodes, use current [!misplaced]. 2333771fb4d8SLee Schermerhorn */ 2334771fb4d8SLee Schermerhorn if (node_isset(curnid, pol->v.nodes)) 2335771fb4d8SLee Schermerhorn goto out; 2336771fb4d8SLee Schermerhorn (void)first_zones_zonelist( 2337771fb4d8SLee Schermerhorn node_zonelist(numa_node_id(), GFP_HIGHUSER), 2338771fb4d8SLee Schermerhorn gfp_zone(GFP_HIGHUSER), 2339771fb4d8SLee Schermerhorn &pol->v.nodes, &zone); 2340771fb4d8SLee Schermerhorn polnid = zone->node; 2341771fb4d8SLee Schermerhorn break; 2342771fb4d8SLee Schermerhorn 2343771fb4d8SLee Schermerhorn default: 2344771fb4d8SLee Schermerhorn BUG(); 2345771fb4d8SLee Schermerhorn } 23465606e387SMel Gorman 23475606e387SMel Gorman /* Migrate the page towards the node whose CPU is referencing it */ 2348e42c8ff2SMel Gorman if (pol->flags & MPOL_F_MORON) { 2349e42c8ff2SMel Gorman int last_nid; 2350e42c8ff2SMel Gorman 23515606e387SMel Gorman polnid = numa_node_id(); 23525606e387SMel Gorman 2353e42c8ff2SMel Gorman /* 2354e42c8ff2SMel Gorman * Multi-stage node selection is used in conjunction 2355e42c8ff2SMel Gorman * with a periodic migration fault to build a temporal 2356e42c8ff2SMel Gorman * task<->page relation. By using a two-stage filter we 2357e42c8ff2SMel Gorman * remove short/unlikely relations. 
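		 *
		 * Concretely (an editor's example): a page is only considered
		 * for migration here when two consecutive NUMA hinting faults
		 * on it are taken from the same node; a single stray access
		 * from some node is filtered out.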
2358e42c8ff2SMel Gorman		 *
2359e42c8ff2SMel Gorman		 * Using P(p) ~ n_p / n_t as per frequentist
2360e42c8ff2SMel Gorman		 * probability, we can equate a task's usage of a
2361e42c8ff2SMel Gorman		 * particular page (n_p) per total usage of this
2362e42c8ff2SMel Gorman		 * page (n_t) (in a given time-span) to a probability.
2363e42c8ff2SMel Gorman		 *
2364e42c8ff2SMel Gorman		 * Our periodic faults will sample this probability and
2365e42c8ff2SMel Gorman		 * getting the same result twice in a row, given these
2366e42c8ff2SMel Gorman		 * samples are fully independent, is then given by
2367e42c8ff2SMel Gorman		 * P(n)^2, provided our sample period is sufficiently
2368e42c8ff2SMel Gorman		 * short compared to the usage pattern.
2369e42c8ff2SMel Gorman		 *
2370e42c8ff2SMel Gorman		 * This quadratic squishes small probabilities, making
2371e42c8ff2SMel Gorman		 * it less likely we act on an unlikely task<->page
2372e42c8ff2SMel Gorman		 * relation.
2373e42c8ff2SMel Gorman		 */
237422b751c3SMel Gorman		last_nid = page_nid_xchg_last(page, polnid);
2375e42c8ff2SMel Gorman		if (last_nid != polnid)
2376e42c8ff2SMel Gorman			goto out;
2377e42c8ff2SMel Gorman	}
2378e42c8ff2SMel Gorman
2379771fb4d8SLee Schermerhorn	if (curnid != polnid)
2380771fb4d8SLee Schermerhorn		ret = polnid;
2381771fb4d8SLee Schermerhorn out:
2382771fb4d8SLee Schermerhorn	mpol_cond_put(pol);
2383771fb4d8SLee Schermerhorn
2384771fb4d8SLee Schermerhorn	return ret;
2385771fb4d8SLee Schermerhorn }
2386771fb4d8SLee Schermerhorn
23871da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n)
23881da177e4SLinus Torvalds {
2389140d5a49SPaul Mundt	pr_debug("deleting %lx-%lx\n", n->start, n->end);
23901da177e4SLinus Torvalds	rb_erase(&n->nd, &sp->root);
239163f74ca2SKOSAKI Motohiro	sp_free(n);
23921da177e4SLinus Torvalds }
23931da177e4SLinus Torvalds
239442288fe3SMel Gorman static void sp_node_init(struct sp_node *node, unsigned long start,
239542288fe3SMel Gorman			unsigned long end, struct mempolicy *pol)
239642288fe3SMel Gorman {
239742288fe3SMel Gorman	node->start = start;
239842288fe3SMel Gorman	node->end = end;
239942288fe3SMel Gorman	node->policy = pol;
240042288fe3SMel Gorman }
240142288fe3SMel Gorman
2402dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2403dbcb0f19SAdrian Bunk				struct mempolicy *pol)
24041da177e4SLinus Torvalds {
2405869833f2SKOSAKI Motohiro	struct sp_node *n;
2406869833f2SKOSAKI Motohiro	struct mempolicy *newpol;
24071da177e4SLinus Torvalds
2408869833f2SKOSAKI Motohiro	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
24091da177e4SLinus Torvalds	if (!n)
24101da177e4SLinus Torvalds		return NULL;
2411869833f2SKOSAKI Motohiro
2412869833f2SKOSAKI Motohiro	newpol = mpol_dup(pol);
2413869833f2SKOSAKI Motohiro	if (IS_ERR(newpol)) {
2414869833f2SKOSAKI Motohiro		kmem_cache_free(sn_cache, n);
2415869833f2SKOSAKI Motohiro		return NULL;
2416869833f2SKOSAKI Motohiro	}
2417869833f2SKOSAKI Motohiro	newpol->flags |= MPOL_F_SHARED;
241842288fe3SMel Gorman	sp_node_init(n, start, end, newpol);
2419869833f2SKOSAKI Motohiro
24201da177e4SLinus Torvalds	return n;
24211da177e4SLinus Torvalds }
24221da177e4SLinus Torvalds
24231da177e4SLinus Torvalds /* Replace a policy range.
*/ 24241da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start, 24251da177e4SLinus Torvalds unsigned long end, struct sp_node *new) 24261da177e4SLinus Torvalds { 2427b22d127aSMel Gorman struct sp_node *n; 242842288fe3SMel Gorman struct sp_node *n_new = NULL; 242942288fe3SMel Gorman struct mempolicy *mpol_new = NULL; 2430b22d127aSMel Gorman int ret = 0; 24311da177e4SLinus Torvalds 243242288fe3SMel Gorman restart: 243342288fe3SMel Gorman spin_lock(&sp->lock); 24341da177e4SLinus Torvalds n = sp_lookup(sp, start, end); 24351da177e4SLinus Torvalds /* Take care of old policies in the same range. */ 24361da177e4SLinus Torvalds while (n && n->start < end) { 24371da177e4SLinus Torvalds struct rb_node *next = rb_next(&n->nd); 24381da177e4SLinus Torvalds if (n->start >= start) { 24391da177e4SLinus Torvalds if (n->end <= end) 24401da177e4SLinus Torvalds sp_delete(sp, n); 24411da177e4SLinus Torvalds else 24421da177e4SLinus Torvalds n->start = end; 24431da177e4SLinus Torvalds } else { 24441da177e4SLinus Torvalds /* Old policy spanning whole new range. */ 24451da177e4SLinus Torvalds if (n->end > end) { 244642288fe3SMel Gorman if (!n_new) 244742288fe3SMel Gorman goto alloc_new; 244842288fe3SMel Gorman 244942288fe3SMel Gorman *mpol_new = *n->policy; 245042288fe3SMel Gorman atomic_set(&mpol_new->refcnt, 1); 24517880639cSKOSAKI Motohiro sp_node_init(n_new, end, n->end, mpol_new); 24521da177e4SLinus Torvalds n->end = start; 24535ca39575SHillf Danton sp_insert(sp, n_new); 245442288fe3SMel Gorman n_new = NULL; 245542288fe3SMel Gorman mpol_new = NULL; 24561da177e4SLinus Torvalds break; 24571da177e4SLinus Torvalds } else 24581da177e4SLinus Torvalds n->end = start; 24591da177e4SLinus Torvalds } 24601da177e4SLinus Torvalds if (!next) 24611da177e4SLinus Torvalds break; 24621da177e4SLinus Torvalds n = rb_entry(next, struct sp_node, nd); 24631da177e4SLinus Torvalds } 24641da177e4SLinus Torvalds if (new) 24651da177e4SLinus Torvalds sp_insert(sp, new); 246642288fe3SMel Gorman spin_unlock(&sp->lock); 246742288fe3SMel Gorman ret = 0; 246842288fe3SMel Gorman 246942288fe3SMel Gorman err_out: 247042288fe3SMel Gorman if (mpol_new) 247142288fe3SMel Gorman mpol_put(mpol_new); 247242288fe3SMel Gorman if (n_new) 247342288fe3SMel Gorman kmem_cache_free(sn_cache, n_new); 247442288fe3SMel Gorman 2475b22d127aSMel Gorman return ret; 247642288fe3SMel Gorman 247742288fe3SMel Gorman alloc_new: 247842288fe3SMel Gorman spin_unlock(&sp->lock); 247942288fe3SMel Gorman ret = -ENOMEM; 248042288fe3SMel Gorman n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL); 248142288fe3SMel Gorman if (!n_new) 248242288fe3SMel Gorman goto err_out; 248342288fe3SMel Gorman mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL); 248442288fe3SMel Gorman if (!mpol_new) 248542288fe3SMel Gorman goto err_out; 248642288fe3SMel Gorman goto restart; 24871da177e4SLinus Torvalds } 24881da177e4SLinus Torvalds 248971fe804bSLee Schermerhorn /** 249071fe804bSLee Schermerhorn * mpol_shared_policy_init - initialize shared policy for inode 249171fe804bSLee Schermerhorn * @sp: pointer to inode shared policy 249271fe804bSLee Schermerhorn * @mpol: struct mempolicy to install 249371fe804bSLee Schermerhorn * 249471fe804bSLee Schermerhorn * Install non-NULL @mpol in inode's shared policy rb-tree. 249571fe804bSLee Schermerhorn * On entry, the current task has a reference on a non-NULL @mpol. 249671fe804bSLee Schermerhorn * This must be released on exit. 
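 *
 * For example, a tmpfs mount with "-o mpol=interleave:0-3" reaches this
 * function with the mempolicy parsed by mpol_parse_str() below.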
24974bfc4495SKAMEZAWA Hiroyuki * This is called at get_inode() calls and we can use GFP_KERNEL. 249871fe804bSLee Schermerhorn */ 249971fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) 25007339ff83SRobin Holt { 250158568d2aSMiao Xie int ret; 250258568d2aSMiao Xie 250371fe804bSLee Schermerhorn sp->root = RB_ROOT; /* empty tree == default mempolicy */ 250442288fe3SMel Gorman spin_lock_init(&sp->lock); 25057339ff83SRobin Holt 250671fe804bSLee Schermerhorn if (mpol) { 25077339ff83SRobin Holt struct vm_area_struct pvma; 250871fe804bSLee Schermerhorn struct mempolicy *new; 25094bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 25107339ff83SRobin Holt 25114bfc4495SKAMEZAWA Hiroyuki if (!scratch) 25125c0c1654SLee Schermerhorn goto put_mpol; 251371fe804bSLee Schermerhorn /* contextualize the tmpfs mount point mempolicy */ 251471fe804bSLee Schermerhorn new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask); 251515d77835SLee Schermerhorn if (IS_ERR(new)) 25160cae3457SDan Carpenter goto free_scratch; /* no valid nodemask intersection */ 251758568d2aSMiao Xie 251858568d2aSMiao Xie task_lock(current); 25194bfc4495SKAMEZAWA Hiroyuki ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch); 252058568d2aSMiao Xie task_unlock(current); 252115d77835SLee Schermerhorn if (ret) 25225c0c1654SLee Schermerhorn goto put_new; 252371fe804bSLee Schermerhorn 252471fe804bSLee Schermerhorn /* Create pseudo-vma that contains just the policy */ 25257339ff83SRobin Holt memset(&pvma, 0, sizeof(struct vm_area_struct)); 252671fe804bSLee Schermerhorn pvma.vm_end = TASK_SIZE; /* policy covers entire file */ 252771fe804bSLee Schermerhorn mpol_set_shared_policy(sp, &pvma, new); /* adds ref */ 252815d77835SLee Schermerhorn 25295c0c1654SLee Schermerhorn put_new: 253071fe804bSLee Schermerhorn mpol_put(new); /* drop initial ref */ 25310cae3457SDan Carpenter free_scratch: 25324bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 25335c0c1654SLee Schermerhorn put_mpol: 25345c0c1654SLee Schermerhorn mpol_put(mpol); /* drop our incoming ref on sb mpol */ 25357339ff83SRobin Holt } 25367339ff83SRobin Holt } 25377339ff83SRobin Holt 25381da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info, 25391da177e4SLinus Torvalds struct vm_area_struct *vma, struct mempolicy *npol) 25401da177e4SLinus Torvalds { 25411da177e4SLinus Torvalds int err; 25421da177e4SLinus Torvalds struct sp_node *new = NULL; 25431da177e4SLinus Torvalds unsigned long sz = vma_pages(vma); 25441da177e4SLinus Torvalds 2545028fec41SDavid Rientjes pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n", 25461da177e4SLinus Torvalds vma->vm_pgoff, 254745c4745aSLee Schermerhorn sz, npol ? npol->mode : -1, 2548028fec41SDavid Rientjes npol ? npol->flags : -1, 254900ef2d2fSDavid Rientjes npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE); 25501da177e4SLinus Torvalds 25511da177e4SLinus Torvalds if (npol) { 25521da177e4SLinus Torvalds new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol); 25531da177e4SLinus Torvalds if (!new) 25541da177e4SLinus Torvalds return -ENOMEM; 25551da177e4SLinus Torvalds } 25561da177e4SLinus Torvalds err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new); 25571da177e4SLinus Torvalds if (err && new) 255863f74ca2SKOSAKI Motohiro sp_free(new); 25591da177e4SLinus Torvalds return err; 25601da177e4SLinus Torvalds } 25611da177e4SLinus Torvalds 25621da177e4SLinus Torvalds /* Free a backing policy store on inode delete. 
*/ 25631da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p) 25641da177e4SLinus Torvalds { 25651da177e4SLinus Torvalds struct sp_node *n; 25661da177e4SLinus Torvalds struct rb_node *next; 25671da177e4SLinus Torvalds 25681da177e4SLinus Torvalds if (!p->root.rb_node) 25691da177e4SLinus Torvalds return; 257042288fe3SMel Gorman spin_lock(&p->lock); 25711da177e4SLinus Torvalds next = rb_first(&p->root); 25721da177e4SLinus Torvalds while (next) { 25731da177e4SLinus Torvalds n = rb_entry(next, struct sp_node, nd); 25741da177e4SLinus Torvalds next = rb_next(&n->nd); 257563f74ca2SKOSAKI Motohiro sp_delete(p, n); 25761da177e4SLinus Torvalds } 257742288fe3SMel Gorman spin_unlock(&p->lock); 25781da177e4SLinus Torvalds } 25791da177e4SLinus Torvalds 25801a687c2eSMel Gorman #ifdef CONFIG_NUMA_BALANCING 25811a687c2eSMel Gorman static bool __initdata numabalancing_override; 25821a687c2eSMel Gorman 25831a687c2eSMel Gorman static void __init check_numabalancing_enable(void) 25841a687c2eSMel Gorman { 25851a687c2eSMel Gorman bool numabalancing_default = false; 25861a687c2eSMel Gorman 25871a687c2eSMel Gorman if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED)) 25881a687c2eSMel Gorman numabalancing_default = true; 25891a687c2eSMel Gorman 25901a687c2eSMel Gorman if (nr_node_ids > 1 && !numabalancing_override) { 25911a687c2eSMel Gorman printk(KERN_INFO "Enabling automatic NUMA balancing. " 25921a687c2eSMel Gorman "Configure with numa_balancing= or sysctl"); 25931a687c2eSMel Gorman set_numabalancing_state(numabalancing_default); 25941a687c2eSMel Gorman } 25951a687c2eSMel Gorman } 25961a687c2eSMel Gorman 25971a687c2eSMel Gorman static int __init setup_numabalancing(char *str) 25981a687c2eSMel Gorman { 25991a687c2eSMel Gorman int ret = 0; 26001a687c2eSMel Gorman if (!str) 26011a687c2eSMel Gorman goto out; 26021a687c2eSMel Gorman numabalancing_override = true; 26031a687c2eSMel Gorman 26041a687c2eSMel Gorman if (!strcmp(str, "enable")) { 26051a687c2eSMel Gorman set_numabalancing_state(true); 26061a687c2eSMel Gorman ret = 1; 26071a687c2eSMel Gorman } else if (!strcmp(str, "disable")) { 26081a687c2eSMel Gorman set_numabalancing_state(false); 26091a687c2eSMel Gorman ret = 1; 26101a687c2eSMel Gorman } 26111a687c2eSMel Gorman out: 26121a687c2eSMel Gorman if (!ret) 26131a687c2eSMel Gorman printk(KERN_WARNING "Unable to parse numa_balancing=\n"); 26141a687c2eSMel Gorman 26151a687c2eSMel Gorman return ret; 26161a687c2eSMel Gorman } 26171a687c2eSMel Gorman __setup("numa_balancing=", setup_numabalancing); 26181a687c2eSMel Gorman #else 26191a687c2eSMel Gorman static inline void __init check_numabalancing_enable(void) 26201a687c2eSMel Gorman { 26211a687c2eSMel Gorman } 26221a687c2eSMel Gorman #endif /* CONFIG_NUMA_BALANCING */ 26231a687c2eSMel Gorman 26241da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */ 26251da177e4SLinus Torvalds void __init numa_policy_init(void) 26261da177e4SLinus Torvalds { 2627b71636e2SPaul Mundt nodemask_t interleave_nodes; 2628b71636e2SPaul Mundt unsigned long largest = 0; 2629b71636e2SPaul Mundt int nid, prefer = 0; 2630b71636e2SPaul Mundt 26311da177e4SLinus Torvalds policy_cache = kmem_cache_create("numa_policy", 26321da177e4SLinus Torvalds sizeof(struct mempolicy), 263320c2df83SPaul Mundt 0, SLAB_PANIC, NULL); 26341da177e4SLinus Torvalds 26351da177e4SLinus Torvalds sn_cache = kmem_cache_create("shared_policy_node", 26361da177e4SLinus Torvalds sizeof(struct sp_node), 263720c2df83SPaul Mundt 0, SLAB_PANIC, NULL); 26381da177e4SLinus Torvalds 26395606e387SMel Gorman 
for_each_node(nid) { 26405606e387SMel Gorman preferred_node_policy[nid] = (struct mempolicy) { 26415606e387SMel Gorman .refcnt = ATOMIC_INIT(1), 26425606e387SMel Gorman .mode = MPOL_PREFERRED, 26435606e387SMel Gorman .flags = MPOL_F_MOF | MPOL_F_MORON, 26445606e387SMel Gorman .v = { .preferred_node = nid, }, 26455606e387SMel Gorman }; 26465606e387SMel Gorman } 26475606e387SMel Gorman 2648b71636e2SPaul Mundt /* 2649b71636e2SPaul Mundt * Set interleaving policy for system init. Interleaving is only 2650b71636e2SPaul Mundt * enabled across suitably sized nodes (default is >= 16MB), or 2651b71636e2SPaul Mundt * fall back to the largest node if they're all smaller. 2652b71636e2SPaul Mundt */ 2653b71636e2SPaul Mundt nodes_clear(interleave_nodes); 265401f13bd6SLai Jiangshan for_each_node_state(nid, N_MEMORY) { 2655b71636e2SPaul Mundt unsigned long total_pages = node_present_pages(nid); 26561da177e4SLinus Torvalds 2657b71636e2SPaul Mundt /* Preserve the largest node */ 2658b71636e2SPaul Mundt if (largest < total_pages) { 2659b71636e2SPaul Mundt largest = total_pages; 2660b71636e2SPaul Mundt prefer = nid; 2661b71636e2SPaul Mundt } 2662b71636e2SPaul Mundt 2663b71636e2SPaul Mundt /* Interleave this node? */ 2664b71636e2SPaul Mundt if ((total_pages << PAGE_SHIFT) >= (16 << 20)) 2665b71636e2SPaul Mundt node_set(nid, interleave_nodes); 2666b71636e2SPaul Mundt } 2667b71636e2SPaul Mundt 2668b71636e2SPaul Mundt /* All too small, use the largest */ 2669b71636e2SPaul Mundt if (unlikely(nodes_empty(interleave_nodes))) 2670b71636e2SPaul Mundt node_set(prefer, interleave_nodes); 2671b71636e2SPaul Mundt 2672028fec41SDavid Rientjes if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes)) 26731da177e4SLinus Torvalds printk("numa_policy_init: interleaving failed\n"); 26741a687c2eSMel Gorman 26751a687c2eSMel Gorman check_numabalancing_enable(); 26761da177e4SLinus Torvalds } 26771da177e4SLinus Torvalds 26788bccd85fSChristoph Lameter /* Reset policy of current process to default */ 26791da177e4SLinus Torvalds void numa_default_policy(void) 26801da177e4SLinus Torvalds { 2681028fec41SDavid Rientjes do_set_mempolicy(MPOL_DEFAULT, 0, NULL); 26821da177e4SLinus Torvalds } 268368860ec1SPaul Jackson 26844225399aSPaul Jackson /* 2685095f1fc4SLee Schermerhorn * Parse and format mempolicy from/to strings 2686095f1fc4SLee Schermerhorn */ 2687095f1fc4SLee Schermerhorn 2688095f1fc4SLee Schermerhorn /* 2689f2a07f40SHugh Dickins * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag. 26901a75a6c8SChristoph Lameter */ 2691345ace9cSLee Schermerhorn static const char * const policy_modes[] = 2692345ace9cSLee Schermerhorn { 2693345ace9cSLee Schermerhorn [MPOL_DEFAULT] = "default", 2694345ace9cSLee Schermerhorn [MPOL_PREFERRED] = "prefer", 2695345ace9cSLee Schermerhorn [MPOL_BIND] = "bind", 2696345ace9cSLee Schermerhorn [MPOL_INTERLEAVE] = "interleave", 2697d3a71033SLee Schermerhorn [MPOL_LOCAL] = "local", 2698345ace9cSLee Schermerhorn }; 26991a75a6c8SChristoph Lameter 2700095f1fc4SLee Schermerhorn 2701095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS 2702095f1fc4SLee Schermerhorn /** 2703f2a07f40SHugh Dickins * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option. 2704095f1fc4SLee Schermerhorn * @str: string containing mempolicy to parse 270571fe804bSLee Schermerhorn * @mpol: pointer to struct mempolicy pointer, returned on success. 
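 *
 * Illustrative examples of accepted strings, following the format and the
 * per-mode rules documented below:
 *	"interleave:0-3", "prefer=static:1", "bind:0,2", "local", "default"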
2706095f1fc4SLee Schermerhorn * 2707095f1fc4SLee Schermerhorn * Format of input: 2708095f1fc4SLee Schermerhorn * <mode>[=<flags>][:<nodelist>] 2709095f1fc4SLee Schermerhorn * 271071fe804bSLee Schermerhorn * On success, returns 0, else 1 2711095f1fc4SLee Schermerhorn */ 2712a7a88b23SHugh Dickins int mpol_parse_str(char *str, struct mempolicy **mpol) 2713095f1fc4SLee Schermerhorn { 271471fe804bSLee Schermerhorn struct mempolicy *new = NULL; 2715b4652e84SLee Schermerhorn unsigned short mode; 2716f2a07f40SHugh Dickins unsigned short mode_flags; 271771fe804bSLee Schermerhorn nodemask_t nodes; 2718095f1fc4SLee Schermerhorn char *nodelist = strchr(str, ':'); 2719095f1fc4SLee Schermerhorn char *flags = strchr(str, '='); 2720095f1fc4SLee Schermerhorn int err = 1; 2721095f1fc4SLee Schermerhorn 2722095f1fc4SLee Schermerhorn if (nodelist) { 2723095f1fc4SLee Schermerhorn /* NUL-terminate mode or flags string */ 2724095f1fc4SLee Schermerhorn *nodelist++ = '\0'; 272571fe804bSLee Schermerhorn if (nodelist_parse(nodelist, nodes)) 2726095f1fc4SLee Schermerhorn goto out; 272701f13bd6SLai Jiangshan if (!nodes_subset(nodes, node_states[N_MEMORY])) 2728095f1fc4SLee Schermerhorn goto out; 272971fe804bSLee Schermerhorn } else 273071fe804bSLee Schermerhorn nodes_clear(nodes); 273171fe804bSLee Schermerhorn 2732095f1fc4SLee Schermerhorn if (flags) 2733095f1fc4SLee Schermerhorn *flags++ = '\0'; /* terminate mode string */ 2734095f1fc4SLee Schermerhorn 2735479e2802SPeter Zijlstra for (mode = 0; mode < MPOL_MAX; mode++) { 2736345ace9cSLee Schermerhorn if (!strcmp(str, policy_modes[mode])) { 2737095f1fc4SLee Schermerhorn break; 2738095f1fc4SLee Schermerhorn } 2739095f1fc4SLee Schermerhorn } 2740a720094dSMel Gorman if (mode >= MPOL_MAX) 2741095f1fc4SLee Schermerhorn goto out; 2742095f1fc4SLee Schermerhorn 274371fe804bSLee Schermerhorn switch (mode) { 2744095f1fc4SLee Schermerhorn case MPOL_PREFERRED: 274571fe804bSLee Schermerhorn /* 274671fe804bSLee Schermerhorn * Insist on a nodelist of one node only 274771fe804bSLee Schermerhorn */ 2748095f1fc4SLee Schermerhorn if (nodelist) { 2749095f1fc4SLee Schermerhorn char *rest = nodelist; 2750095f1fc4SLee Schermerhorn while (isdigit(*rest)) 2751095f1fc4SLee Schermerhorn rest++; 2752926f2ae0SKOSAKI Motohiro if (*rest) 2753926f2ae0SKOSAKI Motohiro goto out; 2754095f1fc4SLee Schermerhorn } 2755095f1fc4SLee Schermerhorn break; 2756095f1fc4SLee Schermerhorn case MPOL_INTERLEAVE: 2757095f1fc4SLee Schermerhorn /* 2758095f1fc4SLee Schermerhorn * Default to online nodes with memory if no nodelist 2759095f1fc4SLee Schermerhorn */ 2760095f1fc4SLee Schermerhorn if (!nodelist) 276101f13bd6SLai Jiangshan nodes = node_states[N_MEMORY]; 27623f226aa1SLee Schermerhorn break; 276371fe804bSLee Schermerhorn case MPOL_LOCAL: 27643f226aa1SLee Schermerhorn /* 276571fe804bSLee Schermerhorn * Don't allow a nodelist; mpol_new() checks flags 27663f226aa1SLee Schermerhorn */ 276771fe804bSLee Schermerhorn if (nodelist) 27683f226aa1SLee Schermerhorn goto out; 276971fe804bSLee Schermerhorn mode = MPOL_PREFERRED; 27703f226aa1SLee Schermerhorn break; 2771413b43deSRavikiran G Thirumalai case MPOL_DEFAULT: 2772413b43deSRavikiran G Thirumalai /* 2773413b43deSRavikiran G Thirumalai * Insist on a empty nodelist 2774413b43deSRavikiran G Thirumalai */ 2775413b43deSRavikiran G Thirumalai if (!nodelist) 2776413b43deSRavikiran G Thirumalai err = 0; 2777413b43deSRavikiran G Thirumalai goto out; 2778d69b2e63SKOSAKI Motohiro case MPOL_BIND: 277971fe804bSLee Schermerhorn /* 2780d69b2e63SKOSAKI Motohiro * Insist on a nodelist 
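		 * (for example "bind:0,2"; with no nodelist the string is rejected)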
278171fe804bSLee Schermerhorn */ 2782d69b2e63SKOSAKI Motohiro if (!nodelist) 2783d69b2e63SKOSAKI Motohiro goto out; 2784095f1fc4SLee Schermerhorn } 2785095f1fc4SLee Schermerhorn 278671fe804bSLee Schermerhorn mode_flags = 0; 2787095f1fc4SLee Schermerhorn if (flags) { 2788095f1fc4SLee Schermerhorn /* 2789095f1fc4SLee Schermerhorn * Currently, we only support two mutually exclusive 2790095f1fc4SLee Schermerhorn * mode flags. 2791095f1fc4SLee Schermerhorn */ 2792095f1fc4SLee Schermerhorn if (!strcmp(flags, "static")) 279371fe804bSLee Schermerhorn mode_flags |= MPOL_F_STATIC_NODES; 2794095f1fc4SLee Schermerhorn else if (!strcmp(flags, "relative")) 279571fe804bSLee Schermerhorn mode_flags |= MPOL_F_RELATIVE_NODES; 2796095f1fc4SLee Schermerhorn else 2797926f2ae0SKOSAKI Motohiro goto out; 2798095f1fc4SLee Schermerhorn } 279971fe804bSLee Schermerhorn 280071fe804bSLee Schermerhorn new = mpol_new(mode, mode_flags, &nodes); 280171fe804bSLee Schermerhorn if (IS_ERR(new)) 2802926f2ae0SKOSAKI Motohiro goto out; 2803926f2ae0SKOSAKI Motohiro 2804f2a07f40SHugh Dickins /* 2805f2a07f40SHugh Dickins * Save nodes for mpol_to_str() to show the tmpfs mount options 2806f2a07f40SHugh Dickins * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo. 2807f2a07f40SHugh Dickins */ 2808f2a07f40SHugh Dickins if (mode != MPOL_PREFERRED) 2809f2a07f40SHugh Dickins new->v.nodes = nodes; 2810f2a07f40SHugh Dickins else if (nodelist) 2811f2a07f40SHugh Dickins new->v.preferred_node = first_node(nodes); 2812f2a07f40SHugh Dickins else 2813f2a07f40SHugh Dickins new->flags |= MPOL_F_LOCAL; 2814f2a07f40SHugh Dickins 2815f2a07f40SHugh Dickins /* 2816f2a07f40SHugh Dickins * Save nodes for contextualization: this will be used to "clone" 2817f2a07f40SHugh Dickins * the mempolicy in a specific context [cpuset] at a later time. 2818f2a07f40SHugh Dickins */ 2819e17f74afSLee Schermerhorn new->w.user_nodemask = nodes; 2820f2a07f40SHugh Dickins 2821926f2ae0SKOSAKI Motohiro err = 0; 282271fe804bSLee Schermerhorn 2823095f1fc4SLee Schermerhorn out: 2824095f1fc4SLee Schermerhorn /* Restore string for error message */ 2825095f1fc4SLee Schermerhorn if (nodelist) 2826095f1fc4SLee Schermerhorn *--nodelist = ':'; 2827095f1fc4SLee Schermerhorn if (flags) 2828095f1fc4SLee Schermerhorn *--flags = '='; 282971fe804bSLee Schermerhorn if (!err) 283071fe804bSLee Schermerhorn *mpol = new; 2831095f1fc4SLee Schermerhorn return err; 2832095f1fc4SLee Schermerhorn } 2833095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */ 2834095f1fc4SLee Schermerhorn 283571fe804bSLee Schermerhorn /** 283671fe804bSLee Schermerhorn * mpol_to_str - format a mempolicy structure for printing 283771fe804bSLee Schermerhorn * @buffer: to contain formatted mempolicy string 283871fe804bSLee Schermerhorn * @maxlen: length of @buffer 283971fe804bSLee Schermerhorn * @pol: pointer to mempolicy to be formatted 284071fe804bSLee Schermerhorn * 28411a75a6c8SChristoph Lameter * Convert a mempolicy into a string. 28421a75a6c8SChristoph Lameter * Returns the number of characters in buffer (if positive) 28431a75a6c8SChristoph Lameter * or an error (negative) 28441a75a6c8SChristoph Lameter */ 2845a7a88b23SHugh Dickins int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol) 28461a75a6c8SChristoph Lameter { 28471a75a6c8SChristoph Lameter char *p = buffer; 28481a75a6c8SChristoph Lameter int l; 28491a75a6c8SChristoph Lameter nodemask_t nodes; 2850bea904d5SLee Schermerhorn unsigned short mode; 2851f5b087b5SDavid Rientjes unsigned short flags = pol ? 
pol->flags : 0; 28521a75a6c8SChristoph Lameter 28532291990aSLee Schermerhorn /* 28542291990aSLee Schermerhorn * Sanity check: room for longest mode, flag and some nodes 28552291990aSLee Schermerhorn */ 28562291990aSLee Schermerhorn VM_BUG_ON(maxlen < strlen("interleave") + strlen("relative") + 16); 28572291990aSLee Schermerhorn 2858bea904d5SLee Schermerhorn if (!pol || pol == &default_policy) 2859bea904d5SLee Schermerhorn mode = MPOL_DEFAULT; 2860bea904d5SLee Schermerhorn else 2861bea904d5SLee Schermerhorn mode = pol->mode; 2862bea904d5SLee Schermerhorn 28631a75a6c8SChristoph Lameter switch (mode) { 28641a75a6c8SChristoph Lameter case MPOL_DEFAULT: 28651a75a6c8SChristoph Lameter nodes_clear(nodes); 28661a75a6c8SChristoph Lameter break; 28671a75a6c8SChristoph Lameter 28681a75a6c8SChristoph Lameter case MPOL_PREFERRED: 28691a75a6c8SChristoph Lameter nodes_clear(nodes); 2870fc36b8d3SLee Schermerhorn if (flags & MPOL_F_LOCAL) 2871f2a07f40SHugh Dickins mode = MPOL_LOCAL; 287253f2556bSLee Schermerhorn else 2873fc36b8d3SLee Schermerhorn node_set(pol->v.preferred_node, nodes); 28741a75a6c8SChristoph Lameter break; 28751a75a6c8SChristoph Lameter 28761a75a6c8SChristoph Lameter case MPOL_BIND: 287719770b32SMel Gorman /* Fall through */ 28781a75a6c8SChristoph Lameter case MPOL_INTERLEAVE: 28791a75a6c8SChristoph Lameter nodes = pol->v.nodes; 28801a75a6c8SChristoph Lameter break; 28811a75a6c8SChristoph Lameter 28821a75a6c8SChristoph Lameter default: 288380de7c31SDave Jones return -EINVAL; 28841a75a6c8SChristoph Lameter } 28851a75a6c8SChristoph Lameter 2886345ace9cSLee Schermerhorn l = strlen(policy_modes[mode]); 28871a75a6c8SChristoph Lameter if (buffer + maxlen < p + l + 1) 28881a75a6c8SChristoph Lameter return -ENOSPC; 28891a75a6c8SChristoph Lameter 2890345ace9cSLee Schermerhorn strcpy(p, policy_modes[mode]); 28911a75a6c8SChristoph Lameter p += l; 28921a75a6c8SChristoph Lameter 2893fc36b8d3SLee Schermerhorn if (flags & MPOL_MODE_FLAGS) { 2894f5b087b5SDavid Rientjes if (buffer + maxlen < p + 2) 2895f5b087b5SDavid Rientjes return -ENOSPC; 2896f5b087b5SDavid Rientjes *p++ = '='; 2897f5b087b5SDavid Rientjes 28982291990aSLee Schermerhorn /* 28992291990aSLee Schermerhorn * Currently, the only defined flags are mutually exclusive 29002291990aSLee Schermerhorn */ 2901f5b087b5SDavid Rientjes if (flags & MPOL_F_STATIC_NODES) 29022291990aSLee Schermerhorn p += snprintf(p, buffer + maxlen - p, "static"); 29032291990aSLee Schermerhorn else if (flags & MPOL_F_RELATIVE_NODES) 29042291990aSLee Schermerhorn p += snprintf(p, buffer + maxlen - p, "relative"); 2905f5b087b5SDavid Rientjes } 2906f5b087b5SDavid Rientjes 29071a75a6c8SChristoph Lameter if (!nodes_empty(nodes)) { 29081a75a6c8SChristoph Lameter if (buffer + maxlen < p + 2) 29091a75a6c8SChristoph Lameter return -ENOSPC; 2910095f1fc4SLee Schermerhorn *p++ = ':'; 29111a75a6c8SChristoph Lameter p += nodelist_scnprintf(p, buffer + maxlen - p, nodes); 29121a75a6c8SChristoph Lameter } 29131a75a6c8SChristoph Lameter return p - buffer; 29141a75a6c8SChristoph Lameter } 2915
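/*
 * ---------------------------------------------------------------------------
 * Editor's illustrative appendix -- not part of mempolicy.c.  A minimal
 * userspace sketch of the policies implemented above, assuming a Linux
 * system that exposes __NR_set_mempolicy and __NR_mbind and uapi MPOL_*
 * values matching those mirrored below.  The shared-policy path for tmpfs
 * discussed above is reached instead via a mount option such as
 * "mount -t tmpfs -o mpol=interleave:0-3 tmpfs /mnt".
 * Build standalone with: cc -o mpol_demo mpol_demo.c
 * ---------------------------------------------------------------------------
 */
#if 0	/* standalone demo only; kept out of any kernel build */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/syscall.h>

/* Assumed to mirror include/uapi/linux/mempolicy.h */
#define MPOL_DEFAULT	0
#define MPOL_PREFERRED	1
#define MPOL_BIND	2
#define MPOL_INTERLEAVE	3

int main(void)
{
	unsigned long nodes = 0x3;		/* nodemask: nodes 0 and 1 */
	size_t len = 4UL << 20;			/* 4MB scratch mapping */
	void *p;

	/* Process policy: interleave new allocations across nodes 0-1 */
	if (syscall(__NR_set_mempolicy, MPOL_INTERLEAVE, &nodes,
		    8 * sizeof(nodes)))
		perror("set_mempolicy");

	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	/* VMA policy: bind this mapping to node 0; overrides the above */
	nodes = 0x1;
	if (syscall(__NR_mbind, p, len, MPOL_BIND, &nodes,
		    8 * sizeof(nodes), 0))
		perror("mbind");

	memset(p, 0, len);	/* fault pages in under the VMA policy */
	munmap(p, len);

	/* Back to the default local-allocation policy */
	syscall(__NR_set_mempolicy, MPOL_DEFAULT, NULL, 0);
	return 0;
}
#endif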