11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  * Simple NUMA memory policy for the Linux kernel.
31da177e4SLinus Torvalds  *
41da177e4SLinus Torvalds  * Copyright 2003,2004 Andi Kleen, SuSE Labs.
58bccd85fSChristoph Lameter  * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
61da177e4SLinus Torvalds  * Subject to the GNU Public License, version 2.
71da177e4SLinus Torvalds  *
81da177e4SLinus Torvalds  * NUMA policy allows the user to give hints on which node(s) memory should
91da177e4SLinus Torvalds  * be allocated.
101da177e4SLinus Torvalds  *
111da177e4SLinus Torvalds  * Support four policies per VMA and per process:
121da177e4SLinus Torvalds  *
131da177e4SLinus Torvalds  * The VMA policy has priority over the process policy for a page fault.
141da177e4SLinus Torvalds  *
151da177e4SLinus Torvalds  * interleave     Allocate memory interleaved over a set of nodes,
161da177e4SLinus Torvalds  *                with normal fallback if it fails.
171da177e4SLinus Torvalds  *                For VMA based allocations this interleaves based on the
181da177e4SLinus Torvalds  *                offset into the backing object or offset into the mapping
191da177e4SLinus Torvalds  *                for anonymous memory. For process policy a process counter
201da177e4SLinus Torvalds  *                is used.
218bccd85fSChristoph Lameter  *
221da177e4SLinus Torvalds  * bind           Only allocate memory on a specific set of nodes,
231da177e4SLinus Torvalds  *                no fallback.
248bccd85fSChristoph Lameter  *                FIXME: memory is allocated starting with the first node
258bccd85fSChristoph Lameter  *                to the last. It would be better if bind would truly restrict
268bccd85fSChristoph Lameter  *                the allocation to memory nodes instead.
278bccd85fSChristoph Lameter  *
281da177e4SLinus Torvalds  * preferred      Try a specific node first before normal fallback.
2900ef2d2fSDavid Rientjes  *                As a special case NUMA_NO_NODE here means do the allocation
301da177e4SLinus Torvalds  *                on the local CPU. This is normally identical to default,
311da177e4SLinus Torvalds  *                but useful to set in a VMA when you have a non-default
321da177e4SLinus Torvalds  *                process policy.
338bccd85fSChristoph Lameter  *
341da177e4SLinus Torvalds  * default        Allocate on the local node first, or when on a VMA
351da177e4SLinus Torvalds  *                use the process policy. This is what Linux always did
361da177e4SLinus Torvalds  *                in a NUMA aware kernel and still does by, ahem, default.
371da177e4SLinus Torvalds  *
381da177e4SLinus Torvalds  * The process policy is applied for most non-interrupt memory allocations
391da177e4SLinus Torvalds  * in that process' context. Interrupts ignore the policies and always
401da177e4SLinus Torvalds  * try to allocate on the local CPU. The VMA policy is only applied for memory
411da177e4SLinus Torvalds  * allocations for a VMA in the VM.
421da177e4SLinus Torvalds  *
431da177e4SLinus Torvalds  * Currently there are a few corner cases in swapping where the policy
441da177e4SLinus Torvalds  * is not applied, but the majority should be handled. When process policy
451da177e4SLinus Torvalds  * is used it is not remembered over swap outs/swap ins.
461da177e4SLinus Torvalds  *
471da177e4SLinus Torvalds  * Only the highest zone in the zone hierarchy gets policied. Allocations
481da177e4SLinus Torvalds  * requesting a lower zone just use default policy. This implies that
491da177e4SLinus Torvalds  * on systems with highmem, kernel lowmem allocations don't get policied.
501da177e4SLinus Torvalds  * Same with GFP_DMA allocations.
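 *
 * As a userspace illustration of the modes above (a sketch using the raw
 * syscalls declared in <numaif.h>, assuming a machine where nodes 0 and 1
 * are online; not taken from this file):
 *
 *	unsigned long nodes = 0x3;		// nodemask {0,1}
 *	// process policy: interleave further allocations over {0,1}
 *	set_mempolicy(MPOL_INTERLEAVE, &nodes, sizeof(nodes) * 8);
 *	// VMA policy: bind one mapping to {0,1}; for faults in this
 *	// range it takes priority over the interleave process policy
 *	void *buf = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	mbind(buf, 1 << 20, MPOL_BIND, &nodes, sizeof(nodes) * 8, 0);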
511da177e4SLinus Torvalds  *
521da177e4SLinus Torvalds  * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
531da177e4SLinus Torvalds  * all users and remembered even when nobody has memory mapped.
541da177e4SLinus Torvalds  */
551da177e4SLinus Torvalds 
561da177e4SLinus Torvalds /* Notebook:
571da177e4SLinus Torvalds    fix mmap readahead to honour policy and enable policy for any page cache
581da177e4SLinus Torvalds    object
591da177e4SLinus Torvalds    statistics for bigpages
601da177e4SLinus Torvalds    global policy for page cache? currently it uses process policy. Requires
611da177e4SLinus Torvalds    first item above.
621da177e4SLinus Torvalds    handle mremap for shared memory (currently ignored for the policy)
631da177e4SLinus Torvalds    grows down?
641da177e4SLinus Torvalds    make bind policy root only? It can trigger oom much faster and the
651da177e4SLinus Torvalds    kernel is not always grateful with that.
661da177e4SLinus Torvalds */
671da177e4SLinus Torvalds 
68b1de0d13SMitchel Humpherys #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
69b1de0d13SMitchel Humpherys 
701da177e4SLinus Torvalds #include <linux/mempolicy.h>
711da177e4SLinus Torvalds #include <linux/mm.h>
721da177e4SLinus Torvalds #include <linux/highmem.h>
731da177e4SLinus Torvalds #include <linux/hugetlb.h>
741da177e4SLinus Torvalds #include <linux/kernel.h>
751da177e4SLinus Torvalds #include <linux/sched.h>
761da177e4SLinus Torvalds #include <linux/nodemask.h>
771da177e4SLinus Torvalds #include <linux/cpuset.h>
781da177e4SLinus Torvalds #include <linux/slab.h>
791da177e4SLinus Torvalds #include <linux/string.h>
80b95f1b31SPaul Gortmaker #include <linux/export.h>
81b488893aSPavel Emelyanov #include <linux/nsproxy.h>
821da177e4SLinus Torvalds #include <linux/interrupt.h>
831da177e4SLinus Torvalds #include <linux/init.h>
841da177e4SLinus Torvalds #include <linux/compat.h>
85dc9aa5b9SChristoph Lameter #include <linux/swap.h>
861a75a6c8SChristoph Lameter #include <linux/seq_file.h>
871a75a6c8SChristoph Lameter #include <linux/proc_fs.h>
88b20a3503SChristoph Lameter #include <linux/migrate.h>
8962b61f61SHugh Dickins #include <linux/ksm.h>
9095a402c3SChristoph Lameter #include <linux/rmap.h>
9186c3a764SDavid Quigley #include <linux/security.h>
92dbcb0f19SAdrian Bunk #include <linux/syscalls.h>
93095f1fc4SLee Schermerhorn #include <linux/ctype.h>
946d9c285aSKOSAKI Motohiro #include <linux/mm_inline.h>
95b24f53a0SLee Schermerhorn #include <linux/mmu_notifier.h>
96b1de0d13SMitchel Humpherys #include <linux/printk.h>
97dc9aa5b9SChristoph Lameter 
981da177e4SLinus Torvalds #include <asm/tlbflush.h>
991da177e4SLinus Torvalds #include <asm/uaccess.h>
100778d3b0fSMichal Hocko #include <linux/random.h>
1011da177e4SLinus Torvalds 
10262695a84SNick Piggin #include "internal.h"
10362695a84SNick Piggin 
10438e35860SChristoph Lameter /* Internal flags */
105dc9aa5b9SChristoph Lameter #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for contiguous vmas */
10638e35860SChristoph Lameter #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
107dc9aa5b9SChristoph Lameter 
108fcc234f8SPekka Enberg static struct kmem_cache *policy_cache;
109fcc234f8SPekka Enberg static struct kmem_cache *sn_cache;
1101da177e4SLinus Torvalds 
1111da177e4SLinus Torvalds /* Highest zone. A specific allocation for a zone below that is not
1121da177e4SLinus Torvalds    policied.
*/ 1136267276fSChristoph Lameter enum zone_type policy_zone = 0; 1141da177e4SLinus Torvalds 115bea904d5SLee Schermerhorn /* 116bea904d5SLee Schermerhorn * run-time system-wide default policy => local allocation 117bea904d5SLee Schermerhorn */ 118e754d79dSH Hartley Sweeten static struct mempolicy default_policy = { 1191da177e4SLinus Torvalds .refcnt = ATOMIC_INIT(1), /* never free it */ 120bea904d5SLee Schermerhorn .mode = MPOL_PREFERRED, 121fc36b8d3SLee Schermerhorn .flags = MPOL_F_LOCAL, 1221da177e4SLinus Torvalds }; 1231da177e4SLinus Torvalds 1245606e387SMel Gorman static struct mempolicy preferred_node_policy[MAX_NUMNODES]; 1255606e387SMel Gorman 12674d2c3a0SOleg Nesterov struct mempolicy *get_task_policy(struct task_struct *p) 1275606e387SMel Gorman { 1285606e387SMel Gorman struct mempolicy *pol = p->mempolicy; 129f15ca78eSOleg Nesterov int node; 1305606e387SMel Gorman 131f15ca78eSOleg Nesterov if (pol) 132f15ca78eSOleg Nesterov return pol; 1335606e387SMel Gorman 134f15ca78eSOleg Nesterov node = numa_node_id(); 1351da6f0e1SJianguo Wu if (node != NUMA_NO_NODE) { 1361da6f0e1SJianguo Wu pol = &preferred_node_policy[node]; 137f15ca78eSOleg Nesterov /* preferred_node_policy is not initialised early in boot */ 138f15ca78eSOleg Nesterov if (pol->mode) 139f15ca78eSOleg Nesterov return pol; 1401da6f0e1SJianguo Wu } 1415606e387SMel Gorman 142f15ca78eSOleg Nesterov return &default_policy; 1435606e387SMel Gorman } 1445606e387SMel Gorman 14537012946SDavid Rientjes static const struct mempolicy_operations { 14637012946SDavid Rientjes int (*create)(struct mempolicy *pol, const nodemask_t *nodes); 147708c1bbcSMiao Xie /* 148708c1bbcSMiao Xie * If read-side task has no lock to protect task->mempolicy, write-side 149708c1bbcSMiao Xie * task will rebind the task->mempolicy by two step. The first step is 150708c1bbcSMiao Xie * setting all the newly nodes, and the second step is cleaning all the 151708c1bbcSMiao Xie * disallowed nodes. In this way, we can avoid finding no node to alloc 152708c1bbcSMiao Xie * page. 153708c1bbcSMiao Xie * If we have a lock to protect task->mempolicy in read-side, we do 154708c1bbcSMiao Xie * rebind directly. 
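 *
 * A worked sketch of why two steps are needed (hypothetical masks, not
 * taken from this file): suppose a policy currently uses nodes {0,1} and
 * the cpuset is changed to {2,3}.  Rebinding in one step could leave a
 * window in which a lockless reader sees an empty mask.  The two-step
 * rebind instead does, roughly:
 *
 *	nodes_or(pol->v.nodes, pol->v.nodes, *newmask);	// STEP1 -> {0,1,2,3}
 *	nodes_and(pol->v.nodes, pol->v.nodes, *newmask);	// STEP2 -> {2,3}
 *
 * so at every point the reader finds at least one allowed node.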
155708c1bbcSMiao Xie * 156708c1bbcSMiao Xie * step: 157708c1bbcSMiao Xie * MPOL_REBIND_ONCE - do rebind work at once 158708c1bbcSMiao Xie * MPOL_REBIND_STEP1 - set all the newly nodes 159708c1bbcSMiao Xie * MPOL_REBIND_STEP2 - clean all the disallowed nodes 160708c1bbcSMiao Xie */ 161708c1bbcSMiao Xie void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes, 162708c1bbcSMiao Xie enum mpol_rebind_step step); 16337012946SDavid Rientjes } mpol_ops[MPOL_MAX]; 16437012946SDavid Rientjes 16519770b32SMel Gorman /* Check that the nodemask contains at least one populated zone */ 16637012946SDavid Rientjes static int is_valid_nodemask(const nodemask_t *nodemask) 1671da177e4SLinus Torvalds { 168d3eb1570SLai Jiangshan return nodes_intersects(*nodemask, node_states[N_MEMORY]); 1691da177e4SLinus Torvalds } 1701da177e4SLinus Torvalds 171f5b087b5SDavid Rientjes static inline int mpol_store_user_nodemask(const struct mempolicy *pol) 172f5b087b5SDavid Rientjes { 1736d556294SBob Liu return pol->flags & MPOL_MODE_FLAGS; 1744c50bc01SDavid Rientjes } 1754c50bc01SDavid Rientjes 1764c50bc01SDavid Rientjes static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig, 1774c50bc01SDavid Rientjes const nodemask_t *rel) 1784c50bc01SDavid Rientjes { 1794c50bc01SDavid Rientjes nodemask_t tmp; 1804c50bc01SDavid Rientjes nodes_fold(tmp, *orig, nodes_weight(*rel)); 1814c50bc01SDavid Rientjes nodes_onto(*ret, tmp, *rel); 182f5b087b5SDavid Rientjes } 183f5b087b5SDavid Rientjes 18437012946SDavid Rientjes static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes) 18537012946SDavid Rientjes { 18637012946SDavid Rientjes if (nodes_empty(*nodes)) 18737012946SDavid Rientjes return -EINVAL; 18837012946SDavid Rientjes pol->v.nodes = *nodes; 18937012946SDavid Rientjes return 0; 19037012946SDavid Rientjes } 19137012946SDavid Rientjes 19237012946SDavid Rientjes static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes) 19337012946SDavid Rientjes { 19437012946SDavid Rientjes if (!nodes) 195fc36b8d3SLee Schermerhorn pol->flags |= MPOL_F_LOCAL; /* local allocation */ 19637012946SDavid Rientjes else if (nodes_empty(*nodes)) 19737012946SDavid Rientjes return -EINVAL; /* no allowed nodes */ 19837012946SDavid Rientjes else 19937012946SDavid Rientjes pol->v.preferred_node = first_node(*nodes); 20037012946SDavid Rientjes return 0; 20137012946SDavid Rientjes } 20237012946SDavid Rientjes 20337012946SDavid Rientjes static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes) 20437012946SDavid Rientjes { 20537012946SDavid Rientjes if (!is_valid_nodemask(nodes)) 20637012946SDavid Rientjes return -EINVAL; 20737012946SDavid Rientjes pol->v.nodes = *nodes; 20837012946SDavid Rientjes return 0; 20937012946SDavid Rientjes } 21037012946SDavid Rientjes 21158568d2aSMiao Xie /* 21258568d2aSMiao Xie * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if 21358568d2aSMiao Xie * any, for the new policy. mpol_new() has already validated the nodes 21458568d2aSMiao Xie * parameter with respect to the policy mode and flags. But, we need to 21558568d2aSMiao Xie * handle an empty nodemask with MPOL_PREFERRED here. 21658568d2aSMiao Xie * 21758568d2aSMiao Xie * Must be called holding task's alloc_lock to protect task's mems_allowed 21858568d2aSMiao Xie * and mempolicy. May also be called holding the mmap_semaphore for write. 
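 *
 * The expected calling sequence therefore mirrors do_set_mempolicy()
 * further down in this file (simplified sketch):
 *
 *	new = mpol_new(mode, flags, nodes);
 *	task_lock(current);
 *	ret = mpol_set_nodemask(new, nodes, scratch);
 *	task_unlock(current);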
21958568d2aSMiao Xie */ 2204bfc4495SKAMEZAWA Hiroyuki static int mpol_set_nodemask(struct mempolicy *pol, 2214bfc4495SKAMEZAWA Hiroyuki const nodemask_t *nodes, struct nodemask_scratch *nsc) 22258568d2aSMiao Xie { 22358568d2aSMiao Xie int ret; 22458568d2aSMiao Xie 22558568d2aSMiao Xie /* if mode is MPOL_DEFAULT, pol is NULL. This is right. */ 22658568d2aSMiao Xie if (pol == NULL) 22758568d2aSMiao Xie return 0; 22801f13bd6SLai Jiangshan /* Check N_MEMORY */ 2294bfc4495SKAMEZAWA Hiroyuki nodes_and(nsc->mask1, 23001f13bd6SLai Jiangshan cpuset_current_mems_allowed, node_states[N_MEMORY]); 23158568d2aSMiao Xie 23258568d2aSMiao Xie VM_BUG_ON(!nodes); 23358568d2aSMiao Xie if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes)) 23458568d2aSMiao Xie nodes = NULL; /* explicit local allocation */ 23558568d2aSMiao Xie else { 23658568d2aSMiao Xie if (pol->flags & MPOL_F_RELATIVE_NODES) 2374bfc4495SKAMEZAWA Hiroyuki mpol_relative_nodemask(&nsc->mask2, nodes,&nsc->mask1); 23858568d2aSMiao Xie else 2394bfc4495SKAMEZAWA Hiroyuki nodes_and(nsc->mask2, *nodes, nsc->mask1); 2404bfc4495SKAMEZAWA Hiroyuki 24158568d2aSMiao Xie if (mpol_store_user_nodemask(pol)) 24258568d2aSMiao Xie pol->w.user_nodemask = *nodes; 24358568d2aSMiao Xie else 24458568d2aSMiao Xie pol->w.cpuset_mems_allowed = 24558568d2aSMiao Xie cpuset_current_mems_allowed; 24658568d2aSMiao Xie } 24758568d2aSMiao Xie 2484bfc4495SKAMEZAWA Hiroyuki if (nodes) 2494bfc4495SKAMEZAWA Hiroyuki ret = mpol_ops[pol->mode].create(pol, &nsc->mask2); 2504bfc4495SKAMEZAWA Hiroyuki else 2514bfc4495SKAMEZAWA Hiroyuki ret = mpol_ops[pol->mode].create(pol, NULL); 25258568d2aSMiao Xie return ret; 25358568d2aSMiao Xie } 25458568d2aSMiao Xie 25558568d2aSMiao Xie /* 25658568d2aSMiao Xie * This function just creates a new policy, does some check and simple 25758568d2aSMiao Xie * initialization. You must invoke mpol_set_nodemask() to set nodes. 25858568d2aSMiao Xie */ 259028fec41SDavid Rientjes static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags, 260028fec41SDavid Rientjes nodemask_t *nodes) 2611da177e4SLinus Torvalds { 2621da177e4SLinus Torvalds struct mempolicy *policy; 2631da177e4SLinus Torvalds 264028fec41SDavid Rientjes pr_debug("setting mode %d flags %d nodes[0] %lx\n", 26500ef2d2fSDavid Rientjes mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE); 266140d5a49SPaul Mundt 2673e1f0645SDavid Rientjes if (mode == MPOL_DEFAULT) { 2683e1f0645SDavid Rientjes if (nodes && !nodes_empty(*nodes)) 26937012946SDavid Rientjes return ERR_PTR(-EINVAL); 270d3a71033SLee Schermerhorn return NULL; 27137012946SDavid Rientjes } 2723e1f0645SDavid Rientjes VM_BUG_ON(!nodes); 2733e1f0645SDavid Rientjes 2743e1f0645SDavid Rientjes /* 2753e1f0645SDavid Rientjes * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or 2763e1f0645SDavid Rientjes * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation). 2773e1f0645SDavid Rientjes * All other modes require a valid pointer to a non-empty nodemask. 
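 *
 * As an illustration of the two flag flavours (hypothetical values):
 * with MPOL_F_RELATIVE_NODES a user mask of {0,1} over an allowed set of
 * {4,5,6,7} is remapped by mpol_relative_nodemask() onto {4,5}, while
 * with MPOL_F_STATIC_NODES the mask is kept exactly as the user wrote it
 * and is only intersected with the currently allowed nodes.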
2783e1f0645SDavid Rientjes */ 2793e1f0645SDavid Rientjes if (mode == MPOL_PREFERRED) { 2803e1f0645SDavid Rientjes if (nodes_empty(*nodes)) { 2813e1f0645SDavid Rientjes if (((flags & MPOL_F_STATIC_NODES) || 2823e1f0645SDavid Rientjes (flags & MPOL_F_RELATIVE_NODES))) 2833e1f0645SDavid Rientjes return ERR_PTR(-EINVAL); 2843e1f0645SDavid Rientjes } 285479e2802SPeter Zijlstra } else if (mode == MPOL_LOCAL) { 286479e2802SPeter Zijlstra if (!nodes_empty(*nodes)) 287479e2802SPeter Zijlstra return ERR_PTR(-EINVAL); 288479e2802SPeter Zijlstra mode = MPOL_PREFERRED; 2893e1f0645SDavid Rientjes } else if (nodes_empty(*nodes)) 2903e1f0645SDavid Rientjes return ERR_PTR(-EINVAL); 2911da177e4SLinus Torvalds policy = kmem_cache_alloc(policy_cache, GFP_KERNEL); 2921da177e4SLinus Torvalds if (!policy) 2931da177e4SLinus Torvalds return ERR_PTR(-ENOMEM); 2941da177e4SLinus Torvalds atomic_set(&policy->refcnt, 1); 29545c4745aSLee Schermerhorn policy->mode = mode; 29637012946SDavid Rientjes policy->flags = flags; 2973e1f0645SDavid Rientjes 29837012946SDavid Rientjes return policy; 29937012946SDavid Rientjes } 30037012946SDavid Rientjes 30152cd3b07SLee Schermerhorn /* Slow path of a mpol destructor. */ 30252cd3b07SLee Schermerhorn void __mpol_put(struct mempolicy *p) 30352cd3b07SLee Schermerhorn { 30452cd3b07SLee Schermerhorn if (!atomic_dec_and_test(&p->refcnt)) 30552cd3b07SLee Schermerhorn return; 30652cd3b07SLee Schermerhorn kmem_cache_free(policy_cache, p); 30752cd3b07SLee Schermerhorn } 30852cd3b07SLee Schermerhorn 309708c1bbcSMiao Xie static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes, 310708c1bbcSMiao Xie enum mpol_rebind_step step) 31137012946SDavid Rientjes { 31237012946SDavid Rientjes } 31337012946SDavid Rientjes 314708c1bbcSMiao Xie /* 315708c1bbcSMiao Xie * step: 316708c1bbcSMiao Xie * MPOL_REBIND_ONCE - do rebind work at once 317708c1bbcSMiao Xie * MPOL_REBIND_STEP1 - set all the newly nodes 318708c1bbcSMiao Xie * MPOL_REBIND_STEP2 - clean all the disallowed nodes 319708c1bbcSMiao Xie */ 320708c1bbcSMiao Xie static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes, 321708c1bbcSMiao Xie enum mpol_rebind_step step) 3221d0d2680SDavid Rientjes { 3231d0d2680SDavid Rientjes nodemask_t tmp; 3241d0d2680SDavid Rientjes 32537012946SDavid Rientjes if (pol->flags & MPOL_F_STATIC_NODES) 32637012946SDavid Rientjes nodes_and(tmp, pol->w.user_nodemask, *nodes); 32737012946SDavid Rientjes else if (pol->flags & MPOL_F_RELATIVE_NODES) 32837012946SDavid Rientjes mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes); 3291d0d2680SDavid Rientjes else { 330708c1bbcSMiao Xie /* 331708c1bbcSMiao Xie * if step == 1, we use ->w.cpuset_mems_allowed to cache the 332708c1bbcSMiao Xie * result 333708c1bbcSMiao Xie */ 334708c1bbcSMiao Xie if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) { 335708c1bbcSMiao Xie nodes_remap(tmp, pol->v.nodes, 336708c1bbcSMiao Xie pol->w.cpuset_mems_allowed, *nodes); 337708c1bbcSMiao Xie pol->w.cpuset_mems_allowed = step ? 
tmp : *nodes; 338708c1bbcSMiao Xie } else if (step == MPOL_REBIND_STEP2) { 339708c1bbcSMiao Xie tmp = pol->w.cpuset_mems_allowed; 34037012946SDavid Rientjes pol->w.cpuset_mems_allowed = *nodes; 341708c1bbcSMiao Xie } else 342708c1bbcSMiao Xie BUG(); 3431d0d2680SDavid Rientjes } 34437012946SDavid Rientjes 345708c1bbcSMiao Xie if (nodes_empty(tmp)) 346708c1bbcSMiao Xie tmp = *nodes; 347708c1bbcSMiao Xie 348708c1bbcSMiao Xie if (step == MPOL_REBIND_STEP1) 349708c1bbcSMiao Xie nodes_or(pol->v.nodes, pol->v.nodes, tmp); 350708c1bbcSMiao Xie else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2) 3511d0d2680SDavid Rientjes pol->v.nodes = tmp; 352708c1bbcSMiao Xie else 353708c1bbcSMiao Xie BUG(); 354708c1bbcSMiao Xie 3551d0d2680SDavid Rientjes if (!node_isset(current->il_next, tmp)) { 3561d0d2680SDavid Rientjes current->il_next = next_node(current->il_next, tmp); 3571d0d2680SDavid Rientjes if (current->il_next >= MAX_NUMNODES) 3581d0d2680SDavid Rientjes current->il_next = first_node(tmp); 3591d0d2680SDavid Rientjes if (current->il_next >= MAX_NUMNODES) 3601d0d2680SDavid Rientjes current->il_next = numa_node_id(); 3611d0d2680SDavid Rientjes } 36237012946SDavid Rientjes } 36337012946SDavid Rientjes 36437012946SDavid Rientjes static void mpol_rebind_preferred(struct mempolicy *pol, 365708c1bbcSMiao Xie const nodemask_t *nodes, 366708c1bbcSMiao Xie enum mpol_rebind_step step) 36737012946SDavid Rientjes { 36837012946SDavid Rientjes nodemask_t tmp; 36937012946SDavid Rientjes 37037012946SDavid Rientjes if (pol->flags & MPOL_F_STATIC_NODES) { 3711d0d2680SDavid Rientjes int node = first_node(pol->w.user_nodemask); 3721d0d2680SDavid Rientjes 373fc36b8d3SLee Schermerhorn if (node_isset(node, *nodes)) { 3741d0d2680SDavid Rientjes pol->v.preferred_node = node; 375fc36b8d3SLee Schermerhorn pol->flags &= ~MPOL_F_LOCAL; 376fc36b8d3SLee Schermerhorn } else 377fc36b8d3SLee Schermerhorn pol->flags |= MPOL_F_LOCAL; 37837012946SDavid Rientjes } else if (pol->flags & MPOL_F_RELATIVE_NODES) { 37937012946SDavid Rientjes mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes); 3801d0d2680SDavid Rientjes pol->v.preferred_node = first_node(tmp); 381fc36b8d3SLee Schermerhorn } else if (!(pol->flags & MPOL_F_LOCAL)) { 3821d0d2680SDavid Rientjes pol->v.preferred_node = node_remap(pol->v.preferred_node, 38337012946SDavid Rientjes pol->w.cpuset_mems_allowed, 38437012946SDavid Rientjes *nodes); 38537012946SDavid Rientjes pol->w.cpuset_mems_allowed = *nodes; 3861d0d2680SDavid Rientjes } 3871d0d2680SDavid Rientjes } 38837012946SDavid Rientjes 389708c1bbcSMiao Xie /* 390708c1bbcSMiao Xie * mpol_rebind_policy - Migrate a policy to a different set of nodes 391708c1bbcSMiao Xie * 392708c1bbcSMiao Xie * If read-side task has no lock to protect task->mempolicy, write-side 393708c1bbcSMiao Xie * task will rebind the task->mempolicy by two step. The first step is 394708c1bbcSMiao Xie * setting all the newly nodes, and the second step is cleaning all the 395708c1bbcSMiao Xie * disallowed nodes. In this way, we can avoid finding no node to alloc 396708c1bbcSMiao Xie * page. 397708c1bbcSMiao Xie * If we have a lock to protect task->mempolicy in read-side, we do 398708c1bbcSMiao Xie * rebind directly. 
399708c1bbcSMiao Xie * 400708c1bbcSMiao Xie * step: 401708c1bbcSMiao Xie * MPOL_REBIND_ONCE - do rebind work at once 402708c1bbcSMiao Xie * MPOL_REBIND_STEP1 - set all the newly nodes 403708c1bbcSMiao Xie * MPOL_REBIND_STEP2 - clean all the disallowed nodes 404708c1bbcSMiao Xie */ 405708c1bbcSMiao Xie static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask, 406708c1bbcSMiao Xie enum mpol_rebind_step step) 40737012946SDavid Rientjes { 40837012946SDavid Rientjes if (!pol) 40937012946SDavid Rientjes return; 41089c522c7SWang Sheng-Hui if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE && 41137012946SDavid Rientjes nodes_equal(pol->w.cpuset_mems_allowed, *newmask)) 41237012946SDavid Rientjes return; 413708c1bbcSMiao Xie 414708c1bbcSMiao Xie if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING)) 415708c1bbcSMiao Xie return; 416708c1bbcSMiao Xie 417708c1bbcSMiao Xie if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING)) 418708c1bbcSMiao Xie BUG(); 419708c1bbcSMiao Xie 420708c1bbcSMiao Xie if (step == MPOL_REBIND_STEP1) 421708c1bbcSMiao Xie pol->flags |= MPOL_F_REBINDING; 422708c1bbcSMiao Xie else if (step == MPOL_REBIND_STEP2) 423708c1bbcSMiao Xie pol->flags &= ~MPOL_F_REBINDING; 424708c1bbcSMiao Xie else if (step >= MPOL_REBIND_NSTEP) 425708c1bbcSMiao Xie BUG(); 426708c1bbcSMiao Xie 427708c1bbcSMiao Xie mpol_ops[pol->mode].rebind(pol, newmask, step); 4281d0d2680SDavid Rientjes } 4291d0d2680SDavid Rientjes 4301d0d2680SDavid Rientjes /* 4311d0d2680SDavid Rientjes * Wrapper for mpol_rebind_policy() that just requires task 4321d0d2680SDavid Rientjes * pointer, and updates task mempolicy. 43358568d2aSMiao Xie * 43458568d2aSMiao Xie * Called with task's alloc_lock held. 4351d0d2680SDavid Rientjes */ 4361d0d2680SDavid Rientjes 437708c1bbcSMiao Xie void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new, 438708c1bbcSMiao Xie enum mpol_rebind_step step) 4391d0d2680SDavid Rientjes { 440708c1bbcSMiao Xie mpol_rebind_policy(tsk->mempolicy, new, step); 4411d0d2680SDavid Rientjes } 4421d0d2680SDavid Rientjes 4431d0d2680SDavid Rientjes /* 4441d0d2680SDavid Rientjes * Rebind each vma in mm to new nodemask. 4451d0d2680SDavid Rientjes * 4461d0d2680SDavid Rientjes * Call holding a reference to mm. Takes mm->mmap_sem during call. 
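 *
 * A sketch of the expected caller (assumed to be the cpuset update path;
 * simplified and not taken from this file):
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *	if (mm) {
 *		mpol_rebind_mm(mm, &newmems);
 *		mmput(mm);
 *	}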
4471d0d2680SDavid Rientjes */ 4481d0d2680SDavid Rientjes 4491d0d2680SDavid Rientjes void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new) 4501d0d2680SDavid Rientjes { 4511d0d2680SDavid Rientjes struct vm_area_struct *vma; 4521d0d2680SDavid Rientjes 4531d0d2680SDavid Rientjes down_write(&mm->mmap_sem); 4541d0d2680SDavid Rientjes for (vma = mm->mmap; vma; vma = vma->vm_next) 455708c1bbcSMiao Xie mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE); 4561d0d2680SDavid Rientjes up_write(&mm->mmap_sem); 4571d0d2680SDavid Rientjes } 4581d0d2680SDavid Rientjes 45937012946SDavid Rientjes static const struct mempolicy_operations mpol_ops[MPOL_MAX] = { 46037012946SDavid Rientjes [MPOL_DEFAULT] = { 46137012946SDavid Rientjes .rebind = mpol_rebind_default, 46237012946SDavid Rientjes }, 46337012946SDavid Rientjes [MPOL_INTERLEAVE] = { 46437012946SDavid Rientjes .create = mpol_new_interleave, 46537012946SDavid Rientjes .rebind = mpol_rebind_nodemask, 46637012946SDavid Rientjes }, 46737012946SDavid Rientjes [MPOL_PREFERRED] = { 46837012946SDavid Rientjes .create = mpol_new_preferred, 46937012946SDavid Rientjes .rebind = mpol_rebind_preferred, 47037012946SDavid Rientjes }, 47137012946SDavid Rientjes [MPOL_BIND] = { 47237012946SDavid Rientjes .create = mpol_new_bind, 47337012946SDavid Rientjes .rebind = mpol_rebind_nodemask, 47437012946SDavid Rientjes }, 47537012946SDavid Rientjes }; 47637012946SDavid Rientjes 477fc301289SChristoph Lameter static void migrate_page_add(struct page *page, struct list_head *pagelist, 478fc301289SChristoph Lameter unsigned long flags); 4791a75a6c8SChristoph Lameter 48098094945SNaoya Horiguchi /* 48198094945SNaoya Horiguchi * Scan through pages checking if pages follow certain conditions, 48298094945SNaoya Horiguchi * and move them to the pagelist if they do. 48398094945SNaoya Horiguchi */ 48498094945SNaoya Horiguchi static int queue_pages_pte_range(struct vm_area_struct *vma, pmd_t *pmd, 485dc9aa5b9SChristoph Lameter unsigned long addr, unsigned long end, 486dc9aa5b9SChristoph Lameter const nodemask_t *nodes, unsigned long flags, 48738e35860SChristoph Lameter void *private) 4881da177e4SLinus Torvalds { 48991612e0dSHugh Dickins pte_t *orig_pte; 49091612e0dSHugh Dickins pte_t *pte; 491705e87c0SHugh Dickins spinlock_t *ptl; 492941150a3SHugh Dickins 493705e87c0SHugh Dickins orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 49491612e0dSHugh Dickins do { 4956aab341eSLinus Torvalds struct page *page; 49625ba77c1SAndy Whitcroft int nid; 49791612e0dSHugh Dickins 49891612e0dSHugh Dickins if (!pte_present(*pte)) 49991612e0dSHugh Dickins continue; 5006aab341eSLinus Torvalds page = vm_normal_page(vma, addr, *pte); 5016aab341eSLinus Torvalds if (!page) 50291612e0dSHugh Dickins continue; 503053837fcSNick Piggin /* 50462b61f61SHugh Dickins * vm_normal_page() filters out zero pages, but there might 50562b61f61SHugh Dickins * still be PageReserved pages to skip, perhaps in a VDSO. 
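 *
 * The node test just below is easy to misread: without MPOL_MF_INVERT a
 * page is skipped unless it already sits on one of the requested nodes,
 * so exactly the pages on @nodes become migration candidates; callers
 * that instead want to collect the pages *off* those nodes pass
 * MPOL_MF_INVERT to flip the test.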
506053837fcSNick Piggin */ 507b79bc0a0SHugh Dickins if (PageReserved(page)) 508f4598c8bSChristoph Lameter continue; 5096aab341eSLinus Torvalds nid = page_to_nid(page); 51038e35860SChristoph Lameter if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT)) 51138e35860SChristoph Lameter continue; 51238e35860SChristoph Lameter 513b1f72d18SStephen Wilson if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) 514fc301289SChristoph Lameter migrate_page_add(page, private, flags); 515dc9aa5b9SChristoph Lameter else 5161da177e4SLinus Torvalds break; 51791612e0dSHugh Dickins } while (pte++, addr += PAGE_SIZE, addr != end); 518705e87c0SHugh Dickins pte_unmap_unlock(orig_pte, ptl); 51991612e0dSHugh Dickins return addr != end; 52091612e0dSHugh Dickins } 52191612e0dSHugh Dickins 52298094945SNaoya Horiguchi static void queue_pages_hugetlb_pmd_range(struct vm_area_struct *vma, 52398094945SNaoya Horiguchi pmd_t *pmd, const nodemask_t *nodes, unsigned long flags, 524e2d8cf40SNaoya Horiguchi void *private) 525e2d8cf40SNaoya Horiguchi { 526e2d8cf40SNaoya Horiguchi #ifdef CONFIG_HUGETLB_PAGE 527e2d8cf40SNaoya Horiguchi int nid; 528e2d8cf40SNaoya Horiguchi struct page *page; 529cb900f41SKirill A. Shutemov spinlock_t *ptl; 530d4c54919SNaoya Horiguchi pte_t entry; 531e2d8cf40SNaoya Horiguchi 532cb900f41SKirill A. Shutemov ptl = huge_pte_lock(hstate_vma(vma), vma->vm_mm, (pte_t *)pmd); 533d4c54919SNaoya Horiguchi entry = huge_ptep_get((pte_t *)pmd); 534d4c54919SNaoya Horiguchi if (!pte_present(entry)) 535d4c54919SNaoya Horiguchi goto unlock; 536d4c54919SNaoya Horiguchi page = pte_page(entry); 537e2d8cf40SNaoya Horiguchi nid = page_to_nid(page); 538e2d8cf40SNaoya Horiguchi if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT)) 539e2d8cf40SNaoya Horiguchi goto unlock; 540e2d8cf40SNaoya Horiguchi /* With MPOL_MF_MOVE, we migrate only unshared hugepage. */ 541e2d8cf40SNaoya Horiguchi if (flags & (MPOL_MF_MOVE_ALL) || 542e2d8cf40SNaoya Horiguchi (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) 543e2d8cf40SNaoya Horiguchi isolate_huge_page(page, private); 544e2d8cf40SNaoya Horiguchi unlock: 545cb900f41SKirill A. Shutemov spin_unlock(ptl); 546e2d8cf40SNaoya Horiguchi #else 547e2d8cf40SNaoya Horiguchi BUG(); 548e2d8cf40SNaoya Horiguchi #endif 549e2d8cf40SNaoya Horiguchi } 550e2d8cf40SNaoya Horiguchi 55198094945SNaoya Horiguchi static inline int queue_pages_pmd_range(struct vm_area_struct *vma, pud_t *pud, 552dc9aa5b9SChristoph Lameter unsigned long addr, unsigned long end, 553dc9aa5b9SChristoph Lameter const nodemask_t *nodes, unsigned long flags, 55438e35860SChristoph Lameter void *private) 55591612e0dSHugh Dickins { 55691612e0dSHugh Dickins pmd_t *pmd; 55791612e0dSHugh Dickins unsigned long next; 55891612e0dSHugh Dickins 55991612e0dSHugh Dickins pmd = pmd_offset(pud, addr); 56091612e0dSHugh Dickins do { 56191612e0dSHugh Dickins next = pmd_addr_end(addr, end); 562e2d8cf40SNaoya Horiguchi if (!pmd_present(*pmd)) 563e2d8cf40SNaoya Horiguchi continue; 564e2d8cf40SNaoya Horiguchi if (pmd_huge(*pmd) && is_vm_hugetlb_page(vma)) { 56598094945SNaoya Horiguchi queue_pages_hugetlb_pmd_range(vma, pmd, nodes, 566e2d8cf40SNaoya Horiguchi flags, private); 567e2d8cf40SNaoya Horiguchi continue; 568e2d8cf40SNaoya Horiguchi } 569e180377fSKirill A. 
Shutemov split_huge_page_pmd(vma, addr, pmd); 5701a5a9906SAndrea Arcangeli if (pmd_none_or_trans_huge_or_clear_bad(pmd)) 57191612e0dSHugh Dickins continue; 57298094945SNaoya Horiguchi if (queue_pages_pte_range(vma, pmd, addr, next, nodes, 57338e35860SChristoph Lameter flags, private)) 57491612e0dSHugh Dickins return -EIO; 57591612e0dSHugh Dickins } while (pmd++, addr = next, addr != end); 57691612e0dSHugh Dickins return 0; 57791612e0dSHugh Dickins } 57891612e0dSHugh Dickins 57998094945SNaoya Horiguchi static inline int queue_pages_pud_range(struct vm_area_struct *vma, pgd_t *pgd, 580dc9aa5b9SChristoph Lameter unsigned long addr, unsigned long end, 581dc9aa5b9SChristoph Lameter const nodemask_t *nodes, unsigned long flags, 58238e35860SChristoph Lameter void *private) 58391612e0dSHugh Dickins { 58491612e0dSHugh Dickins pud_t *pud; 58591612e0dSHugh Dickins unsigned long next; 58691612e0dSHugh Dickins 58791612e0dSHugh Dickins pud = pud_offset(pgd, addr); 58891612e0dSHugh Dickins do { 58991612e0dSHugh Dickins next = pud_addr_end(addr, end); 590e2d8cf40SNaoya Horiguchi if (pud_huge(*pud) && is_vm_hugetlb_page(vma)) 591e2d8cf40SNaoya Horiguchi continue; 59291612e0dSHugh Dickins if (pud_none_or_clear_bad(pud)) 59391612e0dSHugh Dickins continue; 59498094945SNaoya Horiguchi if (queue_pages_pmd_range(vma, pud, addr, next, nodes, 59538e35860SChristoph Lameter flags, private)) 59691612e0dSHugh Dickins return -EIO; 59791612e0dSHugh Dickins } while (pud++, addr = next, addr != end); 59891612e0dSHugh Dickins return 0; 59991612e0dSHugh Dickins } 60091612e0dSHugh Dickins 60198094945SNaoya Horiguchi static inline int queue_pages_pgd_range(struct vm_area_struct *vma, 602dc9aa5b9SChristoph Lameter unsigned long addr, unsigned long end, 603dc9aa5b9SChristoph Lameter const nodemask_t *nodes, unsigned long flags, 60438e35860SChristoph Lameter void *private) 60591612e0dSHugh Dickins { 60691612e0dSHugh Dickins pgd_t *pgd; 60791612e0dSHugh Dickins unsigned long next; 60891612e0dSHugh Dickins 609b5810039SNick Piggin pgd = pgd_offset(vma->vm_mm, addr); 61091612e0dSHugh Dickins do { 61191612e0dSHugh Dickins next = pgd_addr_end(addr, end); 61291612e0dSHugh Dickins if (pgd_none_or_clear_bad(pgd)) 61391612e0dSHugh Dickins continue; 61498094945SNaoya Horiguchi if (queue_pages_pud_range(vma, pgd, addr, next, nodes, 61538e35860SChristoph Lameter flags, private)) 61691612e0dSHugh Dickins return -EIO; 61791612e0dSHugh Dickins } while (pgd++, addr = next, addr != end); 61891612e0dSHugh Dickins return 0; 6191da177e4SLinus Torvalds } 6201da177e4SLinus Torvalds 6215877231fSAneesh Kumar K.V #ifdef CONFIG_NUMA_BALANCING 622b24f53a0SLee Schermerhorn /* 6234b10e7d5SMel Gorman * This is used to mark a range of virtual addresses to be inaccessible. 6244b10e7d5SMel Gorman * These are later cleared by a NUMA hinting fault. Depending on these 6254b10e7d5SMel Gorman * faults, pages may be migrated for better NUMA placement. 6264b10e7d5SMel Gorman * 6274b10e7d5SMel Gorman * This is assuming that NUMA faults are handled using PROT_NONE. If 6284b10e7d5SMel Gorman * an architecture makes a different choice, it will need further 6294b10e7d5SMel Gorman * changes to the core. 
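 *
 * In effect this is a thin wrapper around change_protection(); the call
 * below boils down to (arguments annotated for clarity):
 *
 *	nr = change_protection(vma, addr, end, vma->vm_page_prot,
 *			       0, 1);	// dirty_accountable = 0, prot_numa = 1
 *
 * With prot_numa set, each present pte in the range is made protnone; the
 * next access then takes a NUMA hinting fault, which may migrate the page
 * next to the CPU that touched it.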
630b24f53a0SLee Schermerhorn */ 6314b10e7d5SMel Gorman unsigned long change_prot_numa(struct vm_area_struct *vma, 6324b10e7d5SMel Gorman unsigned long addr, unsigned long end) 633b24f53a0SLee Schermerhorn { 6344b10e7d5SMel Gorman int nr_updated; 635b24f53a0SLee Schermerhorn 6364b10e7d5SMel Gorman nr_updated = change_protection(vma, addr, end, vma->vm_page_prot, 0, 1); 63703c5a6e1SMel Gorman if (nr_updated) 63803c5a6e1SMel Gorman count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated); 639b24f53a0SLee Schermerhorn 6404b10e7d5SMel Gorman return nr_updated; 641b24f53a0SLee Schermerhorn } 642b24f53a0SLee Schermerhorn #else 643b24f53a0SLee Schermerhorn static unsigned long change_prot_numa(struct vm_area_struct *vma, 644b24f53a0SLee Schermerhorn unsigned long addr, unsigned long end) 645b24f53a0SLee Schermerhorn { 646b24f53a0SLee Schermerhorn return 0; 647b24f53a0SLee Schermerhorn } 6485877231fSAneesh Kumar K.V #endif /* CONFIG_NUMA_BALANCING */ 649b24f53a0SLee Schermerhorn 650dc9aa5b9SChristoph Lameter /* 65198094945SNaoya Horiguchi * Walk through page tables and collect pages to be migrated. 65298094945SNaoya Horiguchi * 65398094945SNaoya Horiguchi * If pages found in a given range are on a set of nodes (determined by 65498094945SNaoya Horiguchi * @nodes and @flags,) it's isolated and queued to the pagelist which is 65598094945SNaoya Horiguchi * passed via @private.) 656dc9aa5b9SChristoph Lameter */ 657d05f0cdcSHugh Dickins static int 65898094945SNaoya Horiguchi queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end, 65938e35860SChristoph Lameter const nodemask_t *nodes, unsigned long flags, void *private) 6601da177e4SLinus Torvalds { 661d05f0cdcSHugh Dickins int err = 0; 662d05f0cdcSHugh Dickins struct vm_area_struct *vma, *prev; 6631da177e4SLinus Torvalds 664d05f0cdcSHugh Dickins vma = find_vma(mm, start); 665d05f0cdcSHugh Dickins if (!vma) 666d05f0cdcSHugh Dickins return -EFAULT; 6671da177e4SLinus Torvalds prev = NULL; 668d05f0cdcSHugh Dickins for (; vma && vma->vm_start < end; vma = vma->vm_next) { 6695b952b3cSAndi Kleen unsigned long endvma = vma->vm_end; 670dc9aa5b9SChristoph Lameter 6715b952b3cSAndi Kleen if (endvma > end) 6725b952b3cSAndi Kleen endvma = end; 6735b952b3cSAndi Kleen if (vma->vm_start > start) 6745b952b3cSAndi Kleen start = vma->vm_start; 675b24f53a0SLee Schermerhorn 676b24f53a0SLee Schermerhorn if (!(flags & MPOL_MF_DISCONTIG_OK)) { 677b24f53a0SLee Schermerhorn if (!vma->vm_next && vma->vm_end < end) 678d05f0cdcSHugh Dickins return -EFAULT; 679b24f53a0SLee Schermerhorn if (prev && prev->vm_end < vma->vm_start) 680d05f0cdcSHugh Dickins return -EFAULT; 681b24f53a0SLee Schermerhorn } 682b24f53a0SLee Schermerhorn 683b24f53a0SLee Schermerhorn if (flags & MPOL_MF_LAZY) { 684b24f53a0SLee Schermerhorn change_prot_numa(vma, start, endvma); 685b24f53a0SLee Schermerhorn goto next; 686b24f53a0SLee Schermerhorn } 687b24f53a0SLee Schermerhorn 688b24f53a0SLee Schermerhorn if ((flags & MPOL_MF_STRICT) || 689b24f53a0SLee Schermerhorn ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) && 690b24f53a0SLee Schermerhorn vma_migratable(vma))) { 691b24f53a0SLee Schermerhorn 69298094945SNaoya Horiguchi err = queue_pages_pgd_range(vma, start, endvma, nodes, 69338e35860SChristoph Lameter flags, private); 694d05f0cdcSHugh Dickins if (err) 6951da177e4SLinus Torvalds break; 6961da177e4SLinus Torvalds } 697b24f53a0SLee Schermerhorn next: 6981da177e4SLinus Torvalds prev = vma; 6991da177e4SLinus Torvalds } 700d05f0cdcSHugh Dickins return err; 7011da177e4SLinus Torvalds } 7021da177e4SLinus 
Torvalds 703869833f2SKOSAKI Motohiro /* 704869833f2SKOSAKI Motohiro * Apply policy to a single VMA 705869833f2SKOSAKI Motohiro * This must be called with the mmap_sem held for writing. 706869833f2SKOSAKI Motohiro */ 707869833f2SKOSAKI Motohiro static int vma_replace_policy(struct vm_area_struct *vma, 708869833f2SKOSAKI Motohiro struct mempolicy *pol) 7098d34694cSKOSAKI Motohiro { 710869833f2SKOSAKI Motohiro int err; 711869833f2SKOSAKI Motohiro struct mempolicy *old; 712869833f2SKOSAKI Motohiro struct mempolicy *new; 7138d34694cSKOSAKI Motohiro 7148d34694cSKOSAKI Motohiro pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n", 7158d34694cSKOSAKI Motohiro vma->vm_start, vma->vm_end, vma->vm_pgoff, 7168d34694cSKOSAKI Motohiro vma->vm_ops, vma->vm_file, 7178d34694cSKOSAKI Motohiro vma->vm_ops ? vma->vm_ops->set_policy : NULL); 7188d34694cSKOSAKI Motohiro 719869833f2SKOSAKI Motohiro new = mpol_dup(pol); 720869833f2SKOSAKI Motohiro if (IS_ERR(new)) 721869833f2SKOSAKI Motohiro return PTR_ERR(new); 722869833f2SKOSAKI Motohiro 723869833f2SKOSAKI Motohiro if (vma->vm_ops && vma->vm_ops->set_policy) { 7248d34694cSKOSAKI Motohiro err = vma->vm_ops->set_policy(vma, new); 725869833f2SKOSAKI Motohiro if (err) 726869833f2SKOSAKI Motohiro goto err_out; 7278d34694cSKOSAKI Motohiro } 728869833f2SKOSAKI Motohiro 729869833f2SKOSAKI Motohiro old = vma->vm_policy; 730869833f2SKOSAKI Motohiro vma->vm_policy = new; /* protected by mmap_sem */ 731869833f2SKOSAKI Motohiro mpol_put(old); 732869833f2SKOSAKI Motohiro 733869833f2SKOSAKI Motohiro return 0; 734869833f2SKOSAKI Motohiro err_out: 735869833f2SKOSAKI Motohiro mpol_put(new); 7368d34694cSKOSAKI Motohiro return err; 7378d34694cSKOSAKI Motohiro } 7388d34694cSKOSAKI Motohiro 7391da177e4SLinus Torvalds /* Step 2: apply policy to a range and do splits. 
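 * For a range that covers only part of an existing VMA this means: try
 * vma_merge() with the adjusted range first, otherwise split_vma() at
 * vmstart and/or vmend, and finally install the policy on each resulting
 * VMA through vma_replace_policy().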
*/ 7409d8cebd4SKOSAKI Motohiro static int mbind_range(struct mm_struct *mm, unsigned long start, 7419d8cebd4SKOSAKI Motohiro unsigned long end, struct mempolicy *new_pol) 7421da177e4SLinus Torvalds { 7431da177e4SLinus Torvalds struct vm_area_struct *next; 7449d8cebd4SKOSAKI Motohiro struct vm_area_struct *prev; 7459d8cebd4SKOSAKI Motohiro struct vm_area_struct *vma; 7469d8cebd4SKOSAKI Motohiro int err = 0; 747e26a5114SKOSAKI Motohiro pgoff_t pgoff; 7489d8cebd4SKOSAKI Motohiro unsigned long vmstart; 7499d8cebd4SKOSAKI Motohiro unsigned long vmend; 7501da177e4SLinus Torvalds 751097d5910SLinus Torvalds vma = find_vma(mm, start); 7529d8cebd4SKOSAKI Motohiro if (!vma || vma->vm_start > start) 7539d8cebd4SKOSAKI Motohiro return -EFAULT; 7549d8cebd4SKOSAKI Motohiro 755097d5910SLinus Torvalds prev = vma->vm_prev; 756e26a5114SKOSAKI Motohiro if (start > vma->vm_start) 757e26a5114SKOSAKI Motohiro prev = vma; 758e26a5114SKOSAKI Motohiro 7599d8cebd4SKOSAKI Motohiro for (; vma && vma->vm_start < end; prev = vma, vma = next) { 7601da177e4SLinus Torvalds next = vma->vm_next; 7619d8cebd4SKOSAKI Motohiro vmstart = max(start, vma->vm_start); 7629d8cebd4SKOSAKI Motohiro vmend = min(end, vma->vm_end); 7639d8cebd4SKOSAKI Motohiro 764e26a5114SKOSAKI Motohiro if (mpol_equal(vma_policy(vma), new_pol)) 765e26a5114SKOSAKI Motohiro continue; 766e26a5114SKOSAKI Motohiro 767e26a5114SKOSAKI Motohiro pgoff = vma->vm_pgoff + 768e26a5114SKOSAKI Motohiro ((vmstart - vma->vm_start) >> PAGE_SHIFT); 7699d8cebd4SKOSAKI Motohiro prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags, 770e26a5114SKOSAKI Motohiro vma->anon_vma, vma->vm_file, pgoff, 7718aacc9f5SCaspar Zhang new_pol); 7729d8cebd4SKOSAKI Motohiro if (prev) { 7739d8cebd4SKOSAKI Motohiro vma = prev; 7749d8cebd4SKOSAKI Motohiro next = vma->vm_next; 7753964acd0SOleg Nesterov if (mpol_equal(vma_policy(vma), new_pol)) 7769d8cebd4SKOSAKI Motohiro continue; 7773964acd0SOleg Nesterov /* vma_merge() joined vma && vma->next, case 8 */ 7783964acd0SOleg Nesterov goto replace; 7791da177e4SLinus Torvalds } 7809d8cebd4SKOSAKI Motohiro if (vma->vm_start != vmstart) { 7819d8cebd4SKOSAKI Motohiro err = split_vma(vma->vm_mm, vma, vmstart, 1); 7829d8cebd4SKOSAKI Motohiro if (err) 7839d8cebd4SKOSAKI Motohiro goto out; 7849d8cebd4SKOSAKI Motohiro } 7859d8cebd4SKOSAKI Motohiro if (vma->vm_end != vmend) { 7869d8cebd4SKOSAKI Motohiro err = split_vma(vma->vm_mm, vma, vmend, 0); 7879d8cebd4SKOSAKI Motohiro if (err) 7889d8cebd4SKOSAKI Motohiro goto out; 7899d8cebd4SKOSAKI Motohiro } 7903964acd0SOleg Nesterov replace: 791869833f2SKOSAKI Motohiro err = vma_replace_policy(vma, new_pol); 7929d8cebd4SKOSAKI Motohiro if (err) 7939d8cebd4SKOSAKI Motohiro goto out; 7949d8cebd4SKOSAKI Motohiro } 7959d8cebd4SKOSAKI Motohiro 7969d8cebd4SKOSAKI Motohiro out: 7971da177e4SLinus Torvalds return err; 7981da177e4SLinus Torvalds } 7991da177e4SLinus Torvalds 8001da177e4SLinus Torvalds /* Set the process memory policy */ 801028fec41SDavid Rientjes static long do_set_mempolicy(unsigned short mode, unsigned short flags, 802028fec41SDavid Rientjes nodemask_t *nodes) 8031da177e4SLinus Torvalds { 80458568d2aSMiao Xie struct mempolicy *new, *old; 8054bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 80658568d2aSMiao Xie int ret; 8071da177e4SLinus Torvalds 8084bfc4495SKAMEZAWA Hiroyuki if (!scratch) 8094bfc4495SKAMEZAWA Hiroyuki return -ENOMEM; 810f4e53d91SLee Schermerhorn 8114bfc4495SKAMEZAWA Hiroyuki new = mpol_new(mode, flags, nodes); 8124bfc4495SKAMEZAWA Hiroyuki if (IS_ERR(new)) { 8134bfc4495SKAMEZAWA Hiroyuki 
ret = PTR_ERR(new); 8144bfc4495SKAMEZAWA Hiroyuki goto out; 8154bfc4495SKAMEZAWA Hiroyuki } 816*2c7c3a7dSOleg Nesterov 81758568d2aSMiao Xie task_lock(current); 8184bfc4495SKAMEZAWA Hiroyuki ret = mpol_set_nodemask(new, nodes, scratch); 81958568d2aSMiao Xie if (ret) { 82058568d2aSMiao Xie task_unlock(current); 82158568d2aSMiao Xie mpol_put(new); 8224bfc4495SKAMEZAWA Hiroyuki goto out; 82358568d2aSMiao Xie } 82458568d2aSMiao Xie old = current->mempolicy; 8251da177e4SLinus Torvalds current->mempolicy = new; 82645c4745aSLee Schermerhorn if (new && new->mode == MPOL_INTERLEAVE && 827f5b087b5SDavid Rientjes nodes_weight(new->v.nodes)) 828dfcd3c0dSAndi Kleen current->il_next = first_node(new->v.nodes); 82958568d2aSMiao Xie task_unlock(current); 83058568d2aSMiao Xie mpol_put(old); 8314bfc4495SKAMEZAWA Hiroyuki ret = 0; 8324bfc4495SKAMEZAWA Hiroyuki out: 8334bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 8344bfc4495SKAMEZAWA Hiroyuki return ret; 8351da177e4SLinus Torvalds } 8361da177e4SLinus Torvalds 837bea904d5SLee Schermerhorn /* 838bea904d5SLee Schermerhorn * Return nodemask for policy for get_mempolicy() query 83958568d2aSMiao Xie * 84058568d2aSMiao Xie * Called with task's alloc_lock held 841bea904d5SLee Schermerhorn */ 842bea904d5SLee Schermerhorn static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes) 8431da177e4SLinus Torvalds { 844dfcd3c0dSAndi Kleen nodes_clear(*nodes); 845bea904d5SLee Schermerhorn if (p == &default_policy) 846bea904d5SLee Schermerhorn return; 847bea904d5SLee Schermerhorn 84845c4745aSLee Schermerhorn switch (p->mode) { 84919770b32SMel Gorman case MPOL_BIND: 85019770b32SMel Gorman /* Fall through */ 8511da177e4SLinus Torvalds case MPOL_INTERLEAVE: 852dfcd3c0dSAndi Kleen *nodes = p->v.nodes; 8531da177e4SLinus Torvalds break; 8541da177e4SLinus Torvalds case MPOL_PREFERRED: 855fc36b8d3SLee Schermerhorn if (!(p->flags & MPOL_F_LOCAL)) 856dfcd3c0dSAndi Kleen node_set(p->v.preferred_node, *nodes); 85753f2556bSLee Schermerhorn /* else return empty node mask for local allocation */ 8581da177e4SLinus Torvalds break; 8591da177e4SLinus Torvalds default: 8601da177e4SLinus Torvalds BUG(); 8611da177e4SLinus Torvalds } 8621da177e4SLinus Torvalds } 8631da177e4SLinus Torvalds 8641da177e4SLinus Torvalds static int lookup_node(struct mm_struct *mm, unsigned long addr) 8651da177e4SLinus Torvalds { 8661da177e4SLinus Torvalds struct page *p; 8671da177e4SLinus Torvalds int err; 8681da177e4SLinus Torvalds 8691da177e4SLinus Torvalds err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL); 8701da177e4SLinus Torvalds if (err >= 0) { 8711da177e4SLinus Torvalds err = page_to_nid(p); 8721da177e4SLinus Torvalds put_page(p); 8731da177e4SLinus Torvalds } 8741da177e4SLinus Torvalds return err; 8751da177e4SLinus Torvalds } 8761da177e4SLinus Torvalds 8771da177e4SLinus Torvalds /* Retrieve NUMA policy */ 878dbcb0f19SAdrian Bunk static long do_get_mempolicy(int *policy, nodemask_t *nmask, 8791da177e4SLinus Torvalds unsigned long addr, unsigned long flags) 8801da177e4SLinus Torvalds { 8818bccd85fSChristoph Lameter int err; 8821da177e4SLinus Torvalds struct mm_struct *mm = current->mm; 8831da177e4SLinus Torvalds struct vm_area_struct *vma = NULL; 8841da177e4SLinus Torvalds struct mempolicy *pol = current->mempolicy; 8851da177e4SLinus Torvalds 886754af6f5SLee Schermerhorn if (flags & 887754af6f5SLee Schermerhorn ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED)) 8881da177e4SLinus Torvalds return -EINVAL; 889754af6f5SLee Schermerhorn 890754af6f5SLee 
Schermerhorn if (flags & MPOL_F_MEMS_ALLOWED) { 891754af6f5SLee Schermerhorn if (flags & (MPOL_F_NODE|MPOL_F_ADDR)) 892754af6f5SLee Schermerhorn return -EINVAL; 893754af6f5SLee Schermerhorn *policy = 0; /* just so it's initialized */ 89458568d2aSMiao Xie task_lock(current); 895754af6f5SLee Schermerhorn *nmask = cpuset_current_mems_allowed; 89658568d2aSMiao Xie task_unlock(current); 897754af6f5SLee Schermerhorn return 0; 898754af6f5SLee Schermerhorn } 899754af6f5SLee Schermerhorn 9001da177e4SLinus Torvalds if (flags & MPOL_F_ADDR) { 901bea904d5SLee Schermerhorn /* 902bea904d5SLee Schermerhorn * Do NOT fall back to task policy if the 903bea904d5SLee Schermerhorn * vma/shared policy at addr is NULL. We 904bea904d5SLee Schermerhorn * want to return MPOL_DEFAULT in this case. 905bea904d5SLee Schermerhorn */ 9061da177e4SLinus Torvalds down_read(&mm->mmap_sem); 9071da177e4SLinus Torvalds vma = find_vma_intersection(mm, addr, addr+1); 9081da177e4SLinus Torvalds if (!vma) { 9091da177e4SLinus Torvalds up_read(&mm->mmap_sem); 9101da177e4SLinus Torvalds return -EFAULT; 9111da177e4SLinus Torvalds } 9121da177e4SLinus Torvalds if (vma->vm_ops && vma->vm_ops->get_policy) 9131da177e4SLinus Torvalds pol = vma->vm_ops->get_policy(vma, addr); 9141da177e4SLinus Torvalds else 9151da177e4SLinus Torvalds pol = vma->vm_policy; 9161da177e4SLinus Torvalds } else if (addr) 9171da177e4SLinus Torvalds return -EINVAL; 9181da177e4SLinus Torvalds 9191da177e4SLinus Torvalds if (!pol) 920bea904d5SLee Schermerhorn pol = &default_policy; /* indicates default behavior */ 9211da177e4SLinus Torvalds 9221da177e4SLinus Torvalds if (flags & MPOL_F_NODE) { 9231da177e4SLinus Torvalds if (flags & MPOL_F_ADDR) { 9241da177e4SLinus Torvalds err = lookup_node(mm, addr); 9251da177e4SLinus Torvalds if (err < 0) 9261da177e4SLinus Torvalds goto out; 9278bccd85fSChristoph Lameter *policy = err; 9281da177e4SLinus Torvalds } else if (pol == current->mempolicy && 92945c4745aSLee Schermerhorn pol->mode == MPOL_INTERLEAVE) { 9308bccd85fSChristoph Lameter *policy = current->il_next; 9311da177e4SLinus Torvalds } else { 9321da177e4SLinus Torvalds err = -EINVAL; 9331da177e4SLinus Torvalds goto out; 9341da177e4SLinus Torvalds } 935bea904d5SLee Schermerhorn } else { 936bea904d5SLee Schermerhorn *policy = pol == &default_policy ? MPOL_DEFAULT : 937bea904d5SLee Schermerhorn pol->mode; 938d79df630SDavid Rientjes /* 939d79df630SDavid Rientjes * Internal mempolicy flags must be masked off before exposing 940d79df630SDavid Rientjes * the policy to userspace. 
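 *
 * For example (hypothetical values): a policy installed as
 * MPOL_INTERLEAVE | MPOL_F_STATIC_NODES is reported back with that mode
 * flag intact, while internal bits such as MPOL_F_LOCAL must never leak
 * into *policy.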
941d79df630SDavid Rientjes */ 942d79df630SDavid Rientjes *policy |= (pol->flags & MPOL_MODE_FLAGS); 943bea904d5SLee Schermerhorn } 9441da177e4SLinus Torvalds 9451da177e4SLinus Torvalds if (vma) { 9461da177e4SLinus Torvalds up_read(¤t->mm->mmap_sem); 9471da177e4SLinus Torvalds vma = NULL; 9481da177e4SLinus Torvalds } 9491da177e4SLinus Torvalds 9501da177e4SLinus Torvalds err = 0; 95158568d2aSMiao Xie if (nmask) { 952c6b6ef8bSLee Schermerhorn if (mpol_store_user_nodemask(pol)) { 953c6b6ef8bSLee Schermerhorn *nmask = pol->w.user_nodemask; 954c6b6ef8bSLee Schermerhorn } else { 95558568d2aSMiao Xie task_lock(current); 956bea904d5SLee Schermerhorn get_policy_nodemask(pol, nmask); 95758568d2aSMiao Xie task_unlock(current); 95858568d2aSMiao Xie } 959c6b6ef8bSLee Schermerhorn } 9601da177e4SLinus Torvalds 9611da177e4SLinus Torvalds out: 96252cd3b07SLee Schermerhorn mpol_cond_put(pol); 9631da177e4SLinus Torvalds if (vma) 9641da177e4SLinus Torvalds up_read(¤t->mm->mmap_sem); 9651da177e4SLinus Torvalds return err; 9661da177e4SLinus Torvalds } 9671da177e4SLinus Torvalds 968b20a3503SChristoph Lameter #ifdef CONFIG_MIGRATION 9698bccd85fSChristoph Lameter /* 9706ce3c4c0SChristoph Lameter * page migration 9716ce3c4c0SChristoph Lameter */ 972fc301289SChristoph Lameter static void migrate_page_add(struct page *page, struct list_head *pagelist, 973fc301289SChristoph Lameter unsigned long flags) 9746ce3c4c0SChristoph Lameter { 9756ce3c4c0SChristoph Lameter /* 976fc301289SChristoph Lameter * Avoid migrating a page that is shared with others. 9776ce3c4c0SChristoph Lameter */ 97862695a84SNick Piggin if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) { 97962695a84SNick Piggin if (!isolate_lru_page(page)) { 98062695a84SNick Piggin list_add_tail(&page->lru, pagelist); 9816d9c285aSKOSAKI Motohiro inc_zone_page_state(page, NR_ISOLATED_ANON + 9826d9c285aSKOSAKI Motohiro page_is_file_cache(page)); 98362695a84SNick Piggin } 98462695a84SNick Piggin } 9856ce3c4c0SChristoph Lameter } 9866ce3c4c0SChristoph Lameter 987742755a1SChristoph Lameter static struct page *new_node_page(struct page *page, unsigned long node, int **x) 98895a402c3SChristoph Lameter { 989e2d8cf40SNaoya Horiguchi if (PageHuge(page)) 990e2d8cf40SNaoya Horiguchi return alloc_huge_page_node(page_hstate(compound_head(page)), 991e2d8cf40SNaoya Horiguchi node); 992e2d8cf40SNaoya Horiguchi else 9936484eb3eSMel Gorman return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0); 99495a402c3SChristoph Lameter } 99595a402c3SChristoph Lameter 9966ce3c4c0SChristoph Lameter /* 9977e2ab150SChristoph Lameter * Migrate pages from one node to a target node. 9987e2ab150SChristoph Lameter * Returns error or the number of pages not migrated. 9997e2ab150SChristoph Lameter */ 1000dbcb0f19SAdrian Bunk static int migrate_to_node(struct mm_struct *mm, int source, int dest, 1001dbcb0f19SAdrian Bunk int flags) 10027e2ab150SChristoph Lameter { 10037e2ab150SChristoph Lameter nodemask_t nmask; 10047e2ab150SChristoph Lameter LIST_HEAD(pagelist); 10057e2ab150SChristoph Lameter int err = 0; 10067e2ab150SChristoph Lameter 10077e2ab150SChristoph Lameter nodes_clear(nmask); 10087e2ab150SChristoph Lameter node_set(source, nmask); 10097e2ab150SChristoph Lameter 101008270807SMinchan Kim /* 101108270807SMinchan Kim * This does not "check" the range but isolates all pages that 101208270807SMinchan Kim * need migration. Between passing in the full user address 101308270807SMinchan Kim * space range and MPOL_MF_DISCONTIG_OK, this call can not fail. 
101408270807SMinchan Kim */ 101508270807SMinchan Kim VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))); 101698094945SNaoya Horiguchi queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask, 10177e2ab150SChristoph Lameter flags | MPOL_MF_DISCONTIG_OK, &pagelist); 10187e2ab150SChristoph Lameter 1019cf608ac1SMinchan Kim if (!list_empty(&pagelist)) { 102068711a74SDavid Rientjes err = migrate_pages(&pagelist, new_node_page, NULL, dest, 10219c620e2bSHugh Dickins MIGRATE_SYNC, MR_SYSCALL); 1022cf608ac1SMinchan Kim if (err) 1023e2d8cf40SNaoya Horiguchi putback_movable_pages(&pagelist); 1024cf608ac1SMinchan Kim } 102595a402c3SChristoph Lameter 10267e2ab150SChristoph Lameter return err; 10277e2ab150SChristoph Lameter } 10287e2ab150SChristoph Lameter 10297e2ab150SChristoph Lameter /* 10307e2ab150SChristoph Lameter * Move pages between the two nodesets so as to preserve the physical 10317e2ab150SChristoph Lameter * layout as much as possible. 103239743889SChristoph Lameter * 103339743889SChristoph Lameter * Returns the number of page that could not be moved. 103439743889SChristoph Lameter */ 10350ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, 10360ce72d4fSAndrew Morton const nodemask_t *to, int flags) 103739743889SChristoph Lameter { 10387e2ab150SChristoph Lameter int busy = 0; 10390aedadf9SChristoph Lameter int err; 10407e2ab150SChristoph Lameter nodemask_t tmp; 104139743889SChristoph Lameter 10420aedadf9SChristoph Lameter err = migrate_prep(); 10430aedadf9SChristoph Lameter if (err) 10440aedadf9SChristoph Lameter return err; 10450aedadf9SChristoph Lameter 104639743889SChristoph Lameter down_read(&mm->mmap_sem); 1047d4984711SChristoph Lameter 10480ce72d4fSAndrew Morton err = migrate_vmas(mm, from, to, flags); 10497b2259b3SChristoph Lameter if (err) 10507b2259b3SChristoph Lameter goto out; 10517b2259b3SChristoph Lameter 10527e2ab150SChristoph Lameter /* 10537e2ab150SChristoph Lameter * Find a 'source' bit set in 'tmp' whose corresponding 'dest' 10547e2ab150SChristoph Lameter * bit in 'to' is not also set in 'tmp'. Clear the found 'source' 10557e2ab150SChristoph Lameter * bit in 'tmp', and return that <source, dest> pair for migration. 10567e2ab150SChristoph Lameter * The pair of nodemasks 'to' and 'from' define the map. 10577e2ab150SChristoph Lameter * 10587e2ab150SChristoph Lameter * If no pair of bits is found that way, fallback to picking some 10597e2ab150SChristoph Lameter * pair of 'source' and 'dest' bits that are not the same. If the 10607e2ab150SChristoph Lameter * 'source' and 'dest' bits are the same, this represents a node 10617e2ab150SChristoph Lameter * that will be migrating to itself, so no pages need move. 10627e2ab150SChristoph Lameter * 10637e2ab150SChristoph Lameter * If no bits are left in 'tmp', or if all remaining bits left 10647e2ab150SChristoph Lameter * in 'tmp' correspond to the same bit in 'to', return false 10657e2ab150SChristoph Lameter * (nothing left to migrate). 10667e2ab150SChristoph Lameter * 10677e2ab150SChristoph Lameter * This lets us pick a pair of nodes to migrate between, such that 10687e2ab150SChristoph Lameter * if possible the dest node is not already occupied by some other 10697e2ab150SChristoph Lameter * source node, minimizing the risk of overloading the memory on a 10707e2ab150SChristoph Lameter * node that would happen if we migrated incoming memory to a node 10717e2ab150SChristoph Lameter * before migrating outgoing memory source that same node. 
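 *
 * A small worked case (hypothetical masks): from = {0,1}, to = {1,2}.
 * node_remap() maps 0 -> 1 and 1 -> 2.  The scan prefers the pair <1,2>
 * because destination 2 is not also a remaining source, so node 1 is
 * drained into node 2 first, and only then is node 0 moved into the
 * now-vacated node 1.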
10727e2ab150SChristoph Lameter * 10737e2ab150SChristoph Lameter * A single scan of tmp is sufficient. As we go, we remember the 10747e2ab150SChristoph Lameter * most recent <s, d> pair that moved (s != d). If we find a pair 10757e2ab150SChristoph Lameter * that not only moved, but what's better, moved to an empty slot 10767e2ab150SChristoph Lameter * (d is not set in tmp), then we break out then, with that pair. 1077ae0e47f0SJustin P. Mattock * Otherwise when we finish scanning from_tmp, we at least have the 10787e2ab150SChristoph Lameter * most recent <s, d> pair that moved. If we get all the way through 10797e2ab150SChristoph Lameter * the scan of tmp without finding any node that moved, much less 10807e2ab150SChristoph Lameter * moved to an empty node, then there is nothing left worth migrating. 10817e2ab150SChristoph Lameter */ 10827e2ab150SChristoph Lameter 10830ce72d4fSAndrew Morton tmp = *from; 10847e2ab150SChristoph Lameter while (!nodes_empty(tmp)) { 10857e2ab150SChristoph Lameter int s,d; 1086b76ac7e7SJianguo Wu int source = NUMA_NO_NODE; 10877e2ab150SChristoph Lameter int dest = 0; 10887e2ab150SChristoph Lameter 10897e2ab150SChristoph Lameter for_each_node_mask(s, tmp) { 10904a5b18ccSLarry Woodman 10914a5b18ccSLarry Woodman /* 10924a5b18ccSLarry Woodman * do_migrate_pages() tries to maintain the relative 10934a5b18ccSLarry Woodman * node relationship of the pages established between 10944a5b18ccSLarry Woodman * threads and memory areas. 10954a5b18ccSLarry Woodman * 10964a5b18ccSLarry Woodman * However if the number of source nodes is not equal to 10974a5b18ccSLarry Woodman * the number of destination nodes we can not preserve 10984a5b18ccSLarry Woodman * this node relative relationship. In that case, skip 10994a5b18ccSLarry Woodman * copying memory from a node that is in the destination 11004a5b18ccSLarry Woodman * mask. 11014a5b18ccSLarry Woodman * 11024a5b18ccSLarry Woodman * Example: [2,3,4] -> [3,4,5] moves everything. 11034a5b18ccSLarry Woodman * [0-7] - > [3,4,5] moves only 0,1,2,6,7. 11044a5b18ccSLarry Woodman */ 11054a5b18ccSLarry Woodman 11060ce72d4fSAndrew Morton if ((nodes_weight(*from) != nodes_weight(*to)) && 11070ce72d4fSAndrew Morton (node_isset(s, *to))) 11084a5b18ccSLarry Woodman continue; 11094a5b18ccSLarry Woodman 11100ce72d4fSAndrew Morton d = node_remap(s, *from, *to); 11117e2ab150SChristoph Lameter if (s == d) 11127e2ab150SChristoph Lameter continue; 11137e2ab150SChristoph Lameter 11147e2ab150SChristoph Lameter source = s; /* Node moved. Memorize */ 11157e2ab150SChristoph Lameter dest = d; 11167e2ab150SChristoph Lameter 11177e2ab150SChristoph Lameter /* dest not in remaining from nodes? 
*/ 11187e2ab150SChristoph Lameter if (!node_isset(dest, tmp)) 11197e2ab150SChristoph Lameter break; 11207e2ab150SChristoph Lameter } 1121b76ac7e7SJianguo Wu if (source == NUMA_NO_NODE) 11227e2ab150SChristoph Lameter break; 11237e2ab150SChristoph Lameter 11247e2ab150SChristoph Lameter node_clear(source, tmp); 11257e2ab150SChristoph Lameter err = migrate_to_node(mm, source, dest, flags); 11267e2ab150SChristoph Lameter if (err > 0) 11277e2ab150SChristoph Lameter busy += err; 11287e2ab150SChristoph Lameter if (err < 0) 11297e2ab150SChristoph Lameter break; 113039743889SChristoph Lameter } 11317b2259b3SChristoph Lameter out: 113239743889SChristoph Lameter up_read(&mm->mmap_sem); 11337e2ab150SChristoph Lameter if (err < 0) 11347e2ab150SChristoph Lameter return err; 11357e2ab150SChristoph Lameter return busy; 1136b20a3503SChristoph Lameter 113739743889SChristoph Lameter } 113839743889SChristoph Lameter 11393ad33b24SLee Schermerhorn /* 11403ad33b24SLee Schermerhorn * Allocate a new page for page migration based on vma policy. 1141d05f0cdcSHugh Dickins * Start by assuming the page is mapped by the same vma as contains @start. 11423ad33b24SLee Schermerhorn * Search forward from there, if not. N.B., this assumes that the 11433ad33b24SLee Schermerhorn * list of pages handed to migrate_pages()--which is how we get here-- 11443ad33b24SLee Schermerhorn * is in virtual address order. 11453ad33b24SLee Schermerhorn */ 1146d05f0cdcSHugh Dickins static struct page *new_page(struct page *page, unsigned long start, int **x) 114795a402c3SChristoph Lameter { 1148d05f0cdcSHugh Dickins struct vm_area_struct *vma; 11493ad33b24SLee Schermerhorn unsigned long uninitialized_var(address); 115095a402c3SChristoph Lameter 1151d05f0cdcSHugh Dickins vma = find_vma(current->mm, start); 11523ad33b24SLee Schermerhorn while (vma) { 11533ad33b24SLee Schermerhorn address = page_address_in_vma(page, vma); 11543ad33b24SLee Schermerhorn if (address != -EFAULT) 11553ad33b24SLee Schermerhorn break; 11563ad33b24SLee Schermerhorn vma = vma->vm_next; 11573ad33b24SLee Schermerhorn } 11583ad33b24SLee Schermerhorn 115911c731e8SWanpeng Li if (PageHuge(page)) { 1160cc81717eSMichal Hocko BUG_ON(!vma); 116174060e4dSNaoya Horiguchi return alloc_huge_page_noerr(vma, address, 1); 116211c731e8SWanpeng Li } 116311c731e8SWanpeng Li /* 116411c731e8SWanpeng Li * if !vma, alloc_page_vma() will use task or system default policy 116511c731e8SWanpeng Li */ 11663ad33b24SLee Schermerhorn return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); 116795a402c3SChristoph Lameter } 1168b20a3503SChristoph Lameter #else 1169b20a3503SChristoph Lameter 1170b20a3503SChristoph Lameter static void migrate_page_add(struct page *page, struct list_head *pagelist, 1171b20a3503SChristoph Lameter unsigned long flags) 1172b20a3503SChristoph Lameter { 1173b20a3503SChristoph Lameter } 1174b20a3503SChristoph Lameter 11750ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, 11760ce72d4fSAndrew Morton const nodemask_t *to, int flags) 1177b20a3503SChristoph Lameter { 1178b20a3503SChristoph Lameter return -ENOSYS; 1179b20a3503SChristoph Lameter } 118095a402c3SChristoph Lameter 1181d05f0cdcSHugh Dickins static struct page *new_page(struct page *page, unsigned long start, int **x) 118295a402c3SChristoph Lameter { 118395a402c3SChristoph Lameter return NULL; 118495a402c3SChristoph Lameter } 1185b20a3503SChristoph Lameter #endif 1186b20a3503SChristoph Lameter 1187dbcb0f19SAdrian Bunk static long do_mbind(unsigned long start, unsigned long len, 
1188028fec41SDavid Rientjes unsigned short mode, unsigned short mode_flags, 1189028fec41SDavid Rientjes nodemask_t *nmask, unsigned long flags) 11906ce3c4c0SChristoph Lameter { 11916ce3c4c0SChristoph Lameter struct mm_struct *mm = current->mm; 11926ce3c4c0SChristoph Lameter struct mempolicy *new; 11936ce3c4c0SChristoph Lameter unsigned long end; 11946ce3c4c0SChristoph Lameter int err; 11956ce3c4c0SChristoph Lameter LIST_HEAD(pagelist); 11966ce3c4c0SChristoph Lameter 1197b24f53a0SLee Schermerhorn if (flags & ~(unsigned long)MPOL_MF_VALID) 11986ce3c4c0SChristoph Lameter return -EINVAL; 119974c00241SChristoph Lameter if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE)) 12006ce3c4c0SChristoph Lameter return -EPERM; 12016ce3c4c0SChristoph Lameter 12026ce3c4c0SChristoph Lameter if (start & ~PAGE_MASK) 12036ce3c4c0SChristoph Lameter return -EINVAL; 12046ce3c4c0SChristoph Lameter 12056ce3c4c0SChristoph Lameter if (mode == MPOL_DEFAULT) 12066ce3c4c0SChristoph Lameter flags &= ~MPOL_MF_STRICT; 12076ce3c4c0SChristoph Lameter 12086ce3c4c0SChristoph Lameter len = (len + PAGE_SIZE - 1) & PAGE_MASK; 12096ce3c4c0SChristoph Lameter end = start + len; 12106ce3c4c0SChristoph Lameter 12116ce3c4c0SChristoph Lameter if (end < start) 12126ce3c4c0SChristoph Lameter return -EINVAL; 12136ce3c4c0SChristoph Lameter if (end == start) 12146ce3c4c0SChristoph Lameter return 0; 12156ce3c4c0SChristoph Lameter 1216028fec41SDavid Rientjes new = mpol_new(mode, mode_flags, nmask); 12176ce3c4c0SChristoph Lameter if (IS_ERR(new)) 12186ce3c4c0SChristoph Lameter return PTR_ERR(new); 12196ce3c4c0SChristoph Lameter 1220b24f53a0SLee Schermerhorn if (flags & MPOL_MF_LAZY) 1221b24f53a0SLee Schermerhorn new->flags |= MPOL_F_MOF; 1222b24f53a0SLee Schermerhorn 12236ce3c4c0SChristoph Lameter /* 12246ce3c4c0SChristoph Lameter * If we are using the default policy then operation 12256ce3c4c0SChristoph Lameter * on discontinuous address spaces is okay after all 12266ce3c4c0SChristoph Lameter */ 12276ce3c4c0SChristoph Lameter if (!new) 12286ce3c4c0SChristoph Lameter flags |= MPOL_MF_DISCONTIG_OK; 12296ce3c4c0SChristoph Lameter 1230028fec41SDavid Rientjes pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n", 1231028fec41SDavid Rientjes start, start + len, mode, mode_flags, 123200ef2d2fSDavid Rientjes nmask ? 
nodes_addr(*nmask)[0] : NUMA_NO_NODE); 12336ce3c4c0SChristoph Lameter 12340aedadf9SChristoph Lameter if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 12350aedadf9SChristoph Lameter 12360aedadf9SChristoph Lameter err = migrate_prep(); 12370aedadf9SChristoph Lameter if (err) 1238b05ca738SKOSAKI Motohiro goto mpol_out; 12390aedadf9SChristoph Lameter } 12404bfc4495SKAMEZAWA Hiroyuki { 12414bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 12424bfc4495SKAMEZAWA Hiroyuki if (scratch) { 12436ce3c4c0SChristoph Lameter down_write(&mm->mmap_sem); 124458568d2aSMiao Xie task_lock(current); 12454bfc4495SKAMEZAWA Hiroyuki err = mpol_set_nodemask(new, nmask, scratch); 124658568d2aSMiao Xie task_unlock(current); 12474bfc4495SKAMEZAWA Hiroyuki if (err) 124858568d2aSMiao Xie up_write(&mm->mmap_sem); 12494bfc4495SKAMEZAWA Hiroyuki } else 12504bfc4495SKAMEZAWA Hiroyuki err = -ENOMEM; 12514bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 12524bfc4495SKAMEZAWA Hiroyuki } 1253b05ca738SKOSAKI Motohiro if (err) 1254b05ca738SKOSAKI Motohiro goto mpol_out; 1255b05ca738SKOSAKI Motohiro 1256d05f0cdcSHugh Dickins err = queue_pages_range(mm, start, end, nmask, 12576ce3c4c0SChristoph Lameter flags | MPOL_MF_INVERT, &pagelist); 1258d05f0cdcSHugh Dickins if (!err) 12599d8cebd4SKOSAKI Motohiro err = mbind_range(mm, start, end, new); 12607e2ab150SChristoph Lameter 1261b24f53a0SLee Schermerhorn if (!err) { 1262b24f53a0SLee Schermerhorn int nr_failed = 0; 1263b24f53a0SLee Schermerhorn 1264cf608ac1SMinchan Kim if (!list_empty(&pagelist)) { 1265b24f53a0SLee Schermerhorn WARN_ON_ONCE(flags & MPOL_MF_LAZY); 1266d05f0cdcSHugh Dickins nr_failed = migrate_pages(&pagelist, new_page, NULL, 1267d05f0cdcSHugh Dickins start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND); 1268cf608ac1SMinchan Kim if (nr_failed) 126974060e4dSNaoya Horiguchi putback_movable_pages(&pagelist); 1270cf608ac1SMinchan Kim } 12716ce3c4c0SChristoph Lameter 1272b24f53a0SLee Schermerhorn if (nr_failed && (flags & MPOL_MF_STRICT)) 12736ce3c4c0SChristoph Lameter err = -EIO; 1274ab8a3e14SKOSAKI Motohiro } else 1275b0e5fd73SJoonsoo Kim putback_movable_pages(&pagelist); 1276b20a3503SChristoph Lameter 12776ce3c4c0SChristoph Lameter up_write(&mm->mmap_sem); 1278b05ca738SKOSAKI Motohiro mpol_out: 1279f0be3d32SLee Schermerhorn mpol_put(new); 12806ce3c4c0SChristoph Lameter return err; 12816ce3c4c0SChristoph Lameter } 12826ce3c4c0SChristoph Lameter 128339743889SChristoph Lameter /* 12848bccd85fSChristoph Lameter * User space interface with variable sized bitmaps for nodelists. 12858bccd85fSChristoph Lameter */ 12868bccd85fSChristoph Lameter 12878bccd85fSChristoph Lameter /* Copy a node mask from user space. 
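 *
 * A worked example (editorial illustration, assuming a 64-bit kernel):
 * a caller passing maxnode = 65 is left, after the --maxnode below, with
 * 64 significant bits, so nlongs = 1 and endmask = ~0UL, and a single
 * unsigned long covering nodes 0-63 is copied in.  Bits the kernel cannot
 * represent (beyond MAX_NUMNODES) are only verified to be zero.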
*/ 128839743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask, 12898bccd85fSChristoph Lameter unsigned long maxnode) 12908bccd85fSChristoph Lameter { 12918bccd85fSChristoph Lameter unsigned long k; 12928bccd85fSChristoph Lameter unsigned long nlongs; 12938bccd85fSChristoph Lameter unsigned long endmask; 12948bccd85fSChristoph Lameter 12958bccd85fSChristoph Lameter --maxnode; 12968bccd85fSChristoph Lameter nodes_clear(*nodes); 12978bccd85fSChristoph Lameter if (maxnode == 0 || !nmask) 12988bccd85fSChristoph Lameter return 0; 1299a9c930baSAndi Kleen if (maxnode > PAGE_SIZE*BITS_PER_BYTE) 1300636f13c1SChris Wright return -EINVAL; 13018bccd85fSChristoph Lameter 13028bccd85fSChristoph Lameter nlongs = BITS_TO_LONGS(maxnode); 13038bccd85fSChristoph Lameter if ((maxnode % BITS_PER_LONG) == 0) 13048bccd85fSChristoph Lameter endmask = ~0UL; 13058bccd85fSChristoph Lameter else 13068bccd85fSChristoph Lameter endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1; 13078bccd85fSChristoph Lameter 13088bccd85fSChristoph Lameter /* When the user specified more nodes than supported just check 13098bccd85fSChristoph Lameter if the non supported part is all zero. */ 13108bccd85fSChristoph Lameter if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) { 13118bccd85fSChristoph Lameter if (nlongs > PAGE_SIZE/sizeof(long)) 13128bccd85fSChristoph Lameter return -EINVAL; 13138bccd85fSChristoph Lameter for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) { 13148bccd85fSChristoph Lameter unsigned long t; 13158bccd85fSChristoph Lameter if (get_user(t, nmask + k)) 13168bccd85fSChristoph Lameter return -EFAULT; 13178bccd85fSChristoph Lameter if (k == nlongs - 1) { 13188bccd85fSChristoph Lameter if (t & endmask) 13198bccd85fSChristoph Lameter return -EINVAL; 13208bccd85fSChristoph Lameter } else if (t) 13218bccd85fSChristoph Lameter return -EINVAL; 13228bccd85fSChristoph Lameter } 13238bccd85fSChristoph Lameter nlongs = BITS_TO_LONGS(MAX_NUMNODES); 13248bccd85fSChristoph Lameter endmask = ~0UL; 13258bccd85fSChristoph Lameter } 13268bccd85fSChristoph Lameter 13278bccd85fSChristoph Lameter if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long))) 13288bccd85fSChristoph Lameter return -EFAULT; 13298bccd85fSChristoph Lameter nodes_addr(*nodes)[nlongs-1] &= endmask; 13308bccd85fSChristoph Lameter return 0; 13318bccd85fSChristoph Lameter } 13328bccd85fSChristoph Lameter 13338bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */ 13348bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode, 13358bccd85fSChristoph Lameter nodemask_t *nodes) 13368bccd85fSChristoph Lameter { 13378bccd85fSChristoph Lameter unsigned long copy = ALIGN(maxnode-1, 64) / 8; 13388bccd85fSChristoph Lameter const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long); 13398bccd85fSChristoph Lameter 13408bccd85fSChristoph Lameter if (copy > nbytes) { 13418bccd85fSChristoph Lameter if (copy > PAGE_SIZE) 13428bccd85fSChristoph Lameter return -EINVAL; 13438bccd85fSChristoph Lameter if (clear_user((char __user *)mask + nbytes, copy - nbytes)) 13448bccd85fSChristoph Lameter return -EFAULT; 13458bccd85fSChristoph Lameter copy = nbytes; 13468bccd85fSChristoph Lameter } 13478bccd85fSChristoph Lameter return copy_to_user(mask, nodes_addr(*nodes), copy) ? 
-EFAULT : 0; 13488bccd85fSChristoph Lameter } 13498bccd85fSChristoph Lameter 1350938bb9f5SHeiko Carstens SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len, 1351f7f28ca9SRasmus Villemoes unsigned long, mode, const unsigned long __user *, nmask, 1352938bb9f5SHeiko Carstens unsigned long, maxnode, unsigned, flags) 13538bccd85fSChristoph Lameter { 13548bccd85fSChristoph Lameter nodemask_t nodes; 13558bccd85fSChristoph Lameter int err; 1356028fec41SDavid Rientjes unsigned short mode_flags; 13578bccd85fSChristoph Lameter 1358028fec41SDavid Rientjes mode_flags = mode & MPOL_MODE_FLAGS; 1359028fec41SDavid Rientjes mode &= ~MPOL_MODE_FLAGS; 1360a3b51e01SDavid Rientjes if (mode >= MPOL_MAX) 1361a3b51e01SDavid Rientjes return -EINVAL; 13624c50bc01SDavid Rientjes if ((mode_flags & MPOL_F_STATIC_NODES) && 13634c50bc01SDavid Rientjes (mode_flags & MPOL_F_RELATIVE_NODES)) 13644c50bc01SDavid Rientjes return -EINVAL; 13658bccd85fSChristoph Lameter err = get_nodes(&nodes, nmask, maxnode); 13668bccd85fSChristoph Lameter if (err) 13678bccd85fSChristoph Lameter return err; 1368028fec41SDavid Rientjes return do_mbind(start, len, mode, mode_flags, &nodes, flags); 13698bccd85fSChristoph Lameter } 13708bccd85fSChristoph Lameter 13718bccd85fSChristoph Lameter /* Set the process memory policy */ 137223c8902dSRasmus Villemoes SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask, 1373938bb9f5SHeiko Carstens unsigned long, maxnode) 13748bccd85fSChristoph Lameter { 13758bccd85fSChristoph Lameter int err; 13768bccd85fSChristoph Lameter nodemask_t nodes; 1377028fec41SDavid Rientjes unsigned short flags; 13788bccd85fSChristoph Lameter 1379028fec41SDavid Rientjes flags = mode & MPOL_MODE_FLAGS; 1380028fec41SDavid Rientjes mode &= ~MPOL_MODE_FLAGS; 1381028fec41SDavid Rientjes if ((unsigned int)mode >= MPOL_MAX) 13828bccd85fSChristoph Lameter return -EINVAL; 13834c50bc01SDavid Rientjes if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES)) 13844c50bc01SDavid Rientjes return -EINVAL; 13858bccd85fSChristoph Lameter err = get_nodes(&nodes, nmask, maxnode); 13868bccd85fSChristoph Lameter if (err) 13878bccd85fSChristoph Lameter return err; 1388028fec41SDavid Rientjes return do_set_mempolicy(mode, flags, &nodes); 13898bccd85fSChristoph Lameter } 13908bccd85fSChristoph Lameter 1391938bb9f5SHeiko Carstens SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode, 1392938bb9f5SHeiko Carstens const unsigned long __user *, old_nodes, 1393938bb9f5SHeiko Carstens const unsigned long __user *, new_nodes) 139439743889SChristoph Lameter { 1395c69e8d9cSDavid Howells const struct cred *cred = current_cred(), *tcred; 1396596d7cfaSKOSAKI Motohiro struct mm_struct *mm = NULL; 139739743889SChristoph Lameter struct task_struct *task; 139839743889SChristoph Lameter nodemask_t task_nodes; 139939743889SChristoph Lameter int err; 1400596d7cfaSKOSAKI Motohiro nodemask_t *old; 1401596d7cfaSKOSAKI Motohiro nodemask_t *new; 1402596d7cfaSKOSAKI Motohiro NODEMASK_SCRATCH(scratch); 140339743889SChristoph Lameter 1404596d7cfaSKOSAKI Motohiro if (!scratch) 1405596d7cfaSKOSAKI Motohiro return -ENOMEM; 140639743889SChristoph Lameter 1407596d7cfaSKOSAKI Motohiro old = &scratch->mask1; 1408596d7cfaSKOSAKI Motohiro new = &scratch->mask2; 1409596d7cfaSKOSAKI Motohiro 1410596d7cfaSKOSAKI Motohiro err = get_nodes(old, old_nodes, maxnode); 141139743889SChristoph Lameter if (err) 1412596d7cfaSKOSAKI Motohiro goto out; 1413596d7cfaSKOSAKI Motohiro 1414596d7cfaSKOSAKI Motohiro err = get_nodes(new, new_nodes, 
maxnode); 1415596d7cfaSKOSAKI Motohiro if (err) 1416596d7cfaSKOSAKI Motohiro goto out; 141739743889SChristoph Lameter 141839743889SChristoph Lameter /* Find the mm_struct */ 141955cfaa3cSZeng Zhaoming rcu_read_lock(); 1420228ebcbeSPavel Emelyanov task = pid ? find_task_by_vpid(pid) : current; 142139743889SChristoph Lameter if (!task) { 142255cfaa3cSZeng Zhaoming rcu_read_unlock(); 1423596d7cfaSKOSAKI Motohiro err = -ESRCH; 1424596d7cfaSKOSAKI Motohiro goto out; 142539743889SChristoph Lameter } 14263268c63eSChristoph Lameter get_task_struct(task); 142739743889SChristoph Lameter 1428596d7cfaSKOSAKI Motohiro err = -EINVAL; 142939743889SChristoph Lameter 143039743889SChristoph Lameter /* 143139743889SChristoph Lameter * Check if this process has the right to modify the specified 143239743889SChristoph Lameter * process. The right exists if the process has administrative 14337f927fccSAlexey Dobriyan * capabilities, superuser privileges or the same 143439743889SChristoph Lameter * userid as the target process. 143539743889SChristoph Lameter */ 1436c69e8d9cSDavid Howells tcred = __task_cred(task); 1437b38a86ebSEric W. Biederman if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) && 1438b38a86ebSEric W. Biederman !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) && 143974c00241SChristoph Lameter !capable(CAP_SYS_NICE)) { 1440c69e8d9cSDavid Howells rcu_read_unlock(); 144139743889SChristoph Lameter err = -EPERM; 14423268c63eSChristoph Lameter goto out_put; 144339743889SChristoph Lameter } 1444c69e8d9cSDavid Howells rcu_read_unlock(); 144539743889SChristoph Lameter 144639743889SChristoph Lameter task_nodes = cpuset_mems_allowed(task); 144739743889SChristoph Lameter /* Is the user allowed to access the target nodes? */ 1448596d7cfaSKOSAKI Motohiro if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) { 144939743889SChristoph Lameter err = -EPERM; 14503268c63eSChristoph Lameter goto out_put; 145139743889SChristoph Lameter } 145239743889SChristoph Lameter 145301f13bd6SLai Jiangshan if (!nodes_subset(*new, node_states[N_MEMORY])) { 14543b42d28bSChristoph Lameter err = -EINVAL; 14553268c63eSChristoph Lameter goto out_put; 14563b42d28bSChristoph Lameter } 14573b42d28bSChristoph Lameter 145886c3a764SDavid Quigley err = security_task_movememory(task); 145986c3a764SDavid Quigley if (err) 14603268c63eSChristoph Lameter goto out_put; 146186c3a764SDavid Quigley 14623268c63eSChristoph Lameter mm = get_task_mm(task); 14633268c63eSChristoph Lameter put_task_struct(task); 1464f2a9ef88SSasha Levin 1465f2a9ef88SSasha Levin if (!mm) { 1466f2a9ef88SSasha Levin err = -EINVAL; 1467f2a9ef88SSasha Levin goto out; 1468f2a9ef88SSasha Levin } 1469f2a9ef88SSasha Levin 1470596d7cfaSKOSAKI Motohiro err = do_migrate_pages(mm, old, new, 147174c00241SChristoph Lameter capable(CAP_SYS_NICE) ? 
MPOL_MF_MOVE_ALL : MPOL_MF_MOVE); 14723268c63eSChristoph Lameter 147339743889SChristoph Lameter mmput(mm); 14743268c63eSChristoph Lameter out: 1475596d7cfaSKOSAKI Motohiro NODEMASK_SCRATCH_FREE(scratch); 1476596d7cfaSKOSAKI Motohiro 147739743889SChristoph Lameter return err; 14783268c63eSChristoph Lameter 14793268c63eSChristoph Lameter out_put: 14803268c63eSChristoph Lameter put_task_struct(task); 14813268c63eSChristoph Lameter goto out; 14823268c63eSChristoph Lameter 148339743889SChristoph Lameter } 148439743889SChristoph Lameter 148539743889SChristoph Lameter 14868bccd85fSChristoph Lameter /* Retrieve NUMA policy */ 1487938bb9f5SHeiko Carstens SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, 1488938bb9f5SHeiko Carstens unsigned long __user *, nmask, unsigned long, maxnode, 1489938bb9f5SHeiko Carstens unsigned long, addr, unsigned long, flags) 14908bccd85fSChristoph Lameter { 1491dbcb0f19SAdrian Bunk int err; 1492dbcb0f19SAdrian Bunk int uninitialized_var(pval); 14938bccd85fSChristoph Lameter nodemask_t nodes; 14948bccd85fSChristoph Lameter 14958bccd85fSChristoph Lameter if (nmask != NULL && maxnode < MAX_NUMNODES) 14968bccd85fSChristoph Lameter return -EINVAL; 14978bccd85fSChristoph Lameter 14988bccd85fSChristoph Lameter err = do_get_mempolicy(&pval, &nodes, addr, flags); 14998bccd85fSChristoph Lameter 15008bccd85fSChristoph Lameter if (err) 15018bccd85fSChristoph Lameter return err; 15028bccd85fSChristoph Lameter 15038bccd85fSChristoph Lameter if (policy && put_user(pval, policy)) 15048bccd85fSChristoph Lameter return -EFAULT; 15058bccd85fSChristoph Lameter 15068bccd85fSChristoph Lameter if (nmask) 15078bccd85fSChristoph Lameter err = copy_nodes_to_user(nmask, maxnode, &nodes); 15088bccd85fSChristoph Lameter 15098bccd85fSChristoph Lameter return err; 15108bccd85fSChristoph Lameter } 15118bccd85fSChristoph Lameter 15121da177e4SLinus Torvalds #ifdef CONFIG_COMPAT 15131da177e4SLinus Torvalds 1514c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, 1515c93e0f6cSHeiko Carstens compat_ulong_t __user *, nmask, 1516c93e0f6cSHeiko Carstens compat_ulong_t, maxnode, 1517c93e0f6cSHeiko Carstens compat_ulong_t, addr, compat_ulong_t, flags) 15181da177e4SLinus Torvalds { 15191da177e4SLinus Torvalds long err; 15201da177e4SLinus Torvalds unsigned long __user *nm = NULL; 15211da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 15221da177e4SLinus Torvalds DECLARE_BITMAP(bm, MAX_NUMNODES); 15231da177e4SLinus Torvalds 15241da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 15251da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 15261da177e4SLinus Torvalds 15271da177e4SLinus Torvalds if (nmask) 15281da177e4SLinus Torvalds nm = compat_alloc_user_space(alloc_size); 15291da177e4SLinus Torvalds 15301da177e4SLinus Torvalds err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags); 15311da177e4SLinus Torvalds 15321da177e4SLinus Torvalds if (!err && nmask) { 15332bbff6c7SKAMEZAWA Hiroyuki unsigned long copy_size; 15342bbff6c7SKAMEZAWA Hiroyuki copy_size = min_t(unsigned long, sizeof(bm), alloc_size); 15352bbff6c7SKAMEZAWA Hiroyuki err = copy_from_user(bm, nm, copy_size); 15361da177e4SLinus Torvalds /* ensure entire bitmap is zeroed */ 15371da177e4SLinus Torvalds err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8); 15381da177e4SLinus Torvalds err |= compat_put_bitmap(nmask, bm, nr_bits); 15391da177e4SLinus Torvalds } 15401da177e4SLinus Torvalds 15411da177e4SLinus Torvalds return err; 15421da177e4SLinus Torvalds } 
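/*
 * Illustrative user-space view of the policy syscalls above (an editorial
 * sketch, not part of this file; it assumes the <numaif.h> wrappers from
 * libnuma and a machine where nodes 0 and 1 are online and allowed):
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 1);
 *
 *	if (set_mempolicy(MPOL_INTERLEAVE, &mask, 64 + 1))
 *		perror("set_mempolicy");
 *
 *	int mode;
 *	if (get_mempolicy(&mode, NULL, 0, NULL, 0) == 0)
 *		assert(mode == MPOL_INTERLEAVE);
 *
 * After this, new anonymous pages of the task are spread over nodes 0
 * and 1 in round-robin fashion.
 */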
15431da177e4SLinus Torvalds 1544c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask, 1545c93e0f6cSHeiko Carstens compat_ulong_t, maxnode) 15461da177e4SLinus Torvalds { 15471da177e4SLinus Torvalds long err = 0; 15481da177e4SLinus Torvalds unsigned long __user *nm = NULL; 15491da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 15501da177e4SLinus Torvalds DECLARE_BITMAP(bm, MAX_NUMNODES); 15511da177e4SLinus Torvalds 15521da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 15531da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 15541da177e4SLinus Torvalds 15551da177e4SLinus Torvalds if (nmask) { 15561da177e4SLinus Torvalds err = compat_get_bitmap(bm, nmask, nr_bits); 15571da177e4SLinus Torvalds nm = compat_alloc_user_space(alloc_size); 15581da177e4SLinus Torvalds err |= copy_to_user(nm, bm, alloc_size); 15591da177e4SLinus Torvalds } 15601da177e4SLinus Torvalds 15611da177e4SLinus Torvalds if (err) 15621da177e4SLinus Torvalds return -EFAULT; 15631da177e4SLinus Torvalds 15641da177e4SLinus Torvalds return sys_set_mempolicy(mode, nm, nr_bits+1); 15651da177e4SLinus Torvalds } 15661da177e4SLinus Torvalds 1567c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len, 1568c93e0f6cSHeiko Carstens compat_ulong_t, mode, compat_ulong_t __user *, nmask, 1569c93e0f6cSHeiko Carstens compat_ulong_t, maxnode, compat_ulong_t, flags) 15701da177e4SLinus Torvalds { 15711da177e4SLinus Torvalds long err = 0; 15721da177e4SLinus Torvalds unsigned long __user *nm = NULL; 15731da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 1574dfcd3c0dSAndi Kleen nodemask_t bm; 15751da177e4SLinus Torvalds 15761da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 15771da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 15781da177e4SLinus Torvalds 15791da177e4SLinus Torvalds if (nmask) { 1580dfcd3c0dSAndi Kleen err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits); 15811da177e4SLinus Torvalds nm = compat_alloc_user_space(alloc_size); 1582dfcd3c0dSAndi Kleen err |= copy_to_user(nm, nodes_addr(bm), alloc_size); 15831da177e4SLinus Torvalds } 15841da177e4SLinus Torvalds 15851da177e4SLinus Torvalds if (err) 15861da177e4SLinus Torvalds return -EFAULT; 15871da177e4SLinus Torvalds 15881da177e4SLinus Torvalds return sys_mbind(start, len, mode, nm, nr_bits+1, flags); 15891da177e4SLinus Torvalds } 15901da177e4SLinus Torvalds 15911da177e4SLinus Torvalds #endif 15921da177e4SLinus Torvalds 159374d2c3a0SOleg Nesterov struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, 159474d2c3a0SOleg Nesterov unsigned long addr) 15951da177e4SLinus Torvalds { 15968d90274bSOleg Nesterov struct mempolicy *pol = NULL; 15971da177e4SLinus Torvalds 15981da177e4SLinus Torvalds if (vma) { 1599480eccf9SLee Schermerhorn if (vma->vm_ops && vma->vm_ops->get_policy) { 16008d90274bSOleg Nesterov pol = vma->vm_ops->get_policy(vma, addr); 160100442ad0SMel Gorman } else if (vma->vm_policy) { 16021da177e4SLinus Torvalds pol = vma->vm_policy; 160300442ad0SMel Gorman 160400442ad0SMel Gorman /* 160500442ad0SMel Gorman * shmem_alloc_page() passes MPOL_F_SHARED policy with 160600442ad0SMel Gorman * a pseudo vma whose vma->vm_ops=NULL. 
Take a reference 160700442ad0SMel Gorman * count on these policies which will be dropped by 160800442ad0SMel Gorman * mpol_cond_put() later 160900442ad0SMel Gorman */ 161000442ad0SMel Gorman if (mpol_needs_cond_ref(pol)) 161100442ad0SMel Gorman mpol_get(pol); 161200442ad0SMel Gorman } 16131da177e4SLinus Torvalds } 1614f15ca78eSOleg Nesterov 161574d2c3a0SOleg Nesterov return pol; 161674d2c3a0SOleg Nesterov } 161774d2c3a0SOleg Nesterov 161874d2c3a0SOleg Nesterov /* 161974d2c3a0SOleg Nesterov * get_vma_policy(@task, @vma, @addr) 162074d2c3a0SOleg Nesterov * @task: task for fallback if vma policy == default 162174d2c3a0SOleg Nesterov * @vma: virtual memory area whose policy is sought 162274d2c3a0SOleg Nesterov * @addr: address in @vma for shared policy lookup 162374d2c3a0SOleg Nesterov * 162474d2c3a0SOleg Nesterov * Returns effective policy for a VMA at specified address. 162574d2c3a0SOleg Nesterov * Falls back to @task or system default policy, as necessary. 162674d2c3a0SOleg Nesterov * Current or other task's task mempolicy and non-shared vma policies must be 162774d2c3a0SOleg Nesterov * protected by task_lock(task) by the caller. 162874d2c3a0SOleg Nesterov * Shared policies [those marked as MPOL_F_SHARED] require an extra reference 162974d2c3a0SOleg Nesterov * count--added by the get_policy() vm_op, as appropriate--to protect against 163074d2c3a0SOleg Nesterov * freeing by another task. It is the caller's responsibility to free the 163174d2c3a0SOleg Nesterov * extra reference for shared policies. 163274d2c3a0SOleg Nesterov */ 163374d2c3a0SOleg Nesterov struct mempolicy *get_vma_policy(struct task_struct *task, 163474d2c3a0SOleg Nesterov struct vm_area_struct *vma, unsigned long addr) 163574d2c3a0SOleg Nesterov { 163674d2c3a0SOleg Nesterov struct mempolicy *pol = __get_vma_policy(vma, addr); 163774d2c3a0SOleg Nesterov 16388d90274bSOleg Nesterov if (!pol) 16398d90274bSOleg Nesterov pol = get_task_policy(task); 16408d90274bSOleg Nesterov 16411da177e4SLinus Torvalds return pol; 16421da177e4SLinus Torvalds } 16431da177e4SLinus Torvalds 16446b6482bbSOleg Nesterov bool vma_policy_mof(struct vm_area_struct *vma) 1645fc314724SMel Gorman { 16466b6482bbSOleg Nesterov struct mempolicy *pol; 1647f15ca78eSOleg Nesterov 1648fc314724SMel Gorman if (vma->vm_ops && vma->vm_ops->get_policy) { 1649fc314724SMel Gorman bool ret = false; 1650fc314724SMel Gorman 1651fc314724SMel Gorman pol = vma->vm_ops->get_policy(vma, vma->vm_start); 1652fc314724SMel Gorman if (pol && (pol->flags & MPOL_F_MOF)) 1653fc314724SMel Gorman ret = true; 1654fc314724SMel Gorman mpol_cond_put(pol); 1655fc314724SMel Gorman 1656fc314724SMel Gorman return ret; 16578d90274bSOleg Nesterov } 16588d90274bSOleg Nesterov 1659fc314724SMel Gorman pol = vma->vm_policy; 16608d90274bSOleg Nesterov if (!pol) 16616b6482bbSOleg Nesterov pol = get_task_policy(current); 1662fc314724SMel Gorman 1663fc314724SMel Gorman return pol->flags & MPOL_F_MOF; 1664fc314724SMel Gorman } 1665fc314724SMel Gorman 1666d3eb1570SLai Jiangshan static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone) 1667d3eb1570SLai Jiangshan { 1668d3eb1570SLai Jiangshan enum zone_type dynamic_policy_zone = policy_zone; 1669d3eb1570SLai Jiangshan 1670d3eb1570SLai Jiangshan BUG_ON(dynamic_policy_zone == ZONE_MOVABLE); 1671d3eb1570SLai Jiangshan 1672d3eb1570SLai Jiangshan /* 1673d3eb1570SLai Jiangshan * if policy->v.nodes has movable memory only, 1674d3eb1570SLai Jiangshan * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only. 
1675d3eb1570SLai Jiangshan * 1676d3eb1570SLai Jiangshan * policy->v.nodes intersects with node_states[N_MEMORY]. 1677d3eb1570SLai Jiangshan * so if the following test fails, it implies 1678d3eb1570SLai Jiangshan * policy->v.nodes has movable memory only. 1679d3eb1570SLai Jiangshan */ 1680d3eb1570SLai Jiangshan if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY])) 1681d3eb1570SLai Jiangshan dynamic_policy_zone = ZONE_MOVABLE; 1682d3eb1570SLai Jiangshan 1683d3eb1570SLai Jiangshan return zone >= dynamic_policy_zone; 1684d3eb1570SLai Jiangshan } 1685d3eb1570SLai Jiangshan 168652cd3b07SLee Schermerhorn /* 168752cd3b07SLee Schermerhorn * Return a nodemask representing a mempolicy for filtering nodes for 168852cd3b07SLee Schermerhorn * page allocation 168952cd3b07SLee Schermerhorn */ 169052cd3b07SLee Schermerhorn static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy) 169119770b32SMel Gorman { 169219770b32SMel Gorman /* Lower zones don't get a nodemask applied for MPOL_BIND */ 169345c4745aSLee Schermerhorn if (unlikely(policy->mode == MPOL_BIND) && 1694d3eb1570SLai Jiangshan apply_policy_zone(policy, gfp_zone(gfp)) && 169519770b32SMel Gorman cpuset_nodemask_valid_mems_allowed(&policy->v.nodes)) 169619770b32SMel Gorman return &policy->v.nodes; 169719770b32SMel Gorman 169819770b32SMel Gorman return NULL; 169919770b32SMel Gorman } 170019770b32SMel Gorman 170152cd3b07SLee Schermerhorn /* Return a zonelist indicated by gfp for node representing a mempolicy */ 17022f5f9486SAndi Kleen static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy, 17032f5f9486SAndi Kleen int nd) 17041da177e4SLinus Torvalds { 170545c4745aSLee Schermerhorn switch (policy->mode) { 17061da177e4SLinus Torvalds case MPOL_PREFERRED: 1707fc36b8d3SLee Schermerhorn if (!(policy->flags & MPOL_F_LOCAL)) 17081da177e4SLinus Torvalds nd = policy->v.preferred_node; 17091da177e4SLinus Torvalds break; 17101da177e4SLinus Torvalds case MPOL_BIND: 171119770b32SMel Gorman /* 171252cd3b07SLee Schermerhorn * Normally, MPOL_BIND allocations are node-local within the 171352cd3b07SLee Schermerhorn * allowed nodemask. However, if __GFP_THISNODE is set and the 17146eb27e1fSBob Liu * current node isn't part of the mask, we use the zonelist for 171552cd3b07SLee Schermerhorn * the first node in the mask instead.
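 *
 * An illustrative case (editorial example): with MPOL_BIND over
 * nodes {2,3}, a __GFP_THISNODE allocation issued while running on
 * node 0 would otherwise use node 0's zonelist; node 0 is not in
 * the mask, so the zonelist of node 2, the first node of the mask,
 * is returned instead.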
171619770b32SMel Gorman */ 171719770b32SMel Gorman if (unlikely(gfp & __GFP_THISNODE) && 171819770b32SMel Gorman unlikely(!node_isset(nd, policy->v.nodes))) 171919770b32SMel Gorman nd = first_node(policy->v.nodes); 172019770b32SMel Gorman break; 17211da177e4SLinus Torvalds default: 17221da177e4SLinus Torvalds BUG(); 17231da177e4SLinus Torvalds } 17240e88460dSMel Gorman return node_zonelist(nd, gfp); 17251da177e4SLinus Torvalds } 17261da177e4SLinus Torvalds 17271da177e4SLinus Torvalds /* Do dynamic interleaving for a process */ 17281da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy) 17291da177e4SLinus Torvalds { 17301da177e4SLinus Torvalds unsigned nid, next; 17311da177e4SLinus Torvalds struct task_struct *me = current; 17321da177e4SLinus Torvalds 17331da177e4SLinus Torvalds nid = me->il_next; 1734dfcd3c0dSAndi Kleen next = next_node(nid, policy->v.nodes); 17351da177e4SLinus Torvalds if (next >= MAX_NUMNODES) 1736dfcd3c0dSAndi Kleen next = first_node(policy->v.nodes); 1737f5b087b5SDavid Rientjes if (next < MAX_NUMNODES) 17381da177e4SLinus Torvalds me->il_next = next; 17391da177e4SLinus Torvalds return nid; 17401da177e4SLinus Torvalds } 17411da177e4SLinus Torvalds 1742dc85da15SChristoph Lameter /* 1743dc85da15SChristoph Lameter * Depending on the memory policy provide a node from which to allocate the 1744dc85da15SChristoph Lameter * next slab entry. 1745dc85da15SChristoph Lameter */ 17462a389610SDavid Rientjes unsigned int mempolicy_slab_node(void) 1747dc85da15SChristoph Lameter { 1748e7b691b0SAndi Kleen struct mempolicy *policy; 17492a389610SDavid Rientjes int node = numa_mem_id(); 1750e7b691b0SAndi Kleen 1751e7b691b0SAndi Kleen if (in_interrupt()) 17522a389610SDavid Rientjes return node; 1753e7b691b0SAndi Kleen 1754e7b691b0SAndi Kleen policy = current->mempolicy; 1755fc36b8d3SLee Schermerhorn if (!policy || policy->flags & MPOL_F_LOCAL) 17562a389610SDavid Rientjes return node; 1757765c4507SChristoph Lameter 1758bea904d5SLee Schermerhorn switch (policy->mode) { 1759bea904d5SLee Schermerhorn case MPOL_PREFERRED: 1760fc36b8d3SLee Schermerhorn /* 1761fc36b8d3SLee Schermerhorn * handled MPOL_F_LOCAL above 1762fc36b8d3SLee Schermerhorn */ 1763bea904d5SLee Schermerhorn return policy->v.preferred_node; 1764bea904d5SLee Schermerhorn 1765dc85da15SChristoph Lameter case MPOL_INTERLEAVE: 1766dc85da15SChristoph Lameter return interleave_nodes(policy); 1767dc85da15SChristoph Lameter 1768dd1a239fSMel Gorman case MPOL_BIND: { 1769dc85da15SChristoph Lameter /* 1770dc85da15SChristoph Lameter * Follow bind policy behavior and start allocation at the 1771dc85da15SChristoph Lameter * first node. 1772dc85da15SChristoph Lameter */ 177319770b32SMel Gorman struct zonelist *zonelist; 177419770b32SMel Gorman struct zone *zone; 177519770b32SMel Gorman enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL); 17762a389610SDavid Rientjes zonelist = &NODE_DATA(node)->node_zonelists[0]; 177719770b32SMel Gorman (void)first_zones_zonelist(zonelist, highest_zoneidx, 177819770b32SMel Gorman &policy->v.nodes, 177919770b32SMel Gorman &zone); 17802a389610SDavid Rientjes return zone ? zone->node : node; 1781dd1a239fSMel Gorman } 1782dc85da15SChristoph Lameter 1783dc85da15SChristoph Lameter default: 1784bea904d5SLee Schermerhorn BUG(); 1785dc85da15SChristoph Lameter } 1786dc85da15SChristoph Lameter } 1787dc85da15SChristoph Lameter 17881da177e4SLinus Torvalds /* Do static interleaving for a VMA with known offset. 
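 * An illustrative case (editorial example): for an interleave nodemask of
 * {0,2,5} (weight 3) and an offset of 7, target = 7 % 3 = 1, and the
 * do/while walk in offset_il_node() stops on the second set node, i.e.
 * node 2.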
*/ 17891da177e4SLinus Torvalds static unsigned offset_il_node(struct mempolicy *pol, 17901da177e4SLinus Torvalds struct vm_area_struct *vma, unsigned long off) 17911da177e4SLinus Torvalds { 1792dfcd3c0dSAndi Kleen unsigned nnodes = nodes_weight(pol->v.nodes); 1793f5b087b5SDavid Rientjes unsigned target; 17941da177e4SLinus Torvalds int c; 1795b76ac7e7SJianguo Wu int nid = NUMA_NO_NODE; 17961da177e4SLinus Torvalds 1797f5b087b5SDavid Rientjes if (!nnodes) 1798f5b087b5SDavid Rientjes return numa_node_id(); 1799f5b087b5SDavid Rientjes target = (unsigned int)off % nnodes; 18001da177e4SLinus Torvalds c = 0; 18011da177e4SLinus Torvalds do { 1802dfcd3c0dSAndi Kleen nid = next_node(nid, pol->v.nodes); 18031da177e4SLinus Torvalds c++; 18041da177e4SLinus Torvalds } while (c <= target); 18051da177e4SLinus Torvalds return nid; 18061da177e4SLinus Torvalds } 18071da177e4SLinus Torvalds 18085da7ca86SChristoph Lameter /* Determine a node number for interleave */ 18095da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol, 18105da7ca86SChristoph Lameter struct vm_area_struct *vma, unsigned long addr, int shift) 18115da7ca86SChristoph Lameter { 18125da7ca86SChristoph Lameter if (vma) { 18135da7ca86SChristoph Lameter unsigned long off; 18145da7ca86SChristoph Lameter 18153b98b087SNishanth Aravamudan /* 18163b98b087SNishanth Aravamudan * for small pages, there is no difference between 18173b98b087SNishanth Aravamudan * shift and PAGE_SHIFT, so the bit-shift is safe. 18183b98b087SNishanth Aravamudan * for huge pages, since vm_pgoff is in units of small 18193b98b087SNishanth Aravamudan * pages, we need to shift off the always 0 bits to get 18203b98b087SNishanth Aravamudan * a useful offset. 18213b98b087SNishanth Aravamudan */ 18223b98b087SNishanth Aravamudan BUG_ON(shift < PAGE_SHIFT); 18233b98b087SNishanth Aravamudan off = vma->vm_pgoff >> (shift - PAGE_SHIFT); 18245da7ca86SChristoph Lameter off += (addr - vma->vm_start) >> shift; 18255da7ca86SChristoph Lameter return offset_il_node(pol, vma, off); 18265da7ca86SChristoph Lameter } else 18275da7ca86SChristoph Lameter return interleave_nodes(pol); 18285da7ca86SChristoph Lameter } 18295da7ca86SChristoph Lameter 1830778d3b0fSMichal Hocko /* 1831778d3b0fSMichal Hocko * Return the bit number of a random bit set in the nodemask. 
1832b76ac7e7SJianguo Wu * (returns NUMA_NO_NODE if nodemask is empty) 1833778d3b0fSMichal Hocko */ 1834778d3b0fSMichal Hocko int node_random(const nodemask_t *maskp) 1835778d3b0fSMichal Hocko { 1836b76ac7e7SJianguo Wu int w, bit = NUMA_NO_NODE; 1837778d3b0fSMichal Hocko 1838778d3b0fSMichal Hocko w = nodes_weight(*maskp); 1839778d3b0fSMichal Hocko if (w) 1840778d3b0fSMichal Hocko bit = bitmap_ord_to_pos(maskp->bits, 1841778d3b0fSMichal Hocko get_random_int() % w, MAX_NUMNODES); 1842778d3b0fSMichal Hocko return bit; 1843778d3b0fSMichal Hocko } 1844778d3b0fSMichal Hocko 184500ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS 1846480eccf9SLee Schermerhorn /* 1847480eccf9SLee Schermerhorn * huge_zonelist(@vma, @addr, @gfp_flags, @mpol) 1848b46e14acSFabian Frederick * @vma: virtual memory area whose policy is sought 1849b46e14acSFabian Frederick * @addr: address in @vma for shared policy lookup and interleave policy 1850b46e14acSFabian Frederick * @gfp_flags: for requested zone 1851b46e14acSFabian Frederick * @mpol: pointer to mempolicy pointer for reference counted mempolicy 1852b46e14acSFabian Frederick * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask 1853480eccf9SLee Schermerhorn * 185452cd3b07SLee Schermerhorn * Returns a zonelist suitable for a huge page allocation and a pointer 185552cd3b07SLee Schermerhorn * to the struct mempolicy for conditional unref after allocation. 185652cd3b07SLee Schermerhorn * If the effective policy is 'BIND, returns a pointer to the mempolicy's 185752cd3b07SLee Schermerhorn * @nodemask for filtering the zonelist. 1858c0ff7453SMiao Xie * 1859d26914d1SMel Gorman * Must be protected by read_mems_allowed_begin() 1860480eccf9SLee Schermerhorn */ 1861396faf03SMel Gorman struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr, 186219770b32SMel Gorman gfp_t gfp_flags, struct mempolicy **mpol, 186319770b32SMel Gorman nodemask_t **nodemask) 18645da7ca86SChristoph Lameter { 1865480eccf9SLee Schermerhorn struct zonelist *zl; 18665da7ca86SChristoph Lameter 186752cd3b07SLee Schermerhorn *mpol = get_vma_policy(current, vma, addr); 186819770b32SMel Gorman *nodemask = NULL; /* assume !MPOL_BIND */ 18695da7ca86SChristoph Lameter 187052cd3b07SLee Schermerhorn if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) { 187152cd3b07SLee Schermerhorn zl = node_zonelist(interleave_nid(*mpol, vma, addr, 1872a5516438SAndi Kleen huge_page_shift(hstate_vma(vma))), gfp_flags); 187352cd3b07SLee Schermerhorn } else { 18742f5f9486SAndi Kleen zl = policy_zonelist(gfp_flags, *mpol, numa_node_id()); 187552cd3b07SLee Schermerhorn if ((*mpol)->mode == MPOL_BIND) 187652cd3b07SLee Schermerhorn *nodemask = &(*mpol)->v.nodes; 1877480eccf9SLee Schermerhorn } 1878480eccf9SLee Schermerhorn return zl; 18795da7ca86SChristoph Lameter } 188006808b08SLee Schermerhorn 188106808b08SLee Schermerhorn /* 188206808b08SLee Schermerhorn * init_nodemask_of_mempolicy 188306808b08SLee Schermerhorn * 188406808b08SLee Schermerhorn * If the current task's mempolicy is "default" [NULL], return 'false' 188506808b08SLee Schermerhorn * to indicate default policy. Otherwise, extract the policy nodemask 188606808b08SLee Schermerhorn * for 'bind' or 'interleave' policy into the argument nodemask, or 188706808b08SLee Schermerhorn * initialize the argument nodemask to contain the single node for 188806808b08SLee Schermerhorn * 'preferred' or 'local' policy and return 'true' to indicate presence 188906808b08SLee Schermerhorn * of non-default mempolicy. 
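 *
 * An illustrative case (editorial example): under MPOL_INTERLEAVE across
 * nodes {1,3}, *mask is set to {1,3} and 'true' is returned; under
 * MPOL_PREFERRED with MPOL_F_LOCAL, *mask is initialized to the single
 * node the task is currently running on.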
189006808b08SLee Schermerhorn * 189106808b08SLee Schermerhorn * We don't bother with reference counting the mempolicy [mpol_get/put] 189206808b08SLee Schermerhorn * because the current task is examining it's own mempolicy and a task's 189306808b08SLee Schermerhorn * mempolicy is only ever changed by the task itself. 189406808b08SLee Schermerhorn * 189506808b08SLee Schermerhorn * N.B., it is the caller's responsibility to free a returned nodemask. 189606808b08SLee Schermerhorn */ 189706808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask) 189806808b08SLee Schermerhorn { 189906808b08SLee Schermerhorn struct mempolicy *mempolicy; 190006808b08SLee Schermerhorn int nid; 190106808b08SLee Schermerhorn 190206808b08SLee Schermerhorn if (!(mask && current->mempolicy)) 190306808b08SLee Schermerhorn return false; 190406808b08SLee Schermerhorn 1905c0ff7453SMiao Xie task_lock(current); 190606808b08SLee Schermerhorn mempolicy = current->mempolicy; 190706808b08SLee Schermerhorn switch (mempolicy->mode) { 190806808b08SLee Schermerhorn case MPOL_PREFERRED: 190906808b08SLee Schermerhorn if (mempolicy->flags & MPOL_F_LOCAL) 191006808b08SLee Schermerhorn nid = numa_node_id(); 191106808b08SLee Schermerhorn else 191206808b08SLee Schermerhorn nid = mempolicy->v.preferred_node; 191306808b08SLee Schermerhorn init_nodemask_of_node(mask, nid); 191406808b08SLee Schermerhorn break; 191506808b08SLee Schermerhorn 191606808b08SLee Schermerhorn case MPOL_BIND: 191706808b08SLee Schermerhorn /* Fall through */ 191806808b08SLee Schermerhorn case MPOL_INTERLEAVE: 191906808b08SLee Schermerhorn *mask = mempolicy->v.nodes; 192006808b08SLee Schermerhorn break; 192106808b08SLee Schermerhorn 192206808b08SLee Schermerhorn default: 192306808b08SLee Schermerhorn BUG(); 192406808b08SLee Schermerhorn } 1925c0ff7453SMiao Xie task_unlock(current); 192606808b08SLee Schermerhorn 192706808b08SLee Schermerhorn return true; 192806808b08SLee Schermerhorn } 192900ac59adSChen, Kenneth W #endif 19305da7ca86SChristoph Lameter 19316f48d0ebSDavid Rientjes /* 19326f48d0ebSDavid Rientjes * mempolicy_nodemask_intersects 19336f48d0ebSDavid Rientjes * 19346f48d0ebSDavid Rientjes * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default 19356f48d0ebSDavid Rientjes * policy. Otherwise, check for intersection between mask and the policy 19366f48d0ebSDavid Rientjes * nodemask for 'bind' or 'interleave' policy. For 'perferred' or 'local' 19376f48d0ebSDavid Rientjes * policy, always return true since it may allocate elsewhere on fallback. 19386f48d0ebSDavid Rientjes * 19396f48d0ebSDavid Rientjes * Takes task_lock(tsk) to prevent freeing of its mempolicy. 
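 *
 * An illustrative case (editorial example): a task bound with MPOL_BIND
 * to nodes {0,1} intersects a caller's mask of {1,2} on node 1, so this
 * returns 'true'; against {2,3} it returns 'false'.  MPOL_PREFERRED
 * always reports 'true', since allocations may have fallen back to nodes
 * outside the preferred one.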
19406f48d0ebSDavid Rientjes */ 19416f48d0ebSDavid Rientjes bool mempolicy_nodemask_intersects(struct task_struct *tsk, 19426f48d0ebSDavid Rientjes const nodemask_t *mask) 19436f48d0ebSDavid Rientjes { 19446f48d0ebSDavid Rientjes struct mempolicy *mempolicy; 19456f48d0ebSDavid Rientjes bool ret = true; 19466f48d0ebSDavid Rientjes 19476f48d0ebSDavid Rientjes if (!mask) 19486f48d0ebSDavid Rientjes return ret; 19496f48d0ebSDavid Rientjes task_lock(tsk); 19506f48d0ebSDavid Rientjes mempolicy = tsk->mempolicy; 19516f48d0ebSDavid Rientjes if (!mempolicy) 19526f48d0ebSDavid Rientjes goto out; 19536f48d0ebSDavid Rientjes 19546f48d0ebSDavid Rientjes switch (mempolicy->mode) { 19556f48d0ebSDavid Rientjes case MPOL_PREFERRED: 19566f48d0ebSDavid Rientjes /* 19576f48d0ebSDavid Rientjes * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to 19586f48d0ebSDavid Rientjes * allocate from, they may fallback to other nodes when oom. 19596f48d0ebSDavid Rientjes * Thus, it's possible for tsk to have allocated memory from 19606f48d0ebSDavid Rientjes * nodes in mask. 19616f48d0ebSDavid Rientjes */ 19626f48d0ebSDavid Rientjes break; 19636f48d0ebSDavid Rientjes case MPOL_BIND: 19646f48d0ebSDavid Rientjes case MPOL_INTERLEAVE: 19656f48d0ebSDavid Rientjes ret = nodes_intersects(mempolicy->v.nodes, *mask); 19666f48d0ebSDavid Rientjes break; 19676f48d0ebSDavid Rientjes default: 19686f48d0ebSDavid Rientjes BUG(); 19696f48d0ebSDavid Rientjes } 19706f48d0ebSDavid Rientjes out: 19716f48d0ebSDavid Rientjes task_unlock(tsk); 19726f48d0ebSDavid Rientjes return ret; 19736f48d0ebSDavid Rientjes } 19746f48d0ebSDavid Rientjes 19751da177e4SLinus Torvalds /* Allocate a page in interleaved policy. 19761da177e4SLinus Torvalds Own path because it needs to do special accounting. */ 1977662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, 1978662f3a0bSAndi Kleen unsigned nid) 19791da177e4SLinus Torvalds { 19801da177e4SLinus Torvalds struct zonelist *zl; 19811da177e4SLinus Torvalds struct page *page; 19821da177e4SLinus Torvalds 19830e88460dSMel Gorman zl = node_zonelist(nid, gfp); 19841da177e4SLinus Torvalds page = __alloc_pages(gfp, order, zl); 1985dd1a239fSMel Gorman if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0])) 1986ca889e6cSChristoph Lameter inc_zone_page_state(page, NUMA_INTERLEAVE_HIT); 19871da177e4SLinus Torvalds return page; 19881da177e4SLinus Torvalds } 19891da177e4SLinus Torvalds 19901da177e4SLinus Torvalds /** 19910bbbc0b3SAndrea Arcangeli * alloc_pages_vma - Allocate a page for a VMA. 19921da177e4SLinus Torvalds * 19931da177e4SLinus Torvalds * @gfp: 19941da177e4SLinus Torvalds * %GFP_USER user allocation. 19951da177e4SLinus Torvalds * %GFP_KERNEL kernel allocations, 19961da177e4SLinus Torvalds * %GFP_HIGHMEM highmem/user allocations, 19971da177e4SLinus Torvalds * %GFP_FS allocation should not call back into a file system. 19981da177e4SLinus Torvalds * %GFP_ATOMIC don't sleep. 19991da177e4SLinus Torvalds * 20000bbbc0b3SAndrea Arcangeli * @order:Order of the GFP allocation. 20011da177e4SLinus Torvalds * @vma: Pointer to VMA or NULL if not available. 20021da177e4SLinus Torvalds * @addr: Virtual Address of the allocation. Must be inside the VMA. 20031da177e4SLinus Torvalds * 20041da177e4SLinus Torvalds * This function allocates a page from the kernel page pool and applies 20051da177e4SLinus Torvalds * a NUMA policy associated with the VMA or the current process. 
20061da177e4SLinus Torvalds * When VMA is not NULL caller must hold down_read on the mmap_sem of the 20071da177e4SLinus Torvalds * mm_struct of the VMA to prevent it from going away. Should be used for 20081da177e4SLinus Torvalds * all allocations for pages that will be mapped into 20091da177e4SLinus Torvalds * user space. Returns NULL when no page can be allocated. 20101da177e4SLinus Torvalds * 20111da177e4SLinus Torvalds * Should be called with the mm_sem of the vma hold. 20121da177e4SLinus Torvalds */ 20131da177e4SLinus Torvalds struct page * 20140bbbc0b3SAndrea Arcangeli alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, 20152f5f9486SAndi Kleen unsigned long addr, int node) 20161da177e4SLinus Torvalds { 2017cc9a6c87SMel Gorman struct mempolicy *pol; 2018c0ff7453SMiao Xie struct page *page; 2019cc9a6c87SMel Gorman unsigned int cpuset_mems_cookie; 20201da177e4SLinus Torvalds 2021cc9a6c87SMel Gorman retry_cpuset: 2022cc9a6c87SMel Gorman pol = get_vma_policy(current, vma, addr); 2023d26914d1SMel Gorman cpuset_mems_cookie = read_mems_allowed_begin(); 2024cc9a6c87SMel Gorman 202545c4745aSLee Schermerhorn if (unlikely(pol->mode == MPOL_INTERLEAVE)) { 20261da177e4SLinus Torvalds unsigned nid; 20275da7ca86SChristoph Lameter 20288eac563cSAndi Kleen nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order); 202952cd3b07SLee Schermerhorn mpol_cond_put(pol); 20300bbbc0b3SAndrea Arcangeli page = alloc_page_interleave(gfp, order, nid); 2031d26914d1SMel Gorman if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) 2032cc9a6c87SMel Gorman goto retry_cpuset; 2033cc9a6c87SMel Gorman 2034c0ff7453SMiao Xie return page; 20351da177e4SLinus Torvalds } 2036212a0a6fSDavid Rientjes page = __alloc_pages_nodemask(gfp, order, 2037212a0a6fSDavid Rientjes policy_zonelist(gfp, pol, node), 20380bbbc0b3SAndrea Arcangeli policy_nodemask(gfp, pol)); 20392386740dSOleg Nesterov mpol_cond_put(pol); 2040d26914d1SMel Gorman if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) 2041cc9a6c87SMel Gorman goto retry_cpuset; 2042c0ff7453SMiao Xie return page; 20431da177e4SLinus Torvalds } 20441da177e4SLinus Torvalds 20451da177e4SLinus Torvalds /** 20461da177e4SLinus Torvalds * alloc_pages_current - Allocate pages. 20471da177e4SLinus Torvalds * 20481da177e4SLinus Torvalds * @gfp: 20491da177e4SLinus Torvalds * %GFP_USER user allocation, 20501da177e4SLinus Torvalds * %GFP_KERNEL kernel allocation, 20511da177e4SLinus Torvalds * %GFP_HIGHMEM highmem allocation, 20521da177e4SLinus Torvalds * %GFP_FS don't call back into a file system. 20531da177e4SLinus Torvalds * %GFP_ATOMIC don't sleep. 20541da177e4SLinus Torvalds * @order: Power of two of allocation size in pages. 0 is a single page. 20551da177e4SLinus Torvalds * 20561da177e4SLinus Torvalds * Allocate a page from the kernel page pool. When not in 20571da177e4SLinus Torvalds * interrupt context and apply the current process NUMA policy. 20581da177e4SLinus Torvalds * Returns NULL when no page can be allocated. 20591da177e4SLinus Torvalds * 2060cf2a473cSPaul Jackson * Don't call cpuset_update_task_memory_state() unless 20611da177e4SLinus Torvalds * 1) it's ok to take cpuset_sem (can WAIT), and 20621da177e4SLinus Torvalds * 2) allocating for current task (not interrupt). 
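 *
 * In practice (editorial note): a GFP_KERNEL allocation from process
 * context under an MPOL_INTERLEAVE task policy rotates il_next across
 * the policy nodes via interleave_nodes(), while interrupt-context or
 * __GFP_THISNODE allocations fall back to &default_policy, i.e. the
 * local node.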
20631da177e4SLinus Torvalds */ 2064dd0fc66fSAl Viro struct page *alloc_pages_current(gfp_t gfp, unsigned order) 20651da177e4SLinus Torvalds { 20668d90274bSOleg Nesterov struct mempolicy *pol = &default_policy; 2067c0ff7453SMiao Xie struct page *page; 2068cc9a6c87SMel Gorman unsigned int cpuset_mems_cookie; 20691da177e4SLinus Torvalds 20708d90274bSOleg Nesterov if (!in_interrupt() && !(gfp & __GFP_THISNODE)) 20718d90274bSOleg Nesterov pol = get_task_policy(current); 207252cd3b07SLee Schermerhorn 2073cc9a6c87SMel Gorman retry_cpuset: 2074d26914d1SMel Gorman cpuset_mems_cookie = read_mems_allowed_begin(); 2075cc9a6c87SMel Gorman 207652cd3b07SLee Schermerhorn /* 207752cd3b07SLee Schermerhorn * No reference counting needed for current->mempolicy 207852cd3b07SLee Schermerhorn * nor system default_policy 207952cd3b07SLee Schermerhorn */ 208045c4745aSLee Schermerhorn if (pol->mode == MPOL_INTERLEAVE) 2081c0ff7453SMiao Xie page = alloc_page_interleave(gfp, order, interleave_nodes(pol)); 2082c0ff7453SMiao Xie else 2083c0ff7453SMiao Xie page = __alloc_pages_nodemask(gfp, order, 20845c4b4be3SAndi Kleen policy_zonelist(gfp, pol, numa_node_id()), 20855c4b4be3SAndi Kleen policy_nodemask(gfp, pol)); 2086cc9a6c87SMel Gorman 2087d26914d1SMel Gorman if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) 2088cc9a6c87SMel Gorman goto retry_cpuset; 2089cc9a6c87SMel Gorman 2090c0ff7453SMiao Xie return page; 20911da177e4SLinus Torvalds } 20921da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_pages_current); 20931da177e4SLinus Torvalds 2094ef0855d3SOleg Nesterov int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst) 2095ef0855d3SOleg Nesterov { 2096ef0855d3SOleg Nesterov struct mempolicy *pol = mpol_dup(vma_policy(src)); 2097ef0855d3SOleg Nesterov 2098ef0855d3SOleg Nesterov if (IS_ERR(pol)) 2099ef0855d3SOleg Nesterov return PTR_ERR(pol); 2100ef0855d3SOleg Nesterov dst->vm_policy = pol; 2101ef0855d3SOleg Nesterov return 0; 2102ef0855d3SOleg Nesterov } 2103ef0855d3SOleg Nesterov 21044225399aSPaul Jackson /* 2105846a16bfSLee Schermerhorn * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it 21064225399aSPaul Jackson * rebinds the mempolicy its copying by calling mpol_rebind_policy() 21074225399aSPaul Jackson * with the mems_allowed returned by cpuset_mems_allowed(). This 21084225399aSPaul Jackson * keeps mempolicies cpuset relative after its cpuset moves. See 21094225399aSPaul Jackson * further kernel/cpuset.c update_nodemask(). 2110708c1bbcSMiao Xie * 2111708c1bbcSMiao Xie * current's mempolicy may be rebinded by the other task(the task that changes 2112708c1bbcSMiao Xie * cpuset's mems), so we needn't do rebind work for current task. 
21134225399aSPaul Jackson */ 21144225399aSPaul Jackson 2115846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */ 2116846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old) 21171da177e4SLinus Torvalds { 21181da177e4SLinus Torvalds struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL); 21191da177e4SLinus Torvalds 21201da177e4SLinus Torvalds if (!new) 21211da177e4SLinus Torvalds return ERR_PTR(-ENOMEM); 2122708c1bbcSMiao Xie 2123708c1bbcSMiao Xie /* task's mempolicy is protected by alloc_lock */ 2124708c1bbcSMiao Xie if (old == current->mempolicy) { 2125708c1bbcSMiao Xie task_lock(current); 2126708c1bbcSMiao Xie *new = *old; 2127708c1bbcSMiao Xie task_unlock(current); 2128708c1bbcSMiao Xie } else 2129708c1bbcSMiao Xie *new = *old; 2130708c1bbcSMiao Xie 21314225399aSPaul Jackson if (current_cpuset_is_being_rebound()) { 21324225399aSPaul Jackson nodemask_t mems = cpuset_mems_allowed(current); 2133708c1bbcSMiao Xie if (new->flags & MPOL_F_REBINDING) 2134708c1bbcSMiao Xie mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2); 2135708c1bbcSMiao Xie else 2136708c1bbcSMiao Xie mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE); 21374225399aSPaul Jackson } 21381da177e4SLinus Torvalds atomic_set(&new->refcnt, 1); 21391da177e4SLinus Torvalds return new; 21401da177e4SLinus Torvalds } 21411da177e4SLinus Torvalds 21421da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */ 2143fcfb4dccSKOSAKI Motohiro bool __mpol_equal(struct mempolicy *a, struct mempolicy *b) 21441da177e4SLinus Torvalds { 21451da177e4SLinus Torvalds if (!a || !b) 2146fcfb4dccSKOSAKI Motohiro return false; 214745c4745aSLee Schermerhorn if (a->mode != b->mode) 2148fcfb4dccSKOSAKI Motohiro return false; 214919800502SBob Liu if (a->flags != b->flags) 2150fcfb4dccSKOSAKI Motohiro return false; 215119800502SBob Liu if (mpol_store_user_nodemask(a)) 215219800502SBob Liu if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask)) 2153fcfb4dccSKOSAKI Motohiro return false; 215419800502SBob Liu 215545c4745aSLee Schermerhorn switch (a->mode) { 215619770b32SMel Gorman case MPOL_BIND: 215719770b32SMel Gorman /* Fall through */ 21581da177e4SLinus Torvalds case MPOL_INTERLEAVE: 2159fcfb4dccSKOSAKI Motohiro return !!nodes_equal(a->v.nodes, b->v.nodes); 21601da177e4SLinus Torvalds case MPOL_PREFERRED: 216175719661SNamhyung Kim return a->v.preferred_node == b->v.preferred_node; 21621da177e4SLinus Torvalds default: 21631da177e4SLinus Torvalds BUG(); 2164fcfb4dccSKOSAKI Motohiro return false; 21651da177e4SLinus Torvalds } 21661da177e4SLinus Torvalds } 21671da177e4SLinus Torvalds 21681da177e4SLinus Torvalds /* 21691da177e4SLinus Torvalds * Shared memory backing store policy support. 21701da177e4SLinus Torvalds * 21711da177e4SLinus Torvalds * Remember policies even when nobody has shared memory mapped. 21721da177e4SLinus Torvalds * The policies are kept in Red-Black tree linked from the inode. 21731da177e4SLinus Torvalds * They are protected by the sp->lock spinlock, which should be held 21741da177e4SLinus Torvalds * for any accesses to the tree. 
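 *
 * An illustrative case (editorial example): a tmpfs object whose pages
 * 0-15 are interleaved and whose pages 16-31 are bound to node 1 is held
 * as two sp_node ranges, [0,16) and [16,32); a lookup at index 20 finds
 * the second range via sp_lookup() below.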
21751da177e4SLinus Torvalds */ 21761da177e4SLinus Torvalds 21771da177e4SLinus Torvalds /* lookup first element intersecting start-end */ 217842288fe3SMel Gorman /* Caller holds sp->lock */ 21791da177e4SLinus Torvalds static struct sp_node * 21801da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end) 21811da177e4SLinus Torvalds { 21821da177e4SLinus Torvalds struct rb_node *n = sp->root.rb_node; 21831da177e4SLinus Torvalds 21841da177e4SLinus Torvalds while (n) { 21851da177e4SLinus Torvalds struct sp_node *p = rb_entry(n, struct sp_node, nd); 21861da177e4SLinus Torvalds 21871da177e4SLinus Torvalds if (start >= p->end) 21881da177e4SLinus Torvalds n = n->rb_right; 21891da177e4SLinus Torvalds else if (end <= p->start) 21901da177e4SLinus Torvalds n = n->rb_left; 21911da177e4SLinus Torvalds else 21921da177e4SLinus Torvalds break; 21931da177e4SLinus Torvalds } 21941da177e4SLinus Torvalds if (!n) 21951da177e4SLinus Torvalds return NULL; 21961da177e4SLinus Torvalds for (;;) { 21971da177e4SLinus Torvalds struct sp_node *w = NULL; 21981da177e4SLinus Torvalds struct rb_node *prev = rb_prev(n); 21991da177e4SLinus Torvalds if (!prev) 22001da177e4SLinus Torvalds break; 22011da177e4SLinus Torvalds w = rb_entry(prev, struct sp_node, nd); 22021da177e4SLinus Torvalds if (w->end <= start) 22031da177e4SLinus Torvalds break; 22041da177e4SLinus Torvalds n = prev; 22051da177e4SLinus Torvalds } 22061da177e4SLinus Torvalds return rb_entry(n, struct sp_node, nd); 22071da177e4SLinus Torvalds } 22081da177e4SLinus Torvalds 22091da177e4SLinus Torvalds /* Insert a new shared policy into the list. */ 22101da177e4SLinus Torvalds /* Caller holds sp->lock */ 22111da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new) 22121da177e4SLinus Torvalds { 22131da177e4SLinus Torvalds struct rb_node **p = &sp->root.rb_node; 22141da177e4SLinus Torvalds struct rb_node *parent = NULL; 22151da177e4SLinus Torvalds struct sp_node *nd; 22161da177e4SLinus Torvalds 22171da177e4SLinus Torvalds while (*p) { 22181da177e4SLinus Torvalds parent = *p; 22191da177e4SLinus Torvalds nd = rb_entry(parent, struct sp_node, nd); 22201da177e4SLinus Torvalds if (new->start < nd->start) 22211da177e4SLinus Torvalds p = &(*p)->rb_left; 22221da177e4SLinus Torvalds else if (new->end > nd->end) 22231da177e4SLinus Torvalds p = &(*p)->rb_right; 22241da177e4SLinus Torvalds else 22251da177e4SLinus Torvalds BUG(); 22261da177e4SLinus Torvalds } 22271da177e4SLinus Torvalds rb_link_node(&new->nd, parent, p); 22281da177e4SLinus Torvalds rb_insert_color(&new->nd, &sp->root); 2229140d5a49SPaul Mundt pr_debug("inserting %lx-%lx: %d\n", new->start, new->end, 223045c4745aSLee Schermerhorn new->policy ? 
new->policy->mode : 0); 22311da177e4SLinus Torvalds } 22321da177e4SLinus Torvalds 22331da177e4SLinus Torvalds /* Find shared policy intersecting idx */ 22341da177e4SLinus Torvalds struct mempolicy * 22351da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx) 22361da177e4SLinus Torvalds { 22371da177e4SLinus Torvalds struct mempolicy *pol = NULL; 22381da177e4SLinus Torvalds struct sp_node *sn; 22391da177e4SLinus Torvalds 22401da177e4SLinus Torvalds if (!sp->root.rb_node) 22411da177e4SLinus Torvalds return NULL; 224242288fe3SMel Gorman spin_lock(&sp->lock); 22431da177e4SLinus Torvalds sn = sp_lookup(sp, idx, idx+1); 22441da177e4SLinus Torvalds if (sn) { 22451da177e4SLinus Torvalds mpol_get(sn->policy); 22461da177e4SLinus Torvalds pol = sn->policy; 22471da177e4SLinus Torvalds } 224842288fe3SMel Gorman spin_unlock(&sp->lock); 22491da177e4SLinus Torvalds return pol; 22501da177e4SLinus Torvalds } 22511da177e4SLinus Torvalds 225263f74ca2SKOSAKI Motohiro static void sp_free(struct sp_node *n) 225363f74ca2SKOSAKI Motohiro { 225463f74ca2SKOSAKI Motohiro mpol_put(n->policy); 225563f74ca2SKOSAKI Motohiro kmem_cache_free(sn_cache, n); 225663f74ca2SKOSAKI Motohiro } 225763f74ca2SKOSAKI Motohiro 2258771fb4d8SLee Schermerhorn /** 2259771fb4d8SLee Schermerhorn * mpol_misplaced - check whether current page node is valid in policy 2260771fb4d8SLee Schermerhorn * 2261b46e14acSFabian Frederick * @page: page to be checked 2262b46e14acSFabian Frederick * @vma: vm area where page mapped 2263b46e14acSFabian Frederick * @addr: virtual address where page mapped 2264771fb4d8SLee Schermerhorn * 2265771fb4d8SLee Schermerhorn * Lookup current policy node id for vma,addr and "compare to" page's 2266771fb4d8SLee Schermerhorn * node id. 2267771fb4d8SLee Schermerhorn * 2268771fb4d8SLee Schermerhorn * Returns: 2269771fb4d8SLee Schermerhorn * -1 - not misplaced, page is in the right node 2270771fb4d8SLee Schermerhorn * node - node id where the page should be 2271771fb4d8SLee Schermerhorn * 2272771fb4d8SLee Schermerhorn * Policy determination "mimics" alloc_page_vma(). 2273771fb4d8SLee Schermerhorn * Called from fault path where we know the vma and faulting address. 
2274771fb4d8SLee Schermerhorn */ 2275771fb4d8SLee Schermerhorn int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr) 2276771fb4d8SLee Schermerhorn { 2277771fb4d8SLee Schermerhorn struct mempolicy *pol; 2278771fb4d8SLee Schermerhorn struct zone *zone; 2279771fb4d8SLee Schermerhorn int curnid = page_to_nid(page); 2280771fb4d8SLee Schermerhorn unsigned long pgoff; 228190572890SPeter Zijlstra int thiscpu = raw_smp_processor_id(); 228290572890SPeter Zijlstra int thisnid = cpu_to_node(thiscpu); 2283771fb4d8SLee Schermerhorn int polnid = -1; 2284771fb4d8SLee Schermerhorn int ret = -1; 2285771fb4d8SLee Schermerhorn 2286771fb4d8SLee Schermerhorn BUG_ON(!vma); 2287771fb4d8SLee Schermerhorn 2288771fb4d8SLee Schermerhorn pol = get_vma_policy(current, vma, addr); 2289771fb4d8SLee Schermerhorn if (!(pol->flags & MPOL_F_MOF)) 2290771fb4d8SLee Schermerhorn goto out; 2291771fb4d8SLee Schermerhorn 2292771fb4d8SLee Schermerhorn switch (pol->mode) { 2293771fb4d8SLee Schermerhorn case MPOL_INTERLEAVE: 2294771fb4d8SLee Schermerhorn BUG_ON(addr >= vma->vm_end); 2295771fb4d8SLee Schermerhorn BUG_ON(addr < vma->vm_start); 2296771fb4d8SLee Schermerhorn 2297771fb4d8SLee Schermerhorn pgoff = vma->vm_pgoff; 2298771fb4d8SLee Schermerhorn pgoff += (addr - vma->vm_start) >> PAGE_SHIFT; 2299771fb4d8SLee Schermerhorn polnid = offset_il_node(pol, vma, pgoff); 2300771fb4d8SLee Schermerhorn break; 2301771fb4d8SLee Schermerhorn 2302771fb4d8SLee Schermerhorn case MPOL_PREFERRED: 2303771fb4d8SLee Schermerhorn if (pol->flags & MPOL_F_LOCAL) 2304771fb4d8SLee Schermerhorn polnid = numa_node_id(); 2305771fb4d8SLee Schermerhorn else 2306771fb4d8SLee Schermerhorn polnid = pol->v.preferred_node; 2307771fb4d8SLee Schermerhorn break; 2308771fb4d8SLee Schermerhorn 2309771fb4d8SLee Schermerhorn case MPOL_BIND: 2310771fb4d8SLee Schermerhorn /* 2311771fb4d8SLee Schermerhorn * allows binding to multiple nodes. 2312771fb4d8SLee Schermerhorn * use current page if in policy nodemask, 2313771fb4d8SLee Schermerhorn * else select nearest allowed node, if any. 2314771fb4d8SLee Schermerhorn * If no allowed nodes, use current [!misplaced]. 
2315771fb4d8SLee Schermerhorn */ 2316771fb4d8SLee Schermerhorn if (node_isset(curnid, pol->v.nodes)) 2317771fb4d8SLee Schermerhorn goto out; 2318771fb4d8SLee Schermerhorn (void)first_zones_zonelist( 2319771fb4d8SLee Schermerhorn node_zonelist(numa_node_id(), GFP_HIGHUSER), 2320771fb4d8SLee Schermerhorn gfp_zone(GFP_HIGHUSER), 2321771fb4d8SLee Schermerhorn &pol->v.nodes, &zone); 2322771fb4d8SLee Schermerhorn polnid = zone->node; 2323771fb4d8SLee Schermerhorn break; 2324771fb4d8SLee Schermerhorn 2325771fb4d8SLee Schermerhorn default: 2326771fb4d8SLee Schermerhorn BUG(); 2327771fb4d8SLee Schermerhorn } 23285606e387SMel Gorman 23295606e387SMel Gorman /* Migrate the page towards the node whose CPU is referencing it */ 2330e42c8ff2SMel Gorman if (pol->flags & MPOL_F_MORON) { 233190572890SPeter Zijlstra polnid = thisnid; 23325606e387SMel Gorman 233310f39042SRik van Riel if (!should_numa_migrate_memory(current, page, curnid, thiscpu)) 2334de1c9ce6SRik van Riel goto out; 2335de1c9ce6SRik van Riel } 2336e42c8ff2SMel Gorman 2337771fb4d8SLee Schermerhorn if (curnid != polnid) 2338771fb4d8SLee Schermerhorn ret = polnid; 2339771fb4d8SLee Schermerhorn out: 2340771fb4d8SLee Schermerhorn mpol_cond_put(pol); 2341771fb4d8SLee Schermerhorn 2342771fb4d8SLee Schermerhorn return ret; 2343771fb4d8SLee Schermerhorn } 2344771fb4d8SLee Schermerhorn 23451da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n) 23461da177e4SLinus Torvalds { 2347140d5a49SPaul Mundt pr_debug("deleting %lx-%lx\n", n->start, n->end); 23481da177e4SLinus Torvalds rb_erase(&n->nd, &sp->root); 234963f74ca2SKOSAKI Motohiro sp_free(n); 23501da177e4SLinus Torvalds } 23511da177e4SLinus Torvalds 235242288fe3SMel Gorman static void sp_node_init(struct sp_node *node, unsigned long start, 235342288fe3SMel Gorman unsigned long end, struct mempolicy *pol) 235442288fe3SMel Gorman { 235542288fe3SMel Gorman node->start = start; 235642288fe3SMel Gorman node->end = end; 235742288fe3SMel Gorman node->policy = pol; 235842288fe3SMel Gorman } 235942288fe3SMel Gorman 2360dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end, 2361dbcb0f19SAdrian Bunk struct mempolicy *pol) 23621da177e4SLinus Torvalds { 2363869833f2SKOSAKI Motohiro struct sp_node *n; 2364869833f2SKOSAKI Motohiro struct mempolicy *newpol; 23651da177e4SLinus Torvalds 2366869833f2SKOSAKI Motohiro n = kmem_cache_alloc(sn_cache, GFP_KERNEL); 23671da177e4SLinus Torvalds if (!n) 23681da177e4SLinus Torvalds return NULL; 2369869833f2SKOSAKI Motohiro 2370869833f2SKOSAKI Motohiro newpol = mpol_dup(pol); 2371869833f2SKOSAKI Motohiro if (IS_ERR(newpol)) { 2372869833f2SKOSAKI Motohiro kmem_cache_free(sn_cache, n); 2373869833f2SKOSAKI Motohiro return NULL; 2374869833f2SKOSAKI Motohiro } 2375869833f2SKOSAKI Motohiro newpol->flags |= MPOL_F_SHARED; 237642288fe3SMel Gorman sp_node_init(n, start, end, newpol); 2377869833f2SKOSAKI Motohiro 23781da177e4SLinus Torvalds return n; 23791da177e4SLinus Torvalds } 23801da177e4SLinus Torvalds 23811da177e4SLinus Torvalds /* Replace a policy range.
*/ 23821da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start, 23831da177e4SLinus Torvalds unsigned long end, struct sp_node *new) 23841da177e4SLinus Torvalds { 2385b22d127aSMel Gorman struct sp_node *n; 238642288fe3SMel Gorman struct sp_node *n_new = NULL; 238742288fe3SMel Gorman struct mempolicy *mpol_new = NULL; 2388b22d127aSMel Gorman int ret = 0; 23891da177e4SLinus Torvalds 239042288fe3SMel Gorman restart: 239142288fe3SMel Gorman spin_lock(&sp->lock); 23921da177e4SLinus Torvalds n = sp_lookup(sp, start, end); 23931da177e4SLinus Torvalds /* Take care of old policies in the same range. */ 23941da177e4SLinus Torvalds while (n && n->start < end) { 23951da177e4SLinus Torvalds struct rb_node *next = rb_next(&n->nd); 23961da177e4SLinus Torvalds if (n->start >= start) { 23971da177e4SLinus Torvalds if (n->end <= end) 23981da177e4SLinus Torvalds sp_delete(sp, n); 23991da177e4SLinus Torvalds else 24001da177e4SLinus Torvalds n->start = end; 24011da177e4SLinus Torvalds } else { 24021da177e4SLinus Torvalds /* Old policy spanning whole new range. */ 24031da177e4SLinus Torvalds if (n->end > end) { 240442288fe3SMel Gorman if (!n_new) 240542288fe3SMel Gorman goto alloc_new; 240642288fe3SMel Gorman 240742288fe3SMel Gorman *mpol_new = *n->policy; 240842288fe3SMel Gorman atomic_set(&mpol_new->refcnt, 1); 24097880639cSKOSAKI Motohiro sp_node_init(n_new, end, n->end, mpol_new); 24101da177e4SLinus Torvalds n->end = start; 24115ca39575SHillf Danton sp_insert(sp, n_new); 241242288fe3SMel Gorman n_new = NULL; 241342288fe3SMel Gorman mpol_new = NULL; 24141da177e4SLinus Torvalds break; 24151da177e4SLinus Torvalds } else 24161da177e4SLinus Torvalds n->end = start; 24171da177e4SLinus Torvalds } 24181da177e4SLinus Torvalds if (!next) 24191da177e4SLinus Torvalds break; 24201da177e4SLinus Torvalds n = rb_entry(next, struct sp_node, nd); 24211da177e4SLinus Torvalds } 24221da177e4SLinus Torvalds if (new) 24231da177e4SLinus Torvalds sp_insert(sp, new); 242442288fe3SMel Gorman spin_unlock(&sp->lock); 242542288fe3SMel Gorman ret = 0; 242642288fe3SMel Gorman 242742288fe3SMel Gorman err_out: 242842288fe3SMel Gorman if (mpol_new) 242942288fe3SMel Gorman mpol_put(mpol_new); 243042288fe3SMel Gorman if (n_new) 243142288fe3SMel Gorman kmem_cache_free(sn_cache, n_new); 243242288fe3SMel Gorman 2433b22d127aSMel Gorman return ret; 243442288fe3SMel Gorman 243542288fe3SMel Gorman alloc_new: 243642288fe3SMel Gorman spin_unlock(&sp->lock); 243742288fe3SMel Gorman ret = -ENOMEM; 243842288fe3SMel Gorman n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL); 243942288fe3SMel Gorman if (!n_new) 244042288fe3SMel Gorman goto err_out; 244142288fe3SMel Gorman mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL); 244242288fe3SMel Gorman if (!mpol_new) 244342288fe3SMel Gorman goto err_out; 244442288fe3SMel Gorman goto restart; 24451da177e4SLinus Torvalds } 24461da177e4SLinus Torvalds 244771fe804bSLee Schermerhorn /** 244871fe804bSLee Schermerhorn * mpol_shared_policy_init - initialize shared policy for inode 244971fe804bSLee Schermerhorn * @sp: pointer to inode shared policy 245071fe804bSLee Schermerhorn * @mpol: struct mempolicy to install 245171fe804bSLee Schermerhorn * 245271fe804bSLee Schermerhorn * Install non-NULL @mpol in inode's shared policy rb-tree. 245371fe804bSLee Schermerhorn * On entry, the current task has a reference on a non-NULL @mpol. 245471fe804bSLee Schermerhorn * This must be released on exit. 
24554bfc4495SKAMEZAWA Hiroyuki * This is called at get_inode() calls and we can use GFP_KERNEL. 245671fe804bSLee Schermerhorn */ 245771fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) 24587339ff83SRobin Holt { 245958568d2aSMiao Xie int ret; 246058568d2aSMiao Xie 246171fe804bSLee Schermerhorn sp->root = RB_ROOT; /* empty tree == default mempolicy */ 246242288fe3SMel Gorman spin_lock_init(&sp->lock); 24637339ff83SRobin Holt 246471fe804bSLee Schermerhorn if (mpol) { 24657339ff83SRobin Holt struct vm_area_struct pvma; 246671fe804bSLee Schermerhorn struct mempolicy *new; 24674bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 24687339ff83SRobin Holt 24694bfc4495SKAMEZAWA Hiroyuki if (!scratch) 24705c0c1654SLee Schermerhorn goto put_mpol; 247171fe804bSLee Schermerhorn /* contextualize the tmpfs mount point mempolicy */ 247271fe804bSLee Schermerhorn new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask); 247315d77835SLee Schermerhorn if (IS_ERR(new)) 24740cae3457SDan Carpenter goto free_scratch; /* no valid nodemask intersection */ 247558568d2aSMiao Xie 247658568d2aSMiao Xie task_lock(current); 24774bfc4495SKAMEZAWA Hiroyuki ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch); 247858568d2aSMiao Xie task_unlock(current); 247915d77835SLee Schermerhorn if (ret) 24805c0c1654SLee Schermerhorn goto put_new; 248171fe804bSLee Schermerhorn 248271fe804bSLee Schermerhorn /* Create pseudo-vma that contains just the policy */ 24837339ff83SRobin Holt memset(&pvma, 0, sizeof(struct vm_area_struct)); 248471fe804bSLee Schermerhorn pvma.vm_end = TASK_SIZE; /* policy covers entire file */ 248571fe804bSLee Schermerhorn mpol_set_shared_policy(sp, &pvma, new); /* adds ref */ 248615d77835SLee Schermerhorn 24875c0c1654SLee Schermerhorn put_new: 248871fe804bSLee Schermerhorn mpol_put(new); /* drop initial ref */ 24890cae3457SDan Carpenter free_scratch: 24904bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 24915c0c1654SLee Schermerhorn put_mpol: 24925c0c1654SLee Schermerhorn mpol_put(mpol); /* drop our incoming ref on sb mpol */ 24937339ff83SRobin Holt } 24947339ff83SRobin Holt } 24957339ff83SRobin Holt 24961da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info, 24971da177e4SLinus Torvalds struct vm_area_struct *vma, struct mempolicy *npol) 24981da177e4SLinus Torvalds { 24991da177e4SLinus Torvalds int err; 25001da177e4SLinus Torvalds struct sp_node *new = NULL; 25011da177e4SLinus Torvalds unsigned long sz = vma_pages(vma); 25021da177e4SLinus Torvalds 2503028fec41SDavid Rientjes pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n", 25041da177e4SLinus Torvalds vma->vm_pgoff, 250545c4745aSLee Schermerhorn sz, npol ? npol->mode : -1, 2506028fec41SDavid Rientjes npol ? npol->flags : -1, 250700ef2d2fSDavid Rientjes npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE); 25081da177e4SLinus Torvalds 25091da177e4SLinus Torvalds if (npol) { 25101da177e4SLinus Torvalds new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol); 25111da177e4SLinus Torvalds if (!new) 25121da177e4SLinus Torvalds return -ENOMEM; 25131da177e4SLinus Torvalds } 25141da177e4SLinus Torvalds err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new); 25151da177e4SLinus Torvalds if (err && new) 251663f74ca2SKOSAKI Motohiro sp_free(new); 25171da177e4SLinus Torvalds return err; 25181da177e4SLinus Torvalds } 25191da177e4SLinus Torvalds 25201da177e4SLinus Torvalds /* Free a backing policy store on inode delete. 
*/ 25211da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p) 25221da177e4SLinus Torvalds { 25231da177e4SLinus Torvalds struct sp_node *n; 25241da177e4SLinus Torvalds struct rb_node *next; 25251da177e4SLinus Torvalds 25261da177e4SLinus Torvalds if (!p->root.rb_node) 25271da177e4SLinus Torvalds return; 252842288fe3SMel Gorman spin_lock(&p->lock); 25291da177e4SLinus Torvalds next = rb_first(&p->root); 25301da177e4SLinus Torvalds while (next) { 25311da177e4SLinus Torvalds n = rb_entry(next, struct sp_node, nd); 25321da177e4SLinus Torvalds next = rb_next(&n->nd); 253363f74ca2SKOSAKI Motohiro sp_delete(p, n); 25341da177e4SLinus Torvalds } 253542288fe3SMel Gorman spin_unlock(&p->lock); 25361da177e4SLinus Torvalds } 25371da177e4SLinus Torvalds 25381a687c2eSMel Gorman #ifdef CONFIG_NUMA_BALANCING 2539c297663cSMel Gorman static int __initdata numabalancing_override; 25401a687c2eSMel Gorman 25411a687c2eSMel Gorman static void __init check_numabalancing_enable(void) 25421a687c2eSMel Gorman { 25431a687c2eSMel Gorman bool numabalancing_default = false; 25441a687c2eSMel Gorman 25451a687c2eSMel Gorman if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED)) 25461a687c2eSMel Gorman numabalancing_default = true; 25471a687c2eSMel Gorman 2548c297663cSMel Gorman /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */ 2549c297663cSMel Gorman if (numabalancing_override) 2550c297663cSMel Gorman set_numabalancing_state(numabalancing_override == 1); 2551c297663cSMel Gorman 25521a687c2eSMel Gorman if (nr_node_ids > 1 && !numabalancing_override) { 25534a404beaSAndrew Morton pr_info("%s automatic NUMA balancing. " 2554c297663cSMel Gorman "Configure with numa_balancing= or the " 2555c297663cSMel Gorman "kernel.numa_balancing sysctl", 2556c297663cSMel Gorman numabalancing_default ? 
"Enabling" : "Disabling"); 25571a687c2eSMel Gorman set_numabalancing_state(numabalancing_default); 25581a687c2eSMel Gorman } 25591a687c2eSMel Gorman } 25601a687c2eSMel Gorman 25611a687c2eSMel Gorman static int __init setup_numabalancing(char *str) 25621a687c2eSMel Gorman { 25631a687c2eSMel Gorman int ret = 0; 25641a687c2eSMel Gorman if (!str) 25651a687c2eSMel Gorman goto out; 25661a687c2eSMel Gorman 25671a687c2eSMel Gorman if (!strcmp(str, "enable")) { 2568c297663cSMel Gorman numabalancing_override = 1; 25691a687c2eSMel Gorman ret = 1; 25701a687c2eSMel Gorman } else if (!strcmp(str, "disable")) { 2571c297663cSMel Gorman numabalancing_override = -1; 25721a687c2eSMel Gorman ret = 1; 25731a687c2eSMel Gorman } 25741a687c2eSMel Gorman out: 25751a687c2eSMel Gorman if (!ret) 25764a404beaSAndrew Morton pr_warn("Unable to parse numa_balancing=\n"); 25771a687c2eSMel Gorman 25781a687c2eSMel Gorman return ret; 25791a687c2eSMel Gorman } 25801a687c2eSMel Gorman __setup("numa_balancing=", setup_numabalancing); 25811a687c2eSMel Gorman #else 25821a687c2eSMel Gorman static inline void __init check_numabalancing_enable(void) 25831a687c2eSMel Gorman { 25841a687c2eSMel Gorman } 25851a687c2eSMel Gorman #endif /* CONFIG_NUMA_BALANCING */ 25861a687c2eSMel Gorman 25871da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */ 25881da177e4SLinus Torvalds void __init numa_policy_init(void) 25891da177e4SLinus Torvalds { 2590b71636e2SPaul Mundt nodemask_t interleave_nodes; 2591b71636e2SPaul Mundt unsigned long largest = 0; 2592b71636e2SPaul Mundt int nid, prefer = 0; 2593b71636e2SPaul Mundt 25941da177e4SLinus Torvalds policy_cache = kmem_cache_create("numa_policy", 25951da177e4SLinus Torvalds sizeof(struct mempolicy), 259620c2df83SPaul Mundt 0, SLAB_PANIC, NULL); 25971da177e4SLinus Torvalds 25981da177e4SLinus Torvalds sn_cache = kmem_cache_create("shared_policy_node", 25991da177e4SLinus Torvalds sizeof(struct sp_node), 260020c2df83SPaul Mundt 0, SLAB_PANIC, NULL); 26011da177e4SLinus Torvalds 26025606e387SMel Gorman for_each_node(nid) { 26035606e387SMel Gorman preferred_node_policy[nid] = (struct mempolicy) { 26045606e387SMel Gorman .refcnt = ATOMIC_INIT(1), 26055606e387SMel Gorman .mode = MPOL_PREFERRED, 26065606e387SMel Gorman .flags = MPOL_F_MOF | MPOL_F_MORON, 26075606e387SMel Gorman .v = { .preferred_node = nid, }, 26085606e387SMel Gorman }; 26095606e387SMel Gorman } 26105606e387SMel Gorman 2611b71636e2SPaul Mundt /* 2612b71636e2SPaul Mundt * Set interleaving policy for system init. Interleaving is only 2613b71636e2SPaul Mundt * enabled across suitably sized nodes (default is >= 16MB), or 2614b71636e2SPaul Mundt * fall back to the largest node if they're all smaller. 2615b71636e2SPaul Mundt */ 2616b71636e2SPaul Mundt nodes_clear(interleave_nodes); 261701f13bd6SLai Jiangshan for_each_node_state(nid, N_MEMORY) { 2618b71636e2SPaul Mundt unsigned long total_pages = node_present_pages(nid); 26191da177e4SLinus Torvalds 2620b71636e2SPaul Mundt /* Preserve the largest node */ 2621b71636e2SPaul Mundt if (largest < total_pages) { 2622b71636e2SPaul Mundt largest = total_pages; 2623b71636e2SPaul Mundt prefer = nid; 2624b71636e2SPaul Mundt } 2625b71636e2SPaul Mundt 2626b71636e2SPaul Mundt /* Interleave this node? 
*/ 2627b71636e2SPaul Mundt if ((total_pages << PAGE_SHIFT) >= (16 << 20)) 2628b71636e2SPaul Mundt node_set(nid, interleave_nodes); 2629b71636e2SPaul Mundt } 2630b71636e2SPaul Mundt 2631b71636e2SPaul Mundt /* All too small, use the largest */ 2632b71636e2SPaul Mundt if (unlikely(nodes_empty(interleave_nodes))) 2633b71636e2SPaul Mundt node_set(prefer, interleave_nodes); 2634b71636e2SPaul Mundt 2635028fec41SDavid Rientjes if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes)) 2636b1de0d13SMitchel Humpherys pr_err("%s: interleaving failed\n", __func__); 26371a687c2eSMel Gorman 26381a687c2eSMel Gorman check_numabalancing_enable(); 26391da177e4SLinus Torvalds } 26401da177e4SLinus Torvalds 26418bccd85fSChristoph Lameter /* Reset policy of current process to default */ 26421da177e4SLinus Torvalds void numa_default_policy(void) 26431da177e4SLinus Torvalds { 2644028fec41SDavid Rientjes do_set_mempolicy(MPOL_DEFAULT, 0, NULL); 26451da177e4SLinus Torvalds } 264668860ec1SPaul Jackson 26474225399aSPaul Jackson /* 2648095f1fc4SLee Schermerhorn * Parse and format mempolicy from/to strings 2649095f1fc4SLee Schermerhorn */ 2650095f1fc4SLee Schermerhorn 2651095f1fc4SLee Schermerhorn /* 2652f2a07f40SHugh Dickins * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag. 26531a75a6c8SChristoph Lameter */ 2654345ace9cSLee Schermerhorn static const char * const policy_modes[] = 2655345ace9cSLee Schermerhorn { 2656345ace9cSLee Schermerhorn [MPOL_DEFAULT] = "default", 2657345ace9cSLee Schermerhorn [MPOL_PREFERRED] = "prefer", 2658345ace9cSLee Schermerhorn [MPOL_BIND] = "bind", 2659345ace9cSLee Schermerhorn [MPOL_INTERLEAVE] = "interleave", 2660d3a71033SLee Schermerhorn [MPOL_LOCAL] = "local", 2661345ace9cSLee Schermerhorn }; 26621a75a6c8SChristoph Lameter 2663095f1fc4SLee Schermerhorn 2664095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS 2665095f1fc4SLee Schermerhorn /** 2666f2a07f40SHugh Dickins * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option. 2667095f1fc4SLee Schermerhorn * @str: string containing mempolicy to parse 266871fe804bSLee Schermerhorn * @mpol: pointer to struct mempolicy pointer, returned on success. 
2669095f1fc4SLee Schermerhorn * 2670095f1fc4SLee Schermerhorn * Format of input: 2671095f1fc4SLee Schermerhorn * <mode>[=<flags>][:<nodelist>] 2672095f1fc4SLee Schermerhorn * 267371fe804bSLee Schermerhorn * On success, returns 0, else 1 2674095f1fc4SLee Schermerhorn */ 2675a7a88b23SHugh Dickins int mpol_parse_str(char *str, struct mempolicy **mpol) 2676095f1fc4SLee Schermerhorn { 267771fe804bSLee Schermerhorn struct mempolicy *new = NULL; 2678b4652e84SLee Schermerhorn unsigned short mode; 2679f2a07f40SHugh Dickins unsigned short mode_flags; 268071fe804bSLee Schermerhorn nodemask_t nodes; 2681095f1fc4SLee Schermerhorn char *nodelist = strchr(str, ':'); 2682095f1fc4SLee Schermerhorn char *flags = strchr(str, '='); 2683095f1fc4SLee Schermerhorn int err = 1; 2684095f1fc4SLee Schermerhorn 2685095f1fc4SLee Schermerhorn if (nodelist) { 2686095f1fc4SLee Schermerhorn /* NUL-terminate mode or flags string */ 2687095f1fc4SLee Schermerhorn *nodelist++ = '\0'; 268871fe804bSLee Schermerhorn if (nodelist_parse(nodelist, nodes)) 2689095f1fc4SLee Schermerhorn goto out; 269001f13bd6SLai Jiangshan if (!nodes_subset(nodes, node_states[N_MEMORY])) 2691095f1fc4SLee Schermerhorn goto out; 269271fe804bSLee Schermerhorn } else 269371fe804bSLee Schermerhorn nodes_clear(nodes); 269471fe804bSLee Schermerhorn 2695095f1fc4SLee Schermerhorn if (flags) 2696095f1fc4SLee Schermerhorn *flags++ = '\0'; /* terminate mode string */ 2697095f1fc4SLee Schermerhorn 2698479e2802SPeter Zijlstra for (mode = 0; mode < MPOL_MAX; mode++) { 2699345ace9cSLee Schermerhorn if (!strcmp(str, policy_modes[mode])) { 2700095f1fc4SLee Schermerhorn break; 2701095f1fc4SLee Schermerhorn } 2702095f1fc4SLee Schermerhorn } 2703a720094dSMel Gorman if (mode >= MPOL_MAX) 2704095f1fc4SLee Schermerhorn goto out; 2705095f1fc4SLee Schermerhorn 270671fe804bSLee Schermerhorn switch (mode) { 2707095f1fc4SLee Schermerhorn case MPOL_PREFERRED: 270871fe804bSLee Schermerhorn /* 270971fe804bSLee Schermerhorn * Insist on a nodelist of one node only 271071fe804bSLee Schermerhorn */ 2711095f1fc4SLee Schermerhorn if (nodelist) { 2712095f1fc4SLee Schermerhorn char *rest = nodelist; 2713095f1fc4SLee Schermerhorn while (isdigit(*rest)) 2714095f1fc4SLee Schermerhorn rest++; 2715926f2ae0SKOSAKI Motohiro if (*rest) 2716926f2ae0SKOSAKI Motohiro goto out; 2717095f1fc4SLee Schermerhorn } 2718095f1fc4SLee Schermerhorn break; 2719095f1fc4SLee Schermerhorn case MPOL_INTERLEAVE: 2720095f1fc4SLee Schermerhorn /* 2721095f1fc4SLee Schermerhorn * Default to online nodes with memory if no nodelist 2722095f1fc4SLee Schermerhorn */ 2723095f1fc4SLee Schermerhorn if (!nodelist) 272401f13bd6SLai Jiangshan nodes = node_states[N_MEMORY]; 27253f226aa1SLee Schermerhorn break; 272671fe804bSLee Schermerhorn case MPOL_LOCAL: 27273f226aa1SLee Schermerhorn /* 272871fe804bSLee Schermerhorn * Don't allow a nodelist; mpol_new() checks flags 27293f226aa1SLee Schermerhorn */ 273071fe804bSLee Schermerhorn if (nodelist) 27313f226aa1SLee Schermerhorn goto out; 273271fe804bSLee Schermerhorn mode = MPOL_PREFERRED; 27333f226aa1SLee Schermerhorn break; 2734413b43deSRavikiran G Thirumalai case MPOL_DEFAULT: 2735413b43deSRavikiran G Thirumalai /* 2736413b43deSRavikiran G Thirumalai * Insist on an empty nodelist 2737413b43deSRavikiran G Thirumalai */ 2738413b43deSRavikiran G Thirumalai if (!nodelist) 2739413b43deSRavikiran G Thirumalai err = 0; 2740413b43deSRavikiran G Thirumalai goto out; 2741d69b2e63SKOSAKI Motohiro case MPOL_BIND: 274271fe804bSLee Schermerhorn /* 2743d69b2e63SKOSAKI Motohiro * Insist on a nodelist
274471fe804bSLee Schermerhorn */ 2745d69b2e63SKOSAKI Motohiro if (!nodelist) 2746d69b2e63SKOSAKI Motohiro goto out; 2747095f1fc4SLee Schermerhorn } 2748095f1fc4SLee Schermerhorn 274971fe804bSLee Schermerhorn mode_flags = 0; 2750095f1fc4SLee Schermerhorn if (flags) { 2751095f1fc4SLee Schermerhorn /* 2752095f1fc4SLee Schermerhorn * Currently, we only support two mutually exclusive 2753095f1fc4SLee Schermerhorn * mode flags. 2754095f1fc4SLee Schermerhorn */ 2755095f1fc4SLee Schermerhorn if (!strcmp(flags, "static")) 275671fe804bSLee Schermerhorn mode_flags |= MPOL_F_STATIC_NODES; 2757095f1fc4SLee Schermerhorn else if (!strcmp(flags, "relative")) 275871fe804bSLee Schermerhorn mode_flags |= MPOL_F_RELATIVE_NODES; 2759095f1fc4SLee Schermerhorn else 2760926f2ae0SKOSAKI Motohiro goto out; 2761095f1fc4SLee Schermerhorn } 276271fe804bSLee Schermerhorn 276371fe804bSLee Schermerhorn new = mpol_new(mode, mode_flags, &nodes); 276471fe804bSLee Schermerhorn if (IS_ERR(new)) 2765926f2ae0SKOSAKI Motohiro goto out; 2766926f2ae0SKOSAKI Motohiro 2767f2a07f40SHugh Dickins /* 2768f2a07f40SHugh Dickins * Save nodes for mpol_to_str() to show the tmpfs mount options 2769f2a07f40SHugh Dickins * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo. 2770f2a07f40SHugh Dickins */ 2771f2a07f40SHugh Dickins if (mode != MPOL_PREFERRED) 2772f2a07f40SHugh Dickins new->v.nodes = nodes; 2773f2a07f40SHugh Dickins else if (nodelist) 2774f2a07f40SHugh Dickins new->v.preferred_node = first_node(nodes); 2775f2a07f40SHugh Dickins else 2776f2a07f40SHugh Dickins new->flags |= MPOL_F_LOCAL; 2777f2a07f40SHugh Dickins 2778f2a07f40SHugh Dickins /* 2779f2a07f40SHugh Dickins * Save nodes for contextualization: this will be used to "clone" 2780f2a07f40SHugh Dickins * the mempolicy in a specific context [cpuset] at a later time. 2781f2a07f40SHugh Dickins */ 2782e17f74afSLee Schermerhorn new->w.user_nodemask = nodes; 2783f2a07f40SHugh Dickins 2784926f2ae0SKOSAKI Motohiro err = 0; 278571fe804bSLee Schermerhorn 2786095f1fc4SLee Schermerhorn out: 2787095f1fc4SLee Schermerhorn /* Restore string for error message */ 2788095f1fc4SLee Schermerhorn if (nodelist) 2789095f1fc4SLee Schermerhorn *--nodelist = ':'; 2790095f1fc4SLee Schermerhorn if (flags) 2791095f1fc4SLee Schermerhorn *--flags = '='; 279271fe804bSLee Schermerhorn if (!err) 279371fe804bSLee Schermerhorn *mpol = new; 2794095f1fc4SLee Schermerhorn return err; 2795095f1fc4SLee Schermerhorn } 2796095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */ 2797095f1fc4SLee Schermerhorn 279871fe804bSLee Schermerhorn /** 279971fe804bSLee Schermerhorn * mpol_to_str - format a mempolicy structure for printing 280071fe804bSLee Schermerhorn * @buffer: to contain formatted mempolicy string 280171fe804bSLee Schermerhorn * @maxlen: length of @buffer 280271fe804bSLee Schermerhorn * @pol: pointer to mempolicy to be formatted 280371fe804bSLee Schermerhorn * 2804948927eeSDavid Rientjes * Convert @pol into a string. If @buffer is too short, truncate the string. 2805948927eeSDavid Rientjes * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the 2806948927eeSDavid Rientjes * longest flag, "relative", and to display at least a few node ids. 
28071a75a6c8SChristoph Lameter */ 2808948927eeSDavid Rientjes void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol) 28091a75a6c8SChristoph Lameter { 28101a75a6c8SChristoph Lameter char *p = buffer; 2811948927eeSDavid Rientjes nodemask_t nodes = NODE_MASK_NONE; 2812948927eeSDavid Rientjes unsigned short mode = MPOL_DEFAULT; 2813948927eeSDavid Rientjes unsigned short flags = 0; 28141a75a6c8SChristoph Lameter 28158790c71aSDavid Rientjes if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) { 2816bea904d5SLee Schermerhorn mode = pol->mode; 2817948927eeSDavid Rientjes flags = pol->flags; 2818948927eeSDavid Rientjes } 2819bea904d5SLee Schermerhorn 28201a75a6c8SChristoph Lameter switch (mode) { 28211a75a6c8SChristoph Lameter case MPOL_DEFAULT: 28221a75a6c8SChristoph Lameter break; 28231a75a6c8SChristoph Lameter case MPOL_PREFERRED: 2824fc36b8d3SLee Schermerhorn if (flags & MPOL_F_LOCAL) 2825f2a07f40SHugh Dickins mode = MPOL_LOCAL; 282653f2556bSLee Schermerhorn else 2827fc36b8d3SLee Schermerhorn node_set(pol->v.preferred_node, nodes); 28281a75a6c8SChristoph Lameter break; 28291a75a6c8SChristoph Lameter case MPOL_BIND: 28301a75a6c8SChristoph Lameter case MPOL_INTERLEAVE: 28311a75a6c8SChristoph Lameter nodes = pol->v.nodes; 28321a75a6c8SChristoph Lameter break; 28331a75a6c8SChristoph Lameter default: 2834948927eeSDavid Rientjes WARN_ON_ONCE(1); 2835948927eeSDavid Rientjes snprintf(p, maxlen, "unknown"); 2836948927eeSDavid Rientjes return; 28371a75a6c8SChristoph Lameter } 28381a75a6c8SChristoph Lameter 2839b7a9f420SDavid Rientjes p += snprintf(p, maxlen, "%s", policy_modes[mode]); 28401a75a6c8SChristoph Lameter 2841fc36b8d3SLee Schermerhorn if (flags & MPOL_MODE_FLAGS) { 2842948927eeSDavid Rientjes p += snprintf(p, buffer + maxlen - p, "="); 2843f5b087b5SDavid Rientjes 28442291990aSLee Schermerhorn /* 28452291990aSLee Schermerhorn * Currently, the only defined flags are mutually exclusive 28462291990aSLee Schermerhorn */ 2847f5b087b5SDavid Rientjes if (flags & MPOL_F_STATIC_NODES) 28482291990aSLee Schermerhorn p += snprintf(p, buffer + maxlen - p, "static"); 28492291990aSLee Schermerhorn else if (flags & MPOL_F_RELATIVE_NODES) 28502291990aSLee Schermerhorn p += snprintf(p, buffer + maxlen - p, "relative"); 2851f5b087b5SDavid Rientjes } 2852f5b087b5SDavid Rientjes 28531a75a6c8SChristoph Lameter if (!nodes_empty(nodes)) { 2854948927eeSDavid Rientjes p += snprintf(p, buffer + maxlen - p, ":"); 28551a75a6c8SChristoph Lameter p += nodelist_scnprintf(p, buffer + maxlen - p, nodes); 28561a75a6c8SChristoph Lameter } 28571a75a6c8SChristoph Lameter } 2858
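
The mempolicy string format documented for mpol_parse_str() and mpol_to_str() above can be illustrated with a short sketch. This is not part of mm/mempolicy.c: example_mpol_string_roundtrip() and its buffers are invented for illustration, it assumes CONFIG_TMPFS (mpol_parse_str() is only built then) and that nodes 0-3 are present in node_states[N_MEMORY]; only mpol_parse_str(), mpol_to_str(), mpol_put() and pr_info() are existing interfaces.

/* Illustrative sketch only -- not part of the kernel source. */
static void example_mpol_string_roundtrip(void)
{
	char str[] = "interleave=static:0-3";	/* writable copy: parsing writes NULs into it */
	char buf[64];
	struct mempolicy *mpol;

	/* mpol_parse_str() returns 0 on success, 1 on failure */
	if (mpol_parse_str(str, &mpol))
		return;

	/* formats the policy back into "interleave=static:0-3" form */
	mpol_to_str(buf, sizeof(buf), mpol);
	pr_info("parsed mempolicy: %s\n", buf);

	mpol_put(mpol);	/* drop the reference returned by the parser */
}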
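
A worked example of the range splitting performed by shared_policy_replace() above; the page offsets and policy names are made up for illustration.

/*
 * Illustrative walk-through (not kernel source): the tree holds a single
 * node covering file pages [0, 16) with policy A, and
 * mpol_set_shared_policy() installs policy B over [4, 8).  Since the old
 * node spans the whole new range, shared_policy_replace():
 *   1. drops sp->lock, allocates n_new and mpol_new, and restarts;
 *   2. copies A into mpol_new and initializes n_new as [8, 16);
 *   3. truncates the old node to [0, 4) and inserts n_new;
 *   4. inserts the new node B as [4, 8).
 * The tree then contains [0,4)=A, [4,8)=B, [8,16)=A.
 */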