/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a per-process
 *                counter is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case NUMA_NO_NODE here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem, kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
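/*
 * Illustrative userspace sketch (not part of the kernel): the policies
 * described above are installed with the set_mempolicy(2) and mbind(2)
 * system calls.  A minimal, hedged example, assuming <numaif.h> from
 * libnuma is available, that interleaves the calling task's future
 * allocations across nodes 0 and 1:
 *
 *	#include <numaif.h>
 *	#include <stdlib.h>
 *
 *	int main(void)
 *	{
 *		unsigned long nodemask = (1UL << 0) | (1UL << 1);
 *
 *		if (set_mempolicy(MPOL_INTERLEAVE, &nodemask,
 *				  8 * sizeof(nodemask)) != 0)
 *			return 1;
 *		malloc(1 << 20);	(pages interleave when first touched)
 *		return 0;
 *	}
 */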
/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <linux/random.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;
/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

struct mempolicy *get_task_policy(struct task_struct *p)
{
	struct mempolicy *pol = p->mempolicy;
	int node;

	if (pol)
		return pol;

	node = numa_node_id();
	if (node != NUMA_NO_NODE) {
		pol = &preferred_node_policy[node];
		/* preferred_node_policy is not initialised early in boot */
		if (pol->mode)
			return pol;
	}

	return &default_policy;
}
static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	/*
	 * If a read-side task has no lock to protect task->mempolicy, the
	 * write-side task will rebind task->mempolicy in two steps. The
	 * first step sets all the newly allowed nodes, and the second step
	 * clears all the disallowed nodes. In this way, we can avoid
	 * finding no node from which to allocate a page.
	 * If we have a lock to protect task->mempolicy in read-side, we do
	 * rebind directly.
	 *
	 * step:
	 *	MPOL_REBIND_ONCE  - do rebind work at once
	 *	MPOL_REBIND_STEP1 - set all the newly allowed nodes
	 *	MPOL_REBIND_STEP2 - clean all the disallowed nodes
	 */
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
			enum mpol_rebind_step step);
} mpol_ops[MPOL_MAX];

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}
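/*
 * Worked example (illustrative only) of the MPOL_F_RELATIVE_NODES remap
 * done by mpol_relative_nodemask() above: with a user mask of {0,1} and
 * an allowed mask of {4,5,6}, nodes_fold() folds the user mask modulo
 * nodes_weight({4,5,6}) == 3, still giving {0,1}, and nodes_onto() then
 * maps bit i to the i-th set bit of the allowed mask, yielding {4,5}.
 * A user mask of {3} would first fold to {0} and come out as {4}.
 */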
static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!nodes)
		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
	else if (nodes_empty(*nodes))
		return -EINVAL;			/* no allowed nodes */
	else
		pol->v.preferred_node = first_node(*nodes);
	return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy.  mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.  But, we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy.  May also be called holding the mmap_semaphore for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
	if (pol == NULL)
		return 0;
	/* Check N_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_MEMORY]);

	VM_BUG_ON(!nodes);
	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
		nodes = NULL;	/* explicit local allocation */
	else {
		if (pol->flags & MPOL_F_RELATIVE_NODES)
			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
		else
			nodes_and(nsc->mask2, *nodes, nsc->mask1);

		if (mpol_store_user_nodemask(pol))
			pol->w.user_nodemask = *nodes;
		else
			pol->w.cpuset_mems_allowed =
						cpuset_current_mems_allowed;
	}

	if (nodes)
		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	else
		ret = mpol_ops[pol->mode].create(pol, NULL);
	return ret;
}
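/*
 * Illustrative example (assumed values): if the task's cpuset allows
 * nodes {2,3} and userspace passes MPOL_BIND over {0,1,2,3} with no mode
 * flags, the effective mask handed to create() above is the intersection
 * {2,3}, and w.cpuset_mems_allowed records the current allowed mask for
 * later rebinds.  With MPOL_F_STATIC_NODES the user mask is remembered
 * verbatim in w.user_nodemask instead; with MPOL_F_RELATIVE_NODES it is
 * remapped relative to the allowed mask as in the
 * mpol_relative_nodemask() example further up.
 */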
/*
 * This function just creates a new policy, does some checks and simple
 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);
		}
	} else if (mode == MPOL_LOCAL) {
		if (!nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		mode = MPOL_PREFERRED;
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	return policy;
}
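/*
 * Quick reference (illustrative) for how mpol_new() above treats its
 * inputs:
 *
 *	mode		nodes		result
 *	MPOL_DEFAULT	empty/NULL	NULL (fall back to surrounding policy)
 *	MPOL_DEFAULT	non-empty	ERR_PTR(-EINVAL)
 *	MPOL_PREFERRED	empty		local allocation (unless STATIC or
 *					RELATIVE flags were given)
 *	MPOL_LOCAL	empty		rewritten to MPOL_PREFERRED + local
 *	other modes	empty		ERR_PTR(-EINVAL)
 */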
/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
				enum mpol_rebind_step step)
{
}

/*
 * step:
 *	MPOL_REBIND_ONCE  - do rebind work at once
 *	MPOL_REBIND_STEP1 - set all the newly allowed nodes
 *	MPOL_REBIND_STEP2 - clean all the disallowed nodes
 */
static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
				 enum mpol_rebind_step step)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		/*
		 * if step == 1, we use ->w.cpuset_mems_allowed to cache the
		 * result
		 */
		if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
			nodes_remap(tmp, pol->v.nodes,
					pol->w.cpuset_mems_allowed, *nodes);
			pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
		} else if (step == MPOL_REBIND_STEP2) {
			tmp = pol->w.cpuset_mems_allowed;
			pol->w.cpuset_mems_allowed = *nodes;
		} else
			BUG();
	}

	if (nodes_empty(tmp))
		tmp = *nodes;

	if (step == MPOL_REBIND_STEP1)
		nodes_or(pol->v.nodes, pol->v.nodes, tmp);
	else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
		pol->v.nodes = tmp;
	else
		BUG();

	if (!node_isset(current->il_next, tmp)) {
		current->il_next = next_node(current->il_next, tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = first_node(tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = numa_node_id();
	}
}

static void mpol_rebind_preferred(struct mempolicy *pol,
				  const nodemask_t *nodes,
				  enum mpol_rebind_step step)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES) {
		int node = first_node(pol->w.user_nodemask);

		if (node_isset(node, *nodes)) {
			pol->v.preferred_node = node;
			pol->flags &= ~MPOL_F_LOCAL;
		} else
			pol->flags |= MPOL_F_LOCAL;
	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
		pol->v.preferred_node = first_node(tmp);
	} else if (!(pol->flags & MPOL_F_LOCAL)) {
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						   pol->w.cpuset_mems_allowed,
						   *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}
}
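/*
 * Worked example of the two-step rebind (illustrative, default flags):
 * an MPOL_BIND policy over {0,1} being rebound to {2,3}.  STEP1 remaps
 * {0,1} -> {2,3} and ORs the result in, leaving v.nodes == {0,1,2,3},
 * so a lockless reader always sees at least one allowed node.  STEP2
 * then installs the remapped mask, leaving v.nodes == {2,3}.
 * MPOL_REBIND_ONCE performs the same remap in a single assignment for
 * callers that hold a lock.
 */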
/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * If a read-side task has no lock to protect task->mempolicy, the
 * write-side task will rebind task->mempolicy in two steps. The first
 * step sets all the newly allowed nodes, and the second step clears all
 * the disallowed nodes. In this way, we can avoid finding no node from
 * which to allocate a page.
 * If we have a lock to protect task->mempolicy in read-side, we do
 * rebind directly.
 *
 * step:
 *	MPOL_REBIND_ONCE  - do rebind work at once
 *	MPOL_REBIND_STEP1 - set all the newly allowed nodes
 *	MPOL_REBIND_STEP2 - clean all the disallowed nodes
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
				enum mpol_rebind_step step)
{
	if (!pol)
		return;
	if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;

	if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
		return;

	if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
		BUG();

	if (step == MPOL_REBIND_STEP1)
		pol->flags |= MPOL_F_REBINDING;
	else if (step == MPOL_REBIND_STEP2)
		pol->flags &= ~MPOL_F_REBINDING;
	else if (step >= MPOL_REBIND_NSTEP)
		BUG();

	mpol_ops[pol->mode].rebind(pol, newmask, step);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */
void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
			enum mpol_rebind_step step)
{
	mpol_rebind_policy(tsk->mempolicy, new, step);
}
/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
 */
void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
	up_write(&mm->mmap_sem);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_interleave,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,
	},
};

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);
/*
 * Scan through pages checking if pages follow certain conditions,
 * and move them to the pagelist if they do.
 */
static int queue_pages_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pte_t *orig_pte;
	pte_t *pte;
	spinlock_t *ptl;

	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		struct page *page;
		int nid;

		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * vm_normal_page() filters out zero pages, but there might
		 * still be PageReserved pages to skip, perhaps in a VDSO.
		 */
		if (PageReserved(page))
			continue;
		nid = page_to_nid(page);
		if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
			continue;

		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
			migrate_page_add(page, private, flags);
		else
			break;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return addr != end;
}

static void queue_pages_hugetlb_pmd_range(struct vm_area_struct *vma,
		pmd_t *pmd, const nodemask_t *nodes, unsigned long flags,
		void *private)
{
#ifdef CONFIG_HUGETLB_PAGE
	int nid;
	struct page *page;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(vma), vma->vm_mm, (pte_t *)pmd);
	entry = huge_ptep_get((pte_t *)pmd);
	if (!pte_present(entry))
		goto unlock;
	page = pte_page(entry);
	nid = page_to_nid(page);
	if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
		goto unlock;
	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
	if (flags & (MPOL_MF_MOVE_ALL) ||
	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
		isolate_huge_page(page, private);
unlock:
	spin_unlock(ptl);
#else
	BUG();
#endif
}
static inline int queue_pages_pmd_range(struct vm_area_struct *vma, pud_t *pud,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_present(*pmd))
			continue;
		if (pmd_huge(*pmd) && is_vm_hugetlb_page(vma)) {
			queue_pages_hugetlb_pmd_range(vma, pmd, nodes,
						flags, private);
			continue;
		}
		split_huge_page_pmd(vma, addr, pmd);
		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
			continue;
		if (queue_pages_pte_range(vma, pmd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int queue_pages_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_huge(*pud) && is_vm_hugetlb_page(vma))
			continue;
		if (pud_none_or_clear_bad(pud))
			continue;
		if (queue_pages_pmd_range(vma, pud, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static inline int queue_pages_pgd_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		if (queue_pages_pud_range(vma, pgd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pgd++, addr = next, addr != end);
	return 0;
}
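/*
 * For orientation: the walkers above mirror the page table hierarchy,
 * descending one level per function until the PTEs are reached:
 *
 *	queue_pages_pgd_range()			one pgd entry per iteration
 *	  -> queue_pages_pud_range()
 *	    -> queue_pages_pmd_range()		hugetlb handled at this level
 *	      -> queue_pages_pte_range()	per-page policy check
 */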
#ifdef CONFIG_NUMA_BALANCING
/*
 * This is used to mark a range of virtual addresses to be inaccessible.
 * These are later cleared by a NUMA hinting fault. Depending on these
 * faults, pages may be migrated for better NUMA placement.
 *
 * This is assuming that NUMA faults are handled using PROT_NONE. If
 * an architecture makes a different choice, it will need further
 * changes to the core.
 */
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	int nr_updated;

	nr_updated = change_protection(vma, addr, end, vma->vm_page_prot, 0, 1);
	if (nr_updated)
		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);

	return nr_updated;
}
#else
static unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */
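/*
 * Illustrative flow (assuming CONFIG_NUMA_BALANCING=y): an
 * mbind(..., MPOL_MF_LAZY) call does not migrate anything directly.
 * Instead, queue_pages_range() below calls change_prot_numa() to make
 * the range PROT_NONE, and each later access takes a NUMA hinting fault
 * that may migrate the touched page according to the policy.
 */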
/*
 * Walk through page tables and collect pages to be migrated.
 *
 * If pages found in a given range are on a set of nodes (determined by
 * @nodes and @flags), they are isolated and queued to the pagelist which
 * is passed via @private.
 */
static int
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		const nodemask_t *nodes, unsigned long flags, void *private)
{
	int err = 0;
	struct vm_area_struct *vma, *prev;

	vma = find_vma(mm, start);
	if (!vma)
		return -EFAULT;
	prev = NULL;
	for (; vma && vma->vm_start < end; vma = vma->vm_next) {
		unsigned long endvma = vma->vm_end;

		if (endvma > end)
			endvma = end;
		if (vma->vm_start > start)
			start = vma->vm_start;

		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
			if (!vma->vm_next && vma->vm_end < end)
				return -EFAULT;
			if (prev && prev->vm_end < vma->vm_start)
				return -EFAULT;
		}

		if (flags & MPOL_MF_LAZY) {
			/* Similar to task_numa_work, skip inaccessible VMAs */
			if (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))
				change_prot_numa(vma, start, endvma);
			goto next;
		}

		if ((flags & MPOL_MF_STRICT) ||
		    ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
		      vma_migratable(vma))) {

			err = queue_pages_pgd_range(vma, start, endvma, nodes,
						flags, private);
			if (err)
				break;
		}
next:
		prev = vma;
	}
	return err;
}

/*
 * Apply policy to a single VMA
 * This must be called with the mmap_sem held for writing.
 */
static int vma_replace_policy(struct vm_area_struct *vma,
						struct mempolicy *pol)
{
	int err;
	struct mempolicy *old;
	struct mempolicy *new;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	new = mpol_dup(pol);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (vma->vm_ops && vma->vm_ops->set_policy) {
		err = vma->vm_ops->set_policy(vma, new);
		if (err)
			goto err_out;
	}

	old = vma->vm_policy;
	vma->vm_policy = new; /* protected by mmap_sem */
	mpol_put(old);

	return 0;
 err_out:
	mpol_put(new);
	return err;
}
/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end, struct mempolicy *new_pol)
{
	struct vm_area_struct *next;
	struct vm_area_struct *prev;
	struct vm_area_struct *vma;
	int err = 0;
	pgoff_t pgoff;
	unsigned long vmstart;
	unsigned long vmend;

	vma = find_vma(mm, start);
	if (!vma || vma->vm_start > start)
		return -EFAULT;

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
		next = vma->vm_next;
		vmstart = max(start, vma->vm_start);
		vmend   = min(end, vma->vm_end);

		if (mpol_equal(vma_policy(vma), new_pol))
			continue;

		pgoff = vma->vm_pgoff +
			((vmstart - vma->vm_start) >> PAGE_SHIFT);
		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
				  vma->anon_vma, vma->vm_file, pgoff,
				  new_pol);
		if (prev) {
			vma = prev;
			next = vma->vm_next;
			if (mpol_equal(vma_policy(vma), new_pol))
				continue;
			/* vma_merge() joined vma && vma->next, case 8 */
			goto replace;
		}
		if (vma->vm_start != vmstart) {
			err = split_vma(vma->vm_mm, vma, vmstart, 1);
			if (err)
				goto out;
		}
		if (vma->vm_end != vmend) {
			err = split_vma(vma->vm_mm, vma, vmend, 0);
			if (err)
				goto out;
		}
 replace:
		err = vma_replace_policy(vma, new_pol);
		if (err)
			goto out;
	}

 out:
	return err;
}
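/*
 * Illustrative example of the splitting above: if a VMA spans
 * [0x1000, 0x9000) and mbind() is applied to [0x3000, 0x5000), and no
 * neighbour can be merged, split_vma() is called twice so the VMA
 * becomes [0x1000, 0x3000) [0x3000, 0x5000) [0x5000, 0x9000), and only
 * the middle piece gets the new policy via vma_replace_policy().
 */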
/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new, *old;
	NODEMASK_SCRATCH(scratch);
	int ret;

	if (!scratch)
		return -ENOMEM;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new)) {
		ret = PTR_ERR(new);
		goto out;
	}

	task_lock(current);
	ret = mpol_set_nodemask(new, nodes, scratch);
	if (ret) {
		task_unlock(current);
		mpol_put(new);
		goto out;
	}
	old = current->mempolicy;
	current->mempolicy = new;
	if (new && new->mode == MPOL_INTERLEAVE &&
	    nodes_weight(new->v.nodes))
		current->il_next = first_node(new->v.nodes);
	task_unlock(current);
	mpol_put(old);
	ret = 0;
out:
	NODEMASK_SCRATCH_FREE(scratch);
	return ret;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (p == &default_policy)
		return;

	switch (p->mode) {
	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		if (!(p->flags & MPOL_F_LOCAL))
			node_set(p->v.preferred_node, *nodes);
		/* else return empty node mask for local allocation */
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p;
	int err;

	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	return err;
}
/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask  = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL.  We
		 * want to return MPOL_DEFAULT in this case.
		 */
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = current->il_next;
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	if (vma) {
		up_read(&current->mm->mmap_sem);
		vma = NULL;
	}

	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {
			*nmask = pol->w.user_nodemask;
		} else {
			task_lock(current);
			get_policy_nodemask(pol, nmask);
			task_unlock(current);
		}
	}

 out:
	mpol_cond_put(pol);
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}
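/*
 * Illustrative userspace counterpart (not part of the kernel), assuming
 * <numaif.h> from libnuma: query which node currently backs an address.
 *
 *	int node = -1;
 *	long err = get_mempolicy(&node, NULL, 0, ptr,
 *				 MPOL_F_NODE | MPOL_F_ADDR);
 *	if (err == 0)
 *		printf("page at %p is on node %d\n", ptr, node);
 */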
#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
		if (!isolate_lru_page(page)) {
			list_add_tail(&page->lru, pagelist);
			inc_zone_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));
		}
	}
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
	if (PageHuge(page))
		return alloc_huge_page_node(page_hstate(compound_head(page)),
					node);
	else
		return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
}
/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;

	nodes_clear(nmask);
	node_set(source, nmask);

	/*
	 * This does not "check" the range but isolates all pages that
	 * need migration.  Between passing in the full user address
	 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
	 */
	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, new_node_page, NULL, dest,
					MIGRATE_SYNC, MR_SYSCALL);
		if (err)
			putback_movable_pages(&pagelist);
	}

	return err;
}
/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	int busy = 0;
	int err;
	nodemask_t tmp;

	err = migrate_prep();
	if (err)
		return err;

	down_read(&mm->mmap_sem);

	/*
	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
	 * bit in 'tmp', and return that <source, dest> pair for migration.
	 * The pair of nodemasks 'to' and 'from' define the map.
	 *
	 * If no pair of bits is found that way, fallback to picking some
	 * pair of 'source' and 'dest' bits that are not the same.  If the
	 * 'source' and 'dest' bits are the same, this represents a node
	 * that will be migrating to itself, so no pages need move.
	 *
	 * If no bits are left in 'tmp', or if all remaining bits left
	 * in 'tmp' correspond to the same bit in 'to', return false
	 * (nothing left to migrate).
	 *
	 * This lets us pick a pair of nodes to migrate between, such that
	 * if possible the dest node is not already occupied by some other
	 * source node, minimizing the risk of overloading the memory on a
	 * node that would happen if we migrated incoming memory to a node
	 * before migrating outgoing memory source from that same node.
	 *
	 * A single scan of tmp is sufficient.  As we go, we remember the
	 * most recent <s, d> pair that moved (s != d).  If we find a pair
	 * that not only moved, but what's better, moved to an empty slot
	 * (d is not set in tmp), then we break out then, with that pair.
	 * Otherwise when we finish scanning tmp, we at least have the
	 * most recent <s, d> pair that moved.  If we get all the way through
	 * the scan of tmp without finding any node that moved, much less
	 * moved to an empty node, then there is nothing left worth migrating.
	 */

	tmp = *from;
	while (!nodes_empty(tmp)) {
		int s,d;
		int source = NUMA_NO_NODE;
		int dest = 0;

		for_each_node_mask(s, tmp) {

			/*
			 * do_migrate_pages() tries to maintain the relative
			 * node relationship of the pages established between
			 * threads and memory areas.
			 *
			 * However if the number of source nodes is not equal to
			 * the number of destination nodes we can not preserve
			 * this node relative relationship.  In that case, skip
			 * copying memory from a node that is in the destination
			 * mask.
			 *
			 * Example: [2,3,4] -> [3,4,5] moves everything.
			 *          [0-7]   -> [3,4,5] moves only 0,1,2,6,7.
			 */

			if ((nodes_weight(*from) != nodes_weight(*to)) &&
						(node_isset(s, *to)))
				continue;

			d = node_remap(s, *from, *to);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
				break;
		}
		if (source == NUMA_NO_NODE)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;
		if (err < 0)
			break;
	}
	up_read(&mm->mmap_sem);
	if (err < 0)
		return err;
	return busy;

}
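/*
 * Worked trace of the pair picking above (illustrative): from = {0,1},
 * to = {1,2}.  tmp starts as {0,1}.  Scanning, s=0 remaps to d=1, which
 * is still set in tmp, so it is only remembered; s=1 remaps to d=2,
 * which is not in tmp, so we break and migrate 1 -> 2 first.  On the
 * next pass tmp = {0} and we migrate 0 -> 1.  Node 1 is thus drained
 * before anything is moved onto it.
 */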
1132d05f0cdcSHugh Dickins * Start by assuming the page is mapped by the same vma as contains @start. 11333ad33b24SLee Schermerhorn * Search forward from there, if not. N.B., this assumes that the 11343ad33b24SLee Schermerhorn * list of pages handed to migrate_pages()--which is how we get here-- 11353ad33b24SLee Schermerhorn * is in virtual address order. 11363ad33b24SLee Schermerhorn */ 1137d05f0cdcSHugh Dickins static struct page *new_page(struct page *page, unsigned long start, int **x) 113895a402c3SChristoph Lameter { 1139d05f0cdcSHugh Dickins struct vm_area_struct *vma; 11403ad33b24SLee Schermerhorn unsigned long uninitialized_var(address); 114195a402c3SChristoph Lameter 1142d05f0cdcSHugh Dickins vma = find_vma(current->mm, start); 11433ad33b24SLee Schermerhorn while (vma) { 11443ad33b24SLee Schermerhorn address = page_address_in_vma(page, vma); 11453ad33b24SLee Schermerhorn if (address != -EFAULT) 11463ad33b24SLee Schermerhorn break; 11473ad33b24SLee Schermerhorn vma = vma->vm_next; 11483ad33b24SLee Schermerhorn } 11493ad33b24SLee Schermerhorn 115011c731e8SWanpeng Li if (PageHuge(page)) { 1151cc81717eSMichal Hocko BUG_ON(!vma); 115274060e4dSNaoya Horiguchi return alloc_huge_page_noerr(vma, address, 1); 115311c731e8SWanpeng Li } 115411c731e8SWanpeng Li /* 115511c731e8SWanpeng Li * if !vma, alloc_page_vma() will use task or system default policy 115611c731e8SWanpeng Li */ 11573ad33b24SLee Schermerhorn return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); 115895a402c3SChristoph Lameter } 1159b20a3503SChristoph Lameter #else 1160b20a3503SChristoph Lameter 1161b20a3503SChristoph Lameter static void migrate_page_add(struct page *page, struct list_head *pagelist, 1162b20a3503SChristoph Lameter unsigned long flags) 1163b20a3503SChristoph Lameter { 1164b20a3503SChristoph Lameter } 1165b20a3503SChristoph Lameter 11660ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, 11670ce72d4fSAndrew Morton const nodemask_t *to, int flags) 1168b20a3503SChristoph Lameter { 1169b20a3503SChristoph Lameter return -ENOSYS; 1170b20a3503SChristoph Lameter } 117195a402c3SChristoph Lameter 1172d05f0cdcSHugh Dickins static struct page *new_page(struct page *page, unsigned long start, int **x) 117395a402c3SChristoph Lameter { 117495a402c3SChristoph Lameter return NULL; 117595a402c3SChristoph Lameter } 1176b20a3503SChristoph Lameter #endif 1177b20a3503SChristoph Lameter 1178dbcb0f19SAdrian Bunk static long do_mbind(unsigned long start, unsigned long len, 1179028fec41SDavid Rientjes unsigned short mode, unsigned short mode_flags, 1180028fec41SDavid Rientjes nodemask_t *nmask, unsigned long flags) 11816ce3c4c0SChristoph Lameter { 11826ce3c4c0SChristoph Lameter struct mm_struct *mm = current->mm; 11836ce3c4c0SChristoph Lameter struct mempolicy *new; 11846ce3c4c0SChristoph Lameter unsigned long end; 11856ce3c4c0SChristoph Lameter int err; 11866ce3c4c0SChristoph Lameter LIST_HEAD(pagelist); 11876ce3c4c0SChristoph Lameter 1188b24f53a0SLee Schermerhorn if (flags & ~(unsigned long)MPOL_MF_VALID) 11896ce3c4c0SChristoph Lameter return -EINVAL; 119074c00241SChristoph Lameter if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE)) 11916ce3c4c0SChristoph Lameter return -EPERM; 11926ce3c4c0SChristoph Lameter 11936ce3c4c0SChristoph Lameter if (start & ~PAGE_MASK) 11946ce3c4c0SChristoph Lameter return -EINVAL; 11956ce3c4c0SChristoph Lameter 11966ce3c4c0SChristoph Lameter if (mode == MPOL_DEFAULT) 11976ce3c4c0SChristoph Lameter flags &= ~MPOL_MF_STRICT; 11986ce3c4c0SChristoph Lameter 
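	/*
	 * Round len up to a whole number of pages: with 4KiB pages,
	 * start = 0x1000 and len = 5000 cover [0x1000, 0x3000), since
	 * len becomes 8192. The end < start test below then catches
	 * wrap-around of start + len.
	 */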
11996ce3c4c0SChristoph Lameter len = (len + PAGE_SIZE - 1) & PAGE_MASK; 12006ce3c4c0SChristoph Lameter end = start + len; 12016ce3c4c0SChristoph Lameter 12026ce3c4c0SChristoph Lameter if (end < start) 12036ce3c4c0SChristoph Lameter return -EINVAL; 12046ce3c4c0SChristoph Lameter if (end == start) 12056ce3c4c0SChristoph Lameter return 0; 12066ce3c4c0SChristoph Lameter 1207028fec41SDavid Rientjes new = mpol_new(mode, mode_flags, nmask); 12086ce3c4c0SChristoph Lameter if (IS_ERR(new)) 12096ce3c4c0SChristoph Lameter return PTR_ERR(new); 12106ce3c4c0SChristoph Lameter 1211b24f53a0SLee Schermerhorn if (flags & MPOL_MF_LAZY) 1212b24f53a0SLee Schermerhorn new->flags |= MPOL_F_MOF; 1213b24f53a0SLee Schermerhorn 12146ce3c4c0SChristoph Lameter /* 12156ce3c4c0SChristoph Lameter * If we are using the default policy then operation 12166ce3c4c0SChristoph Lameter * on discontinuous address spaces is okay after all 12176ce3c4c0SChristoph Lameter */ 12186ce3c4c0SChristoph Lameter if (!new) 12196ce3c4c0SChristoph Lameter flags |= MPOL_MF_DISCONTIG_OK; 12206ce3c4c0SChristoph Lameter 1221028fec41SDavid Rientjes pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n", 1222028fec41SDavid Rientjes start, start + len, mode, mode_flags, 122300ef2d2fSDavid Rientjes nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE); 12246ce3c4c0SChristoph Lameter 12250aedadf9SChristoph Lameter if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 12260aedadf9SChristoph Lameter 12270aedadf9SChristoph Lameter err = migrate_prep(); 12280aedadf9SChristoph Lameter if (err) 1229b05ca738SKOSAKI Motohiro goto mpol_out; 12300aedadf9SChristoph Lameter } 12314bfc4495SKAMEZAWA Hiroyuki { 12324bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 12334bfc4495SKAMEZAWA Hiroyuki if (scratch) { 12346ce3c4c0SChristoph Lameter down_write(&mm->mmap_sem); 123558568d2aSMiao Xie task_lock(current); 12364bfc4495SKAMEZAWA Hiroyuki err = mpol_set_nodemask(new, nmask, scratch); 123758568d2aSMiao Xie task_unlock(current); 12384bfc4495SKAMEZAWA Hiroyuki if (err) 123958568d2aSMiao Xie up_write(&mm->mmap_sem); 12404bfc4495SKAMEZAWA Hiroyuki } else 12414bfc4495SKAMEZAWA Hiroyuki err = -ENOMEM; 12424bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 12434bfc4495SKAMEZAWA Hiroyuki } 1244b05ca738SKOSAKI Motohiro if (err) 1245b05ca738SKOSAKI Motohiro goto mpol_out; 1246b05ca738SKOSAKI Motohiro 1247d05f0cdcSHugh Dickins err = queue_pages_range(mm, start, end, nmask, 12486ce3c4c0SChristoph Lameter flags | MPOL_MF_INVERT, &pagelist); 1249d05f0cdcSHugh Dickins if (!err) 12509d8cebd4SKOSAKI Motohiro err = mbind_range(mm, start, end, new); 12517e2ab150SChristoph Lameter 1252b24f53a0SLee Schermerhorn if (!err) { 1253b24f53a0SLee Schermerhorn int nr_failed = 0; 1254b24f53a0SLee Schermerhorn 1255cf608ac1SMinchan Kim if (!list_empty(&pagelist)) { 1256b24f53a0SLee Schermerhorn WARN_ON_ONCE(flags & MPOL_MF_LAZY); 1257d05f0cdcSHugh Dickins nr_failed = migrate_pages(&pagelist, new_page, NULL, 1258d05f0cdcSHugh Dickins start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND); 1259cf608ac1SMinchan Kim if (nr_failed) 126074060e4dSNaoya Horiguchi putback_movable_pages(&pagelist); 1261cf608ac1SMinchan Kim } 12626ce3c4c0SChristoph Lameter 1263b24f53a0SLee Schermerhorn if (nr_failed && (flags & MPOL_MF_STRICT)) 12646ce3c4c0SChristoph Lameter err = -EIO; 1265ab8a3e14SKOSAKI Motohiro } else 1266b0e5fd73SJoonsoo Kim putback_movable_pages(&pagelist); 1267b20a3503SChristoph Lameter 12686ce3c4c0SChristoph Lameter up_write(&mm->mmap_sem); 1269b05ca738SKOSAKI Motohiro mpol_out: 1270f0be3d32SLee Schermerhorn 
mpol_put(new); 12716ce3c4c0SChristoph Lameter return err; 12726ce3c4c0SChristoph Lameter } 12736ce3c4c0SChristoph Lameter 127439743889SChristoph Lameter /* 12758bccd85fSChristoph Lameter * User space interface with variable sized bitmaps for nodelists. 12768bccd85fSChristoph Lameter */ 12778bccd85fSChristoph Lameter 12788bccd85fSChristoph Lameter /* Copy a node mask from user space. */ 127939743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask, 12808bccd85fSChristoph Lameter unsigned long maxnode) 12818bccd85fSChristoph Lameter { 12828bccd85fSChristoph Lameter unsigned long k; 12838bccd85fSChristoph Lameter unsigned long nlongs; 12848bccd85fSChristoph Lameter unsigned long endmask; 12858bccd85fSChristoph Lameter 12868bccd85fSChristoph Lameter --maxnode; 12878bccd85fSChristoph Lameter nodes_clear(*nodes); 12888bccd85fSChristoph Lameter if (maxnode == 0 || !nmask) 12898bccd85fSChristoph Lameter return 0; 1290a9c930baSAndi Kleen if (maxnode > PAGE_SIZE*BITS_PER_BYTE) 1291636f13c1SChris Wright return -EINVAL; 12928bccd85fSChristoph Lameter 12938bccd85fSChristoph Lameter nlongs = BITS_TO_LONGS(maxnode); 12948bccd85fSChristoph Lameter if ((maxnode % BITS_PER_LONG) == 0) 12958bccd85fSChristoph Lameter endmask = ~0UL; 12968bccd85fSChristoph Lameter else 12978bccd85fSChristoph Lameter endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1; 12988bccd85fSChristoph Lameter 12998bccd85fSChristoph Lameter /* When the user specified more nodes than supported, just check 13008bccd85fSChristoph Lameter that the unsupported part is all zero. */ 13018bccd85fSChristoph Lameter if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) { 13028bccd85fSChristoph Lameter if (nlongs > PAGE_SIZE/sizeof(long)) 13038bccd85fSChristoph Lameter return -EINVAL; 13048bccd85fSChristoph Lameter for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) { 13058bccd85fSChristoph Lameter unsigned long t; 13068bccd85fSChristoph Lameter if (get_user(t, nmask + k)) 13078bccd85fSChristoph Lameter return -EFAULT; 13088bccd85fSChristoph Lameter if (k == nlongs - 1) { 13098bccd85fSChristoph Lameter if (t & endmask) 13108bccd85fSChristoph Lameter return -EINVAL; 13118bccd85fSChristoph Lameter } else if (t) 13128bccd85fSChristoph Lameter return -EINVAL; 13138bccd85fSChristoph Lameter } 13148bccd85fSChristoph Lameter nlongs = BITS_TO_LONGS(MAX_NUMNODES); 13158bccd85fSChristoph Lameter endmask = ~0UL; 13168bccd85fSChristoph Lameter } 13178bccd85fSChristoph Lameter 13188bccd85fSChristoph Lameter if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long))) 13198bccd85fSChristoph Lameter return -EFAULT; 13208bccd85fSChristoph Lameter nodes_addr(*nodes)[nlongs-1] &= endmask; 13218bccd85fSChristoph Lameter return 0; 13228bccd85fSChristoph Lameter } 13238bccd85fSChristoph Lameter 13248bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */ 13258bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode, 13268bccd85fSChristoph Lameter nodemask_t *nodes) 13278bccd85fSChristoph Lameter { 13288bccd85fSChristoph Lameter unsigned long copy = ALIGN(maxnode-1, 64) / 8; 13298bccd85fSChristoph Lameter const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long); 13308bccd85fSChristoph Lameter 13318bccd85fSChristoph Lameter if (copy > nbytes) { 13328bccd85fSChristoph Lameter if (copy > PAGE_SIZE) 13338bccd85fSChristoph Lameter return -EINVAL; 13348bccd85fSChristoph Lameter if (clear_user((char __user *)mask + nbytes, copy - nbytes)) 13358bccd85fSChristoph
Lameter return -EFAULT; 13368bccd85fSChristoph Lameter copy = nbytes; 13378bccd85fSChristoph Lameter } 13388bccd85fSChristoph Lameter return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0; 13398bccd85fSChristoph Lameter } 13408bccd85fSChristoph Lameter 1341938bb9f5SHeiko Carstens SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len, 1342f7f28ca9SRasmus Villemoes unsigned long, mode, const unsigned long __user *, nmask, 1343938bb9f5SHeiko Carstens unsigned long, maxnode, unsigned, flags) 13448bccd85fSChristoph Lameter { 13458bccd85fSChristoph Lameter nodemask_t nodes; 13468bccd85fSChristoph Lameter int err; 1347028fec41SDavid Rientjes unsigned short mode_flags; 13488bccd85fSChristoph Lameter 1349028fec41SDavid Rientjes mode_flags = mode & MPOL_MODE_FLAGS; 1350028fec41SDavid Rientjes mode &= ~MPOL_MODE_FLAGS; 1351a3b51e01SDavid Rientjes if (mode >= MPOL_MAX) 1352a3b51e01SDavid Rientjes return -EINVAL; 13534c50bc01SDavid Rientjes if ((mode_flags & MPOL_F_STATIC_NODES) && 13544c50bc01SDavid Rientjes (mode_flags & MPOL_F_RELATIVE_NODES)) 13554c50bc01SDavid Rientjes return -EINVAL; 13568bccd85fSChristoph Lameter err = get_nodes(&nodes, nmask, maxnode); 13578bccd85fSChristoph Lameter if (err) 13588bccd85fSChristoph Lameter return err; 1359028fec41SDavid Rientjes return do_mbind(start, len, mode, mode_flags, &nodes, flags); 13608bccd85fSChristoph Lameter } 13618bccd85fSChristoph Lameter 13628bccd85fSChristoph Lameter /* Set the process memory policy */ 136323c8902dSRasmus Villemoes SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask, 1364938bb9f5SHeiko Carstens unsigned long, maxnode) 13658bccd85fSChristoph Lameter { 13668bccd85fSChristoph Lameter int err; 13678bccd85fSChristoph Lameter nodemask_t nodes; 1368028fec41SDavid Rientjes unsigned short flags; 13698bccd85fSChristoph Lameter 1370028fec41SDavid Rientjes flags = mode & MPOL_MODE_FLAGS; 1371028fec41SDavid Rientjes mode &= ~MPOL_MODE_FLAGS; 1372028fec41SDavid Rientjes if ((unsigned int)mode >= MPOL_MAX) 13738bccd85fSChristoph Lameter return -EINVAL; 13744c50bc01SDavid Rientjes if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES)) 13754c50bc01SDavid Rientjes return -EINVAL; 13768bccd85fSChristoph Lameter err = get_nodes(&nodes, nmask, maxnode); 13778bccd85fSChristoph Lameter if (err) 13788bccd85fSChristoph Lameter return err; 1379028fec41SDavid Rientjes return do_set_mempolicy(mode, flags, &nodes); 13808bccd85fSChristoph Lameter } 13818bccd85fSChristoph Lameter 1382938bb9f5SHeiko Carstens SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode, 1383938bb9f5SHeiko Carstens const unsigned long __user *, old_nodes, 1384938bb9f5SHeiko Carstens const unsigned long __user *, new_nodes) 138539743889SChristoph Lameter { 1386c69e8d9cSDavid Howells const struct cred *cred = current_cred(), *tcred; 1387596d7cfaSKOSAKI Motohiro struct mm_struct *mm = NULL; 138839743889SChristoph Lameter struct task_struct *task; 138939743889SChristoph Lameter nodemask_t task_nodes; 139039743889SChristoph Lameter int err; 1391596d7cfaSKOSAKI Motohiro nodemask_t *old; 1392596d7cfaSKOSAKI Motohiro nodemask_t *new; 1393596d7cfaSKOSAKI Motohiro NODEMASK_SCRATCH(scratch); 139439743889SChristoph Lameter 1395596d7cfaSKOSAKI Motohiro if (!scratch) 1396596d7cfaSKOSAKI Motohiro return -ENOMEM; 139739743889SChristoph Lameter 1398596d7cfaSKOSAKI Motohiro old = &scratch->mask1; 1399596d7cfaSKOSAKI Motohiro new = &scratch->mask2; 1400596d7cfaSKOSAKI Motohiro 1401596d7cfaSKOSAKI Motohiro err = get_nodes(old, 
old_nodes, maxnode); 140239743889SChristoph Lameter if (err) 1403596d7cfaSKOSAKI Motohiro goto out; 1404596d7cfaSKOSAKI Motohiro 1405596d7cfaSKOSAKI Motohiro err = get_nodes(new, new_nodes, maxnode); 1406596d7cfaSKOSAKI Motohiro if (err) 1407596d7cfaSKOSAKI Motohiro goto out; 140839743889SChristoph Lameter 140939743889SChristoph Lameter /* Find the mm_struct */ 141055cfaa3cSZeng Zhaoming rcu_read_lock(); 1411228ebcbeSPavel Emelyanov task = pid ? find_task_by_vpid(pid) : current; 141239743889SChristoph Lameter if (!task) { 141355cfaa3cSZeng Zhaoming rcu_read_unlock(); 1414596d7cfaSKOSAKI Motohiro err = -ESRCH; 1415596d7cfaSKOSAKI Motohiro goto out; 141639743889SChristoph Lameter } 14173268c63eSChristoph Lameter get_task_struct(task); 141839743889SChristoph Lameter 1419596d7cfaSKOSAKI Motohiro err = -EINVAL; 142039743889SChristoph Lameter 142139743889SChristoph Lameter /* 142239743889SChristoph Lameter * Check if this process has the right to modify the specified 142339743889SChristoph Lameter * process. The right exists if the process has administrative 14247f927fccSAlexey Dobriyan * capabilities, superuser privileges or the same 142539743889SChristoph Lameter * userid as the target process. 142639743889SChristoph Lameter */ 1427c69e8d9cSDavid Howells tcred = __task_cred(task); 1428b38a86ebSEric W. Biederman if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) && 1429b38a86ebSEric W. Biederman !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) && 143074c00241SChristoph Lameter !capable(CAP_SYS_NICE)) { 1431c69e8d9cSDavid Howells rcu_read_unlock(); 143239743889SChristoph Lameter err = -EPERM; 14333268c63eSChristoph Lameter goto out_put; 143439743889SChristoph Lameter } 1435c69e8d9cSDavid Howells rcu_read_unlock(); 143639743889SChristoph Lameter 143739743889SChristoph Lameter task_nodes = cpuset_mems_allowed(task); 143839743889SChristoph Lameter /* Is the user allowed to access the target nodes? */ 1439596d7cfaSKOSAKI Motohiro if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) { 144039743889SChristoph Lameter err = -EPERM; 14413268c63eSChristoph Lameter goto out_put; 144239743889SChristoph Lameter } 144339743889SChristoph Lameter 144401f13bd6SLai Jiangshan if (!nodes_subset(*new, node_states[N_MEMORY])) { 14453b42d28bSChristoph Lameter err = -EINVAL; 14463268c63eSChristoph Lameter goto out_put; 14473b42d28bSChristoph Lameter } 14483b42d28bSChristoph Lameter 144986c3a764SDavid Quigley err = security_task_movememory(task); 145086c3a764SDavid Quigley if (err) 14513268c63eSChristoph Lameter goto out_put; 145286c3a764SDavid Quigley 14533268c63eSChristoph Lameter mm = get_task_mm(task); 14543268c63eSChristoph Lameter put_task_struct(task); 1455f2a9ef88SSasha Levin 1456f2a9ef88SSasha Levin if (!mm) { 1457f2a9ef88SSasha Levin err = -EINVAL; 1458f2a9ef88SSasha Levin goto out; 1459f2a9ef88SSasha Levin } 1460f2a9ef88SSasha Levin 1461596d7cfaSKOSAKI Motohiro err = do_migrate_pages(mm, old, new, 146274c00241SChristoph Lameter capable(CAP_SYS_NICE) ? 
MPOL_MF_MOVE_ALL : MPOL_MF_MOVE); 14633268c63eSChristoph Lameter 146439743889SChristoph Lameter mmput(mm); 14653268c63eSChristoph Lameter out: 1466596d7cfaSKOSAKI Motohiro NODEMASK_SCRATCH_FREE(scratch); 1467596d7cfaSKOSAKI Motohiro 146839743889SChristoph Lameter return err; 14693268c63eSChristoph Lameter 14703268c63eSChristoph Lameter out_put: 14713268c63eSChristoph Lameter put_task_struct(task); 14723268c63eSChristoph Lameter goto out; 14733268c63eSChristoph Lameter 147439743889SChristoph Lameter } 147539743889SChristoph Lameter 147639743889SChristoph Lameter 14778bccd85fSChristoph Lameter /* Retrieve NUMA policy */ 1478938bb9f5SHeiko Carstens SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, 1479938bb9f5SHeiko Carstens unsigned long __user *, nmask, unsigned long, maxnode, 1480938bb9f5SHeiko Carstens unsigned long, addr, unsigned long, flags) 14818bccd85fSChristoph Lameter { 1482dbcb0f19SAdrian Bunk int err; 1483dbcb0f19SAdrian Bunk int uninitialized_var(pval); 14848bccd85fSChristoph Lameter nodemask_t nodes; 14858bccd85fSChristoph Lameter 14868bccd85fSChristoph Lameter if (nmask != NULL && maxnode < MAX_NUMNODES) 14878bccd85fSChristoph Lameter return -EINVAL; 14888bccd85fSChristoph Lameter 14898bccd85fSChristoph Lameter err = do_get_mempolicy(&pval, &nodes, addr, flags); 14908bccd85fSChristoph Lameter 14918bccd85fSChristoph Lameter if (err) 14928bccd85fSChristoph Lameter return err; 14938bccd85fSChristoph Lameter 14948bccd85fSChristoph Lameter if (policy && put_user(pval, policy)) 14958bccd85fSChristoph Lameter return -EFAULT; 14968bccd85fSChristoph Lameter 14978bccd85fSChristoph Lameter if (nmask) 14988bccd85fSChristoph Lameter err = copy_nodes_to_user(nmask, maxnode, &nodes); 14998bccd85fSChristoph Lameter 15008bccd85fSChristoph Lameter return err; 15018bccd85fSChristoph Lameter } 15028bccd85fSChristoph Lameter 15031da177e4SLinus Torvalds #ifdef CONFIG_COMPAT 15041da177e4SLinus Torvalds 1505c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, 1506c93e0f6cSHeiko Carstens compat_ulong_t __user *, nmask, 1507c93e0f6cSHeiko Carstens compat_ulong_t, maxnode, 1508c93e0f6cSHeiko Carstens compat_ulong_t, addr, compat_ulong_t, flags) 15091da177e4SLinus Torvalds { 15101da177e4SLinus Torvalds long err; 15111da177e4SLinus Torvalds unsigned long __user *nm = NULL; 15121da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 15131da177e4SLinus Torvalds DECLARE_BITMAP(bm, MAX_NUMNODES); 15141da177e4SLinus Torvalds 15151da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 15161da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 15171da177e4SLinus Torvalds 15181da177e4SLinus Torvalds if (nmask) 15191da177e4SLinus Torvalds nm = compat_alloc_user_space(alloc_size); 15201da177e4SLinus Torvalds 15211da177e4SLinus Torvalds err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags); 15221da177e4SLinus Torvalds 15231da177e4SLinus Torvalds if (!err && nmask) { 15242bbff6c7SKAMEZAWA Hiroyuki unsigned long copy_size; 15252bbff6c7SKAMEZAWA Hiroyuki copy_size = min_t(unsigned long, sizeof(bm), alloc_size); 15262bbff6c7SKAMEZAWA Hiroyuki err = copy_from_user(bm, nm, copy_size); 15271da177e4SLinus Torvalds /* ensure entire bitmap is zeroed */ 15281da177e4SLinus Torvalds err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8); 15291da177e4SLinus Torvalds err |= compat_put_bitmap(nmask, bm, nr_bits); 15301da177e4SLinus Torvalds } 15311da177e4SLinus Torvalds 15321da177e4SLinus Torvalds return err; 15331da177e4SLinus Torvalds } 
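/*
 * Illustrative userspace sketch (assumed usage, not part of this file):
 * bind an existing mapping to nodes 0-1 via the native syscall. maxnode
 * is 3 because get_nodes() above reads maxnode - 1 bits of the mask.
 *
 *	unsigned long mask = 0x3;	// nodes 0 and 1
 *	mbind(p, len, MPOL_BIND, &mask, 3, MPOL_MF_MOVE);
 */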
15341da177e4SLinus Torvalds 1535c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask, 1536c93e0f6cSHeiko Carstens compat_ulong_t, maxnode) 15371da177e4SLinus Torvalds { 15381da177e4SLinus Torvalds long err = 0; 15391da177e4SLinus Torvalds unsigned long __user *nm = NULL; 15401da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 15411da177e4SLinus Torvalds DECLARE_BITMAP(bm, MAX_NUMNODES); 15421da177e4SLinus Torvalds 15431da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 15441da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 15451da177e4SLinus Torvalds 15461da177e4SLinus Torvalds if (nmask) { 15471da177e4SLinus Torvalds err = compat_get_bitmap(bm, nmask, nr_bits); 15481da177e4SLinus Torvalds nm = compat_alloc_user_space(alloc_size); 15491da177e4SLinus Torvalds err |= copy_to_user(nm, bm, alloc_size); 15501da177e4SLinus Torvalds } 15511da177e4SLinus Torvalds 15521da177e4SLinus Torvalds if (err) 15531da177e4SLinus Torvalds return -EFAULT; 15541da177e4SLinus Torvalds 15551da177e4SLinus Torvalds return sys_set_mempolicy(mode, nm, nr_bits+1); 15561da177e4SLinus Torvalds } 15571da177e4SLinus Torvalds 1558c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len, 1559c93e0f6cSHeiko Carstens compat_ulong_t, mode, compat_ulong_t __user *, nmask, 1560c93e0f6cSHeiko Carstens compat_ulong_t, maxnode, compat_ulong_t, flags) 15611da177e4SLinus Torvalds { 15621da177e4SLinus Torvalds long err = 0; 15631da177e4SLinus Torvalds unsigned long __user *nm = NULL; 15641da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 1565dfcd3c0dSAndi Kleen nodemask_t bm; 15661da177e4SLinus Torvalds 15671da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 15681da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 15691da177e4SLinus Torvalds 15701da177e4SLinus Torvalds if (nmask) { 1571dfcd3c0dSAndi Kleen err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits); 15721da177e4SLinus Torvalds nm = compat_alloc_user_space(alloc_size); 1573dfcd3c0dSAndi Kleen err |= copy_to_user(nm, nodes_addr(bm), alloc_size); 15741da177e4SLinus Torvalds } 15751da177e4SLinus Torvalds 15761da177e4SLinus Torvalds if (err) 15771da177e4SLinus Torvalds return -EFAULT; 15781da177e4SLinus Torvalds 15791da177e4SLinus Torvalds return sys_mbind(start, len, mode, nm, nr_bits+1, flags); 15801da177e4SLinus Torvalds } 15811da177e4SLinus Torvalds 15821da177e4SLinus Torvalds #endif 15831da177e4SLinus Torvalds 158474d2c3a0SOleg Nesterov struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, 158574d2c3a0SOleg Nesterov unsigned long addr) 15861da177e4SLinus Torvalds { 15878d90274bSOleg Nesterov struct mempolicy *pol = NULL; 15881da177e4SLinus Torvalds 15891da177e4SLinus Torvalds if (vma) { 1590480eccf9SLee Schermerhorn if (vma->vm_ops && vma->vm_ops->get_policy) { 15918d90274bSOleg Nesterov pol = vma->vm_ops->get_policy(vma, addr); 159200442ad0SMel Gorman } else if (vma->vm_policy) { 15931da177e4SLinus Torvalds pol = vma->vm_policy; 159400442ad0SMel Gorman 159500442ad0SMel Gorman /* 159600442ad0SMel Gorman * shmem_alloc_page() passes MPOL_F_SHARED policy with 159700442ad0SMel Gorman * a pseudo vma whose vma->vm_ops=NULL. 
Take a reference 159800442ad0SMel Gorman * count on these policies which will be dropped by 159900442ad0SMel Gorman * mpol_cond_put() later 160000442ad0SMel Gorman */ 160100442ad0SMel Gorman if (mpol_needs_cond_ref(pol)) 160200442ad0SMel Gorman mpol_get(pol); 160300442ad0SMel Gorman } 16041da177e4SLinus Torvalds } 1605f15ca78eSOleg Nesterov 160674d2c3a0SOleg Nesterov return pol; 160774d2c3a0SOleg Nesterov } 160874d2c3a0SOleg Nesterov 160974d2c3a0SOleg Nesterov /* 1610dd6eecb9SOleg Nesterov * get_vma_policy(@vma, @addr) 161174d2c3a0SOleg Nesterov * @vma: virtual memory area whose policy is sought 161274d2c3a0SOleg Nesterov * @addr: address in @vma for shared policy lookup 161374d2c3a0SOleg Nesterov * 161474d2c3a0SOleg Nesterov * Returns effective policy for a VMA at specified address. 1615dd6eecb9SOleg Nesterov * Falls back to current->mempolicy or system default policy, as necessary. 161674d2c3a0SOleg Nesterov * Shared policies [those marked as MPOL_F_SHARED] require an extra reference 161774d2c3a0SOleg Nesterov * count--added by the get_policy() vm_op, as appropriate--to protect against 161874d2c3a0SOleg Nesterov * freeing by another task. It is the caller's responsibility to free the 161974d2c3a0SOleg Nesterov * extra reference for shared policies. 162074d2c3a0SOleg Nesterov */ 1621dd6eecb9SOleg Nesterov static struct mempolicy *get_vma_policy(struct vm_area_struct *vma, 1622dd6eecb9SOleg Nesterov unsigned long addr) 162374d2c3a0SOleg Nesterov { 162474d2c3a0SOleg Nesterov struct mempolicy *pol = __get_vma_policy(vma, addr); 162574d2c3a0SOleg Nesterov 16268d90274bSOleg Nesterov if (!pol) 1627dd6eecb9SOleg Nesterov pol = get_task_policy(current); 16288d90274bSOleg Nesterov 16291da177e4SLinus Torvalds return pol; 16301da177e4SLinus Torvalds } 16311da177e4SLinus Torvalds 16326b6482bbSOleg Nesterov bool vma_policy_mof(struct vm_area_struct *vma) 1633fc314724SMel Gorman { 16346b6482bbSOleg Nesterov struct mempolicy *pol; 1635f15ca78eSOleg Nesterov 1636fc314724SMel Gorman if (vma->vm_ops && vma->vm_ops->get_policy) { 1637fc314724SMel Gorman bool ret = false; 1638fc314724SMel Gorman 1639fc314724SMel Gorman pol = vma->vm_ops->get_policy(vma, vma->vm_start); 1640fc314724SMel Gorman if (pol && (pol->flags & MPOL_F_MOF)) 1641fc314724SMel Gorman ret = true; 1642fc314724SMel Gorman mpol_cond_put(pol); 1643fc314724SMel Gorman 1644fc314724SMel Gorman return ret; 16458d90274bSOleg Nesterov } 16468d90274bSOleg Nesterov 1647fc314724SMel Gorman pol = vma->vm_policy; 16488d90274bSOleg Nesterov if (!pol) 16496b6482bbSOleg Nesterov pol = get_task_policy(current); 1650fc314724SMel Gorman 1651fc314724SMel Gorman return pol->flags & MPOL_F_MOF; 1652fc314724SMel Gorman } 1653fc314724SMel Gorman 1654d3eb1570SLai Jiangshan static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone) 1655d3eb1570SLai Jiangshan { 1656d3eb1570SLai Jiangshan enum zone_type dynamic_policy_zone = policy_zone; 1657d3eb1570SLai Jiangshan 1658d3eb1570SLai Jiangshan BUG_ON(dynamic_policy_zone == ZONE_MOVABLE); 1659d3eb1570SLai Jiangshan 1660d3eb1570SLai Jiangshan /* 1661d3eb1570SLai Jiangshan * if policy->v.nodes has movable memory only, 1662d3eb1570SLai Jiangshan * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only. 1663d3eb1570SLai Jiangshan * 1664d3eb1570SLai Jiangshan * policy->v.nodes intersects with node_states[N_MEMORY]. 1665d3eb1570SLai Jiangshan * So if the following test fails, it implies 1666d3eb1570SLai Jiangshan * policy->v.nodes has movable memory only.
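 * For example, with policy_zone == ZONE_NORMAL and a nodemask whose
 * nodes carry only ZONE_MOVABLE memory, dynamic_policy_zone becomes
 * ZONE_MOVABLE, so only gfp_zone(gfp) == ZONE_MOVABLE allocations
 * have the policy applied.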
1667d3eb1570SLai Jiangshan */ 1668d3eb1570SLai Jiangshan if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY])) 1669d3eb1570SLai Jiangshan dynamic_policy_zone = ZONE_MOVABLE; 1670d3eb1570SLai Jiangshan 1671d3eb1570SLai Jiangshan return zone >= dynamic_policy_zone; 1672d3eb1570SLai Jiangshan } 1673d3eb1570SLai Jiangshan 167452cd3b07SLee Schermerhorn /* 167552cd3b07SLee Schermerhorn * Return a nodemask representing a mempolicy for filtering nodes for 167652cd3b07SLee Schermerhorn * page allocation 167752cd3b07SLee Schermerhorn */ 167852cd3b07SLee Schermerhorn static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy) 167919770b32SMel Gorman { 168019770b32SMel Gorman /* Lower zones don't get a nodemask applied for MPOL_BIND */ 168145c4745aSLee Schermerhorn if (unlikely(policy->mode == MPOL_BIND) && 1682d3eb1570SLai Jiangshan apply_policy_zone(policy, gfp_zone(gfp)) && 168319770b32SMel Gorman cpuset_nodemask_valid_mems_allowed(&policy->v.nodes)) 168419770b32SMel Gorman return &policy->v.nodes; 168519770b32SMel Gorman 168619770b32SMel Gorman return NULL; 168719770b32SMel Gorman } 168819770b32SMel Gorman 168952cd3b07SLee Schermerhorn /* Return a zonelist indicated by gfp for node representing a mempolicy */ 16902f5f9486SAndi Kleen static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy, 16912f5f9486SAndi Kleen int nd) 16921da177e4SLinus Torvalds { 169345c4745aSLee Schermerhorn switch (policy->mode) { 16941da177e4SLinus Torvalds case MPOL_PREFERRED: 1695fc36b8d3SLee Schermerhorn if (!(policy->flags & MPOL_F_LOCAL)) 16961da177e4SLinus Torvalds nd = policy->v.preferred_node; 16971da177e4SLinus Torvalds break; 16981da177e4SLinus Torvalds case MPOL_BIND: 169919770b32SMel Gorman /* 170052cd3b07SLee Schermerhorn * Normally, MPOL_BIND allocations are node-local within the 170152cd3b07SLee Schermerhorn * allowed nodemask. However, if __GFP_THISNODE is set and the 17026eb27e1fSBob Liu * current node isn't part of the mask, we use the zonelist for 170352cd3b07SLee Schermerhorn * the first node in the mask instead. 170419770b32SMel Gorman */ 170519770b32SMel Gorman if (unlikely(gfp & __GFP_THISNODE) && 170619770b32SMel Gorman unlikely(!node_isset(nd, policy->v.nodes))) 170719770b32SMel Gorman nd = first_node(policy->v.nodes); 170819770b32SMel Gorman break; 17091da177e4SLinus Torvalds default: 17101da177e4SLinus Torvalds BUG(); 17111da177e4SLinus Torvalds } 17120e88460dSMel Gorman return node_zonelist(nd, gfp); 17131da177e4SLinus Torvalds } 17141da177e4SLinus Torvalds 17151da177e4SLinus Torvalds /* Do dynamic interleaving for a process */ 17161da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy) 17171da177e4SLinus Torvalds { 17181da177e4SLinus Torvalds unsigned nid, next; 17191da177e4SLinus Torvalds struct task_struct *me = current; 17201da177e4SLinus Torvalds 17211da177e4SLinus Torvalds nid = me->il_next; 1722dfcd3c0dSAndi Kleen next = next_node(nid, policy->v.nodes); 17231da177e4SLinus Torvalds if (next >= MAX_NUMNODES) 1724dfcd3c0dSAndi Kleen next = first_node(policy->v.nodes); 1725f5b087b5SDavid Rientjes if (next < MAX_NUMNODES) 17261da177e4SLinus Torvalds me->il_next = next; 17271da177e4SLinus Torvalds return nid; 17281da177e4SLinus Torvalds } 17291da177e4SLinus Torvalds 1730dc85da15SChristoph Lameter /* 1731dc85da15SChristoph Lameter * Depending on the memory policy provide a node from which to allocate the 1732dc85da15SChristoph Lameter * next slab entry. 
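 * E.g., MPOL_INTERLEAVE rotates through the policy nodes via
 * interleave_nodes(), MPOL_PREFERRED returns the preferred node, and
 * MPOL_BIND starts at the first policy node of the local zonelist.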
1733dc85da15SChristoph Lameter */ 17342a389610SDavid Rientjes unsigned int mempolicy_slab_node(void) 1735dc85da15SChristoph Lameter { 1736e7b691b0SAndi Kleen struct mempolicy *policy; 17372a389610SDavid Rientjes int node = numa_mem_id(); 1738e7b691b0SAndi Kleen 1739e7b691b0SAndi Kleen if (in_interrupt()) 17402a389610SDavid Rientjes return node; 1741e7b691b0SAndi Kleen 1742e7b691b0SAndi Kleen policy = current->mempolicy; 1743fc36b8d3SLee Schermerhorn if (!policy || policy->flags & MPOL_F_LOCAL) 17442a389610SDavid Rientjes return node; 1745765c4507SChristoph Lameter 1746bea904d5SLee Schermerhorn switch (policy->mode) { 1747bea904d5SLee Schermerhorn case MPOL_PREFERRED: 1748fc36b8d3SLee Schermerhorn /* 1749fc36b8d3SLee Schermerhorn * handled MPOL_F_LOCAL above 1750fc36b8d3SLee Schermerhorn */ 1751bea904d5SLee Schermerhorn return policy->v.preferred_node; 1752bea904d5SLee Schermerhorn 1753dc85da15SChristoph Lameter case MPOL_INTERLEAVE: 1754dc85da15SChristoph Lameter return interleave_nodes(policy); 1755dc85da15SChristoph Lameter 1756dd1a239fSMel Gorman case MPOL_BIND: { 1757dc85da15SChristoph Lameter /* 1758dc85da15SChristoph Lameter * Follow bind policy behavior and start allocation at the 1759dc85da15SChristoph Lameter * first node. 1760dc85da15SChristoph Lameter */ 176119770b32SMel Gorman struct zonelist *zonelist; 176219770b32SMel Gorman struct zone *zone; 176319770b32SMel Gorman enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL); 17642a389610SDavid Rientjes zonelist = &NODE_DATA(node)->node_zonelists[0]; 176519770b32SMel Gorman (void)first_zones_zonelist(zonelist, highest_zoneidx, 176619770b32SMel Gorman &policy->v.nodes, 176719770b32SMel Gorman &zone); 17682a389610SDavid Rientjes return zone ? zone->node : node; 1769dd1a239fSMel Gorman } 1770dc85da15SChristoph Lameter 1771dc85da15SChristoph Lameter default: 1772bea904d5SLee Schermerhorn BUG(); 1773dc85da15SChristoph Lameter } 1774dc85da15SChristoph Lameter } 1775dc85da15SChristoph Lameter 17761da177e4SLinus Torvalds /* Do static interleaving for a VMA with known offset. 
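   The result is the (off % weight)-th set bit of pol->v.nodes: e.g.,
   for nodes {0,4,5} and off = 7, 7 % 3 = 1 selects the second set bit,
   node 4.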
*/ 17771da177e4SLinus Torvalds static unsigned offset_il_node(struct mempolicy *pol, 17781da177e4SLinus Torvalds struct vm_area_struct *vma, unsigned long off) 17791da177e4SLinus Torvalds { 1780dfcd3c0dSAndi Kleen unsigned nnodes = nodes_weight(pol->v.nodes); 1781f5b087b5SDavid Rientjes unsigned target; 17821da177e4SLinus Torvalds int c; 1783b76ac7e7SJianguo Wu int nid = NUMA_NO_NODE; 17841da177e4SLinus Torvalds 1785f5b087b5SDavid Rientjes if (!nnodes) 1786f5b087b5SDavid Rientjes return numa_node_id(); 1787f5b087b5SDavid Rientjes target = (unsigned int)off % nnodes; 17881da177e4SLinus Torvalds c = 0; 17891da177e4SLinus Torvalds do { 1790dfcd3c0dSAndi Kleen nid = next_node(nid, pol->v.nodes); 17911da177e4SLinus Torvalds c++; 17921da177e4SLinus Torvalds } while (c <= target); 17931da177e4SLinus Torvalds return nid; 17941da177e4SLinus Torvalds } 17951da177e4SLinus Torvalds 17965da7ca86SChristoph Lameter /* Determine a node number for interleave */ 17975da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol, 17985da7ca86SChristoph Lameter struct vm_area_struct *vma, unsigned long addr, int shift) 17995da7ca86SChristoph Lameter { 18005da7ca86SChristoph Lameter if (vma) { 18015da7ca86SChristoph Lameter unsigned long off; 18025da7ca86SChristoph Lameter 18033b98b087SNishanth Aravamudan /* 18043b98b087SNishanth Aravamudan * for small pages, there is no difference between 18053b98b087SNishanth Aravamudan * shift and PAGE_SHIFT, so the bit-shift is safe. 18063b98b087SNishanth Aravamudan * for huge pages, since vm_pgoff is in units of small 18073b98b087SNishanth Aravamudan * pages, we need to shift off the always 0 bits to get 18083b98b087SNishanth Aravamudan * a useful offset. 18093b98b087SNishanth Aravamudan */ 18103b98b087SNishanth Aravamudan BUG_ON(shift < PAGE_SHIFT); 18113b98b087SNishanth Aravamudan off = vma->vm_pgoff >> (shift - PAGE_SHIFT); 18125da7ca86SChristoph Lameter off += (addr - vma->vm_start) >> shift; 18135da7ca86SChristoph Lameter return offset_il_node(pol, vma, off); 18145da7ca86SChristoph Lameter } else 18155da7ca86SChristoph Lameter return interleave_nodes(pol); 18165da7ca86SChristoph Lameter } 18175da7ca86SChristoph Lameter 1818778d3b0fSMichal Hocko /* 1819778d3b0fSMichal Hocko * Return the bit number of a random bit set in the nodemask. 
1820b76ac7e7SJianguo Wu * (returns NUMA_NO_NODE if nodemask is empty) 1821778d3b0fSMichal Hocko */ 1822778d3b0fSMichal Hocko int node_random(const nodemask_t *maskp) 1823778d3b0fSMichal Hocko { 1824b76ac7e7SJianguo Wu int w, bit = NUMA_NO_NODE; 1825778d3b0fSMichal Hocko 1826778d3b0fSMichal Hocko w = nodes_weight(*maskp); 1827778d3b0fSMichal Hocko if (w) 1828778d3b0fSMichal Hocko bit = bitmap_ord_to_pos(maskp->bits, 1829778d3b0fSMichal Hocko get_random_int() % w, MAX_NUMNODES); 1830778d3b0fSMichal Hocko return bit; 1831778d3b0fSMichal Hocko } 1832778d3b0fSMichal Hocko 183300ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS 1834480eccf9SLee Schermerhorn /* 1835480eccf9SLee Schermerhorn * huge_zonelist(@vma, @addr, @gfp_flags, @mpol) 1836b46e14acSFabian Frederick * @vma: virtual memory area whose policy is sought 1837b46e14acSFabian Frederick * @addr: address in @vma for shared policy lookup and interleave policy 1838b46e14acSFabian Frederick * @gfp_flags: for requested zone 1839b46e14acSFabian Frederick * @mpol: pointer to mempolicy pointer for reference counted mempolicy 1840b46e14acSFabian Frederick * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask 1841480eccf9SLee Schermerhorn * 184252cd3b07SLee Schermerhorn * Returns a zonelist suitable for a huge page allocation and a pointer 184352cd3b07SLee Schermerhorn * to the struct mempolicy for conditional unref after allocation. 184452cd3b07SLee Schermerhorn * If the effective policy is 'bind', returns a pointer to the mempolicy's 184552cd3b07SLee Schermerhorn * @nodemask for filtering the zonelist. 1846c0ff7453SMiao Xie * 1847d26914d1SMel Gorman * Must be protected by read_mems_allowed_begin() 1848480eccf9SLee Schermerhorn */ 1849396faf03SMel Gorman struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr, 185019770b32SMel Gorman gfp_t gfp_flags, struct mempolicy **mpol, 185119770b32SMel Gorman nodemask_t **nodemask) 18525da7ca86SChristoph Lameter { 1853480eccf9SLee Schermerhorn struct zonelist *zl; 18545da7ca86SChristoph Lameter 1855dd6eecb9SOleg Nesterov *mpol = get_vma_policy(vma, addr); 185619770b32SMel Gorman *nodemask = NULL; /* assume !MPOL_BIND */ 18535da7ca86SChristoph Lameter 185852cd3b07SLee Schermerhorn if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) { 185952cd3b07SLee Schermerhorn zl = node_zonelist(interleave_nid(*mpol, vma, addr, 1860a5516438SAndi Kleen huge_page_shift(hstate_vma(vma))), gfp_flags); 186152cd3b07SLee Schermerhorn } else { 18622f5f9486SAndi Kleen zl = policy_zonelist(gfp_flags, *mpol, numa_node_id()); 186352cd3b07SLee Schermerhorn if ((*mpol)->mode == MPOL_BIND) 186452cd3b07SLee Schermerhorn *nodemask = &(*mpol)->v.nodes; 1865480eccf9SLee Schermerhorn } 1866480eccf9SLee Schermerhorn return zl; 18675da7ca86SChristoph Lameter } 186806808b08SLee Schermerhorn 186906808b08SLee Schermerhorn /* 187006808b08SLee Schermerhorn * init_nodemask_of_mempolicy 187106808b08SLee Schermerhorn * 187206808b08SLee Schermerhorn * If the current task's mempolicy is "default" [NULL], return 'false' 187306808b08SLee Schermerhorn * to indicate default policy. Otherwise, extract the policy nodemask 187406808b08SLee Schermerhorn * for 'bind' or 'interleave' policy into the argument nodemask, or 187506808b08SLee Schermerhorn * initialize the argument nodemask to contain the single node for 187606808b08SLee Schermerhorn * 'preferred' or 'local' policy and return 'true' to indicate presence 187706808b08SLee Schermerhorn * of non-default mempolicy.
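 * E.g., MPOL_INTERLEAVE over nodes {0,2} sets *mask to {0,2}, while
 * MPOL_PREFERRED for node 3 sets *mask to {3}.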
187806808b08SLee Schermerhorn * 187906808b08SLee Schermerhorn * We don't bother with reference counting the mempolicy [mpol_get/put] 188006808b08SLee Schermerhorn * because the current task is examining its own mempolicy and a task's 188106808b08SLee Schermerhorn * mempolicy is only ever changed by the task itself. 188206808b08SLee Schermerhorn * 188306808b08SLee Schermerhorn * N.B., it is the caller's responsibility to free a returned nodemask. 188406808b08SLee Schermerhorn */ 188506808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask) 188606808b08SLee Schermerhorn { 188706808b08SLee Schermerhorn struct mempolicy *mempolicy; 188806808b08SLee Schermerhorn int nid; 188906808b08SLee Schermerhorn 189006808b08SLee Schermerhorn if (!(mask && current->mempolicy)) 189106808b08SLee Schermerhorn return false; 189206808b08SLee Schermerhorn 1893c0ff7453SMiao Xie task_lock(current); 189406808b08SLee Schermerhorn mempolicy = current->mempolicy; 189506808b08SLee Schermerhorn switch (mempolicy->mode) { 189606808b08SLee Schermerhorn case MPOL_PREFERRED: 189706808b08SLee Schermerhorn if (mempolicy->flags & MPOL_F_LOCAL) 189806808b08SLee Schermerhorn nid = numa_node_id(); 189906808b08SLee Schermerhorn else 190006808b08SLee Schermerhorn nid = mempolicy->v.preferred_node; 190106808b08SLee Schermerhorn init_nodemask_of_node(mask, nid); 190206808b08SLee Schermerhorn break; 190306808b08SLee Schermerhorn 190406808b08SLee Schermerhorn case MPOL_BIND: 190506808b08SLee Schermerhorn /* Fall through */ 190606808b08SLee Schermerhorn case MPOL_INTERLEAVE: 190706808b08SLee Schermerhorn *mask = mempolicy->v.nodes; 190806808b08SLee Schermerhorn break; 190906808b08SLee Schermerhorn 191006808b08SLee Schermerhorn default: 191106808b08SLee Schermerhorn BUG(); 191206808b08SLee Schermerhorn } 1913c0ff7453SMiao Xie task_unlock(current); 191406808b08SLee Schermerhorn 191506808b08SLee Schermerhorn return true; 191606808b08SLee Schermerhorn } 191700ac59adSChen, Kenneth W #endif 19185da7ca86SChristoph Lameter 19196f48d0ebSDavid Rientjes /* 19206f48d0ebSDavid Rientjes * mempolicy_nodemask_intersects 19216f48d0ebSDavid Rientjes * 19226f48d0ebSDavid Rientjes * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default 19236f48d0ebSDavid Rientjes * policy. Otherwise, check for intersection between mask and the policy 19246f48d0ebSDavid Rientjes * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local' 19256f48d0ebSDavid Rientjes * policy, always return true since it may allocate elsewhere on fallback. 19266f48d0ebSDavid Rientjes * 19276f48d0ebSDavid Rientjes * Takes task_lock(tsk) to prevent freeing of its mempolicy.
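 * E.g., an MPOL_BIND policy over {0,1} intersects mask {1,2} but not
 * mask {2,3}; MPOL_PREFERRED always reports an intersection, since
 * the task may have fallen back to nodes in mask under memory
 * pressure.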
19286f48d0ebSDavid Rientjes */ 19296f48d0ebSDavid Rientjes bool mempolicy_nodemask_intersects(struct task_struct *tsk, 19306f48d0ebSDavid Rientjes const nodemask_t *mask) 19316f48d0ebSDavid Rientjes { 19326f48d0ebSDavid Rientjes struct mempolicy *mempolicy; 19336f48d0ebSDavid Rientjes bool ret = true; 19346f48d0ebSDavid Rientjes 19356f48d0ebSDavid Rientjes if (!mask) 19366f48d0ebSDavid Rientjes return ret; 19376f48d0ebSDavid Rientjes task_lock(tsk); 19386f48d0ebSDavid Rientjes mempolicy = tsk->mempolicy; 19396f48d0ebSDavid Rientjes if (!mempolicy) 19406f48d0ebSDavid Rientjes goto out; 19416f48d0ebSDavid Rientjes 19426f48d0ebSDavid Rientjes switch (mempolicy->mode) { 19436f48d0ebSDavid Rientjes case MPOL_PREFERRED: 19446f48d0ebSDavid Rientjes /* 19456f48d0ebSDavid Rientjes * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to 19466f48d0ebSDavid Rientjes * allocate from, they may fallback to other nodes when oom. 19476f48d0ebSDavid Rientjes * Thus, it's possible for tsk to have allocated memory from 19486f48d0ebSDavid Rientjes * nodes in mask. 19496f48d0ebSDavid Rientjes */ 19506f48d0ebSDavid Rientjes break; 19516f48d0ebSDavid Rientjes case MPOL_BIND: 19526f48d0ebSDavid Rientjes case MPOL_INTERLEAVE: 19536f48d0ebSDavid Rientjes ret = nodes_intersects(mempolicy->v.nodes, *mask); 19546f48d0ebSDavid Rientjes break; 19556f48d0ebSDavid Rientjes default: 19566f48d0ebSDavid Rientjes BUG(); 19576f48d0ebSDavid Rientjes } 19586f48d0ebSDavid Rientjes out: 19596f48d0ebSDavid Rientjes task_unlock(tsk); 19606f48d0ebSDavid Rientjes return ret; 19616f48d0ebSDavid Rientjes } 19626f48d0ebSDavid Rientjes 19631da177e4SLinus Torvalds /* Allocate a page in interleaved policy. 19641da177e4SLinus Torvalds Own path because it needs to do special accounting. */ 1965662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, 1966662f3a0bSAndi Kleen unsigned nid) 19671da177e4SLinus Torvalds { 19681da177e4SLinus Torvalds struct zonelist *zl; 19691da177e4SLinus Torvalds struct page *page; 19701da177e4SLinus Torvalds 19710e88460dSMel Gorman zl = node_zonelist(nid, gfp); 19721da177e4SLinus Torvalds page = __alloc_pages(gfp, order, zl); 1973dd1a239fSMel Gorman if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0])) 1974ca889e6cSChristoph Lameter inc_zone_page_state(page, NUMA_INTERLEAVE_HIT); 19751da177e4SLinus Torvalds return page; 19761da177e4SLinus Torvalds } 19771da177e4SLinus Torvalds 19781da177e4SLinus Torvalds /** 19790bbbc0b3SAndrea Arcangeli * alloc_pages_vma - Allocate a page for a VMA. 19801da177e4SLinus Torvalds * 19811da177e4SLinus Torvalds * @gfp: 19821da177e4SLinus Torvalds * %GFP_USER user allocation. 19831da177e4SLinus Torvalds * %GFP_KERNEL kernel allocations, 19841da177e4SLinus Torvalds * %GFP_HIGHMEM highmem/user allocations, 19851da177e4SLinus Torvalds * %GFP_FS allocation should not call back into a file system. 19861da177e4SLinus Torvalds * %GFP_ATOMIC don't sleep. 19871da177e4SLinus Torvalds * 19880bbbc0b3SAndrea Arcangeli * @order:Order of the GFP allocation. 19891da177e4SLinus Torvalds * @vma: Pointer to VMA or NULL if not available. 19901da177e4SLinus Torvalds * @addr: Virtual Address of the allocation. Must be inside the VMA. 1991*be97a41bSVlastimil Babka * @node: Which node to prefer for allocation (modulo policy). 
1992*be97a41bSVlastimil Babka * @hugepage: for hugepages try only the preferred node if possible 19931da177e4SLinus Torvalds * 19941da177e4SLinus Torvalds * This function allocates a page from the kernel page pool and applies 19951da177e4SLinus Torvalds * a NUMA policy associated with the VMA or the current process. 19961da177e4SLinus Torvalds * When VMA is not NULL caller must hold down_read on the mmap_sem of the 19971da177e4SLinus Torvalds * mm_struct of the VMA to prevent it from going away. Should be used for 1998*be97a41bSVlastimil Babka * all allocations for pages that will be mapped into user space. Returns 1999*be97a41bSVlastimil Babka * NULL when no page can be allocated. 20001da177e4SLinus Torvalds */ 20011da177e4SLinus Torvalds struct page * 20020bbbc0b3SAndrea Arcangeli alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, 2003*be97a41bSVlastimil Babka unsigned long addr, int node, bool hugepage) 20041da177e4SLinus Torvalds { 2005cc9a6c87SMel Gorman struct mempolicy *pol; 2006c0ff7453SMiao Xie struct page *page; 2007cc9a6c87SMel Gorman unsigned int cpuset_mems_cookie; 2008*be97a41bSVlastimil Babka struct zonelist *zl; 2009*be97a41bSVlastimil Babka nodemask_t *nmask; 20101da177e4SLinus Torvalds 2011cc9a6c87SMel Gorman retry_cpuset: 2012dd6eecb9SOleg Nesterov pol = get_vma_policy(vma, addr); 2013d26914d1SMel Gorman cpuset_mems_cookie = read_mems_allowed_begin(); 2014cc9a6c87SMel Gorman 2015*be97a41bSVlastimil Babka if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage && 2016*be97a41bSVlastimil Babka pol->mode != MPOL_INTERLEAVE)) { 2017*be97a41bSVlastimil Babka /* 2018*be97a41bSVlastimil Babka * For hugepage allocation and non-interleave policy which 2019*be97a41bSVlastimil Babka * allows the current node, we only try to allocate from the 2020*be97a41bSVlastimil Babka * current node and don't fall back to other nodes, as the 2021*be97a41bSVlastimil Babka * cost of remote accesses would likely offset THP benefits. 2022*be97a41bSVlastimil Babka * 2023*be97a41bSVlastimil Babka * If the policy is interleave, or does not allow the current 2024*be97a41bSVlastimil Babka * node in its nodemask, we allocate the standard way. 
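 * E.g., with @node = 0: MPOL_BIND to {0,1} allocates the huge page
 * directly on node 0, while MPOL_BIND to {2,3} falls through to the
 * standard path below.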
2025*be97a41bSVlastimil Babka */ 2026*be97a41bSVlastimil Babka nmask = policy_nodemask(gfp, pol); 2027*be97a41bSVlastimil Babka if (!nmask || node_isset(node, *nmask)) { 2028*be97a41bSVlastimil Babka mpol_cond_put(pol); 2029*be97a41bSVlastimil Babka page = alloc_pages_exact_node(node, gfp, order); 2030*be97a41bSVlastimil Babka goto out; 2031*be97a41bSVlastimil Babka } 2032*be97a41bSVlastimil Babka } 2033*be97a41bSVlastimil Babka 2034*be97a41bSVlastimil Babka if (pol->mode == MPOL_INTERLEAVE) { 20351da177e4SLinus Torvalds unsigned nid; 20365da7ca86SChristoph Lameter 20378eac563cSAndi Kleen nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order); 203852cd3b07SLee Schermerhorn mpol_cond_put(pol); 20390bbbc0b3SAndrea Arcangeli page = alloc_page_interleave(gfp, order, nid); 2040*be97a41bSVlastimil Babka goto out; 20411da177e4SLinus Torvalds } 20421da177e4SLinus Torvalds 2043077fcf11SAneesh Kumar K.V nmask = policy_nodemask(gfp, pol); 2044*be97a41bSVlastimil Babka zl = policy_zonelist(gfp, pol, node); 2045077fcf11SAneesh Kumar K.V mpol_cond_put(pol); 2046*be97a41bSVlastimil Babka page = __alloc_pages_nodemask(gfp, order, zl, nmask); 2047*be97a41bSVlastimil Babka out: 2048*be97a41bSVlastimil Babka if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) 2049077fcf11SAneesh Kumar K.V goto retry_cpuset; 2050077fcf11SAneesh Kumar K.V return page; 2051077fcf11SAneesh Kumar K.V } 2052077fcf11SAneesh Kumar K.V 20531da177e4SLinus Torvalds /** 20541da177e4SLinus Torvalds * alloc_pages_current - Allocate pages. 20551da177e4SLinus Torvalds * 20561da177e4SLinus Torvalds * @gfp: 20571da177e4SLinus Torvalds * %GFP_USER user allocation, 20581da177e4SLinus Torvalds * %GFP_KERNEL kernel allocation, 20591da177e4SLinus Torvalds * %GFP_HIGHMEM highmem allocation, 20601da177e4SLinus Torvalds * %GFP_FS don't call back into a file system. 20611da177e4SLinus Torvalds * %GFP_ATOMIC don't sleep. 20621da177e4SLinus Torvalds * @order: Power of two of allocation size in pages. 0 is a single page. 20631da177e4SLinus Torvalds * 20641da177e4SLinus Torvalds * Allocate a page from the kernel page pool. When not in 20651da177e4SLinus Torvalds * interrupt context, apply the current process' NUMA policy. 20661da177e4SLinus Torvalds * Returns NULL when no page can be allocated. 20671da177e4SLinus Torvalds * 2068cf2a473cSPaul Jackson * Don't call cpuset_update_task_memory_state() unless 20691da177e4SLinus Torvalds * 1) it's ok to take cpuset_sem (can WAIT), and 20701da177e4SLinus Torvalds * 2) allocating for current task (not interrupt).
20711da177e4SLinus Torvalds */ 2072dd0fc66fSAl Viro struct page *alloc_pages_current(gfp_t gfp, unsigned order) 20731da177e4SLinus Torvalds { 20748d90274bSOleg Nesterov struct mempolicy *pol = &default_policy; 2075c0ff7453SMiao Xie struct page *page; 2076cc9a6c87SMel Gorman unsigned int cpuset_mems_cookie; 20771da177e4SLinus Torvalds 20788d90274bSOleg Nesterov if (!in_interrupt() && !(gfp & __GFP_THISNODE)) 20798d90274bSOleg Nesterov pol = get_task_policy(current); 208052cd3b07SLee Schermerhorn 2081cc9a6c87SMel Gorman retry_cpuset: 2082d26914d1SMel Gorman cpuset_mems_cookie = read_mems_allowed_begin(); 2083cc9a6c87SMel Gorman 208452cd3b07SLee Schermerhorn /* 208552cd3b07SLee Schermerhorn * No reference counting needed for current->mempolicy 208652cd3b07SLee Schermerhorn * nor system default_policy 208752cd3b07SLee Schermerhorn */ 208845c4745aSLee Schermerhorn if (pol->mode == MPOL_INTERLEAVE) 2089c0ff7453SMiao Xie page = alloc_page_interleave(gfp, order, interleave_nodes(pol)); 2090c0ff7453SMiao Xie else 2091c0ff7453SMiao Xie page = __alloc_pages_nodemask(gfp, order, 20925c4b4be3SAndi Kleen policy_zonelist(gfp, pol, numa_node_id()), 20935c4b4be3SAndi Kleen policy_nodemask(gfp, pol)); 2094cc9a6c87SMel Gorman 2095d26914d1SMel Gorman if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) 2096cc9a6c87SMel Gorman goto retry_cpuset; 2097cc9a6c87SMel Gorman 2098c0ff7453SMiao Xie return page; 20991da177e4SLinus Torvalds } 21001da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_pages_current); 21011da177e4SLinus Torvalds 2102ef0855d3SOleg Nesterov int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst) 2103ef0855d3SOleg Nesterov { 2104ef0855d3SOleg Nesterov struct mempolicy *pol = mpol_dup(vma_policy(src)); 2105ef0855d3SOleg Nesterov 2106ef0855d3SOleg Nesterov if (IS_ERR(pol)) 2107ef0855d3SOleg Nesterov return PTR_ERR(pol); 2108ef0855d3SOleg Nesterov dst->vm_policy = pol; 2109ef0855d3SOleg Nesterov return 0; 2110ef0855d3SOleg Nesterov } 2111ef0855d3SOleg Nesterov 21124225399aSPaul Jackson /* 2113846a16bfSLee Schermerhorn * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it 21144225399aSPaul Jackson * rebinds the mempolicy it is copying by calling mpol_rebind_policy() 21154225399aSPaul Jackson * with the mems_allowed returned by cpuset_mems_allowed(). This 21164225399aSPaul Jackson * keeps mempolicies cpuset relative after its cpuset moves. See 21174225399aSPaul Jackson * further kernel/cpuset.c update_nodemask(). 2118708c1bbcSMiao Xie * 2119708c1bbcSMiao Xie * current's mempolicy may be rebound by another task (the one that changes 2120708c1bbcSMiao Xie * the cpuset's mems), so we needn't do rebind work for current task.
21214225399aSPaul Jackson */ 21224225399aSPaul Jackson 2123846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */ 2124846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old) 21251da177e4SLinus Torvalds { 21261da177e4SLinus Torvalds struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL); 21271da177e4SLinus Torvalds 21281da177e4SLinus Torvalds if (!new) 21291da177e4SLinus Torvalds return ERR_PTR(-ENOMEM); 2130708c1bbcSMiao Xie 2131708c1bbcSMiao Xie /* task's mempolicy is protected by alloc_lock */ 2132708c1bbcSMiao Xie if (old == current->mempolicy) { 2133708c1bbcSMiao Xie task_lock(current); 2134708c1bbcSMiao Xie *new = *old; 2135708c1bbcSMiao Xie task_unlock(current); 2136708c1bbcSMiao Xie } else 2137708c1bbcSMiao Xie *new = *old; 2138708c1bbcSMiao Xie 21394225399aSPaul Jackson if (current_cpuset_is_being_rebound()) { 21404225399aSPaul Jackson nodemask_t mems = cpuset_mems_allowed(current); 2141708c1bbcSMiao Xie if (new->flags & MPOL_F_REBINDING) 2142708c1bbcSMiao Xie mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2); 2143708c1bbcSMiao Xie else 2144708c1bbcSMiao Xie mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE); 21454225399aSPaul Jackson } 21461da177e4SLinus Torvalds atomic_set(&new->refcnt, 1); 21471da177e4SLinus Torvalds return new; 21481da177e4SLinus Torvalds } 21491da177e4SLinus Torvalds 21501da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */ 2151fcfb4dccSKOSAKI Motohiro bool __mpol_equal(struct mempolicy *a, struct mempolicy *b) 21521da177e4SLinus Torvalds { 21531da177e4SLinus Torvalds if (!a || !b) 2154fcfb4dccSKOSAKI Motohiro return false; 215545c4745aSLee Schermerhorn if (a->mode != b->mode) 2156fcfb4dccSKOSAKI Motohiro return false; 215719800502SBob Liu if (a->flags != b->flags) 2158fcfb4dccSKOSAKI Motohiro return false; 215919800502SBob Liu if (mpol_store_user_nodemask(a)) 216019800502SBob Liu if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask)) 2161fcfb4dccSKOSAKI Motohiro return false; 216219800502SBob Liu 216345c4745aSLee Schermerhorn switch (a->mode) { 216419770b32SMel Gorman case MPOL_BIND: 216519770b32SMel Gorman /* Fall through */ 21661da177e4SLinus Torvalds case MPOL_INTERLEAVE: 2167fcfb4dccSKOSAKI Motohiro return !!nodes_equal(a->v.nodes, b->v.nodes); 21681da177e4SLinus Torvalds case MPOL_PREFERRED: 216975719661SNamhyung Kim return a->v.preferred_node == b->v.preferred_node; 21701da177e4SLinus Torvalds default: 21711da177e4SLinus Torvalds BUG(); 2172fcfb4dccSKOSAKI Motohiro return false; 21731da177e4SLinus Torvalds } 21741da177e4SLinus Torvalds } 21751da177e4SLinus Torvalds 21761da177e4SLinus Torvalds /* 21771da177e4SLinus Torvalds * Shared memory backing store policy support. 21781da177e4SLinus Torvalds * 21791da177e4SLinus Torvalds * Remember policies even when nobody has shared memory mapped. 21801da177e4SLinus Torvalds * The policies are kept in Red-Black tree linked from the inode. 21811da177e4SLinus Torvalds * They are protected by the sp->lock spinlock, which should be held 21821da177e4SLinus Torvalds * for any accesses to the tree. 
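 * Each sp_node covers a [start, end) range of page offsets into the
 * backing object; sp_lookup() below returns the first node that
 * intersects a requested range.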
21831da177e4SLinus Torvalds */ 21841da177e4SLinus Torvalds 21851da177e4SLinus Torvalds /* lookup first element intersecting start-end */ 218642288fe3SMel Gorman /* Caller holds sp->lock */ 21871da177e4SLinus Torvalds static struct sp_node * 21881da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end) 21891da177e4SLinus Torvalds { 21901da177e4SLinus Torvalds struct rb_node *n = sp->root.rb_node; 21911da177e4SLinus Torvalds 21921da177e4SLinus Torvalds while (n) { 21931da177e4SLinus Torvalds struct sp_node *p = rb_entry(n, struct sp_node, nd); 21941da177e4SLinus Torvalds 21951da177e4SLinus Torvalds if (start >= p->end) 21961da177e4SLinus Torvalds n = n->rb_right; 21971da177e4SLinus Torvalds else if (end <= p->start) 21981da177e4SLinus Torvalds n = n->rb_left; 21991da177e4SLinus Torvalds else 22001da177e4SLinus Torvalds break; 22011da177e4SLinus Torvalds } 22021da177e4SLinus Torvalds if (!n) 22031da177e4SLinus Torvalds return NULL; 22041da177e4SLinus Torvalds for (;;) { 22051da177e4SLinus Torvalds struct sp_node *w = NULL; 22061da177e4SLinus Torvalds struct rb_node *prev = rb_prev(n); 22071da177e4SLinus Torvalds if (!prev) 22081da177e4SLinus Torvalds break; 22091da177e4SLinus Torvalds w = rb_entry(prev, struct sp_node, nd); 22101da177e4SLinus Torvalds if (w->end <= start) 22111da177e4SLinus Torvalds break; 22121da177e4SLinus Torvalds n = prev; 22131da177e4SLinus Torvalds } 22141da177e4SLinus Torvalds return rb_entry(n, struct sp_node, nd); 22151da177e4SLinus Torvalds } 22161da177e4SLinus Torvalds 22171da177e4SLinus Torvalds /* Insert a new shared policy into the list. */ 22181da177e4SLinus Torvalds /* Caller holds sp->lock */ 22191da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new) 22201da177e4SLinus Torvalds { 22211da177e4SLinus Torvalds struct rb_node **p = &sp->root.rb_node; 22221da177e4SLinus Torvalds struct rb_node *parent = NULL; 22231da177e4SLinus Torvalds struct sp_node *nd; 22241da177e4SLinus Torvalds 22251da177e4SLinus Torvalds while (*p) { 22261da177e4SLinus Torvalds parent = *p; 22271da177e4SLinus Torvalds nd = rb_entry(parent, struct sp_node, nd); 22281da177e4SLinus Torvalds if (new->start < nd->start) 22291da177e4SLinus Torvalds p = &(*p)->rb_left; 22301da177e4SLinus Torvalds else if (new->end > nd->end) 22311da177e4SLinus Torvalds p = &(*p)->rb_right; 22321da177e4SLinus Torvalds else 22331da177e4SLinus Torvalds BUG(); 22341da177e4SLinus Torvalds } 22351da177e4SLinus Torvalds rb_link_node(&new->nd, parent, p); 22361da177e4SLinus Torvalds rb_insert_color(&new->nd, &sp->root); 2237140d5a49SPaul Mundt pr_debug("inserting %lx-%lx: %d\n", new->start, new->end, 223845c4745aSLee Schermerhorn new->policy ? 
new->policy->mode : 0); 22391da177e4SLinus Torvalds } 22401da177e4SLinus Torvalds 22411da177e4SLinus Torvalds /* Find shared policy intersecting idx */ 22421da177e4SLinus Torvalds struct mempolicy * 22431da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx) 22441da177e4SLinus Torvalds { 22451da177e4SLinus Torvalds struct mempolicy *pol = NULL; 22461da177e4SLinus Torvalds struct sp_node *sn; 22471da177e4SLinus Torvalds 22481da177e4SLinus Torvalds if (!sp->root.rb_node) 22491da177e4SLinus Torvalds return NULL; 225042288fe3SMel Gorman spin_lock(&sp->lock); 22511da177e4SLinus Torvalds sn = sp_lookup(sp, idx, idx+1); 22521da177e4SLinus Torvalds if (sn) { 22531da177e4SLinus Torvalds mpol_get(sn->policy); 22541da177e4SLinus Torvalds pol = sn->policy; 22551da177e4SLinus Torvalds } 225642288fe3SMel Gorman spin_unlock(&sp->lock); 22571da177e4SLinus Torvalds return pol; 22581da177e4SLinus Torvalds } 22591da177e4SLinus Torvalds 226063f74ca2SKOSAKI Motohiro static void sp_free(struct sp_node *n) 226163f74ca2SKOSAKI Motohiro { 226263f74ca2SKOSAKI Motohiro mpol_put(n->policy); 226363f74ca2SKOSAKI Motohiro kmem_cache_free(sn_cache, n); 226463f74ca2SKOSAKI Motohiro } 226563f74ca2SKOSAKI Motohiro 2266771fb4d8SLee Schermerhorn /** 2267771fb4d8SLee Schermerhorn * mpol_misplaced - check whether current page node is valid in policy 2268771fb4d8SLee Schermerhorn * 2269b46e14acSFabian Frederick * @page: page to be checked 2270b46e14acSFabian Frederick * @vma: vm area where page mapped 2271b46e14acSFabian Frederick * @addr: virtual address where page mapped 2272771fb4d8SLee Schermerhorn * 2273771fb4d8SLee Schermerhorn * Lookup current policy node id for vma,addr and "compare to" page's 2274771fb4d8SLee Schermerhorn * node id. 2275771fb4d8SLee Schermerhorn * 2276771fb4d8SLee Schermerhorn * Returns: 2277771fb4d8SLee Schermerhorn * -1 - not misplaced, page is in the right node 2278771fb4d8SLee Schermerhorn * node - node id where the page should be 2279771fb4d8SLee Schermerhorn * 2280771fb4d8SLee Schermerhorn * Policy determination "mimics" alloc_page_vma(). 2281771fb4d8SLee Schermerhorn * Called from fault path where we know the vma and faulting address. 
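 *
 * For example, under an MPOL_BIND policy over nodes 0-1, a page that
 * currently sits on node 2 is misplaced and an allowed node id is
 * returned; a page already on node 0 or 1 yields -1.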
2282771fb4d8SLee Schermerhorn */ 2283771fb4d8SLee Schermerhorn int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr) 2284771fb4d8SLee Schermerhorn { 2285771fb4d8SLee Schermerhorn struct mempolicy *pol; 2286771fb4d8SLee Schermerhorn struct zone *zone; 2287771fb4d8SLee Schermerhorn int curnid = page_to_nid(page); 2288771fb4d8SLee Schermerhorn unsigned long pgoff; 228990572890SPeter Zijlstra int thiscpu = raw_smp_processor_id(); 229090572890SPeter Zijlstra int thisnid = cpu_to_node(thiscpu); 2291771fb4d8SLee Schermerhorn int polnid = -1; 2292771fb4d8SLee Schermerhorn int ret = -1; 2293771fb4d8SLee Schermerhorn 2294771fb4d8SLee Schermerhorn BUG_ON(!vma); 2295771fb4d8SLee Schermerhorn 2296dd6eecb9SOleg Nesterov pol = get_vma_policy(vma, addr); 2297771fb4d8SLee Schermerhorn if (!(pol->flags & MPOL_F_MOF)) 2298771fb4d8SLee Schermerhorn goto out; 2299771fb4d8SLee Schermerhorn 2300771fb4d8SLee Schermerhorn switch (pol->mode) { 2301771fb4d8SLee Schermerhorn case MPOL_INTERLEAVE: 2302771fb4d8SLee Schermerhorn BUG_ON(addr >= vma->vm_end); 2303771fb4d8SLee Schermerhorn BUG_ON(addr < vma->vm_start); 2304771fb4d8SLee Schermerhorn 2305771fb4d8SLee Schermerhorn pgoff = vma->vm_pgoff; 2306771fb4d8SLee Schermerhorn pgoff += (addr - vma->vm_start) >> PAGE_SHIFT; 2307771fb4d8SLee Schermerhorn polnid = offset_il_node(pol, vma, pgoff); 2308771fb4d8SLee Schermerhorn break; 2309771fb4d8SLee Schermerhorn 2310771fb4d8SLee Schermerhorn case MPOL_PREFERRED: 2311771fb4d8SLee Schermerhorn if (pol->flags & MPOL_F_LOCAL) 2312771fb4d8SLee Schermerhorn polnid = numa_node_id(); 2313771fb4d8SLee Schermerhorn else 2314771fb4d8SLee Schermerhorn polnid = pol->v.preferred_node; 2315771fb4d8SLee Schermerhorn break; 2316771fb4d8SLee Schermerhorn 2317771fb4d8SLee Schermerhorn case MPOL_BIND: 2318771fb4d8SLee Schermerhorn /* 2319771fb4d8SLee Schermerhorn * allows binding to multiple nodes. 2320771fb4d8SLee Schermerhorn * use current page if in policy nodemask, 2321771fb4d8SLee Schermerhorn * else select nearest allowed node, if any. 2322771fb4d8SLee Schermerhorn * If no allowed nodes, use current [!misplaced]. 
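 * (first_zones_zonelist() below walks the local node's zonelist
 * restricted to pol->v.nodes, so the first match is the nearest
 * allowed node.)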
2323771fb4d8SLee Schermerhorn */ 2324771fb4d8SLee Schermerhorn if (node_isset(curnid, pol->v.nodes)) 2325771fb4d8SLee Schermerhorn goto out; 2326771fb4d8SLee Schermerhorn (void)first_zones_zonelist( 2327771fb4d8SLee Schermerhorn node_zonelist(numa_node_id(), GFP_HIGHUSER), 2328771fb4d8SLee Schermerhorn gfp_zone(GFP_HIGHUSER), 2329771fb4d8SLee Schermerhorn &pol->v.nodes, &zone); 2330771fb4d8SLee Schermerhorn polnid = zone->node; 2331771fb4d8SLee Schermerhorn break; 2332771fb4d8SLee Schermerhorn 2333771fb4d8SLee Schermerhorn default: 2334771fb4d8SLee Schermerhorn BUG(); 2335771fb4d8SLee Schermerhorn } 23365606e387SMel Gorman 23375606e387SMel Gorman /* Migrate the page towards the node whose CPU is referencing it */ 2338e42c8ff2SMel Gorman if (pol->flags & MPOL_F_MORON) { 233990572890SPeter Zijlstra polnid = thisnid; 23405606e387SMel Gorman 234110f39042SRik van Riel if (!should_numa_migrate_memory(current, page, curnid, thiscpu)) 2342de1c9ce6SRik van Riel goto out; 2343de1c9ce6SRik van Riel } 2344e42c8ff2SMel Gorman 2345771fb4d8SLee Schermerhorn if (curnid != polnid) 2346771fb4d8SLee Schermerhorn ret = polnid; 2347771fb4d8SLee Schermerhorn out: 2348771fb4d8SLee Schermerhorn mpol_cond_put(pol); 2349771fb4d8SLee Schermerhorn 2350771fb4d8SLee Schermerhorn return ret; 2351771fb4d8SLee Schermerhorn } 2352771fb4d8SLee Schermerhorn 23531da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n) 23541da177e4SLinus Torvalds { 2355140d5a49SPaul Mundt pr_debug("deleting %lx-%lx\n", n->start, n->end); 23561da177e4SLinus Torvalds rb_erase(&n->nd, &sp->root); 235763f74ca2SKOSAKI Motohiro sp_free(n); 23581da177e4SLinus Torvalds } 23591da177e4SLinus Torvalds 236042288fe3SMel Gorman static void sp_node_init(struct sp_node *node, unsigned long start, 236142288fe3SMel Gorman unsigned long end, struct mempolicy *pol) 236242288fe3SMel Gorman { 236342288fe3SMel Gorman node->start = start; 236442288fe3SMel Gorman node->end = end; 236542288fe3SMel Gorman node->policy = pol; 236642288fe3SMel Gorman } 236742288fe3SMel Gorman 2368dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end, 2369dbcb0f19SAdrian Bunk struct mempolicy *pol) 23701da177e4SLinus Torvalds { 2371869833f2SKOSAKI Motohiro struct sp_node *n; 2372869833f2SKOSAKI Motohiro struct mempolicy *newpol; 23731da177e4SLinus Torvalds 2374869833f2SKOSAKI Motohiro n = kmem_cache_alloc(sn_cache, GFP_KERNEL); 23751da177e4SLinus Torvalds if (!n) 23761da177e4SLinus Torvalds return NULL; 2377869833f2SKOSAKI Motohiro 2378869833f2SKOSAKI Motohiro newpol = mpol_dup(pol); 2379869833f2SKOSAKI Motohiro if (IS_ERR(newpol)) { 2380869833f2SKOSAKI Motohiro kmem_cache_free(sn_cache, n); 2381869833f2SKOSAKI Motohiro return NULL; 2382869833f2SKOSAKI Motohiro } 2383869833f2SKOSAKI Motohiro newpol->flags |= MPOL_F_SHARED; 238442288fe3SMel Gorman sp_node_init(n, start, end, newpol); 2385869833f2SKOSAKI Motohiro 23861da177e4SLinus Torvalds return n; 23871da177e4SLinus Torvalds } 23881da177e4SLinus Torvalds 23891da177e4SLinus Torvalds /* Replace a policy range.
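 * The two helper allocations (n_new, mpol_new) are done outside
 * sp->lock via the alloc_new/restart labels below: splitting an old
 * node that spans the whole new range needs an extra sp_node, and
 * kmem_cache_alloc(GFP_KERNEL) may sleep, so the spinlock is dropped,
 * the nodes are preallocated, and the lookup is retried.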
*/ 23901da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start, 23911da177e4SLinus Torvalds unsigned long end, struct sp_node *new) 23921da177e4SLinus Torvalds { 2393b22d127aSMel Gorman struct sp_node *n; 239442288fe3SMel Gorman struct sp_node *n_new = NULL; 239542288fe3SMel Gorman struct mempolicy *mpol_new = NULL; 2396b22d127aSMel Gorman int ret = 0; 23971da177e4SLinus Torvalds 239842288fe3SMel Gorman restart: 239942288fe3SMel Gorman spin_lock(&sp->lock); 24001da177e4SLinus Torvalds n = sp_lookup(sp, start, end); 24011da177e4SLinus Torvalds /* Take care of old policies in the same range. */ 24021da177e4SLinus Torvalds while (n && n->start < end) { 24031da177e4SLinus Torvalds struct rb_node *next = rb_next(&n->nd); 24041da177e4SLinus Torvalds if (n->start >= start) { 24051da177e4SLinus Torvalds if (n->end <= end) 24061da177e4SLinus Torvalds sp_delete(sp, n); 24071da177e4SLinus Torvalds else 24081da177e4SLinus Torvalds n->start = end; 24091da177e4SLinus Torvalds } else { 24101da177e4SLinus Torvalds /* Old policy spanning whole new range. */ 24111da177e4SLinus Torvalds if (n->end > end) { 241242288fe3SMel Gorman if (!n_new) 241342288fe3SMel Gorman goto alloc_new; 241442288fe3SMel Gorman 241542288fe3SMel Gorman *mpol_new = *n->policy; 241642288fe3SMel Gorman atomic_set(&mpol_new->refcnt, 1); 24177880639cSKOSAKI Motohiro sp_node_init(n_new, end, n->end, mpol_new); 24181da177e4SLinus Torvalds n->end = start; 24195ca39575SHillf Danton sp_insert(sp, n_new); 242042288fe3SMel Gorman n_new = NULL; 242142288fe3SMel Gorman mpol_new = NULL; 24221da177e4SLinus Torvalds break; 24231da177e4SLinus Torvalds } else 24241da177e4SLinus Torvalds n->end = start; 24251da177e4SLinus Torvalds } 24261da177e4SLinus Torvalds if (!next) 24271da177e4SLinus Torvalds break; 24281da177e4SLinus Torvalds n = rb_entry(next, struct sp_node, nd); 24291da177e4SLinus Torvalds } 24301da177e4SLinus Torvalds if (new) 24311da177e4SLinus Torvalds sp_insert(sp, new); 243242288fe3SMel Gorman spin_unlock(&sp->lock); 243342288fe3SMel Gorman ret = 0; 243442288fe3SMel Gorman 243542288fe3SMel Gorman err_out: 243642288fe3SMel Gorman if (mpol_new) 243742288fe3SMel Gorman mpol_put(mpol_new); 243842288fe3SMel Gorman if (n_new) 243942288fe3SMel Gorman kmem_cache_free(sn_cache, n_new); 244042288fe3SMel Gorman 2441b22d127aSMel Gorman return ret; 244242288fe3SMel Gorman 244342288fe3SMel Gorman alloc_new: 244442288fe3SMel Gorman spin_unlock(&sp->lock); 244542288fe3SMel Gorman ret = -ENOMEM; 244642288fe3SMel Gorman n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL); 244742288fe3SMel Gorman if (!n_new) 244842288fe3SMel Gorman goto err_out; 244942288fe3SMel Gorman mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL); 245042288fe3SMel Gorman if (!mpol_new) 245142288fe3SMel Gorman goto err_out; 245242288fe3SMel Gorman goto restart; 24531da177e4SLinus Torvalds } 24541da177e4SLinus Torvalds 245571fe804bSLee Schermerhorn /** 245671fe804bSLee Schermerhorn * mpol_shared_policy_init - initialize shared policy for inode 245771fe804bSLee Schermerhorn * @sp: pointer to inode shared policy 245871fe804bSLee Schermerhorn * @mpol: struct mempolicy to install 245971fe804bSLee Schermerhorn * 246071fe804bSLee Schermerhorn * Install non-NULL @mpol in inode's shared policy rb-tree. 246171fe804bSLee Schermerhorn * On entry, the current task has a reference on a non-NULL @mpol. 246271fe804bSLee Schermerhorn * This must be released on exit. 
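 * (The reference is dropped at the put_mpol label below.)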
24634bfc4495SKAMEZAWA Hiroyuki * This is called from get_inode(), so we can use GFP_KERNEL. 246471fe804bSLee Schermerhorn */ 246571fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) 24667339ff83SRobin Holt { 246758568d2aSMiao Xie int ret; 246858568d2aSMiao Xie 246971fe804bSLee Schermerhorn sp->root = RB_ROOT; /* empty tree == default mempolicy */ 247042288fe3SMel Gorman spin_lock_init(&sp->lock); 24717339ff83SRobin Holt 247271fe804bSLee Schermerhorn if (mpol) { 24737339ff83SRobin Holt struct vm_area_struct pvma; 247471fe804bSLee Schermerhorn struct mempolicy *new; 24754bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 24767339ff83SRobin Holt 24774bfc4495SKAMEZAWA Hiroyuki if (!scratch) 24785c0c1654SLee Schermerhorn goto put_mpol; 247971fe804bSLee Schermerhorn /* contextualize the tmpfs mount point mempolicy */ 248071fe804bSLee Schermerhorn new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask); 248115d77835SLee Schermerhorn if (IS_ERR(new)) 24820cae3457SDan Carpenter goto free_scratch; /* no valid nodemask intersection */ 248358568d2aSMiao Xie 248458568d2aSMiao Xie task_lock(current); 24854bfc4495SKAMEZAWA Hiroyuki ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch); 248658568d2aSMiao Xie task_unlock(current); 248715d77835SLee Schermerhorn if (ret) 24885c0c1654SLee Schermerhorn goto put_new; 248971fe804bSLee Schermerhorn 249071fe804bSLee Schermerhorn /* Create pseudo-vma that contains just the policy */ 24917339ff83SRobin Holt memset(&pvma, 0, sizeof(struct vm_area_struct)); 249271fe804bSLee Schermerhorn pvma.vm_end = TASK_SIZE; /* policy covers entire file */ 249371fe804bSLee Schermerhorn mpol_set_shared_policy(sp, &pvma, new); /* adds ref */ 249415d77835SLee Schermerhorn 24955c0c1654SLee Schermerhorn put_new: 249671fe804bSLee Schermerhorn mpol_put(new); /* drop initial ref */ 24970cae3457SDan Carpenter free_scratch: 24984bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 24995c0c1654SLee Schermerhorn put_mpol: 25005c0c1654SLee Schermerhorn mpol_put(mpol); /* drop our incoming ref on sb mpol */ 25017339ff83SRobin Holt } 25027339ff83SRobin Holt } 25037339ff83SRobin Holt 25041da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info, 25051da177e4SLinus Torvalds struct vm_area_struct *vma, struct mempolicy *npol) 25061da177e4SLinus Torvalds { 25071da177e4SLinus Torvalds int err; 25081da177e4SLinus Torvalds struct sp_node *new = NULL; 25091da177e4SLinus Torvalds unsigned long sz = vma_pages(vma); 25101da177e4SLinus Torvalds 2511028fec41SDavid Rientjes pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n", 25121da177e4SLinus Torvalds vma->vm_pgoff, 251345c4745aSLee Schermerhorn sz, npol ? npol->mode : -1, 2514028fec41SDavid Rientjes npol ? npol->flags : -1, 251500ef2d2fSDavid Rientjes npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE); 25161da177e4SLinus Torvalds 25171da177e4SLinus Torvalds if (npol) { 25181da177e4SLinus Torvalds new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol); 25191da177e4SLinus Torvalds if (!new) 25201da177e4SLinus Torvalds return -ENOMEM; 25211da177e4SLinus Torvalds } 25221da177e4SLinus Torvalds err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new); 25231da177e4SLinus Torvalds if (err && new) 252463f74ca2SKOSAKI Motohiro sp_free(new); 25251da177e4SLinus Torvalds return err; 25261da177e4SLinus Torvalds } 25271da177e4SLinus Torvalds 25281da177e4SLinus Torvalds /* Free a backing policy store on inode delete.
*/ 25291da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p) 25301da177e4SLinus Torvalds { 25311da177e4SLinus Torvalds struct sp_node *n; 25321da177e4SLinus Torvalds struct rb_node *next; 25331da177e4SLinus Torvalds 25341da177e4SLinus Torvalds if (!p->root.rb_node) 25351da177e4SLinus Torvalds return; 253642288fe3SMel Gorman spin_lock(&p->lock); 25371da177e4SLinus Torvalds next = rb_first(&p->root); 25381da177e4SLinus Torvalds while (next) { 25391da177e4SLinus Torvalds n = rb_entry(next, struct sp_node, nd); 25401da177e4SLinus Torvalds next = rb_next(&n->nd); 254163f74ca2SKOSAKI Motohiro sp_delete(p, n); 25421da177e4SLinus Torvalds } 254342288fe3SMel Gorman spin_unlock(&p->lock); 25441da177e4SLinus Torvalds } 25451da177e4SLinus Torvalds 25461a687c2eSMel Gorman #ifdef CONFIG_NUMA_BALANCING 2547c297663cSMel Gorman static int __initdata numabalancing_override; 25481a687c2eSMel Gorman 25491a687c2eSMel Gorman static void __init check_numabalancing_enable(void) 25501a687c2eSMel Gorman { 25511a687c2eSMel Gorman bool numabalancing_default = false; 25521a687c2eSMel Gorman 25531a687c2eSMel Gorman if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED)) 25541a687c2eSMel Gorman numabalancing_default = true; 25551a687c2eSMel Gorman 2556c297663cSMel Gorman /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */ 2557c297663cSMel Gorman if (numabalancing_override) 2558c297663cSMel Gorman set_numabalancing_state(numabalancing_override == 1); 2559c297663cSMel Gorman 25601a687c2eSMel Gorman if (nr_node_ids > 1 && !numabalancing_override) { 25614a404beaSAndrew Morton pr_info("%s automatic NUMA balancing. " 2562c297663cSMel Gorman "Configure with numa_balancing= or the " 2563c297663cSMel Gorman "kernel.numa_balancing sysctl\n", 2564c297663cSMel Gorman numabalancing_default ?
"Enabling" : "Disabling"); 25651a687c2eSMel Gorman set_numabalancing_state(numabalancing_default); 25661a687c2eSMel Gorman } 25671a687c2eSMel Gorman } 25681a687c2eSMel Gorman 25691a687c2eSMel Gorman static int __init setup_numabalancing(char *str) 25701a687c2eSMel Gorman { 25711a687c2eSMel Gorman int ret = 0; 25721a687c2eSMel Gorman if (!str) 25731a687c2eSMel Gorman goto out; 25741a687c2eSMel Gorman 25751a687c2eSMel Gorman if (!strcmp(str, "enable")) { 2576c297663cSMel Gorman numabalancing_override = 1; 25771a687c2eSMel Gorman ret = 1; 25781a687c2eSMel Gorman } else if (!strcmp(str, "disable")) { 2579c297663cSMel Gorman numabalancing_override = -1; 25801a687c2eSMel Gorman ret = 1; 25811a687c2eSMel Gorman } 25821a687c2eSMel Gorman out: 25831a687c2eSMel Gorman if (!ret) 25844a404beaSAndrew Morton pr_warn("Unable to parse numa_balancing=\n"); 25851a687c2eSMel Gorman 25861a687c2eSMel Gorman return ret; 25871a687c2eSMel Gorman } 25881a687c2eSMel Gorman __setup("numa_balancing=", setup_numabalancing); 25891a687c2eSMel Gorman #else 25901a687c2eSMel Gorman static inline void __init check_numabalancing_enable(void) 25911a687c2eSMel Gorman { 25921a687c2eSMel Gorman } 25931a687c2eSMel Gorman #endif /* CONFIG_NUMA_BALANCING */ 25941a687c2eSMel Gorman 25951da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */ 25961da177e4SLinus Torvalds void __init numa_policy_init(void) 25971da177e4SLinus Torvalds { 2598b71636e2SPaul Mundt nodemask_t interleave_nodes; 2599b71636e2SPaul Mundt unsigned long largest = 0; 2600b71636e2SPaul Mundt int nid, prefer = 0; 2601b71636e2SPaul Mundt 26021da177e4SLinus Torvalds policy_cache = kmem_cache_create("numa_policy", 26031da177e4SLinus Torvalds sizeof(struct mempolicy), 260420c2df83SPaul Mundt 0, SLAB_PANIC, NULL); 26051da177e4SLinus Torvalds 26061da177e4SLinus Torvalds sn_cache = kmem_cache_create("shared_policy_node", 26071da177e4SLinus Torvalds sizeof(struct sp_node), 260820c2df83SPaul Mundt 0, SLAB_PANIC, NULL); 26091da177e4SLinus Torvalds 26105606e387SMel Gorman for_each_node(nid) { 26115606e387SMel Gorman preferred_node_policy[nid] = (struct mempolicy) { 26125606e387SMel Gorman .refcnt = ATOMIC_INIT(1), 26135606e387SMel Gorman .mode = MPOL_PREFERRED, 26145606e387SMel Gorman .flags = MPOL_F_MOF | MPOL_F_MORON, 26155606e387SMel Gorman .v = { .preferred_node = nid, }, 26165606e387SMel Gorman }; 26175606e387SMel Gorman } 26185606e387SMel Gorman 2619b71636e2SPaul Mundt /* 2620b71636e2SPaul Mundt * Set interleaving policy for system init. Interleaving is only 2621b71636e2SPaul Mundt * enabled across suitably sized nodes (default is >= 16MB), or 2622b71636e2SPaul Mundt * fall back to the largest node if they're all smaller. 2623b71636e2SPaul Mundt */ 2624b71636e2SPaul Mundt nodes_clear(interleave_nodes); 262501f13bd6SLai Jiangshan for_each_node_state(nid, N_MEMORY) { 2626b71636e2SPaul Mundt unsigned long total_pages = node_present_pages(nid); 26271da177e4SLinus Torvalds 2628b71636e2SPaul Mundt /* Preserve the largest node */ 2629b71636e2SPaul Mundt if (largest < total_pages) { 2630b71636e2SPaul Mundt largest = total_pages; 2631b71636e2SPaul Mundt prefer = nid; 2632b71636e2SPaul Mundt } 2633b71636e2SPaul Mundt 2634b71636e2SPaul Mundt /* Interleave this node? 
*/ 2635b71636e2SPaul Mundt if ((total_pages << PAGE_SHIFT) >= (16 << 20)) 2636b71636e2SPaul Mundt node_set(nid, interleave_nodes); 2637b71636e2SPaul Mundt } 2638b71636e2SPaul Mundt 2639b71636e2SPaul Mundt /* All too small, use the largest */ 2640b71636e2SPaul Mundt if (unlikely(nodes_empty(interleave_nodes))) 2641b71636e2SPaul Mundt node_set(prefer, interleave_nodes); 2642b71636e2SPaul Mundt 2643028fec41SDavid Rientjes if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes)) 2644b1de0d13SMitchel Humpherys pr_err("%s: interleaving failed\n", __func__); 26451a687c2eSMel Gorman 26461a687c2eSMel Gorman check_numabalancing_enable(); 26471da177e4SLinus Torvalds } 26481da177e4SLinus Torvalds 26498bccd85fSChristoph Lameter /* Reset policy of current process to default */ 26501da177e4SLinus Torvalds void numa_default_policy(void) 26511da177e4SLinus Torvalds { 2652028fec41SDavid Rientjes do_set_mempolicy(MPOL_DEFAULT, 0, NULL); 26531da177e4SLinus Torvalds } 265468860ec1SPaul Jackson 26554225399aSPaul Jackson /* 2656095f1fc4SLee Schermerhorn * Parse and format mempolicy from/to strings 2657095f1fc4SLee Schermerhorn */ 2658095f1fc4SLee Schermerhorn 2659095f1fc4SLee Schermerhorn /* 2660f2a07f40SHugh Dickins * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag. 26611a75a6c8SChristoph Lameter */ 2662345ace9cSLee Schermerhorn static const char * const policy_modes[] = 2663345ace9cSLee Schermerhorn { 2664345ace9cSLee Schermerhorn [MPOL_DEFAULT] = "default", 2665345ace9cSLee Schermerhorn [MPOL_PREFERRED] = "prefer", 2666345ace9cSLee Schermerhorn [MPOL_BIND] = "bind", 2667345ace9cSLee Schermerhorn [MPOL_INTERLEAVE] = "interleave", 2668d3a71033SLee Schermerhorn [MPOL_LOCAL] = "local", 2669345ace9cSLee Schermerhorn }; 26701a75a6c8SChristoph Lameter 2671095f1fc4SLee Schermerhorn 2672095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS 2673095f1fc4SLee Schermerhorn /** 2674f2a07f40SHugh Dickins * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option. 2675095f1fc4SLee Schermerhorn * @str: string containing mempolicy to parse 267671fe804bSLee Schermerhorn * @mpol: pointer to struct mempolicy pointer, returned on success. 
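 *
 * Example inputs (illustrative; listed nodes must have memory),
 * following the format given below:
 *	"interleave:0-3"	MPOL_INTERLEAVE over nodes 0-3
 *	"bind=static:1,3"	MPOL_BIND | MPOL_F_STATIC_NODES on nodes 1 and 3
 *	"prefer:2"		MPOL_PREFERRED with preferred node 2
 *	"local"			MPOL_PREFERRED with MPOL_F_LOCAL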
2677095f1fc4SLee Schermerhorn * 2678095f1fc4SLee Schermerhorn * Format of input: 2679095f1fc4SLee Schermerhorn * <mode>[=<flags>][:<nodelist>] 2680095f1fc4SLee Schermerhorn * 268171fe804bSLee Schermerhorn * On success, returns 0, else 1 2682095f1fc4SLee Schermerhorn */ 2683a7a88b23SHugh Dickins int mpol_parse_str(char *str, struct mempolicy **mpol) 2684095f1fc4SLee Schermerhorn { 268571fe804bSLee Schermerhorn struct mempolicy *new = NULL; 2686b4652e84SLee Schermerhorn unsigned short mode; 2687f2a07f40SHugh Dickins unsigned short mode_flags; 268871fe804bSLee Schermerhorn nodemask_t nodes; 2689095f1fc4SLee Schermerhorn char *nodelist = strchr(str, ':'); 2690095f1fc4SLee Schermerhorn char *flags = strchr(str, '='); 2691095f1fc4SLee Schermerhorn int err = 1; 2692095f1fc4SLee Schermerhorn 2693095f1fc4SLee Schermerhorn if (nodelist) { 2694095f1fc4SLee Schermerhorn /* NUL-terminate mode or flags string */ 2695095f1fc4SLee Schermerhorn *nodelist++ = '\0'; 269671fe804bSLee Schermerhorn if (nodelist_parse(nodelist, nodes)) 2697095f1fc4SLee Schermerhorn goto out; 269801f13bd6SLai Jiangshan if (!nodes_subset(nodes, node_states[N_MEMORY])) 2699095f1fc4SLee Schermerhorn goto out; 270071fe804bSLee Schermerhorn } else 270171fe804bSLee Schermerhorn nodes_clear(nodes); 270271fe804bSLee Schermerhorn 2703095f1fc4SLee Schermerhorn if (flags) 2704095f1fc4SLee Schermerhorn *flags++ = '\0'; /* terminate mode string */ 2705095f1fc4SLee Schermerhorn 2706479e2802SPeter Zijlstra for (mode = 0; mode < MPOL_MAX; mode++) { 2707345ace9cSLee Schermerhorn if (!strcmp(str, policy_modes[mode])) { 2708095f1fc4SLee Schermerhorn break; 2709095f1fc4SLee Schermerhorn } 2710095f1fc4SLee Schermerhorn } 2711a720094dSMel Gorman if (mode >= MPOL_MAX) 2712095f1fc4SLee Schermerhorn goto out; 2713095f1fc4SLee Schermerhorn 271471fe804bSLee Schermerhorn switch (mode) { 2715095f1fc4SLee Schermerhorn case MPOL_PREFERRED: 271671fe804bSLee Schermerhorn /* 271771fe804bSLee Schermerhorn * Insist on a nodelist of one node only 271871fe804bSLee Schermerhorn */ 2719095f1fc4SLee Schermerhorn if (nodelist) { 2720095f1fc4SLee Schermerhorn char *rest = nodelist; 2721095f1fc4SLee Schermerhorn while (isdigit(*rest)) 2722095f1fc4SLee Schermerhorn rest++; 2723926f2ae0SKOSAKI Motohiro if (*rest) 2724926f2ae0SKOSAKI Motohiro goto out; 2725095f1fc4SLee Schermerhorn } 2726095f1fc4SLee Schermerhorn break; 2727095f1fc4SLee Schermerhorn case MPOL_INTERLEAVE: 2728095f1fc4SLee Schermerhorn /* 2729095f1fc4SLee Schermerhorn * Default to online nodes with memory if no nodelist 2730095f1fc4SLee Schermerhorn */ 2731095f1fc4SLee Schermerhorn if (!nodelist) 273201f13bd6SLai Jiangshan nodes = node_states[N_MEMORY]; 27333f226aa1SLee Schermerhorn break; 273471fe804bSLee Schermerhorn case MPOL_LOCAL: 27353f226aa1SLee Schermerhorn /* 273671fe804bSLee Schermerhorn * Don't allow a nodelist; mpol_new() checks flags 27373f226aa1SLee Schermerhorn */ 273871fe804bSLee Schermerhorn if (nodelist) 27393f226aa1SLee Schermerhorn goto out; 274071fe804bSLee Schermerhorn mode = MPOL_PREFERRED; 27413f226aa1SLee Schermerhorn break; 2742413b43deSRavikiran G Thirumalai case MPOL_DEFAULT: 2743413b43deSRavikiran G Thirumalai /* 2744413b43deSRavikiran G Thirumalai * Insist on an empty nodelist 2745413b43deSRavikiran G Thirumalai */ 2746413b43deSRavikiran G Thirumalai if (!nodelist) 2747413b43deSRavikiran G Thirumalai err = 0; 2748413b43deSRavikiran G Thirumalai goto out; 2749d69b2e63SKOSAKI Motohiro case MPOL_BIND: 275071fe804bSLee Schermerhorn /* 2751d69b2e63SKOSAKI Motohiro * Insist on a nodelist
275271fe804bSLee Schermerhorn */ 2753d69b2e63SKOSAKI Motohiro if (!nodelist) 2754d69b2e63SKOSAKI Motohiro goto out; 2755095f1fc4SLee Schermerhorn } 2756095f1fc4SLee Schermerhorn 275771fe804bSLee Schermerhorn mode_flags = 0; 2758095f1fc4SLee Schermerhorn if (flags) { 2759095f1fc4SLee Schermerhorn /* 2760095f1fc4SLee Schermerhorn * Currently, we only support two mutually exclusive 2761095f1fc4SLee Schermerhorn * mode flags. 2762095f1fc4SLee Schermerhorn */ 2763095f1fc4SLee Schermerhorn if (!strcmp(flags, "static")) 276471fe804bSLee Schermerhorn mode_flags |= MPOL_F_STATIC_NODES; 2765095f1fc4SLee Schermerhorn else if (!strcmp(flags, "relative")) 276671fe804bSLee Schermerhorn mode_flags |= MPOL_F_RELATIVE_NODES; 2767095f1fc4SLee Schermerhorn else 2768926f2ae0SKOSAKI Motohiro goto out; 2769095f1fc4SLee Schermerhorn } 277071fe804bSLee Schermerhorn 277171fe804bSLee Schermerhorn new = mpol_new(mode, mode_flags, &nodes); 277271fe804bSLee Schermerhorn if (IS_ERR(new)) 2773926f2ae0SKOSAKI Motohiro goto out; 2774926f2ae0SKOSAKI Motohiro 2775f2a07f40SHugh Dickins /* 2776f2a07f40SHugh Dickins * Save nodes for mpol_to_str() to show the tmpfs mount options 2777f2a07f40SHugh Dickins * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo. 2778f2a07f40SHugh Dickins */ 2779f2a07f40SHugh Dickins if (mode != MPOL_PREFERRED) 2780f2a07f40SHugh Dickins new->v.nodes = nodes; 2781f2a07f40SHugh Dickins else if (nodelist) 2782f2a07f40SHugh Dickins new->v.preferred_node = first_node(nodes); 2783f2a07f40SHugh Dickins else 2784f2a07f40SHugh Dickins new->flags |= MPOL_F_LOCAL; 2785f2a07f40SHugh Dickins 2786f2a07f40SHugh Dickins /* 2787f2a07f40SHugh Dickins * Save nodes for contextualization: this will be used to "clone" 2788f2a07f40SHugh Dickins * the mempolicy in a specific context [cpuset] at a later time. 2789f2a07f40SHugh Dickins */ 2790e17f74afSLee Schermerhorn new->w.user_nodemask = nodes; 2791f2a07f40SHugh Dickins 2792926f2ae0SKOSAKI Motohiro err = 0; 279371fe804bSLee Schermerhorn 2794095f1fc4SLee Schermerhorn out: 2795095f1fc4SLee Schermerhorn /* Restore string for error message */ 2796095f1fc4SLee Schermerhorn if (nodelist) 2797095f1fc4SLee Schermerhorn *--nodelist = ':'; 2798095f1fc4SLee Schermerhorn if (flags) 2799095f1fc4SLee Schermerhorn *--flags = '='; 280071fe804bSLee Schermerhorn if (!err) 280171fe804bSLee Schermerhorn *mpol = new; 2802095f1fc4SLee Schermerhorn return err; 2803095f1fc4SLee Schermerhorn } 2804095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */ 2805095f1fc4SLee Schermerhorn 280671fe804bSLee Schermerhorn /** 280771fe804bSLee Schermerhorn * mpol_to_str - format a mempolicy structure for printing 280871fe804bSLee Schermerhorn * @buffer: to contain formatted mempolicy string 280971fe804bSLee Schermerhorn * @maxlen: length of @buffer 281071fe804bSLee Schermerhorn * @pol: pointer to mempolicy to be formatted 281171fe804bSLee Schermerhorn * 2812948927eeSDavid Rientjes * Convert @pol into a string. If @buffer is too short, truncate the string. 2813948927eeSDavid Rientjes * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the 2814948927eeSDavid Rientjes * longest flag, "relative", and to display at least a few node ids. 
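 *
 * Example outputs (mirroring the inputs accepted by mpol_parse_str()):
 * "default", "local", "prefer=static:1", "interleave:0-3".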
28151a75a6c8SChristoph Lameter */ 2816948927eeSDavid Rientjes void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol) 28171a75a6c8SChristoph Lameter { 28181a75a6c8SChristoph Lameter char *p = buffer; 2819948927eeSDavid Rientjes nodemask_t nodes = NODE_MASK_NONE; 2820948927eeSDavid Rientjes unsigned short mode = MPOL_DEFAULT; 2821948927eeSDavid Rientjes unsigned short flags = 0; 28221a75a6c8SChristoph Lameter 28238790c71aSDavid Rientjes if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) { 2824bea904d5SLee Schermerhorn mode = pol->mode; 2825948927eeSDavid Rientjes flags = pol->flags; 2826948927eeSDavid Rientjes } 2827bea904d5SLee Schermerhorn 28281a75a6c8SChristoph Lameter switch (mode) { 28291a75a6c8SChristoph Lameter case MPOL_DEFAULT: 28301a75a6c8SChristoph Lameter break; 28311a75a6c8SChristoph Lameter case MPOL_PREFERRED: 2832fc36b8d3SLee Schermerhorn if (flags & MPOL_F_LOCAL) 2833f2a07f40SHugh Dickins mode = MPOL_LOCAL; 283453f2556bSLee Schermerhorn else 2835fc36b8d3SLee Schermerhorn node_set(pol->v.preferred_node, nodes); 28361a75a6c8SChristoph Lameter break; 28371a75a6c8SChristoph Lameter case MPOL_BIND: 28381a75a6c8SChristoph Lameter case MPOL_INTERLEAVE: 28391a75a6c8SChristoph Lameter nodes = pol->v.nodes; 28401a75a6c8SChristoph Lameter break; 28411a75a6c8SChristoph Lameter default: 2842948927eeSDavid Rientjes WARN_ON_ONCE(1); 2843948927eeSDavid Rientjes snprintf(p, maxlen, "unknown"); 2844948927eeSDavid Rientjes return; 28451a75a6c8SChristoph Lameter } 28461a75a6c8SChristoph Lameter 2847b7a9f420SDavid Rientjes p += snprintf(p, maxlen, "%s", policy_modes[mode]); 28481a75a6c8SChristoph Lameter 2849fc36b8d3SLee Schermerhorn if (flags & MPOL_MODE_FLAGS) { 2850948927eeSDavid Rientjes p += snprintf(p, buffer + maxlen - p, "="); 2851f5b087b5SDavid Rientjes 28522291990aSLee Schermerhorn /* 28532291990aSLee Schermerhorn * Currently, the only defined flags are mutually exclusive 28542291990aSLee Schermerhorn */ 2855f5b087b5SDavid Rientjes if (flags & MPOL_F_STATIC_NODES) 28562291990aSLee Schermerhorn p += snprintf(p, buffer + maxlen - p, "static"); 28572291990aSLee Schermerhorn else if (flags & MPOL_F_RELATIVE_NODES) 28582291990aSLee Schermerhorn p += snprintf(p, buffer + maxlen - p, "relative"); 2859f5b087b5SDavid Rientjes } 2860f5b087b5SDavid Rientjes 28611a75a6c8SChristoph Lameter if (!nodes_empty(nodes)) { 2862948927eeSDavid Rientjes p += snprintf(p, buffer + maxlen - p, ":"); 28631a75a6c8SChristoph Lameter p += nodelist_scnprintf(p, buffer + maxlen - p, nodes); 28641a75a6c8SChristoph Lameter } 28651a75a6c8SChristoph Lameter } 2866
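/*
 * Usage sketch (illustrative, not compiled here): the parse/format pair
 * above backs the tmpfs "mpol" mount option, e.g.
 *
 *	mount -t tmpfs -o mpol=interleave:0-3 tmpfs /mnt
 *
 * mpol_parse_str() turns "interleave:0-3" into an MPOL_INTERLEAVE
 * policy over nodes 0-3, and mpol_to_str() regenerates the same string
 * for /proc/mounts.
 */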