/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a process counter
 *                is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead.
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case NUMA_NO_NODE here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non-default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non-interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem, kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
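/*
 * Illustrative userspace usage (a sketch, not part of this file): the
 * policies above are requested through set_mempolicy(2) and mbind(2).
 * 'addr' and 'len' below are assumed to describe an existing page-aligned
 * mapping; maxnode counts the bits in the mask.
 *
 *	#include <numaif.h>
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 1);
 *	// Interleave future allocations of this process over nodes 0 and 1.
 *	set_mempolicy(MPOL_INTERLEAVE, &mask, sizeof(mask) * 8);
 *
 *	unsigned long node0 = 1UL << 0;
 *	// Restrict an existing mapping to node 0, with no fallback.
 *	mbind(addr, len, MPOL_BIND, &node0, sizeof(node0) * 8, 0);
 */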
/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <linux/random.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

struct mempolicy *get_task_policy(struct task_struct *p)
{
	struct mempolicy *pol = p->mempolicy;
	int node;

	if (pol)
		return pol;

	node = numa_node_id();
	if (node != NUMA_NO_NODE) {
		pol = &preferred_node_policy[node];
		/* preferred_node_policy is not initialised early in boot */
		if (pol->mode)
			return pol;
	}

	return &default_policy;
}

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	/*
	 * If a read-side task has no lock to protect task->mempolicy, the
	 * write-side task will rebind task->mempolicy in two steps. The
	 * first step is setting all the newly allowed nodes, and the second
	 * step is cleaning all the disallowed nodes. In this way, we avoid
	 * a window in which no node is available for page allocation.
	 * If we have a lock to protect task->mempolicy on the read side, we
	 * rebind directly.
	 *
	 * step:
	 *	MPOL_REBIND_ONCE  - do the rebind work at once
	 *	MPOL_REBIND_STEP1 - set all the newly allowed nodes
	 *	MPOL_REBIND_STEP2 - clean all the disallowed nodes
	 */
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
			enum mpol_rebind_step step);
} mpol_ops[MPOL_MAX];
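/*
 * Worked example of the two-step rebind (a sketch, for the default case in
 * mpol_rebind_nodemask() with neither MPOL_F_STATIC_NODES nor
 * MPOL_F_RELATIVE_NODES): an MPOL_INTERLEAVE policy has v.nodes = {0,1} and
 * the allowed mems change from {0,1} to {2,3}.
 *
 *	MPOL_REBIND_STEP1: nodes_remap() yields tmp = {2,3}, which is cached
 *	in w.cpuset_mems_allowed and OR-ed into the policy, so
 *	v.nodes = {0,1,2,3}; an allocation racing with the rebind can still
 *	find a node.
 *
 *	MPOL_REBIND_STEP2: the cached {2,3} replaces v.nodes and
 *	w.cpuset_mems_allowed becomes {2,3}.
 *
 * MPOL_REBIND_ONCE collapses both steps, taking v.nodes straight from
 * {0,1} to {2,3}.
 */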
static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

/*
 * Map @orig onto the set of nodes in @rel: e.g. orig = {0,2} relative to
 * rel = {4,5,6} folds to {0,2} (weight 3) and maps onto {4,6}.
 */
static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}

static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!nodes)
		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
	else if (nodes_empty(*nodes))
		return -EINVAL;			/* no allowed nodes */
	else
		pol->v.preferred_node = first_node(*nodes);
	return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy.  mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.  But, we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy.  May also be called holding the mmap_semaphore for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
	if (pol == NULL)
		return 0;
	/* Check N_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_MEMORY]);

	VM_BUG_ON(!nodes);
	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
		nodes = NULL;	/* explicit local allocation */
	else {
		if (pol->flags & MPOL_F_RELATIVE_NODES)
			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
		else
			nodes_and(nsc->mask2, *nodes, nsc->mask1);

		if (mpol_store_user_nodemask(pol))
			pol->w.user_nodemask = *nodes;
		else
			pol->w.cpuset_mems_allowed =
						cpuset_current_mems_allowed;
	}

	if (nodes)
		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	else
		ret = mpol_ops[pol->mode].create(pol, NULL);
	return ret;
}

/*
 * This function just creates a new policy, does some checks and simple
 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);
		}
	} else if (mode == MPOL_LOCAL) {
		if (!nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		mode = MPOL_PREFERRED;
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	return policy;
}

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
				enum mpol_rebind_step step)
{
}

/*
 * step:
 *	MPOL_REBIND_ONCE  - do the rebind work at once
 *	MPOL_REBIND_STEP1 - set all the newly allowed nodes
 *	MPOL_REBIND_STEP2 - clean all the disallowed nodes
 */
static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
				 enum mpol_rebind_step step)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		/*
		 * if step == 1, we use ->w.cpuset_mems_allowed to cache the
		 * result
		 */
		if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
			nodes_remap(tmp, pol->v.nodes,
					pol->w.cpuset_mems_allowed, *nodes);
			pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
		} else if (step == MPOL_REBIND_STEP2) {
			tmp = pol->w.cpuset_mems_allowed;
			pol->w.cpuset_mems_allowed = *nodes;
		} else
			BUG();
	}

	if (nodes_empty(tmp))
		tmp = *nodes;

	if (step == MPOL_REBIND_STEP1)
		nodes_or(pol->v.nodes, pol->v.nodes, tmp);
	else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
		pol->v.nodes = tmp;
	else
		BUG();

	if (!node_isset(current->il_next, tmp)) {
		current->il_next = next_node(current->il_next, tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = first_node(tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = numa_node_id();
	}
}

static void mpol_rebind_preferred(struct mempolicy *pol,
				  const nodemask_t *nodes,
				  enum mpol_rebind_step step)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES) {
		int node = first_node(pol->w.user_nodemask);

		if (node_isset(node, *nodes)) {
			pol->v.preferred_node = node;
			pol->flags &= ~MPOL_F_LOCAL;
		} else
			pol->flags |= MPOL_F_LOCAL;
	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
		pol->v.preferred_node = first_node(tmp);
	} else if (!(pol->flags & MPOL_F_LOCAL)) {
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						   pol->w.cpuset_mems_allowed,
						   *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}
}

/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * If a read-side task has no lock to protect task->mempolicy, the write-side
 * task will rebind task->mempolicy in two steps. The first step is setting
 * all the newly allowed nodes, and the second step is cleaning all the
 * disallowed nodes. In this way, we avoid a window in which no node is
 * available for page allocation.
 * If we have a lock to protect task->mempolicy on the read side, we rebind
 * directly.
 *
 * step:
 *	MPOL_REBIND_ONCE  - do the rebind work at once
 *	MPOL_REBIND_STEP1 - set all the newly allowed nodes
 *	MPOL_REBIND_STEP2 - clean all the disallowed nodes
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
				enum mpol_rebind_step step)
{
	if (!pol)
		return;
	if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;

	if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
		return;

	if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
		BUG();

	if (step == MPOL_REBIND_STEP1)
		pol->flags |= MPOL_F_REBINDING;
	else if (step == MPOL_REBIND_STEP2)
		pol->flags &= ~MPOL_F_REBINDING;
	else if (step >= MPOL_REBIND_NSTEP)
		BUG();

	mpol_ops[pol->mode].rebind(pol, newmask, step);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires a task
 * pointer, and updates the task's mempolicy.
 *
 * Called with task's alloc_lock held.
 */

void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
			enum mpol_rebind_step step)
{
	mpol_rebind_policy(tsk->mempolicy, new, step);
}

/*
 * Rebind each vma in mm to the new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
 */

void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
	up_write(&mm->mmap_sem);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_interleave,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,
	},
};

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

struct queue_pages {
	struct list_head *pagelist;
	unsigned long flags;
	nodemask_t *nmask;
	struct vm_area_struct *prev;
};

/*
 * Scan through pages checking if pages follow certain conditions,
 * and move them to the pagelist if they do.
 */
static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct page *page;
	struct queue_pages *qp = walk->private;
	unsigned long flags = qp->flags;
	int nid;
	pte_t *pte;
	spinlock_t *ptl;

	split_huge_page_pmd(vma, addr, pmd);
	if (pmd_trans_unstable(pmd))
		return 0;

	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * vm_normal_page() filters out zero pages, but there might
		 * still be PageReserved pages to skip, perhaps in a VDSO.
		 */
		if (PageReserved(page))
			continue;
		nid = page_to_nid(page);
		if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
			continue;

		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
			migrate_page_add(page, qp->pagelist, flags);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
			       unsigned long addr, unsigned long end,
			       struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
	struct queue_pages *qp = walk->private;
	unsigned long flags = qp->flags;
	int nid;
	struct page *page;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry))
		goto unlock;
	page = pte_page(entry);
	nid = page_to_nid(page);
	if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
		goto unlock;
	/* With MPOL_MF_MOVE, we migrate only unshared hugepages. */
	if (flags & (MPOL_MF_MOVE_ALL) ||
	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
		isolate_huge_page(page, qp->pagelist);
unlock:
	spin_unlock(ptl);
#else
	BUG();
#endif
	return 0;
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * This is used to mark a range of virtual addresses as inaccessible.
 * These are later cleared by a NUMA hinting fault. Depending on these
 * faults, pages may be migrated for better NUMA placement.
 *
 * This assumes that NUMA faults are handled using PROT_NONE. If
 * an architecture makes a different choice, it will need further
 * changes to the core.
 */
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	int nr_updated;

	nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
	if (nr_updated)
		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);

	return nr_updated;
}
#else
static unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

static int queue_pages_test_walk(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct queue_pages *qp = walk->private;
	unsigned long endvma = vma->vm_end;
	unsigned long flags = qp->flags;

	if (vma->vm_flags & VM_PFNMAP)
		return 1;

	if (endvma > end)
		endvma = end;
	if (vma->vm_start > start)
		start = vma->vm_start;

	if (!(flags & MPOL_MF_DISCONTIG_OK)) {
		if (!vma->vm_next && vma->vm_end < end)
			return -EFAULT;
		if (qp->prev && qp->prev->vm_end < vma->vm_start)
			return -EFAULT;
	}

	qp->prev = vma;

	if (flags & MPOL_MF_LAZY) {
		/* Similar to task_numa_work, skip inaccessible VMAs */
		if (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))
			change_prot_numa(vma, start, endvma);
		return 1;
	}

	if ((flags & MPOL_MF_STRICT) ||
	    ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
	     vma_migratable(vma)))
		/* queue pages from current vma */
		return 0;
	return 1;
}

/*
 * Walk through page tables and collect pages to be migrated.
 *
 * If pages found in a given range are on the set of nodes (determined by
 * @nodes and @flags), they are isolated and queued to the pagelist passed
 * via @private.
 */
static int
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		nodemask_t *nodes, unsigned long flags,
		struct list_head *pagelist)
{
	struct queue_pages qp = {
		.pagelist = pagelist,
		.flags = flags,
		.nmask = nodes,
		.prev = NULL,
	};
	struct mm_walk queue_pages_walk = {
		.hugetlb_entry = queue_pages_hugetlb,
		.pmd_entry = queue_pages_pte_range,
		.test_walk = queue_pages_test_walk,
		.mm = mm,
		.private = &qp,
	};

	return walk_page_range(start, end, &queue_pages_walk);
}

/*
 * Apply policy to a single VMA.
 * This must be called with the mmap_sem held for writing.
 */
static int vma_replace_policy(struct vm_area_struct *vma,
						struct mempolicy *pol)
{
	int err;
	struct mempolicy *old;
	struct mempolicy *new;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	new = mpol_dup(pol);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (vma->vm_ops && vma->vm_ops->set_policy) {
		err = vma->vm_ops->set_policy(vma, new);
		if (err)
			goto err_out;
	}

	old = vma->vm_policy;
	vma->vm_policy = new; /* protected by mmap_sem */
	mpol_put(old);

	return 0;
 err_out:
	mpol_put(new);
	return err;
}

/* Step 2: apply policy to a range and do splits.
 */
static int mbind_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end, struct mempolicy *new_pol)
{
	struct vm_area_struct *next;
	struct vm_area_struct *prev;
	struct vm_area_struct *vma;
	int err = 0;
	pgoff_t pgoff;
	unsigned long vmstart;
	unsigned long vmend;

	vma = find_vma(mm, start);
	if (!vma || vma->vm_start > start)
		return -EFAULT;

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
		next = vma->vm_next;
		vmstart = max(start, vma->vm_start);
		vmend   = min(end, vma->vm_end);

		if (mpol_equal(vma_policy(vma), new_pol))
			continue;

		pgoff = vma->vm_pgoff +
			((vmstart - vma->vm_start) >> PAGE_SHIFT);
		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
				 vma->anon_vma, vma->vm_file, pgoff,
				 new_pol, vma->vm_userfaultfd_ctx);
		if (prev) {
			vma = prev;
			next = vma->vm_next;
			if (mpol_equal(vma_policy(vma), new_pol))
				continue;
			/* vma_merge() joined vma && vma->next, case 8 */
			goto replace;
		}
		if (vma->vm_start != vmstart) {
			err = split_vma(vma->vm_mm, vma, vmstart, 1);
			if (err)
				goto out;
		}
		if (vma->vm_end != vmend) {
			err = split_vma(vma->vm_mm, vma, vmend, 0);
			if (err)
				goto out;
		}
 replace:
		err = vma_replace_policy(vma, new_pol);
		if (err)
			goto out;
	}

 out:
	return err;
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new, *old;
	NODEMASK_SCRATCH(scratch);
	int ret;

	if (!scratch)
		return -ENOMEM;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new)) {
		ret = PTR_ERR(new);
		goto out;
	}

	task_lock(current);
	ret = mpol_set_nodemask(new, nodes, scratch);
	if (ret) {
		task_unlock(current);
		mpol_put(new);
		goto out;
	}
	old = current->mempolicy;
	current->mempolicy = new;
	if (new && new->mode == MPOL_INTERLEAVE &&
	    nodes_weight(new->v.nodes))
		current->il_next = first_node(new->v.nodes);
	task_unlock(current);
	mpol_put(old);
	ret = 0;
out:
	NODEMASK_SCRATCH_FREE(scratch);
	return ret;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (p == &default_policy)
		return;

	switch (p->mode) {
	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		if (!(p->flags & MPOL_F_LOCAL))
			node_set(p->v.preferred_node, *nodes);
		/* else return empty node mask for local allocation */
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p;
	int err;

	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL.  We
		 * want to return MPOL_DEFAULT in this case.
		 */
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = current->il_next;
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	if (vma) {
		up_read(&current->mm->mmap_sem);
		vma = NULL;
	}

	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {
			*nmask = pol->w.user_nodemask;
		} else {
			task_lock(current);
			get_policy_nodemask(pol, nmask);
			task_unlock(current);
		}
	}

 out:
	mpol_cond_put(pol);
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}
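/*
 * Illustrative userspace counterpart (a sketch, not part of this file):
 * the path above backs get_mempolicy(2). For example, to ask which node
 * holds the page at 'addr' (assumed to be a valid mapped address):
 *
 *	#include <numaif.h>
 *
 *	int node;
 *	if (get_mempolicy(&node, NULL, 0, addr,
 *			  MPOL_F_NODE | MPOL_F_ADDR) == 0)
 *		printf("page at %p is on node %d\n", addr, node);
 */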
#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
		if (!isolate_lru_page(page)) {
			list_add_tail(&page->lru, pagelist);
			inc_zone_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));
		}
	}
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
	if (PageHuge(page))
		return alloc_huge_page_node(page_hstate(compound_head(page)),
					node);
	else
		return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
						__GFP_THISNODE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;

	nodes_clear(nmask);
	node_set(source, nmask);

	/*
	 * This does not "check" the range but isolates all pages that
	 * need migration.  Between passing in the full user address
	 * space range and MPOL_MF_DISCONTIG_OK, this call cannot fail.
	 */
	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, new_node_page, NULL, dest,
					MIGRATE_SYNC, MR_SYSCALL);
		if (err)
			putback_movable_pages(&pagelist);
	}

	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	int busy = 0;
	int err;
	nodemask_t tmp;

	err = migrate_prep();
	if (err)
		return err;

	down_read(&mm->mmap_sem);

	/*
	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
	 * bit in 'tmp', and return that <source, dest> pair for migration.
	 * The pair of nodemasks 'to' and 'from' define the map.
	 *
	 * If no pair of bits is found that way, fallback to picking some
	 * pair of 'source' and 'dest' bits that are not the same.  If the
	 * 'source' and 'dest' bits are the same, this represents a node
	 * that will be migrating to itself, so no pages need move.
	 *
	 * If no bits are left in 'tmp', or if all remaining bits left
	 * in 'tmp' correspond to the same bit in 'to', return false
	 * (nothing left to migrate).
	 *
	 * This lets us pick a pair of nodes to migrate between, such that
	 * if possible the dest node is not already occupied by some other
	 * source node, minimizing the risk of overloading the memory on a
	 * node that would happen if we migrated incoming memory to a node
	 * before migrating outgoing memory sourced from that same node.
	 *
	 * A single scan of tmp is sufficient.  As we go, we remember the
	 * most recent <s, d> pair that moved (s != d).  If we find a pair
	 * that not only moved, but what's better, moved to an empty slot
	 * (d is not set in tmp), then we break out then, with that pair.
	 * Otherwise when we finish scanning tmp, we at least have the
	 * most recent <s, d> pair that moved.  If we get all the way through
	 * the scan of tmp without finding any node that moved, much less
	 * moved to an empty node, then there is nothing left worth migrating.
	 */

	tmp = *from;
	while (!nodes_empty(tmp)) {
		int s, d;
		int source = NUMA_NO_NODE;
		int dest = 0;

		for_each_node_mask(s, tmp) {

			/*
			 * do_migrate_pages() tries to maintain the relative
			 * node relationship of the pages established between
			 * threads and memory areas.
			 *
			 * However if the number of source nodes is not equal to
			 * the number of destination nodes we can not preserve
			 * this node relative relationship.  In that case, skip
			 * copying memory from a node that is in the destination
			 * mask.
			 *
			 * Example: [2,3,4] -> [3,4,5] moves everything.
			 *          [0-7] -> [3,4,5] moves only 0,1,2,6,7.
			 */

			if ((nodes_weight(*from) != nodes_weight(*to)) &&
						(node_isset(s, *to)))
				continue;

			d = node_remap(s, *from, *to);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
				break;
		}
		if (source == NUMA_NO_NODE)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;
		if (err < 0)
			break;
	}
	up_read(&mm->mmap_sem);
	if (err < 0)
		return err;
	return busy;
}
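/*
 * Userspace reaches do_migrate_pages() through migrate_pages(2). A sketch
 * (assuming the caller may move its own pages and node 1 is online):
 *
 *	#include <numaif.h>
 *
 *	unsigned long old = 1UL << 0, new = 1UL << 1;
 *	// Move the calling process's pages from node 0 to node 1;
 *	// returns the number of pages that could not be moved, or -1.
 *	long left = migrate_pages(0, sizeof(old) * 8, &old, &new);
 */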
/*
 * Allocate a new page for page migration based on vma policy.
 * Start by assuming the page is mapped by the same vma as contains @start.
 * Search forward from there, if not.  N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
 */
static struct page *new_page(struct page *page, unsigned long start, int **x)
{
	struct vm_area_struct *vma;
	unsigned long uninitialized_var(address);

	vma = find_vma(current->mm, start);
	while (vma) {
		address = page_address_in_vma(page, vma);
		if (address != -EFAULT)
			break;
		vma = vma->vm_next;
	}

	if (PageHuge(page)) {
		BUG_ON(!vma);
		return alloc_huge_page_noerr(vma, address, 1);
	}
	/*
	 * if !vma, alloc_page_vma() will use the task or system default policy
	 */
	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
}
#else

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
}

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	return -ENOSYS;
}

static struct page *new_page(struct page *page, unsigned long start, int **x)
{
	return NULL;
}
#endif

static long do_mbind(unsigned long start, unsigned long len,
		     unsigned short mode, unsigned short mode_flags,
		     nodemask_t *nmask, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct mempolicy *new;
	unsigned long end;
	int err;
	LIST_HEAD(pagelist);

	if (flags & ~(unsigned long)MPOL_MF_VALID)
		return -EINVAL;
	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	if (start & ~PAGE_MASK)
		return -EINVAL;

	if (mode == MPOL_DEFAULT)
		flags &= ~MPOL_MF_STRICT;
11566ce3c4c0SChristoph Lameter len = (len + PAGE_SIZE - 1) & PAGE_MASK; 11576ce3c4c0SChristoph Lameter end = start + len; 11586ce3c4c0SChristoph Lameter 11596ce3c4c0SChristoph Lameter if (end < start) 11606ce3c4c0SChristoph Lameter return -EINVAL; 11616ce3c4c0SChristoph Lameter if (end == start) 11626ce3c4c0SChristoph Lameter return 0; 11636ce3c4c0SChristoph Lameter 1164028fec41SDavid Rientjes new = mpol_new(mode, mode_flags, nmask); 11656ce3c4c0SChristoph Lameter if (IS_ERR(new)) 11666ce3c4c0SChristoph Lameter return PTR_ERR(new); 11676ce3c4c0SChristoph Lameter 1168b24f53a0SLee Schermerhorn if (flags & MPOL_MF_LAZY) 1169b24f53a0SLee Schermerhorn new->flags |= MPOL_F_MOF; 1170b24f53a0SLee Schermerhorn 11716ce3c4c0SChristoph Lameter /* 11726ce3c4c0SChristoph Lameter * If we are using the default policy then operation 11736ce3c4c0SChristoph Lameter * on discontinuous address spaces is okay after all 11746ce3c4c0SChristoph Lameter */ 11756ce3c4c0SChristoph Lameter if (!new) 11766ce3c4c0SChristoph Lameter flags |= MPOL_MF_DISCONTIG_OK; 11776ce3c4c0SChristoph Lameter 1178028fec41SDavid Rientjes pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n", 1179028fec41SDavid Rientjes start, start + len, mode, mode_flags, 118000ef2d2fSDavid Rientjes nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE); 11816ce3c4c0SChristoph Lameter 11820aedadf9SChristoph Lameter if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 11830aedadf9SChristoph Lameter 11840aedadf9SChristoph Lameter err = migrate_prep(); 11850aedadf9SChristoph Lameter if (err) 1186b05ca738SKOSAKI Motohiro goto mpol_out; 11870aedadf9SChristoph Lameter } 11884bfc4495SKAMEZAWA Hiroyuki { 11894bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 11904bfc4495SKAMEZAWA Hiroyuki if (scratch) { 11916ce3c4c0SChristoph Lameter down_write(&mm->mmap_sem); 119258568d2aSMiao Xie task_lock(current); 11934bfc4495SKAMEZAWA Hiroyuki err = mpol_set_nodemask(new, nmask, scratch); 119458568d2aSMiao Xie task_unlock(current); 11954bfc4495SKAMEZAWA Hiroyuki if (err) 119658568d2aSMiao Xie up_write(&mm->mmap_sem); 11974bfc4495SKAMEZAWA Hiroyuki } else 11984bfc4495SKAMEZAWA Hiroyuki err = -ENOMEM; 11994bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 12004bfc4495SKAMEZAWA Hiroyuki } 1201b05ca738SKOSAKI Motohiro if (err) 1202b05ca738SKOSAKI Motohiro goto mpol_out; 1203b05ca738SKOSAKI Motohiro 1204d05f0cdcSHugh Dickins err = queue_pages_range(mm, start, end, nmask, 12056ce3c4c0SChristoph Lameter flags | MPOL_MF_INVERT, &pagelist); 1206d05f0cdcSHugh Dickins if (!err) 12079d8cebd4SKOSAKI Motohiro err = mbind_range(mm, start, end, new); 12087e2ab150SChristoph Lameter 1209b24f53a0SLee Schermerhorn if (!err) { 1210b24f53a0SLee Schermerhorn int nr_failed = 0; 1211b24f53a0SLee Schermerhorn 1212cf608ac1SMinchan Kim if (!list_empty(&pagelist)) { 1213b24f53a0SLee Schermerhorn WARN_ON_ONCE(flags & MPOL_MF_LAZY); 1214d05f0cdcSHugh Dickins nr_failed = migrate_pages(&pagelist, new_page, NULL, 1215d05f0cdcSHugh Dickins start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND); 1216cf608ac1SMinchan Kim if (nr_failed) 121774060e4dSNaoya Horiguchi putback_movable_pages(&pagelist); 1218cf608ac1SMinchan Kim } 12196ce3c4c0SChristoph Lameter 1220b24f53a0SLee Schermerhorn if (nr_failed && (flags & MPOL_MF_STRICT)) 12216ce3c4c0SChristoph Lameter err = -EIO; 1222ab8a3e14SKOSAKI Motohiro } else 1223b0e5fd73SJoonsoo Kim putback_movable_pages(&pagelist); 1224b20a3503SChristoph Lameter 12256ce3c4c0SChristoph Lameter up_write(&mm->mmap_sem); 1226b05ca738SKOSAKI Motohiro mpol_out: 1227f0be3d32SLee Schermerhorn 
mpol_put(new); 12286ce3c4c0SChristoph Lameter return err; 12296ce3c4c0SChristoph Lameter } 12306ce3c4c0SChristoph Lameter 123139743889SChristoph Lameter /* 12328bccd85fSChristoph Lameter * User space interface with variable sized bitmaps for nodelists. 12338bccd85fSChristoph Lameter */ 12348bccd85fSChristoph Lameter 12358bccd85fSChristoph Lameter /* Copy a node mask from user space. */ 123639743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask, 12378bccd85fSChristoph Lameter unsigned long maxnode) 12388bccd85fSChristoph Lameter { 12398bccd85fSChristoph Lameter unsigned long k; 12408bccd85fSChristoph Lameter unsigned long nlongs; 12418bccd85fSChristoph Lameter unsigned long endmask; 12428bccd85fSChristoph Lameter 12438bccd85fSChristoph Lameter --maxnode; 12448bccd85fSChristoph Lameter nodes_clear(*nodes); 12458bccd85fSChristoph Lameter if (maxnode == 0 || !nmask) 12468bccd85fSChristoph Lameter return 0; 1247a9c930baSAndi Kleen if (maxnode > PAGE_SIZE*BITS_PER_BYTE) 1248636f13c1SChris Wright return -EINVAL; 12498bccd85fSChristoph Lameter 12508bccd85fSChristoph Lameter nlongs = BITS_TO_LONGS(maxnode); 12518bccd85fSChristoph Lameter if ((maxnode % BITS_PER_LONG) == 0) 12528bccd85fSChristoph Lameter endmask = ~0UL; 12538bccd85fSChristoph Lameter else 12548bccd85fSChristoph Lameter endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1; 12558bccd85fSChristoph Lameter 12568bccd85fSChristoph Lameter /* When the user specified more nodes than supported just check 12578bccd85fSChristoph Lameter if the non supported part is all zero. */ 12588bccd85fSChristoph Lameter if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) { 12598bccd85fSChristoph Lameter if (nlongs > PAGE_SIZE/sizeof(long)) 12608bccd85fSChristoph Lameter return -EINVAL; 12618bccd85fSChristoph Lameter for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) { 12628bccd85fSChristoph Lameter unsigned long t; 12638bccd85fSChristoph Lameter if (get_user(t, nmask + k)) 12648bccd85fSChristoph Lameter return -EFAULT; 12658bccd85fSChristoph Lameter if (k == nlongs - 1) { 12668bccd85fSChristoph Lameter if (t & endmask) 12678bccd85fSChristoph Lameter return -EINVAL; 12688bccd85fSChristoph Lameter } else if (t) 12698bccd85fSChristoph Lameter return -EINVAL; 12708bccd85fSChristoph Lameter } 12718bccd85fSChristoph Lameter nlongs = BITS_TO_LONGS(MAX_NUMNODES); 12728bccd85fSChristoph Lameter endmask = ~0UL; 12738bccd85fSChristoph Lameter } 12748bccd85fSChristoph Lameter 12758bccd85fSChristoph Lameter if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long))) 12768bccd85fSChristoph Lameter return -EFAULT; 12778bccd85fSChristoph Lameter nodes_addr(*nodes)[nlongs-1] &= endmask; 12788bccd85fSChristoph Lameter return 0; 12798bccd85fSChristoph Lameter } 12808bccd85fSChristoph Lameter 12818bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */ 12828bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode, 12838bccd85fSChristoph Lameter nodemask_t *nodes) 12848bccd85fSChristoph Lameter { 12858bccd85fSChristoph Lameter unsigned long copy = ALIGN(maxnode-1, 64) / 8; 12868bccd85fSChristoph Lameter const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long); 12878bccd85fSChristoph Lameter 12888bccd85fSChristoph Lameter if (copy > nbytes) { 12898bccd85fSChristoph Lameter if (copy > PAGE_SIZE) 12908bccd85fSChristoph Lameter return -EINVAL; 12918bccd85fSChristoph Lameter if (clear_user((char __user *)mask + nbytes, copy - nbytes)) 12928bccd85fSChristoph 
Lameter return -EFAULT; 12938bccd85fSChristoph Lameter copy = nbytes; 12948bccd85fSChristoph Lameter } 12958bccd85fSChristoph Lameter return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0; 12968bccd85fSChristoph Lameter } 12978bccd85fSChristoph Lameter 1298938bb9f5SHeiko Carstens SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len, 1299f7f28ca9SRasmus Villemoes unsigned long, mode, const unsigned long __user *, nmask, 1300938bb9f5SHeiko Carstens unsigned long, maxnode, unsigned, flags) 13018bccd85fSChristoph Lameter { 13028bccd85fSChristoph Lameter nodemask_t nodes; 13038bccd85fSChristoph Lameter int err; 1304028fec41SDavid Rientjes unsigned short mode_flags; 13058bccd85fSChristoph Lameter 1306028fec41SDavid Rientjes mode_flags = mode & MPOL_MODE_FLAGS; 1307028fec41SDavid Rientjes mode &= ~MPOL_MODE_FLAGS; 1308a3b51e01SDavid Rientjes if (mode >= MPOL_MAX) 1309a3b51e01SDavid Rientjes return -EINVAL; 13104c50bc01SDavid Rientjes if ((mode_flags & MPOL_F_STATIC_NODES) && 13114c50bc01SDavid Rientjes (mode_flags & MPOL_F_RELATIVE_NODES)) 13124c50bc01SDavid Rientjes return -EINVAL; 13138bccd85fSChristoph Lameter err = get_nodes(&nodes, nmask, maxnode); 13148bccd85fSChristoph Lameter if (err) 13158bccd85fSChristoph Lameter return err; 1316028fec41SDavid Rientjes return do_mbind(start, len, mode, mode_flags, &nodes, flags); 13178bccd85fSChristoph Lameter } 13188bccd85fSChristoph Lameter 13198bccd85fSChristoph Lameter /* Set the process memory policy */ 132023c8902dSRasmus Villemoes SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask, 1321938bb9f5SHeiko Carstens unsigned long, maxnode) 13228bccd85fSChristoph Lameter { 13238bccd85fSChristoph Lameter int err; 13248bccd85fSChristoph Lameter nodemask_t nodes; 1325028fec41SDavid Rientjes unsigned short flags; 13268bccd85fSChristoph Lameter 1327028fec41SDavid Rientjes flags = mode & MPOL_MODE_FLAGS; 1328028fec41SDavid Rientjes mode &= ~MPOL_MODE_FLAGS; 1329028fec41SDavid Rientjes if ((unsigned int)mode >= MPOL_MAX) 13308bccd85fSChristoph Lameter return -EINVAL; 13314c50bc01SDavid Rientjes if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES)) 13324c50bc01SDavid Rientjes return -EINVAL; 13338bccd85fSChristoph Lameter err = get_nodes(&nodes, nmask, maxnode); 13348bccd85fSChristoph Lameter if (err) 13358bccd85fSChristoph Lameter return err; 1336028fec41SDavid Rientjes return do_set_mempolicy(mode, flags, &nodes); 13378bccd85fSChristoph Lameter } 13388bccd85fSChristoph Lameter 1339938bb9f5SHeiko Carstens SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode, 1340938bb9f5SHeiko Carstens const unsigned long __user *, old_nodes, 1341938bb9f5SHeiko Carstens const unsigned long __user *, new_nodes) 134239743889SChristoph Lameter { 1343c69e8d9cSDavid Howells const struct cred *cred = current_cred(), *tcred; 1344596d7cfaSKOSAKI Motohiro struct mm_struct *mm = NULL; 134539743889SChristoph Lameter struct task_struct *task; 134639743889SChristoph Lameter nodemask_t task_nodes; 134739743889SChristoph Lameter int err; 1348596d7cfaSKOSAKI Motohiro nodemask_t *old; 1349596d7cfaSKOSAKI Motohiro nodemask_t *new; 1350596d7cfaSKOSAKI Motohiro NODEMASK_SCRATCH(scratch); 135139743889SChristoph Lameter 1352596d7cfaSKOSAKI Motohiro if (!scratch) 1353596d7cfaSKOSAKI Motohiro return -ENOMEM; 135439743889SChristoph Lameter 1355596d7cfaSKOSAKI Motohiro old = &scratch->mask1; 1356596d7cfaSKOSAKI Motohiro new = &scratch->mask2; 1357596d7cfaSKOSAKI Motohiro 1358596d7cfaSKOSAKI Motohiro err = get_nodes(old, 
old_nodes, maxnode); 135939743889SChristoph Lameter if (err) 1360596d7cfaSKOSAKI Motohiro goto out; 1361596d7cfaSKOSAKI Motohiro 1362596d7cfaSKOSAKI Motohiro err = get_nodes(new, new_nodes, maxnode); 1363596d7cfaSKOSAKI Motohiro if (err) 1364596d7cfaSKOSAKI Motohiro goto out; 136539743889SChristoph Lameter 136639743889SChristoph Lameter /* Find the mm_struct */ 136755cfaa3cSZeng Zhaoming rcu_read_lock(); 1368228ebcbeSPavel Emelyanov task = pid ? find_task_by_vpid(pid) : current; 136939743889SChristoph Lameter if (!task) { 137055cfaa3cSZeng Zhaoming rcu_read_unlock(); 1371596d7cfaSKOSAKI Motohiro err = -ESRCH; 1372596d7cfaSKOSAKI Motohiro goto out; 137339743889SChristoph Lameter } 13743268c63eSChristoph Lameter get_task_struct(task); 137539743889SChristoph Lameter 1376596d7cfaSKOSAKI Motohiro err = -EINVAL; 137739743889SChristoph Lameter 137839743889SChristoph Lameter /* 137939743889SChristoph Lameter * Check if this process has the right to modify the specified 138039743889SChristoph Lameter * process. The right exists if the process has administrative 13817f927fccSAlexey Dobriyan * capabilities, superuser privileges or the same 138239743889SChristoph Lameter * userid as the target process. 138339743889SChristoph Lameter */ 1384c69e8d9cSDavid Howells tcred = __task_cred(task); 1385b38a86ebSEric W. Biederman if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) && 1386b38a86ebSEric W. Biederman !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) && 138774c00241SChristoph Lameter !capable(CAP_SYS_NICE)) { 1388c69e8d9cSDavid Howells rcu_read_unlock(); 138939743889SChristoph Lameter err = -EPERM; 13903268c63eSChristoph Lameter goto out_put; 139139743889SChristoph Lameter } 1392c69e8d9cSDavid Howells rcu_read_unlock(); 139339743889SChristoph Lameter 139439743889SChristoph Lameter task_nodes = cpuset_mems_allowed(task); 139539743889SChristoph Lameter /* Is the user allowed to access the target nodes? */ 1396596d7cfaSKOSAKI Motohiro if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) { 139739743889SChristoph Lameter err = -EPERM; 13983268c63eSChristoph Lameter goto out_put; 139939743889SChristoph Lameter } 140039743889SChristoph Lameter 140101f13bd6SLai Jiangshan if (!nodes_subset(*new, node_states[N_MEMORY])) { 14023b42d28bSChristoph Lameter err = -EINVAL; 14033268c63eSChristoph Lameter goto out_put; 14043b42d28bSChristoph Lameter } 14053b42d28bSChristoph Lameter 140686c3a764SDavid Quigley err = security_task_movememory(task); 140786c3a764SDavid Quigley if (err) 14083268c63eSChristoph Lameter goto out_put; 140986c3a764SDavid Quigley 14103268c63eSChristoph Lameter mm = get_task_mm(task); 14113268c63eSChristoph Lameter put_task_struct(task); 1412f2a9ef88SSasha Levin 1413f2a9ef88SSasha Levin if (!mm) { 1414f2a9ef88SSasha Levin err = -EINVAL; 1415f2a9ef88SSasha Levin goto out; 1416f2a9ef88SSasha Levin } 1417f2a9ef88SSasha Levin 1418596d7cfaSKOSAKI Motohiro err = do_migrate_pages(mm, old, new, 141974c00241SChristoph Lameter capable(CAP_SYS_NICE) ? 
MPOL_MF_MOVE_ALL : MPOL_MF_MOVE); 14203268c63eSChristoph Lameter 142139743889SChristoph Lameter mmput(mm); 14223268c63eSChristoph Lameter out: 1423596d7cfaSKOSAKI Motohiro NODEMASK_SCRATCH_FREE(scratch); 1424596d7cfaSKOSAKI Motohiro 142539743889SChristoph Lameter return err; 14263268c63eSChristoph Lameter 14273268c63eSChristoph Lameter out_put: 14283268c63eSChristoph Lameter put_task_struct(task); 14293268c63eSChristoph Lameter goto out; 14303268c63eSChristoph Lameter 143139743889SChristoph Lameter } 143239743889SChristoph Lameter 143339743889SChristoph Lameter 14348bccd85fSChristoph Lameter /* Retrieve NUMA policy */ 1435938bb9f5SHeiko Carstens SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, 1436938bb9f5SHeiko Carstens unsigned long __user *, nmask, unsigned long, maxnode, 1437938bb9f5SHeiko Carstens unsigned long, addr, unsigned long, flags) 14388bccd85fSChristoph Lameter { 1439dbcb0f19SAdrian Bunk int err; 1440dbcb0f19SAdrian Bunk int uninitialized_var(pval); 14418bccd85fSChristoph Lameter nodemask_t nodes; 14428bccd85fSChristoph Lameter 14438bccd85fSChristoph Lameter if (nmask != NULL && maxnode < MAX_NUMNODES) 14448bccd85fSChristoph Lameter return -EINVAL; 14458bccd85fSChristoph Lameter 14468bccd85fSChristoph Lameter err = do_get_mempolicy(&pval, &nodes, addr, flags); 14478bccd85fSChristoph Lameter 14488bccd85fSChristoph Lameter if (err) 14498bccd85fSChristoph Lameter return err; 14508bccd85fSChristoph Lameter 14518bccd85fSChristoph Lameter if (policy && put_user(pval, policy)) 14528bccd85fSChristoph Lameter return -EFAULT; 14538bccd85fSChristoph Lameter 14548bccd85fSChristoph Lameter if (nmask) 14558bccd85fSChristoph Lameter err = copy_nodes_to_user(nmask, maxnode, &nodes); 14568bccd85fSChristoph Lameter 14578bccd85fSChristoph Lameter return err; 14588bccd85fSChristoph Lameter } 14598bccd85fSChristoph Lameter 14601da177e4SLinus Torvalds #ifdef CONFIG_COMPAT 14611da177e4SLinus Torvalds 1462c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, 1463c93e0f6cSHeiko Carstens compat_ulong_t __user *, nmask, 1464c93e0f6cSHeiko Carstens compat_ulong_t, maxnode, 1465c93e0f6cSHeiko Carstens compat_ulong_t, addr, compat_ulong_t, flags) 14661da177e4SLinus Torvalds { 14671da177e4SLinus Torvalds long err; 14681da177e4SLinus Torvalds unsigned long __user *nm = NULL; 14691da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 14701da177e4SLinus Torvalds DECLARE_BITMAP(bm, MAX_NUMNODES); 14711da177e4SLinus Torvalds 14721da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 14731da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 14741da177e4SLinus Torvalds 14751da177e4SLinus Torvalds if (nmask) 14761da177e4SLinus Torvalds nm = compat_alloc_user_space(alloc_size); 14771da177e4SLinus Torvalds 14781da177e4SLinus Torvalds err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags); 14791da177e4SLinus Torvalds 14801da177e4SLinus Torvalds if (!err && nmask) { 14812bbff6c7SKAMEZAWA Hiroyuki unsigned long copy_size; 14822bbff6c7SKAMEZAWA Hiroyuki copy_size = min_t(unsigned long, sizeof(bm), alloc_size); 14832bbff6c7SKAMEZAWA Hiroyuki err = copy_from_user(bm, nm, copy_size); 14841da177e4SLinus Torvalds /* ensure entire bitmap is zeroed */ 14851da177e4SLinus Torvalds err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8); 14861da177e4SLinus Torvalds err |= compat_put_bitmap(nmask, bm, nr_bits); 14871da177e4SLinus Torvalds } 14881da177e4SLinus Torvalds 14891da177e4SLinus Torvalds return err; 14901da177e4SLinus Torvalds } 
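/*
 * Illustrative userspace sketch (not kernel code): how these entry
 * points are typically reached on a 64-bit system, assuming the
 * syscall wrappers from libnuma's <numaif.h>. get_nodes() above
 * examines maxnode-1 bits, so maxnode must exceed the highest node
 * number used (a mask covering only node 0 => maxnode == 2 suffices):
 *
 *	unsigned long mask = 1UL << 0;
 *	set_mempolicy(MPOL_BIND, &mask, 2);
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	mbind(p, 4096, MPOL_BIND, &mask, 2, MPOL_MF_MOVE);
 *
 *	int mode;
 *	get_mempolicy(&mode, NULL, 0, NULL, 0);
 *
 * The compat wrappers in this section only repack 32-bit bitmaps into
 * the native unsigned long layout before reaching the same code.
 */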
14911da177e4SLinus Torvalds 1492c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask, 1493c93e0f6cSHeiko Carstens compat_ulong_t, maxnode) 14941da177e4SLinus Torvalds { 14951da177e4SLinus Torvalds long err = 0; 14961da177e4SLinus Torvalds unsigned long __user *nm = NULL; 14971da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 14981da177e4SLinus Torvalds DECLARE_BITMAP(bm, MAX_NUMNODES); 14991da177e4SLinus Torvalds 15001da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 15011da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 15021da177e4SLinus Torvalds 15031da177e4SLinus Torvalds if (nmask) { 15041da177e4SLinus Torvalds err = compat_get_bitmap(bm, nmask, nr_bits); 15051da177e4SLinus Torvalds nm = compat_alloc_user_space(alloc_size); 15061da177e4SLinus Torvalds err |= copy_to_user(nm, bm, alloc_size); 15071da177e4SLinus Torvalds } 15081da177e4SLinus Torvalds 15091da177e4SLinus Torvalds if (err) 15101da177e4SLinus Torvalds return -EFAULT; 15111da177e4SLinus Torvalds 15121da177e4SLinus Torvalds return sys_set_mempolicy(mode, nm, nr_bits+1); 15131da177e4SLinus Torvalds } 15141da177e4SLinus Torvalds 1515c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len, 1516c93e0f6cSHeiko Carstens compat_ulong_t, mode, compat_ulong_t __user *, nmask, 1517c93e0f6cSHeiko Carstens compat_ulong_t, maxnode, compat_ulong_t, flags) 15181da177e4SLinus Torvalds { 15191da177e4SLinus Torvalds long err = 0; 15201da177e4SLinus Torvalds unsigned long __user *nm = NULL; 15211da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 1522dfcd3c0dSAndi Kleen nodemask_t bm; 15231da177e4SLinus Torvalds 15241da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 15251da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 15261da177e4SLinus Torvalds 15271da177e4SLinus Torvalds if (nmask) { 1528dfcd3c0dSAndi Kleen err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits); 15291da177e4SLinus Torvalds nm = compat_alloc_user_space(alloc_size); 1530dfcd3c0dSAndi Kleen err |= copy_to_user(nm, nodes_addr(bm), alloc_size); 15311da177e4SLinus Torvalds } 15321da177e4SLinus Torvalds 15331da177e4SLinus Torvalds if (err) 15341da177e4SLinus Torvalds return -EFAULT; 15351da177e4SLinus Torvalds 15361da177e4SLinus Torvalds return sys_mbind(start, len, mode, nm, nr_bits+1, flags); 15371da177e4SLinus Torvalds } 15381da177e4SLinus Torvalds 15391da177e4SLinus Torvalds #endif 15401da177e4SLinus Torvalds 154174d2c3a0SOleg Nesterov struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, 154274d2c3a0SOleg Nesterov unsigned long addr) 15431da177e4SLinus Torvalds { 15448d90274bSOleg Nesterov struct mempolicy *pol = NULL; 15451da177e4SLinus Torvalds 15461da177e4SLinus Torvalds if (vma) { 1547480eccf9SLee Schermerhorn if (vma->vm_ops && vma->vm_ops->get_policy) { 15488d90274bSOleg Nesterov pol = vma->vm_ops->get_policy(vma, addr); 154900442ad0SMel Gorman } else if (vma->vm_policy) { 15501da177e4SLinus Torvalds pol = vma->vm_policy; 155100442ad0SMel Gorman 155200442ad0SMel Gorman /* 155300442ad0SMel Gorman * shmem_alloc_page() passes MPOL_F_SHARED policy with 155400442ad0SMel Gorman * a pseudo vma whose vma->vm_ops=NULL. 
Take a reference 155500442ad0SMel Gorman * count on these policies, which will be dropped by 155600442ad0SMel Gorman * mpol_cond_put() later. 155700442ad0SMel Gorman */ 155800442ad0SMel Gorman if (mpol_needs_cond_ref(pol)) 155900442ad0SMel Gorman mpol_get(pol); 156000442ad0SMel Gorman } 15611da177e4SLinus Torvalds } 1562f15ca78eSOleg Nesterov 156374d2c3a0SOleg Nesterov return pol; 156474d2c3a0SOleg Nesterov } 156574d2c3a0SOleg Nesterov 156674d2c3a0SOleg Nesterov /* 1567dd6eecb9SOleg Nesterov * get_vma_policy(@vma, @addr) 156874d2c3a0SOleg Nesterov * @vma: virtual memory area whose policy is sought 156974d2c3a0SOleg Nesterov * @addr: address in @vma for shared policy lookup 157074d2c3a0SOleg Nesterov * 157174d2c3a0SOleg Nesterov * Returns effective policy for a VMA at specified address. 1572dd6eecb9SOleg Nesterov * Falls back to current->mempolicy or system default policy, as necessary. 157374d2c3a0SOleg Nesterov * Shared policies [those marked as MPOL_F_SHARED] require an extra reference 157474d2c3a0SOleg Nesterov * count--added by the get_policy() vm_op, as appropriate--to protect against 157574d2c3a0SOleg Nesterov * freeing by another task. It is the caller's responsibility to free the 157674d2c3a0SOleg Nesterov * extra reference for shared policies. 157774d2c3a0SOleg Nesterov */ 1578dd6eecb9SOleg Nesterov static struct mempolicy *get_vma_policy(struct vm_area_struct *vma, 1579dd6eecb9SOleg Nesterov unsigned long addr) 158074d2c3a0SOleg Nesterov { 158174d2c3a0SOleg Nesterov struct mempolicy *pol = __get_vma_policy(vma, addr); 158274d2c3a0SOleg Nesterov 15838d90274bSOleg Nesterov if (!pol) 1584dd6eecb9SOleg Nesterov pol = get_task_policy(current); 15858d90274bSOleg Nesterov 15861da177e4SLinus Torvalds return pol; 15871da177e4SLinus Torvalds } 15881da177e4SLinus Torvalds 15896b6482bbSOleg Nesterov bool vma_policy_mof(struct vm_area_struct *vma) 1590fc314724SMel Gorman { 15916b6482bbSOleg Nesterov struct mempolicy *pol; 1592f15ca78eSOleg Nesterov 1593fc314724SMel Gorman if (vma->vm_ops && vma->vm_ops->get_policy) { 1594fc314724SMel Gorman bool ret = false; 1595fc314724SMel Gorman 1596fc314724SMel Gorman pol = vma->vm_ops->get_policy(vma, vma->vm_start); 1597fc314724SMel Gorman if (pol && (pol->flags & MPOL_F_MOF)) 1598fc314724SMel Gorman ret = true; 1599fc314724SMel Gorman mpol_cond_put(pol); 1600fc314724SMel Gorman 1601fc314724SMel Gorman return ret; 16028d90274bSOleg Nesterov } 16038d90274bSOleg Nesterov 1604fc314724SMel Gorman pol = vma->vm_policy; 16058d90274bSOleg Nesterov if (!pol) 16066b6482bbSOleg Nesterov pol = get_task_policy(current); 1607fc314724SMel Gorman 1608fc314724SMel Gorman return pol->flags & MPOL_F_MOF; 1609fc314724SMel Gorman } 1610fc314724SMel Gorman 1611d3eb1570SLai Jiangshan static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone) 1612d3eb1570SLai Jiangshan { 1613d3eb1570SLai Jiangshan enum zone_type dynamic_policy_zone = policy_zone; 1614d3eb1570SLai Jiangshan 1615d3eb1570SLai Jiangshan BUG_ON(dynamic_policy_zone == ZONE_MOVABLE); 1616d3eb1570SLai Jiangshan 1617d3eb1570SLai Jiangshan /* 1618d3eb1570SLai Jiangshan * If policy->v.nodes has movable memory only, 1619d3eb1570SLai Jiangshan * we apply the policy only when gfp_zone(gfp) == ZONE_MOVABLE. 1620d3eb1570SLai Jiangshan * 1621d3eb1570SLai Jiangshan * policy->v.nodes has already been intersected with node_states[N_MEMORY], 1622d3eb1570SLai Jiangshan * so if the following test fails, it implies that 1623d3eb1570SLai Jiangshan * policy->v.nodes has movable memory only.
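 *
 * Example: a node hot-added entirely as ZONE_MOVABLE fails the
 * N_HIGH_MEMORY intersection below, so a bind to such nodes only
 * constrains ZONE_MOVABLE allocations; lower-zone requests are
 * not filtered.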
1624d3eb1570SLai Jiangshan */ 1625d3eb1570SLai Jiangshan if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY])) 1626d3eb1570SLai Jiangshan dynamic_policy_zone = ZONE_MOVABLE; 1627d3eb1570SLai Jiangshan 1628d3eb1570SLai Jiangshan return zone >= dynamic_policy_zone; 1629d3eb1570SLai Jiangshan } 1630d3eb1570SLai Jiangshan 163152cd3b07SLee Schermerhorn /* 163252cd3b07SLee Schermerhorn * Return a nodemask representing a mempolicy for filtering nodes for 163352cd3b07SLee Schermerhorn * page allocation 163452cd3b07SLee Schermerhorn */ 163552cd3b07SLee Schermerhorn static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy) 163619770b32SMel Gorman { 163719770b32SMel Gorman /* Lower zones don't get a nodemask applied for MPOL_BIND */ 163845c4745aSLee Schermerhorn if (unlikely(policy->mode == MPOL_BIND) && 1639d3eb1570SLai Jiangshan apply_policy_zone(policy, gfp_zone(gfp)) && 164019770b32SMel Gorman cpuset_nodemask_valid_mems_allowed(&policy->v.nodes)) 164119770b32SMel Gorman return &policy->v.nodes; 164219770b32SMel Gorman 164319770b32SMel Gorman return NULL; 164419770b32SMel Gorman } 164519770b32SMel Gorman 164652cd3b07SLee Schermerhorn /* Return a zonelist indicated by gfp for node representing a mempolicy */ 16472f5f9486SAndi Kleen static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy, 16482f5f9486SAndi Kleen int nd) 16491da177e4SLinus Torvalds { 165045c4745aSLee Schermerhorn switch (policy->mode) { 16511da177e4SLinus Torvalds case MPOL_PREFERRED: 1652fc36b8d3SLee Schermerhorn if (!(policy->flags & MPOL_F_LOCAL)) 16531da177e4SLinus Torvalds nd = policy->v.preferred_node; 16541da177e4SLinus Torvalds break; 16551da177e4SLinus Torvalds case MPOL_BIND: 165619770b32SMel Gorman /* 165752cd3b07SLee Schermerhorn * Normally, MPOL_BIND allocations are node-local within the 165852cd3b07SLee Schermerhorn * allowed nodemask. However, if __GFP_THISNODE is set and the 16596eb27e1fSBob Liu * current node isn't part of the mask, we use the zonelist for 166052cd3b07SLee Schermerhorn * the first node in the mask instead. 166119770b32SMel Gorman */ 166219770b32SMel Gorman if (unlikely(gfp & __GFP_THISNODE) && 166319770b32SMel Gorman unlikely(!node_isset(nd, policy->v.nodes))) 166419770b32SMel Gorman nd = first_node(policy->v.nodes); 166519770b32SMel Gorman break; 16661da177e4SLinus Torvalds default: 16671da177e4SLinus Torvalds BUG(); 16681da177e4SLinus Torvalds } 16690e88460dSMel Gorman return node_zonelist(nd, gfp); 16701da177e4SLinus Torvalds } 16711da177e4SLinus Torvalds 16721da177e4SLinus Torvalds /* Do dynamic interleaving for a process */ 16731da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy) 16741da177e4SLinus Torvalds { 16751da177e4SLinus Torvalds unsigned nid, next; 16761da177e4SLinus Torvalds struct task_struct *me = current; 16771da177e4SLinus Torvalds 16781da177e4SLinus Torvalds nid = me->il_next; 1679dfcd3c0dSAndi Kleen next = next_node(nid, policy->v.nodes); 16801da177e4SLinus Torvalds if (next >= MAX_NUMNODES) 1681dfcd3c0dSAndi Kleen next = first_node(policy->v.nodes); 1682f5b087b5SDavid Rientjes if (next < MAX_NUMNODES) 16831da177e4SLinus Torvalds me->il_next = next; 16841da177e4SLinus Torvalds return nid; 16851da177e4SLinus Torvalds } 16861da177e4SLinus Torvalds 1687dc85da15SChristoph Lameter /* 1688dc85da15SChristoph Lameter * Depending on the memory policy provide a node from which to allocate the 1689dc85da15SChristoph Lameter * next slab entry. 
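 * E.g. under MPOL_INTERLEAVE each call advances the task's il_next
 * cursor, so successive slab pages are spread across the policy nodes.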
1690dc85da15SChristoph Lameter */ 16912a389610SDavid Rientjes unsigned int mempolicy_slab_node(void) 1692dc85da15SChristoph Lameter { 1693e7b691b0SAndi Kleen struct mempolicy *policy; 16942a389610SDavid Rientjes int node = numa_mem_id(); 1695e7b691b0SAndi Kleen 1696e7b691b0SAndi Kleen if (in_interrupt()) 16972a389610SDavid Rientjes return node; 1698e7b691b0SAndi Kleen 1699e7b691b0SAndi Kleen policy = current->mempolicy; 1700fc36b8d3SLee Schermerhorn if (!policy || policy->flags & MPOL_F_LOCAL) 17012a389610SDavid Rientjes return node; 1702765c4507SChristoph Lameter 1703bea904d5SLee Schermerhorn switch (policy->mode) { 1704bea904d5SLee Schermerhorn case MPOL_PREFERRED: 1705fc36b8d3SLee Schermerhorn /* 1706fc36b8d3SLee Schermerhorn * handled MPOL_F_LOCAL above 1707fc36b8d3SLee Schermerhorn */ 1708bea904d5SLee Schermerhorn return policy->v.preferred_node; 1709bea904d5SLee Schermerhorn 1710dc85da15SChristoph Lameter case MPOL_INTERLEAVE: 1711dc85da15SChristoph Lameter return interleave_nodes(policy); 1712dc85da15SChristoph Lameter 1713dd1a239fSMel Gorman case MPOL_BIND: { 1714dc85da15SChristoph Lameter /* 1715dc85da15SChristoph Lameter * Follow bind policy behavior and start allocation at the 1716dc85da15SChristoph Lameter * first node. 1717dc85da15SChristoph Lameter */ 171819770b32SMel Gorman struct zonelist *zonelist; 171919770b32SMel Gorman struct zone *zone; 172019770b32SMel Gorman enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL); 17212a389610SDavid Rientjes zonelist = &NODE_DATA(node)->node_zonelists[0]; 172219770b32SMel Gorman (void)first_zones_zonelist(zonelist, highest_zoneidx, 172319770b32SMel Gorman &policy->v.nodes, 172419770b32SMel Gorman &zone); 17252a389610SDavid Rientjes return zone ? zone->node : node; 1726dd1a239fSMel Gorman } 1727dc85da15SChristoph Lameter 1728dc85da15SChristoph Lameter default: 1729bea904d5SLee Schermerhorn BUG(); 1730dc85da15SChristoph Lameter } 1731dc85da15SChristoph Lameter } 1732dc85da15SChristoph Lameter 17331da177e4SLinus Torvalds /* Do static interleaving for a VMA with known offset. 
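   The node is the (off % weight)-th set bit of the policy nodemask,
   so a given offset always maps to the same node for a given mask.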
*/ 17341da177e4SLinus Torvalds static unsigned offset_il_node(struct mempolicy *pol, 17351da177e4SLinus Torvalds struct vm_area_struct *vma, unsigned long off) 17361da177e4SLinus Torvalds { 1737dfcd3c0dSAndi Kleen unsigned nnodes = nodes_weight(pol->v.nodes); 1738f5b087b5SDavid Rientjes unsigned target; 17391da177e4SLinus Torvalds int c; 1740b76ac7e7SJianguo Wu int nid = NUMA_NO_NODE; 17411da177e4SLinus Torvalds 1742f5b087b5SDavid Rientjes if (!nnodes) 1743f5b087b5SDavid Rientjes return numa_node_id(); 1744f5b087b5SDavid Rientjes target = (unsigned int)off % nnodes; 17451da177e4SLinus Torvalds c = 0; 17461da177e4SLinus Torvalds do { 1747dfcd3c0dSAndi Kleen nid = next_node(nid, pol->v.nodes); 17481da177e4SLinus Torvalds c++; 17491da177e4SLinus Torvalds } while (c <= target); 17501da177e4SLinus Torvalds return nid; 17511da177e4SLinus Torvalds } 17521da177e4SLinus Torvalds 17535da7ca86SChristoph Lameter /* Determine a node number for interleave */ 17545da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol, 17555da7ca86SChristoph Lameter struct vm_area_struct *vma, unsigned long addr, int shift) 17565da7ca86SChristoph Lameter { 17575da7ca86SChristoph Lameter if (vma) { 17585da7ca86SChristoph Lameter unsigned long off; 17595da7ca86SChristoph Lameter 17603b98b087SNishanth Aravamudan /* 17613b98b087SNishanth Aravamudan * for small pages, there is no difference between 17623b98b087SNishanth Aravamudan * shift and PAGE_SHIFT, so the bit-shift is safe. 17633b98b087SNishanth Aravamudan * for huge pages, since vm_pgoff is in units of small 17643b98b087SNishanth Aravamudan * pages, we need to shift off the always 0 bits to get 17653b98b087SNishanth Aravamudan * a useful offset. 17663b98b087SNishanth Aravamudan */ 17673b98b087SNishanth Aravamudan BUG_ON(shift < PAGE_SHIFT); 17683b98b087SNishanth Aravamudan off = vma->vm_pgoff >> (shift - PAGE_SHIFT); 17695da7ca86SChristoph Lameter off += (addr - vma->vm_start) >> shift; 17705da7ca86SChristoph Lameter return offset_il_node(pol, vma, off); 17715da7ca86SChristoph Lameter } else 17725da7ca86SChristoph Lameter return interleave_nodes(pol); 17735da7ca86SChristoph Lameter } 17745da7ca86SChristoph Lameter 1775778d3b0fSMichal Hocko /* 1776778d3b0fSMichal Hocko * Return the bit number of a random bit set in the nodemask. 
1777b76ac7e7SJianguo Wu * (returns NUMA_NO_NODE if nodemask is empty) 1778778d3b0fSMichal Hocko */ 1779778d3b0fSMichal Hocko int node_random(const nodemask_t *maskp) 1780778d3b0fSMichal Hocko { 1781b76ac7e7SJianguo Wu int w, bit = NUMA_NO_NODE; 1782778d3b0fSMichal Hocko 1783778d3b0fSMichal Hocko w = nodes_weight(*maskp); 1784778d3b0fSMichal Hocko if (w) 1785778d3b0fSMichal Hocko bit = bitmap_ord_to_pos(maskp->bits, 1786778d3b0fSMichal Hocko get_random_int() % w, MAX_NUMNODES); 1787778d3b0fSMichal Hocko return bit; 1788778d3b0fSMichal Hocko } 1789778d3b0fSMichal Hocko 179000ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS 1791480eccf9SLee Schermerhorn /* 1792480eccf9SLee Schermerhorn * huge_zonelist(@vma, @addr, @gfp_flags, @mpol) 1793b46e14acSFabian Frederick * @vma: virtual memory area whose policy is sought 1794b46e14acSFabian Frederick * @addr: address in @vma for shared policy lookup and interleave policy 1795b46e14acSFabian Frederick * @gfp_flags: for requested zone 1796b46e14acSFabian Frederick * @mpol: pointer to mempolicy pointer for reference counted mempolicy 1797b46e14acSFabian Frederick * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask 1798480eccf9SLee Schermerhorn * 179952cd3b07SLee Schermerhorn * Returns a zonelist suitable for a huge page allocation and a pointer 180052cd3b07SLee Schermerhorn * to the struct mempolicy for conditional unref after allocation. 180152cd3b07SLee Schermerhorn * If the effective policy is 'BIND', returns a pointer to the mempolicy's 180252cd3b07SLee Schermerhorn * @nodemask for filtering the zonelist. 1803c0ff7453SMiao Xie * 1804d26914d1SMel Gorman * Must be protected by read_mems_allowed_begin() 1805480eccf9SLee Schermerhorn */ 1806396faf03SMel Gorman struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr, 180719770b32SMel Gorman gfp_t gfp_flags, struct mempolicy **mpol, 180819770b32SMel Gorman nodemask_t **nodemask) 18095da7ca86SChristoph Lameter { 1810480eccf9SLee Schermerhorn struct zonelist *zl; 18115da7ca86SChristoph Lameter 1812dd6eecb9SOleg Nesterov *mpol = get_vma_policy(vma, addr); 181319770b32SMel Gorman *nodemask = NULL; /* assume !MPOL_BIND */ 18145da7ca86SChristoph Lameter 181552cd3b07SLee Schermerhorn if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) { 181652cd3b07SLee Schermerhorn zl = node_zonelist(interleave_nid(*mpol, vma, addr, 1817a5516438SAndi Kleen huge_page_shift(hstate_vma(vma))), gfp_flags); 181852cd3b07SLee Schermerhorn } else { 18192f5f9486SAndi Kleen zl = policy_zonelist(gfp_flags, *mpol, numa_node_id()); 182052cd3b07SLee Schermerhorn if ((*mpol)->mode == MPOL_BIND) 182152cd3b07SLee Schermerhorn *nodemask = &(*mpol)->v.nodes; 1822480eccf9SLee Schermerhorn } 1823480eccf9SLee Schermerhorn return zl; 18245da7ca86SChristoph Lameter } 182506808b08SLee Schermerhorn 182606808b08SLee Schermerhorn /* 182706808b08SLee Schermerhorn * init_nodemask_of_mempolicy 182806808b08SLee Schermerhorn * 182906808b08SLee Schermerhorn * If the current task's mempolicy is "default" [NULL], return 'false' 183006808b08SLee Schermerhorn * to indicate default policy. Otherwise, extract the policy nodemask 183106808b08SLee Schermerhorn * for 'bind' or 'interleave' policy into the argument nodemask, or 183206808b08SLee Schermerhorn * initialize the argument nodemask to contain the single node for 183306808b08SLee Schermerhorn * 'preferred' or 'local' policy and return 'true' to indicate presence 183406808b08SLee Schermerhorn * of non-default mempolicy.
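 *
 * E.g. MPOL_INTERLEAVE over nodes 0-3 yields the mask {0,1,2,3};
 * MPOL_PREFERRED with MPOL_F_LOCAL yields just the calling CPU's node.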
183506808b08SLee Schermerhorn * 183606808b08SLee Schermerhorn * We don't bother with reference counting the mempolicy [mpol_get/put] 183706808b08SLee Schermerhorn * because the current task is examining its own mempolicy and a task's 183806808b08SLee Schermerhorn * mempolicy is only ever changed by the task itself. 183906808b08SLee Schermerhorn * 184006808b08SLee Schermerhorn * N.B., it is the caller's responsibility to free a returned nodemask. 184106808b08SLee Schermerhorn */ 184206808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask) 184306808b08SLee Schermerhorn { 184406808b08SLee Schermerhorn struct mempolicy *mempolicy; 184506808b08SLee Schermerhorn int nid; 184606808b08SLee Schermerhorn 184706808b08SLee Schermerhorn if (!(mask && current->mempolicy)) 184806808b08SLee Schermerhorn return false; 184906808b08SLee Schermerhorn 1850c0ff7453SMiao Xie task_lock(current); 185106808b08SLee Schermerhorn mempolicy = current->mempolicy; 185206808b08SLee Schermerhorn switch (mempolicy->mode) { 185306808b08SLee Schermerhorn case MPOL_PREFERRED: 185406808b08SLee Schermerhorn if (mempolicy->flags & MPOL_F_LOCAL) 185506808b08SLee Schermerhorn nid = numa_node_id(); 185606808b08SLee Schermerhorn else 185706808b08SLee Schermerhorn nid = mempolicy->v.preferred_node; 185806808b08SLee Schermerhorn init_nodemask_of_node(mask, nid); 185906808b08SLee Schermerhorn break; 186006808b08SLee Schermerhorn 186106808b08SLee Schermerhorn case MPOL_BIND: 186206808b08SLee Schermerhorn /* Fall through */ 186306808b08SLee Schermerhorn case MPOL_INTERLEAVE: 186406808b08SLee Schermerhorn *mask = mempolicy->v.nodes; 186506808b08SLee Schermerhorn break; 186606808b08SLee Schermerhorn 186706808b08SLee Schermerhorn default: 186806808b08SLee Schermerhorn BUG(); 186906808b08SLee Schermerhorn } 1870c0ff7453SMiao Xie task_unlock(current); 187106808b08SLee Schermerhorn 187206808b08SLee Schermerhorn return true; 187306808b08SLee Schermerhorn } 187400ac59adSChen, Kenneth W #endif 18755da7ca86SChristoph Lameter 18766f48d0ebSDavid Rientjes /* 18776f48d0ebSDavid Rientjes * mempolicy_nodemask_intersects 18786f48d0ebSDavid Rientjes * 18796f48d0ebSDavid Rientjes * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default 18806f48d0ebSDavid Rientjes * policy. Otherwise, check for intersection between mask and the policy 18816f48d0ebSDavid Rientjes * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local' 18826f48d0ebSDavid Rientjes * policy, always return true since it may allocate elsewhere on fallback. 18836f48d0ebSDavid Rientjes * 18846f48d0ebSDavid Rientjes * Takes task_lock(tsk) to prevent freeing of its mempolicy.
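 * E.g. an MPOL_BIND to nodes {0,1} intersects mask {1,2} and returns
 * true, but returns false against {2,3}.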
18856f48d0ebSDavid Rientjes */ 18866f48d0ebSDavid Rientjes bool mempolicy_nodemask_intersects(struct task_struct *tsk, 18876f48d0ebSDavid Rientjes const nodemask_t *mask) 18886f48d0ebSDavid Rientjes { 18896f48d0ebSDavid Rientjes struct mempolicy *mempolicy; 18906f48d0ebSDavid Rientjes bool ret = true; 18916f48d0ebSDavid Rientjes 18926f48d0ebSDavid Rientjes if (!mask) 18936f48d0ebSDavid Rientjes return ret; 18946f48d0ebSDavid Rientjes task_lock(tsk); 18956f48d0ebSDavid Rientjes mempolicy = tsk->mempolicy; 18966f48d0ebSDavid Rientjes if (!mempolicy) 18976f48d0ebSDavid Rientjes goto out; 18986f48d0ebSDavid Rientjes 18996f48d0ebSDavid Rientjes switch (mempolicy->mode) { 19006f48d0ebSDavid Rientjes case MPOL_PREFERRED: 19016f48d0ebSDavid Rientjes /* 19026f48d0ebSDavid Rientjes * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to 19036f48d0ebSDavid Rientjes * allocate from, they may fallback to other nodes when oom. 19046f48d0ebSDavid Rientjes * Thus, it's possible for tsk to have allocated memory from 19056f48d0ebSDavid Rientjes * nodes in mask. 19066f48d0ebSDavid Rientjes */ 19076f48d0ebSDavid Rientjes break; 19086f48d0ebSDavid Rientjes case MPOL_BIND: 19096f48d0ebSDavid Rientjes case MPOL_INTERLEAVE: 19106f48d0ebSDavid Rientjes ret = nodes_intersects(mempolicy->v.nodes, *mask); 19116f48d0ebSDavid Rientjes break; 19126f48d0ebSDavid Rientjes default: 19136f48d0ebSDavid Rientjes BUG(); 19146f48d0ebSDavid Rientjes } 19156f48d0ebSDavid Rientjes out: 19166f48d0ebSDavid Rientjes task_unlock(tsk); 19176f48d0ebSDavid Rientjes return ret; 19186f48d0ebSDavid Rientjes } 19196f48d0ebSDavid Rientjes 19201da177e4SLinus Torvalds /* Allocate a page in interleaved policy. 19211da177e4SLinus Torvalds Own path because it needs to do special accounting. */ 1922662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, 1923662f3a0bSAndi Kleen unsigned nid) 19241da177e4SLinus Torvalds { 19251da177e4SLinus Torvalds struct zonelist *zl; 19261da177e4SLinus Torvalds struct page *page; 19271da177e4SLinus Torvalds 19280e88460dSMel Gorman zl = node_zonelist(nid, gfp); 19291da177e4SLinus Torvalds page = __alloc_pages(gfp, order, zl); 1930dd1a239fSMel Gorman if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0])) 1931ca889e6cSChristoph Lameter inc_zone_page_state(page, NUMA_INTERLEAVE_HIT); 19321da177e4SLinus Torvalds return page; 19331da177e4SLinus Torvalds } 19341da177e4SLinus Torvalds 19351da177e4SLinus Torvalds /** 19360bbbc0b3SAndrea Arcangeli * alloc_pages_vma - Allocate a page for a VMA. 19371da177e4SLinus Torvalds * 19381da177e4SLinus Torvalds * @gfp: 19391da177e4SLinus Torvalds * %GFP_USER user allocation. 19401da177e4SLinus Torvalds * %GFP_KERNEL kernel allocations, 19411da177e4SLinus Torvalds * %GFP_HIGHMEM highmem/user allocations, 19421da177e4SLinus Torvalds * %GFP_FS allocation should not call back into a file system. 19431da177e4SLinus Torvalds * %GFP_ATOMIC don't sleep. 19441da177e4SLinus Torvalds * 19450bbbc0b3SAndrea Arcangeli * @order:Order of the GFP allocation. 19461da177e4SLinus Torvalds * @vma: Pointer to VMA or NULL if not available. 19471da177e4SLinus Torvalds * @addr: Virtual Address of the allocation. Must be inside the VMA. 1948be97a41bSVlastimil Babka * @node: Which node to prefer for allocation (modulo policy). 
1949be97a41bSVlastimil Babka * @hugepage: for hugepages try only the preferred node if possible 19501da177e4SLinus Torvalds * 19511da177e4SLinus Torvalds * This function allocates a page from the kernel page pool and applies 19521da177e4SLinus Torvalds * a NUMA policy associated with the VMA or the current process. 19531da177e4SLinus Torvalds * When VMA is not NULL caller must hold down_read on the mmap_sem of the 19541da177e4SLinus Torvalds * mm_struct of the VMA to prevent it from going away. Should be used for 1955be97a41bSVlastimil Babka * all allocations for pages that will be mapped into user space. Returns 1956be97a41bSVlastimil Babka * NULL when no page can be allocated. 19571da177e4SLinus Torvalds */ 19581da177e4SLinus Torvalds struct page * 19590bbbc0b3SAndrea Arcangeli alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, 1960be97a41bSVlastimil Babka unsigned long addr, int node, bool hugepage) 19611da177e4SLinus Torvalds { 1962cc9a6c87SMel Gorman struct mempolicy *pol; 1963c0ff7453SMiao Xie struct page *page; 1964cc9a6c87SMel Gorman unsigned int cpuset_mems_cookie; 1965be97a41bSVlastimil Babka struct zonelist *zl; 1966be97a41bSVlastimil Babka nodemask_t *nmask; 19671da177e4SLinus Torvalds 1968cc9a6c87SMel Gorman retry_cpuset: 1969dd6eecb9SOleg Nesterov pol = get_vma_policy(vma, addr); 1970d26914d1SMel Gorman cpuset_mems_cookie = read_mems_allowed_begin(); 1971cc9a6c87SMel Gorman 1972be97a41bSVlastimil Babka if (pol->mode == MPOL_INTERLEAVE) { 19731da177e4SLinus Torvalds unsigned nid; 19745da7ca86SChristoph Lameter 19758eac563cSAndi Kleen nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order); 197652cd3b07SLee Schermerhorn mpol_cond_put(pol); 19770bbbc0b3SAndrea Arcangeli page = alloc_page_interleave(gfp, order, nid); 1978be97a41bSVlastimil Babka goto out; 19791da177e4SLinus Torvalds } 19801da177e4SLinus Torvalds 19810867a57cSVlastimil Babka if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) { 19820867a57cSVlastimil Babka int hpage_node = node; 19830867a57cSVlastimil Babka 19840867a57cSVlastimil Babka /* 19850867a57cSVlastimil Babka * For hugepage allocation and non-interleave policy which 19860867a57cSVlastimil Babka * allows the current node (or other explicitly preferred 19870867a57cSVlastimil Babka * node) we only try to allocate from the current/preferred 19880867a57cSVlastimil Babka * node and don't fall back to other nodes, as the cost of 19890867a57cSVlastimil Babka * remote accesses would likely offset THP benefits. 19900867a57cSVlastimil Babka * 19910867a57cSVlastimil Babka * If the policy is interleave, or does not allow the current 19920867a57cSVlastimil Babka * node in its nodemask, we allocate the standard way. 
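 * (The __GFP_THISNODE added to the allocation below is what pins
 * the attempt to hpage_node and prevents remote fallback.)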
19930867a57cSVlastimil Babka */ 19940867a57cSVlastimil Babka if (pol->mode == MPOL_PREFERRED && 19950867a57cSVlastimil Babka !(pol->flags & MPOL_F_LOCAL)) 19960867a57cSVlastimil Babka hpage_node = pol->v.preferred_node; 19970867a57cSVlastimil Babka 19980867a57cSVlastimil Babka nmask = policy_nodemask(gfp, pol); 19990867a57cSVlastimil Babka if (!nmask || node_isset(hpage_node, *nmask)) { 20000867a57cSVlastimil Babka mpol_cond_put(pol); 200196db800fSVlastimil Babka page = __alloc_pages_node(hpage_node, 20020867a57cSVlastimil Babka gfp | __GFP_THISNODE, order); 20030867a57cSVlastimil Babka goto out; 20040867a57cSVlastimil Babka } 20050867a57cSVlastimil Babka } 20060867a57cSVlastimil Babka 2007077fcf11SAneesh Kumar K.V nmask = policy_nodemask(gfp, pol); 2008be97a41bSVlastimil Babka zl = policy_zonelist(gfp, pol, node); 2009077fcf11SAneesh Kumar K.V mpol_cond_put(pol); 2010be97a41bSVlastimil Babka page = __alloc_pages_nodemask(gfp, order, zl, nmask); 2011be97a41bSVlastimil Babka out: 2012be97a41bSVlastimil Babka if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) 2013077fcf11SAneesh Kumar K.V goto retry_cpuset; 2014077fcf11SAneesh Kumar K.V return page; 2015077fcf11SAneesh Kumar K.V } 2016077fcf11SAneesh Kumar K.V 20171da177e4SLinus Torvalds /** 20181da177e4SLinus Torvalds * alloc_pages_current - Allocate pages. 20191da177e4SLinus Torvalds * 20201da177e4SLinus Torvalds * @gfp: 20211da177e4SLinus Torvalds * %GFP_USER user allocation, 20221da177e4SLinus Torvalds * %GFP_KERNEL kernel allocation, 20231da177e4SLinus Torvalds * %GFP_HIGHMEM highmem allocation, 20241da177e4SLinus Torvalds * %GFP_FS don't call back into a file system. 20251da177e4SLinus Torvalds * %GFP_ATOMIC don't sleep. 20261da177e4SLinus Torvalds * @order: Power of two of allocation size in pages. 0 is a single page. 20271da177e4SLinus Torvalds * 20281da177e4SLinus Torvalds * Allocate a page from the kernel page pool. When not in 20291da177e4SLinus Torvalds * interrupt context, apply the current process' NUMA policy. 20301da177e4SLinus Torvalds * Returns NULL when no page can be allocated. 20311da177e4SLinus Torvalds * 2032cf2a473cSPaul Jackson * Don't call cpuset_update_task_memory_state() unless 20331da177e4SLinus Torvalds * 1) it's ok to take cpuset_sem (can WAIT), and 20341da177e4SLinus Torvalds * 2) allocating for current task (not interrupt).
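 *
 * Note that __GFP_THISNODE allocations deliberately skip the task
 * policy and stick with default_policy, per the check at the top
 * of the function.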
20351da177e4SLinus Torvalds */ 2036dd0fc66fSAl Viro struct page *alloc_pages_current(gfp_t gfp, unsigned order) 20371da177e4SLinus Torvalds { 20388d90274bSOleg Nesterov struct mempolicy *pol = &default_policy; 2039c0ff7453SMiao Xie struct page *page; 2040cc9a6c87SMel Gorman unsigned int cpuset_mems_cookie; 20411da177e4SLinus Torvalds 20428d90274bSOleg Nesterov if (!in_interrupt() && !(gfp & __GFP_THISNODE)) 20438d90274bSOleg Nesterov pol = get_task_policy(current); 204452cd3b07SLee Schermerhorn 2045cc9a6c87SMel Gorman retry_cpuset: 2046d26914d1SMel Gorman cpuset_mems_cookie = read_mems_allowed_begin(); 2047cc9a6c87SMel Gorman 204852cd3b07SLee Schermerhorn /* 204952cd3b07SLee Schermerhorn * No reference counting needed for current->mempolicy 205052cd3b07SLee Schermerhorn * nor system default_policy 205152cd3b07SLee Schermerhorn */ 205245c4745aSLee Schermerhorn if (pol->mode == MPOL_INTERLEAVE) 2053c0ff7453SMiao Xie page = alloc_page_interleave(gfp, order, interleave_nodes(pol)); 2054c0ff7453SMiao Xie else 2055c0ff7453SMiao Xie page = __alloc_pages_nodemask(gfp, order, 20565c4b4be3SAndi Kleen policy_zonelist(gfp, pol, numa_node_id()), 20575c4b4be3SAndi Kleen policy_nodemask(gfp, pol)); 2058cc9a6c87SMel Gorman 2059d26914d1SMel Gorman if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) 2060cc9a6c87SMel Gorman goto retry_cpuset; 2061cc9a6c87SMel Gorman 2062c0ff7453SMiao Xie return page; 20631da177e4SLinus Torvalds } 20641da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_pages_current); 20651da177e4SLinus Torvalds 2066ef0855d3SOleg Nesterov int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst) 2067ef0855d3SOleg Nesterov { 2068ef0855d3SOleg Nesterov struct mempolicy *pol = mpol_dup(vma_policy(src)); 2069ef0855d3SOleg Nesterov 2070ef0855d3SOleg Nesterov if (IS_ERR(pol)) 2071ef0855d3SOleg Nesterov return PTR_ERR(pol); 2072ef0855d3SOleg Nesterov dst->vm_policy = pol; 2073ef0855d3SOleg Nesterov return 0; 2074ef0855d3SOleg Nesterov } 2075ef0855d3SOleg Nesterov 20764225399aSPaul Jackson /* 2077846a16bfSLee Schermerhorn * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it 20784225399aSPaul Jackson * rebinds the mempolicy it is copying by calling mpol_rebind_policy() 20794225399aSPaul Jackson * with the mems_allowed returned by cpuset_mems_allowed(). This 20804225399aSPaul Jackson * keeps mempolicies cpuset-relative after the cpuset moves. See 20814225399aSPaul Jackson * further kernel/cpuset.c update_nodemask(). 2082708c1bbcSMiao Xie * 2083708c1bbcSMiao Xie * current's mempolicy may be rebound by another task (the task that changes 2084708c1bbcSMiao Xie * the cpuset's mems), so we needn't do rebind work for the current task.
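 * (MPOL_F_REBINDING marks a two-step rebind already in progress; the
 * copy then finishes with MPOL_REBIND_STEP2 instead of a full
 * MPOL_REBIND_ONCE.)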
20854225399aSPaul Jackson */ 20864225399aSPaul Jackson 2087846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */ 2088846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old) 20891da177e4SLinus Torvalds { 20901da177e4SLinus Torvalds struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL); 20911da177e4SLinus Torvalds 20921da177e4SLinus Torvalds if (!new) 20931da177e4SLinus Torvalds return ERR_PTR(-ENOMEM); 2094708c1bbcSMiao Xie 2095708c1bbcSMiao Xie /* task's mempolicy is protected by alloc_lock */ 2096708c1bbcSMiao Xie if (old == current->mempolicy) { 2097708c1bbcSMiao Xie task_lock(current); 2098708c1bbcSMiao Xie *new = *old; 2099708c1bbcSMiao Xie task_unlock(current); 2100708c1bbcSMiao Xie } else 2101708c1bbcSMiao Xie *new = *old; 2102708c1bbcSMiao Xie 21034225399aSPaul Jackson if (current_cpuset_is_being_rebound()) { 21044225399aSPaul Jackson nodemask_t mems = cpuset_mems_allowed(current); 2105708c1bbcSMiao Xie if (new->flags & MPOL_F_REBINDING) 2106708c1bbcSMiao Xie mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2); 2107708c1bbcSMiao Xie else 2108708c1bbcSMiao Xie mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE); 21094225399aSPaul Jackson } 21101da177e4SLinus Torvalds atomic_set(&new->refcnt, 1); 21111da177e4SLinus Torvalds return new; 21121da177e4SLinus Torvalds } 21131da177e4SLinus Torvalds 21141da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */ 2115fcfb4dccSKOSAKI Motohiro bool __mpol_equal(struct mempolicy *a, struct mempolicy *b) 21161da177e4SLinus Torvalds { 21171da177e4SLinus Torvalds if (!a || !b) 2118fcfb4dccSKOSAKI Motohiro return false; 211945c4745aSLee Schermerhorn if (a->mode != b->mode) 2120fcfb4dccSKOSAKI Motohiro return false; 212119800502SBob Liu if (a->flags != b->flags) 2122fcfb4dccSKOSAKI Motohiro return false; 212319800502SBob Liu if (mpol_store_user_nodemask(a)) 212419800502SBob Liu if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask)) 2125fcfb4dccSKOSAKI Motohiro return false; 212619800502SBob Liu 212745c4745aSLee Schermerhorn switch (a->mode) { 212819770b32SMel Gorman case MPOL_BIND: 212919770b32SMel Gorman /* Fall through */ 21301da177e4SLinus Torvalds case MPOL_INTERLEAVE: 2131fcfb4dccSKOSAKI Motohiro return !!nodes_equal(a->v.nodes, b->v.nodes); 21321da177e4SLinus Torvalds case MPOL_PREFERRED: 213375719661SNamhyung Kim return a->v.preferred_node == b->v.preferred_node; 21341da177e4SLinus Torvalds default: 21351da177e4SLinus Torvalds BUG(); 2136fcfb4dccSKOSAKI Motohiro return false; 21371da177e4SLinus Torvalds } 21381da177e4SLinus Torvalds } 21391da177e4SLinus Torvalds 21401da177e4SLinus Torvalds /* 21411da177e4SLinus Torvalds * Shared memory backing store policy support. 21421da177e4SLinus Torvalds * 21431da177e4SLinus Torvalds * Remember policies even when nobody has shared memory mapped. 21441da177e4SLinus Torvalds * The policies are kept in Red-Black tree linked from the inode. 2145*4a8c7bb5SNathan Zimmer * They are protected by the sp->lock rwlock, which should be held 21461da177e4SLinus Torvalds * for any accesses to the tree. 21471da177e4SLinus Torvalds */ 21481da177e4SLinus Torvalds 2149*4a8c7bb5SNathan Zimmer /* 2150*4a8c7bb5SNathan Zimmer * lookup first element intersecting start-end. 
Caller holds sp->lock for 2151*4a8c7bb5SNathan Zimmer * reading or for writing 2152*4a8c7bb5SNathan Zimmer */ 21531da177e4SLinus Torvalds static struct sp_node * 21541da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end) 21551da177e4SLinus Torvalds { 21561da177e4SLinus Torvalds struct rb_node *n = sp->root.rb_node; 21571da177e4SLinus Torvalds 21581da177e4SLinus Torvalds while (n) { 21591da177e4SLinus Torvalds struct sp_node *p = rb_entry(n, struct sp_node, nd); 21601da177e4SLinus Torvalds 21611da177e4SLinus Torvalds if (start >= p->end) 21621da177e4SLinus Torvalds n = n->rb_right; 21631da177e4SLinus Torvalds else if (end <= p->start) 21641da177e4SLinus Torvalds n = n->rb_left; 21651da177e4SLinus Torvalds else 21661da177e4SLinus Torvalds break; 21671da177e4SLinus Torvalds } 21681da177e4SLinus Torvalds if (!n) 21691da177e4SLinus Torvalds return NULL; 21701da177e4SLinus Torvalds for (;;) { 21711da177e4SLinus Torvalds struct sp_node *w = NULL; 21721da177e4SLinus Torvalds struct rb_node *prev = rb_prev(n); 21731da177e4SLinus Torvalds if (!prev) 21741da177e4SLinus Torvalds break; 21751da177e4SLinus Torvalds w = rb_entry(prev, struct sp_node, nd); 21761da177e4SLinus Torvalds if (w->end <= start) 21771da177e4SLinus Torvalds break; 21781da177e4SLinus Torvalds n = prev; 21791da177e4SLinus Torvalds } 21801da177e4SLinus Torvalds return rb_entry(n, struct sp_node, nd); 21811da177e4SLinus Torvalds } 21821da177e4SLinus Torvalds 2183*4a8c7bb5SNathan Zimmer /* 2184*4a8c7bb5SNathan Zimmer * Insert a new shared policy into the list. Caller holds sp->lock for 2185*4a8c7bb5SNathan Zimmer * writing. 2186*4a8c7bb5SNathan Zimmer */ 21871da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new) 21881da177e4SLinus Torvalds { 21891da177e4SLinus Torvalds struct rb_node **p = &sp->root.rb_node; 21901da177e4SLinus Torvalds struct rb_node *parent = NULL; 21911da177e4SLinus Torvalds struct sp_node *nd; 21921da177e4SLinus Torvalds 21931da177e4SLinus Torvalds while (*p) { 21941da177e4SLinus Torvalds parent = *p; 21951da177e4SLinus Torvalds nd = rb_entry(parent, struct sp_node, nd); 21961da177e4SLinus Torvalds if (new->start < nd->start) 21971da177e4SLinus Torvalds p = &(*p)->rb_left; 21981da177e4SLinus Torvalds else if (new->end > nd->end) 21991da177e4SLinus Torvalds p = &(*p)->rb_right; 22001da177e4SLinus Torvalds else 22011da177e4SLinus Torvalds BUG(); 22021da177e4SLinus Torvalds } 22031da177e4SLinus Torvalds rb_link_node(&new->nd, parent, p); 22041da177e4SLinus Torvalds rb_insert_color(&new->nd, &sp->root); 2205140d5a49SPaul Mundt pr_debug("inserting %lx-%lx: %d\n", new->start, new->end, 220645c4745aSLee Schermerhorn new->policy ? 
new->policy->mode : 0); 22071da177e4SLinus Torvalds } 22081da177e4SLinus Torvalds 22091da177e4SLinus Torvalds /* Find shared policy intersecting idx */ 22101da177e4SLinus Torvalds struct mempolicy * 22111da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx) 22121da177e4SLinus Torvalds { 22131da177e4SLinus Torvalds struct mempolicy *pol = NULL; 22141da177e4SLinus Torvalds struct sp_node *sn; 22151da177e4SLinus Torvalds 22161da177e4SLinus Torvalds if (!sp->root.rb_node) 22171da177e4SLinus Torvalds return NULL; 2218*4a8c7bb5SNathan Zimmer read_lock(&sp->lock); 22191da177e4SLinus Torvalds sn = sp_lookup(sp, idx, idx+1); 22201da177e4SLinus Torvalds if (sn) { 22211da177e4SLinus Torvalds mpol_get(sn->policy); 22221da177e4SLinus Torvalds pol = sn->policy; 22231da177e4SLinus Torvalds } 2224*4a8c7bb5SNathan Zimmer read_unlock(&sp->lock); 22251da177e4SLinus Torvalds return pol; 22261da177e4SLinus Torvalds } 22271da177e4SLinus Torvalds 222863f74ca2SKOSAKI Motohiro static void sp_free(struct sp_node *n) 222963f74ca2SKOSAKI Motohiro { 223063f74ca2SKOSAKI Motohiro mpol_put(n->policy); 223163f74ca2SKOSAKI Motohiro kmem_cache_free(sn_cache, n); 223263f74ca2SKOSAKI Motohiro } 223363f74ca2SKOSAKI Motohiro 2234771fb4d8SLee Schermerhorn /** 2235771fb4d8SLee Schermerhorn * mpol_misplaced - check whether current page node is valid in policy 2236771fb4d8SLee Schermerhorn * 2237b46e14acSFabian Frederick * @page: page to be checked 2238b46e14acSFabian Frederick * @vma: vm area where page mapped 2239b46e14acSFabian Frederick * @addr: virtual address where page mapped 2240771fb4d8SLee Schermerhorn * 2241771fb4d8SLee Schermerhorn * Lookup current policy node id for vma,addr and "compare to" page's 2242771fb4d8SLee Schermerhorn * node id. 2243771fb4d8SLee Schermerhorn * 2244771fb4d8SLee Schermerhorn * Returns: 2245771fb4d8SLee Schermerhorn * -1 - not misplaced, page is in the right node 2246771fb4d8SLee Schermerhorn * node - node id where the page should be 2247771fb4d8SLee Schermerhorn * 2248771fb4d8SLee Schermerhorn * Policy determination "mimics" alloc_page_vma(). 2249771fb4d8SLee Schermerhorn * Called from fault path where we know the vma and faulting address. 
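 * E.g. when MPOL_F_MORON is set (automatic NUMA balancing), the
 * target node is the one backing the CPU that took the fault.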
2250771fb4d8SLee Schermerhorn */ 2251771fb4d8SLee Schermerhorn int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr) 2252771fb4d8SLee Schermerhorn { 2253771fb4d8SLee Schermerhorn struct mempolicy *pol; 2254771fb4d8SLee Schermerhorn struct zone *zone; 2255771fb4d8SLee Schermerhorn int curnid = page_to_nid(page); 2256771fb4d8SLee Schermerhorn unsigned long pgoff; 225790572890SPeter Zijlstra int thiscpu = raw_smp_processor_id(); 225890572890SPeter Zijlstra int thisnid = cpu_to_node(thiscpu); 2259771fb4d8SLee Schermerhorn int polnid = -1; 2260771fb4d8SLee Schermerhorn int ret = -1; 2261771fb4d8SLee Schermerhorn 2262771fb4d8SLee Schermerhorn BUG_ON(!vma); 2263771fb4d8SLee Schermerhorn 2264dd6eecb9SOleg Nesterov pol = get_vma_policy(vma, addr); 2265771fb4d8SLee Schermerhorn if (!(pol->flags & MPOL_F_MOF)) 2266771fb4d8SLee Schermerhorn goto out; 2267771fb4d8SLee Schermerhorn 2268771fb4d8SLee Schermerhorn switch (pol->mode) { 2269771fb4d8SLee Schermerhorn case MPOL_INTERLEAVE: 2270771fb4d8SLee Schermerhorn BUG_ON(addr >= vma->vm_end); 2271771fb4d8SLee Schermerhorn BUG_ON(addr < vma->vm_start); 2272771fb4d8SLee Schermerhorn 2273771fb4d8SLee Schermerhorn pgoff = vma->vm_pgoff; 2274771fb4d8SLee Schermerhorn pgoff += (addr - vma->vm_start) >> PAGE_SHIFT; 2275771fb4d8SLee Schermerhorn polnid = offset_il_node(pol, vma, pgoff); 2276771fb4d8SLee Schermerhorn break; 2277771fb4d8SLee Schermerhorn 2278771fb4d8SLee Schermerhorn case MPOL_PREFERRED: 2279771fb4d8SLee Schermerhorn if (pol->flags & MPOL_F_LOCAL) 2280771fb4d8SLee Schermerhorn polnid = numa_node_id(); 2281771fb4d8SLee Schermerhorn else 2282771fb4d8SLee Schermerhorn polnid = pol->v.preferred_node; 2283771fb4d8SLee Schermerhorn break; 2284771fb4d8SLee Schermerhorn 2285771fb4d8SLee Schermerhorn case MPOL_BIND: 2286771fb4d8SLee Schermerhorn /* 2287771fb4d8SLee Schermerhorn * allows binding to multiple nodes. 2288771fb4d8SLee Schermerhorn * use current page if in policy nodemask, 2289771fb4d8SLee Schermerhorn * else select nearest allowed node, if any. 2290771fb4d8SLee Schermerhorn * If no allowed nodes, use current [!misplaced]. 
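 * Illustrative example: curnid == 0 with pol->v.nodes == {1,2} falls
 * through to first_zones_zonelist(), which walks the local node's
 * zonelist filtered by the policy nodemask and picks the nearest
 * zone on an allowed node for polnid.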
*/ 2292771fb4d8SLee Schermerhorn if (node_isset(curnid, pol->v.nodes)) 2293771fb4d8SLee Schermerhorn goto out; 2294771fb4d8SLee Schermerhorn (void)first_zones_zonelist( 2295771fb4d8SLee Schermerhorn node_zonelist(numa_node_id(), GFP_HIGHUSER), 2296771fb4d8SLee Schermerhorn gfp_zone(GFP_HIGHUSER), 2297771fb4d8SLee Schermerhorn &pol->v.nodes, &zone); 2298771fb4d8SLee Schermerhorn polnid = zone->node; 2299771fb4d8SLee Schermerhorn break; 2300771fb4d8SLee Schermerhorn 2301771fb4d8SLee Schermerhorn default: 2302771fb4d8SLee Schermerhorn BUG(); 2303771fb4d8SLee Schermerhorn } 23045606e387SMel Gorman 23055606e387SMel Gorman /* Migrate the page towards the node whose CPU is referencing it */ 2306e42c8ff2SMel Gorman if (pol->flags & MPOL_F_MORON) { 230790572890SPeter Zijlstra polnid = thisnid; 23085606e387SMel Gorman 230910f39042SRik van Riel if (!should_numa_migrate_memory(current, page, curnid, thiscpu)) 2310de1c9ce6SRik van Riel goto out; 2311de1c9ce6SRik van Riel } 2312e42c8ff2SMel Gorman 2313771fb4d8SLee Schermerhorn if (curnid != polnid) 2314771fb4d8SLee Schermerhorn ret = polnid; 2315771fb4d8SLee Schermerhorn out: 2316771fb4d8SLee Schermerhorn mpol_cond_put(pol); 2317771fb4d8SLee Schermerhorn 2318771fb4d8SLee Schermerhorn return ret; 2319771fb4d8SLee Schermerhorn } 2320771fb4d8SLee Schermerhorn 23211da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n) 23221da177e4SLinus Torvalds { 2323140d5a49SPaul Mundt pr_debug("deleting %lx-%lx\n", n->start, n->end); 23241da177e4SLinus Torvalds rb_erase(&n->nd, &sp->root); 232563f74ca2SKOSAKI Motohiro sp_free(n); 23261da177e4SLinus Torvalds } 23271da177e4SLinus Torvalds 232842288fe3SMel Gorman static void sp_node_init(struct sp_node *node, unsigned long start, 232942288fe3SMel Gorman unsigned long end, struct mempolicy *pol) 233042288fe3SMel Gorman { 233142288fe3SMel Gorman node->start = start; 233242288fe3SMel Gorman node->end = end; 233342288fe3SMel Gorman node->policy = pol; 233442288fe3SMel Gorman } 233542288fe3SMel Gorman 2336dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end, 2337dbcb0f19SAdrian Bunk struct mempolicy *pol) 23381da177e4SLinus Torvalds { 2339869833f2SKOSAKI Motohiro struct sp_node *n; 2340869833f2SKOSAKI Motohiro struct mempolicy *newpol; 23411da177e4SLinus Torvalds 2342869833f2SKOSAKI Motohiro n = kmem_cache_alloc(sn_cache, GFP_KERNEL); 23431da177e4SLinus Torvalds if (!n) 23441da177e4SLinus Torvalds return NULL; 2345869833f2SKOSAKI Motohiro 2346869833f2SKOSAKI Motohiro newpol = mpol_dup(pol); 2347869833f2SKOSAKI Motohiro if (IS_ERR(newpol)) { 2348869833f2SKOSAKI Motohiro kmem_cache_free(sn_cache, n); 2349869833f2SKOSAKI Motohiro return NULL; 2350869833f2SKOSAKI Motohiro } 2351869833f2SKOSAKI Motohiro newpol->flags |= MPOL_F_SHARED; 235242288fe3SMel Gorman sp_node_init(n, start, end, newpol); 2353869833f2SKOSAKI Motohiro 23541da177e4SLinus Torvalds return n; 23551da177e4SLinus Torvalds } 23561da177e4SLinus Torvalds 23571da177e4SLinus Torvalds /* Replace a policy range.
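 * Illustrative example: installing [2,5) over an existing node [0,8)
 * trims the old entry to [0,2) and inserts a clone of its policy
 * covering [5,8) before the new range goes in.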
*/ 23581da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start, 23591da177e4SLinus Torvalds unsigned long end, struct sp_node *new) 23601da177e4SLinus Torvalds { 2361b22d127aSMel Gorman struct sp_node *n; 236242288fe3SMel Gorman struct sp_node *n_new = NULL; 236342288fe3SMel Gorman struct mempolicy *mpol_new = NULL; 2364b22d127aSMel Gorman int ret = 0; 23651da177e4SLinus Torvalds 236642288fe3SMel Gorman restart: 2367*4a8c7bb5SNathan Zimmer write_lock(&sp->lock); 23681da177e4SLinus Torvalds n = sp_lookup(sp, start, end); 23691da177e4SLinus Torvalds /* Take care of old policies in the same range. */ 23701da177e4SLinus Torvalds while (n && n->start < end) { 23711da177e4SLinus Torvalds struct rb_node *next = rb_next(&n->nd); 23721da177e4SLinus Torvalds if (n->start >= start) { 23731da177e4SLinus Torvalds if (n->end <= end) 23741da177e4SLinus Torvalds sp_delete(sp, n); 23751da177e4SLinus Torvalds else 23761da177e4SLinus Torvalds n->start = end; 23771da177e4SLinus Torvalds } else { 23781da177e4SLinus Torvalds /* Old policy spanning whole new range. */ 23791da177e4SLinus Torvalds if (n->end > end) { 238042288fe3SMel Gorman if (!n_new) 238142288fe3SMel Gorman goto alloc_new; 238242288fe3SMel Gorman 238342288fe3SMel Gorman *mpol_new = *n->policy; 238442288fe3SMel Gorman atomic_set(&mpol_new->refcnt, 1); 23857880639cSKOSAKI Motohiro sp_node_init(n_new, end, n->end, mpol_new); 23861da177e4SLinus Torvalds n->end = start; 23875ca39575SHillf Danton sp_insert(sp, n_new); 238842288fe3SMel Gorman n_new = NULL; 238942288fe3SMel Gorman mpol_new = NULL; 23901da177e4SLinus Torvalds break; 23911da177e4SLinus Torvalds } else 23921da177e4SLinus Torvalds n->end = start; 23931da177e4SLinus Torvalds } 23941da177e4SLinus Torvalds if (!next) 23951da177e4SLinus Torvalds break; 23961da177e4SLinus Torvalds n = rb_entry(next, struct sp_node, nd); 23971da177e4SLinus Torvalds } 23981da177e4SLinus Torvalds if (new) 23991da177e4SLinus Torvalds sp_insert(sp, new); 2400*4a8c7bb5SNathan Zimmer write_unlock(&sp->lock); 240142288fe3SMel Gorman ret = 0; 240242288fe3SMel Gorman 240342288fe3SMel Gorman err_out: 240442288fe3SMel Gorman if (mpol_new) 240542288fe3SMel Gorman mpol_put(mpol_new); 240642288fe3SMel Gorman if (n_new) 240742288fe3SMel Gorman kmem_cache_free(sn_cache, n_new); 240842288fe3SMel Gorman 2409b22d127aSMel Gorman return ret; 241042288fe3SMel Gorman 241142288fe3SMel Gorman alloc_new: 2412*4a8c7bb5SNathan Zimmer write_unlock(&sp->lock); 241342288fe3SMel Gorman ret = -ENOMEM; 241442288fe3SMel Gorman n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL); 241542288fe3SMel Gorman if (!n_new) 241642288fe3SMel Gorman goto err_out; 241742288fe3SMel Gorman mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL); 241842288fe3SMel Gorman if (!mpol_new) 241942288fe3SMel Gorman goto err_out; 242042288fe3SMel Gorman goto restart; 24211da177e4SLinus Torvalds } 24221da177e4SLinus Torvalds 242371fe804bSLee Schermerhorn /** 242471fe804bSLee Schermerhorn * mpol_shared_policy_init - initialize shared policy for inode 242571fe804bSLee Schermerhorn * @sp: pointer to inode shared policy 242671fe804bSLee Schermerhorn * @mpol: struct mempolicy to install 242771fe804bSLee Schermerhorn * 242871fe804bSLee Schermerhorn * Install non-NULL @mpol in inode's shared policy rb-tree. 242971fe804bSLee Schermerhorn * On entry, the current task has a reference on a non-NULL @mpol. 243071fe804bSLee Schermerhorn * This must be released on exit. 
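 * For tmpfs, @mpol typically originates from the "mpol=" mount option,
 * e.g. mount -o mpol=interleave:0-3 (illustrative nodelist).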
24314bfc4495SKAMEZAWA Hiroyuki * This is called during get_inode(), so we can use GFP_KERNEL. 243271fe804bSLee Schermerhorn */ 243371fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) 24347339ff83SRobin Holt { 243558568d2aSMiao Xie int ret; 243658568d2aSMiao Xie 243771fe804bSLee Schermerhorn sp->root = RB_ROOT; /* empty tree == default mempolicy */ 2438*4a8c7bb5SNathan Zimmer rwlock_init(&sp->lock); 24397339ff83SRobin Holt 244071fe804bSLee Schermerhorn if (mpol) { 24417339ff83SRobin Holt struct vm_area_struct pvma; 244271fe804bSLee Schermerhorn struct mempolicy *new; 24434bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 24447339ff83SRobin Holt 24454bfc4495SKAMEZAWA Hiroyuki if (!scratch) 24465c0c1654SLee Schermerhorn goto put_mpol; 244771fe804bSLee Schermerhorn /* contextualize the tmpfs mount point mempolicy */ 244871fe804bSLee Schermerhorn new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask); 244915d77835SLee Schermerhorn if (IS_ERR(new)) 24500cae3457SDan Carpenter goto free_scratch; /* no valid nodemask intersection */ 245158568d2aSMiao Xie 245258568d2aSMiao Xie task_lock(current); 24534bfc4495SKAMEZAWA Hiroyuki ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch); 245458568d2aSMiao Xie task_unlock(current); 245515d77835SLee Schermerhorn if (ret) 24565c0c1654SLee Schermerhorn goto put_new; 245771fe804bSLee Schermerhorn 245871fe804bSLee Schermerhorn /* Create pseudo-vma that contains just the policy */ 24597339ff83SRobin Holt memset(&pvma, 0, sizeof(struct vm_area_struct)); 246071fe804bSLee Schermerhorn pvma.vm_end = TASK_SIZE; /* policy covers entire file */ 246171fe804bSLee Schermerhorn mpol_set_shared_policy(sp, &pvma, new); /* adds ref */ 246215d77835SLee Schermerhorn 24635c0c1654SLee Schermerhorn put_new: 246471fe804bSLee Schermerhorn mpol_put(new); /* drop initial ref */ 24650cae3457SDan Carpenter free_scratch: 24664bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 24675c0c1654SLee Schermerhorn put_mpol: 24685c0c1654SLee Schermerhorn mpol_put(mpol); /* drop our incoming ref on sb mpol */ 24697339ff83SRobin Holt } 24707339ff83SRobin Holt } 24717339ff83SRobin Holt 24721da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info, 24731da177e4SLinus Torvalds struct vm_area_struct *vma, struct mempolicy *npol) 24741da177e4SLinus Torvalds { 24751da177e4SLinus Torvalds int err; 24761da177e4SLinus Torvalds struct sp_node *new = NULL; 24771da177e4SLinus Torvalds unsigned long sz = vma_pages(vma); 24781da177e4SLinus Torvalds 2479028fec41SDavid Rientjes pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n", 24801da177e4SLinus Torvalds vma->vm_pgoff, 248145c4745aSLee Schermerhorn sz, npol ? npol->mode : -1, 2482028fec41SDavid Rientjes npol ? npol->flags : -1, 248300ef2d2fSDavid Rientjes npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE); 24841da177e4SLinus Torvalds 24851da177e4SLinus Torvalds if (npol) { 24861da177e4SLinus Torvalds new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol); 24871da177e4SLinus Torvalds if (!new) 24881da177e4SLinus Torvalds return -ENOMEM; 24891da177e4SLinus Torvalds } 24901da177e4SLinus Torvalds err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new); 24911da177e4SLinus Torvalds if (err && new) 249263f74ca2SKOSAKI Motohiro sp_free(new); 24931da177e4SLinus Torvalds return err; 24941da177e4SLinus Torvalds } 24951da177e4SLinus Torvalds 24961da177e4SLinus Torvalds /* Free a backing policy store on inode delete.
*/ 24971da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p) 24981da177e4SLinus Torvalds { 24991da177e4SLinus Torvalds struct sp_node *n; 25001da177e4SLinus Torvalds struct rb_node *next; 25011da177e4SLinus Torvalds 25021da177e4SLinus Torvalds if (!p->root.rb_node) 25031da177e4SLinus Torvalds return; 2504*4a8c7bb5SNathan Zimmer write_lock(&p->lock); 25051da177e4SLinus Torvalds next = rb_first(&p->root); 25061da177e4SLinus Torvalds while (next) { 25071da177e4SLinus Torvalds n = rb_entry(next, struct sp_node, nd); 25081da177e4SLinus Torvalds next = rb_next(&n->nd); 250963f74ca2SKOSAKI Motohiro sp_delete(p, n); 25101da177e4SLinus Torvalds } 2511*4a8c7bb5SNathan Zimmer write_unlock(&p->lock); 25121da177e4SLinus Torvalds } 25131da177e4SLinus Torvalds 25141a687c2eSMel Gorman #ifdef CONFIG_NUMA_BALANCING 2515c297663cSMel Gorman static int __initdata numabalancing_override; 25161a687c2eSMel Gorman 25171a687c2eSMel Gorman static void __init check_numabalancing_enable(void) 25181a687c2eSMel Gorman { 25191a687c2eSMel Gorman bool numabalancing_default = false; 25201a687c2eSMel Gorman 25211a687c2eSMel Gorman if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED)) 25221a687c2eSMel Gorman numabalancing_default = true; 25231a687c2eSMel Gorman 2524c297663cSMel Gorman /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */ 2525c297663cSMel Gorman if (numabalancing_override) 2526c297663cSMel Gorman set_numabalancing_state(numabalancing_override == 1); 2527c297663cSMel Gorman 2528b0dc2b9bSMel Gorman if (num_online_nodes() > 1 && !numabalancing_override) { 25294a404beaSAndrew Morton pr_info("%s automatic NUMA balancing. " 2530c297663cSMel Gorman "Configure with numa_balancing= or the " 2531c297663cSMel Gorman "kernel.numa_balancing sysctl\n", 2532c297663cSMel Gorman numabalancing_default ?
"Enabling" : "Disabling"); 25331a687c2eSMel Gorman set_numabalancing_state(numabalancing_default); 25341a687c2eSMel Gorman } 25351a687c2eSMel Gorman } 25361a687c2eSMel Gorman 25371a687c2eSMel Gorman static int __init setup_numabalancing(char *str) 25381a687c2eSMel Gorman { 25391a687c2eSMel Gorman int ret = 0; 25401a687c2eSMel Gorman if (!str) 25411a687c2eSMel Gorman goto out; 25421a687c2eSMel Gorman 25431a687c2eSMel Gorman if (!strcmp(str, "enable")) { 2544c297663cSMel Gorman numabalancing_override = 1; 25451a687c2eSMel Gorman ret = 1; 25461a687c2eSMel Gorman } else if (!strcmp(str, "disable")) { 2547c297663cSMel Gorman numabalancing_override = -1; 25481a687c2eSMel Gorman ret = 1; 25491a687c2eSMel Gorman } 25501a687c2eSMel Gorman out: 25511a687c2eSMel Gorman if (!ret) 25524a404beaSAndrew Morton pr_warn("Unable to parse numa_balancing=\n"); 25531a687c2eSMel Gorman 25541a687c2eSMel Gorman return ret; 25551a687c2eSMel Gorman } 25561a687c2eSMel Gorman __setup("numa_balancing=", setup_numabalancing); 25571a687c2eSMel Gorman #else 25581a687c2eSMel Gorman static inline void __init check_numabalancing_enable(void) 25591a687c2eSMel Gorman { 25601a687c2eSMel Gorman } 25611a687c2eSMel Gorman #endif /* CONFIG_NUMA_BALANCING */ 25621a687c2eSMel Gorman 25631da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */ 25641da177e4SLinus Torvalds void __init numa_policy_init(void) 25651da177e4SLinus Torvalds { 2566b71636e2SPaul Mundt nodemask_t interleave_nodes; 2567b71636e2SPaul Mundt unsigned long largest = 0; 2568b71636e2SPaul Mundt int nid, prefer = 0; 2569b71636e2SPaul Mundt 25701da177e4SLinus Torvalds policy_cache = kmem_cache_create("numa_policy", 25711da177e4SLinus Torvalds sizeof(struct mempolicy), 257220c2df83SPaul Mundt 0, SLAB_PANIC, NULL); 25731da177e4SLinus Torvalds 25741da177e4SLinus Torvalds sn_cache = kmem_cache_create("shared_policy_node", 25751da177e4SLinus Torvalds sizeof(struct sp_node), 257620c2df83SPaul Mundt 0, SLAB_PANIC, NULL); 25771da177e4SLinus Torvalds 25785606e387SMel Gorman for_each_node(nid) { 25795606e387SMel Gorman preferred_node_policy[nid] = (struct mempolicy) { 25805606e387SMel Gorman .refcnt = ATOMIC_INIT(1), 25815606e387SMel Gorman .mode = MPOL_PREFERRED, 25825606e387SMel Gorman .flags = MPOL_F_MOF | MPOL_F_MORON, 25835606e387SMel Gorman .v = { .preferred_node = nid, }, 25845606e387SMel Gorman }; 25855606e387SMel Gorman } 25865606e387SMel Gorman 2587b71636e2SPaul Mundt /* 2588b71636e2SPaul Mundt * Set interleaving policy for system init. Interleaving is only 2589b71636e2SPaul Mundt * enabled across suitably sized nodes (default is >= 16MB), or 2590b71636e2SPaul Mundt * fall back to the largest node if they're all smaller. 2591b71636e2SPaul Mundt */ 2592b71636e2SPaul Mundt nodes_clear(interleave_nodes); 259301f13bd6SLai Jiangshan for_each_node_state(nid, N_MEMORY) { 2594b71636e2SPaul Mundt unsigned long total_pages = node_present_pages(nid); 25951da177e4SLinus Torvalds 2596b71636e2SPaul Mundt /* Preserve the largest node */ 2597b71636e2SPaul Mundt if (largest < total_pages) { 2598b71636e2SPaul Mundt largest = total_pages; 2599b71636e2SPaul Mundt prefer = nid; 2600b71636e2SPaul Mundt } 2601b71636e2SPaul Mundt 2602b71636e2SPaul Mundt /* Interleave this node? 
*/ 2603b71636e2SPaul Mundt if ((total_pages << PAGE_SHIFT) >= (16 << 20)) 2604b71636e2SPaul Mundt node_set(nid, interleave_nodes); 2605b71636e2SPaul Mundt } 2606b71636e2SPaul Mundt 2607b71636e2SPaul Mundt /* All too small, use the largest */ 2608b71636e2SPaul Mundt if (unlikely(nodes_empty(interleave_nodes))) 2609b71636e2SPaul Mundt node_set(prefer, interleave_nodes); 2610b71636e2SPaul Mundt 2611028fec41SDavid Rientjes if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes)) 2612b1de0d13SMitchel Humpherys pr_err("%s: interleaving failed\n", __func__); 26131a687c2eSMel Gorman 26141a687c2eSMel Gorman check_numabalancing_enable(); 26151da177e4SLinus Torvalds } 26161da177e4SLinus Torvalds 26178bccd85fSChristoph Lameter /* Reset policy of current process to default */ 26181da177e4SLinus Torvalds void numa_default_policy(void) 26191da177e4SLinus Torvalds { 2620028fec41SDavid Rientjes do_set_mempolicy(MPOL_DEFAULT, 0, NULL); 26211da177e4SLinus Torvalds } 262268860ec1SPaul Jackson 26234225399aSPaul Jackson /* 2624095f1fc4SLee Schermerhorn * Parse and format mempolicy from/to strings 2625095f1fc4SLee Schermerhorn */ 2626095f1fc4SLee Schermerhorn 2627095f1fc4SLee Schermerhorn /* 2628f2a07f40SHugh Dickins * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag. 26291a75a6c8SChristoph Lameter */ 2630345ace9cSLee Schermerhorn static const char * const policy_modes[] = 2631345ace9cSLee Schermerhorn { 2632345ace9cSLee Schermerhorn [MPOL_DEFAULT] = "default", 2633345ace9cSLee Schermerhorn [MPOL_PREFERRED] = "prefer", 2634345ace9cSLee Schermerhorn [MPOL_BIND] = "bind", 2635345ace9cSLee Schermerhorn [MPOL_INTERLEAVE] = "interleave", 2636d3a71033SLee Schermerhorn [MPOL_LOCAL] = "local", 2637345ace9cSLee Schermerhorn }; 26381a75a6c8SChristoph Lameter 2639095f1fc4SLee Schermerhorn 2640095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS 2641095f1fc4SLee Schermerhorn /** 2642f2a07f40SHugh Dickins * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option. 2643095f1fc4SLee Schermerhorn * @str: string containing mempolicy to parse 264471fe804bSLee Schermerhorn * @mpol: pointer to struct mempolicy pointer, returned on success. 
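 *
 * Illustrative inputs in the format documented below:
 * "interleave:0-3", "prefer=static:1", "bind=relative:0,2", "local".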
2645095f1fc4SLee Schermerhorn * 2646095f1fc4SLee Schermerhorn * Format of input: 2647095f1fc4SLee Schermerhorn * <mode>[=<flags>][:<nodelist>] 2648095f1fc4SLee Schermerhorn * 264971fe804bSLee Schermerhorn * On success, returns 0, else 1 2650095f1fc4SLee Schermerhorn */ 2651a7a88b23SHugh Dickins int mpol_parse_str(char *str, struct mempolicy **mpol) 2652095f1fc4SLee Schermerhorn { 265371fe804bSLee Schermerhorn struct mempolicy *new = NULL; 2654b4652e84SLee Schermerhorn unsigned short mode; 2655f2a07f40SHugh Dickins unsigned short mode_flags; 265671fe804bSLee Schermerhorn nodemask_t nodes; 2657095f1fc4SLee Schermerhorn char *nodelist = strchr(str, ':'); 2658095f1fc4SLee Schermerhorn char *flags = strchr(str, '='); 2659095f1fc4SLee Schermerhorn int err = 1; 2660095f1fc4SLee Schermerhorn 2661095f1fc4SLee Schermerhorn if (nodelist) { 2662095f1fc4SLee Schermerhorn /* NUL-terminate mode or flags string */ 2663095f1fc4SLee Schermerhorn *nodelist++ = '\0'; 266471fe804bSLee Schermerhorn if (nodelist_parse(nodelist, nodes)) 2665095f1fc4SLee Schermerhorn goto out; 266601f13bd6SLai Jiangshan if (!nodes_subset(nodes, node_states[N_MEMORY])) 2667095f1fc4SLee Schermerhorn goto out; 266871fe804bSLee Schermerhorn } else 266971fe804bSLee Schermerhorn nodes_clear(nodes); 267071fe804bSLee Schermerhorn 2671095f1fc4SLee Schermerhorn if (flags) 2672095f1fc4SLee Schermerhorn *flags++ = '\0'; /* terminate mode string */ 2673095f1fc4SLee Schermerhorn 2674479e2802SPeter Zijlstra for (mode = 0; mode < MPOL_MAX; mode++) { 2675345ace9cSLee Schermerhorn if (!strcmp(str, policy_modes[mode])) { 2676095f1fc4SLee Schermerhorn break; 2677095f1fc4SLee Schermerhorn } 2678095f1fc4SLee Schermerhorn } 2679a720094dSMel Gorman if (mode >= MPOL_MAX) 2680095f1fc4SLee Schermerhorn goto out; 2681095f1fc4SLee Schermerhorn 268271fe804bSLee Schermerhorn switch (mode) { 2683095f1fc4SLee Schermerhorn case MPOL_PREFERRED: 268471fe804bSLee Schermerhorn /* 268571fe804bSLee Schermerhorn * Insist on a nodelist of one node only 268671fe804bSLee Schermerhorn */ 2687095f1fc4SLee Schermerhorn if (nodelist) { 2688095f1fc4SLee Schermerhorn char *rest = nodelist; 2689095f1fc4SLee Schermerhorn while (isdigit(*rest)) 2690095f1fc4SLee Schermerhorn rest++; 2691926f2ae0SKOSAKI Motohiro if (*rest) 2692926f2ae0SKOSAKI Motohiro goto out; 2693095f1fc4SLee Schermerhorn } 2694095f1fc4SLee Schermerhorn break; 2695095f1fc4SLee Schermerhorn case MPOL_INTERLEAVE: 2696095f1fc4SLee Schermerhorn /* 2697095f1fc4SLee Schermerhorn * Default to online nodes with memory if no nodelist 2698095f1fc4SLee Schermerhorn */ 2699095f1fc4SLee Schermerhorn if (!nodelist) 270001f13bd6SLai Jiangshan nodes = node_states[N_MEMORY]; 27013f226aa1SLee Schermerhorn break; 270271fe804bSLee Schermerhorn case MPOL_LOCAL: 27033f226aa1SLee Schermerhorn /* 270471fe804bSLee Schermerhorn * Don't allow a nodelist; mpol_new() checks flags 27053f226aa1SLee Schermerhorn */ 270671fe804bSLee Schermerhorn if (nodelist) 27073f226aa1SLee Schermerhorn goto out; 270871fe804bSLee Schermerhorn mode = MPOL_PREFERRED; 27093f226aa1SLee Schermerhorn break; 2710413b43deSRavikiran G Thirumalai case MPOL_DEFAULT: 2711413b43deSRavikiran G Thirumalai /* 2712413b43deSRavikiran G Thirumalai * Insist on an empty nodelist 2713413b43deSRavikiran G Thirumalai */ 2714413b43deSRavikiran G Thirumalai if (!nodelist) 2715413b43deSRavikiran G Thirumalai err = 0; 2716413b43deSRavikiran G Thirumalai goto out; 2717d69b2e63SKOSAKI Motohiro case MPOL_BIND: 271871fe804bSLee Schermerhorn /* 2719d69b2e63SKOSAKI Motohiro * Insist on a nodelist
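 * (illustrative: "bind:0-3" parses here, while bare "bind" is rejected)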
272071fe804bSLee Schermerhorn */ 2721d69b2e63SKOSAKI Motohiro if (!nodelist) 2722d69b2e63SKOSAKI Motohiro goto out; 2723095f1fc4SLee Schermerhorn } 2724095f1fc4SLee Schermerhorn 272571fe804bSLee Schermerhorn mode_flags = 0; 2726095f1fc4SLee Schermerhorn if (flags) { 2727095f1fc4SLee Schermerhorn /* 2728095f1fc4SLee Schermerhorn * Currently, we only support two mutually exclusive 2729095f1fc4SLee Schermerhorn * mode flags. 2730095f1fc4SLee Schermerhorn */ 2731095f1fc4SLee Schermerhorn if (!strcmp(flags, "static")) 273271fe804bSLee Schermerhorn mode_flags |= MPOL_F_STATIC_NODES; 2733095f1fc4SLee Schermerhorn else if (!strcmp(flags, "relative")) 273471fe804bSLee Schermerhorn mode_flags |= MPOL_F_RELATIVE_NODES; 2735095f1fc4SLee Schermerhorn else 2736926f2ae0SKOSAKI Motohiro goto out; 2737095f1fc4SLee Schermerhorn } 273871fe804bSLee Schermerhorn 273971fe804bSLee Schermerhorn new = mpol_new(mode, mode_flags, &nodes); 274071fe804bSLee Schermerhorn if (IS_ERR(new)) 2741926f2ae0SKOSAKI Motohiro goto out; 2742926f2ae0SKOSAKI Motohiro 2743f2a07f40SHugh Dickins /* 2744f2a07f40SHugh Dickins * Save nodes for mpol_to_str() to show the tmpfs mount options 2745f2a07f40SHugh Dickins * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo. 2746f2a07f40SHugh Dickins */ 2747f2a07f40SHugh Dickins if (mode != MPOL_PREFERRED) 2748f2a07f40SHugh Dickins new->v.nodes = nodes; 2749f2a07f40SHugh Dickins else if (nodelist) 2750f2a07f40SHugh Dickins new->v.preferred_node = first_node(nodes); 2751f2a07f40SHugh Dickins else 2752f2a07f40SHugh Dickins new->flags |= MPOL_F_LOCAL; 2753f2a07f40SHugh Dickins 2754f2a07f40SHugh Dickins /* 2755f2a07f40SHugh Dickins * Save nodes for contextualization: this will be used to "clone" 2756f2a07f40SHugh Dickins * the mempolicy in a specific context [cpuset] at a later time. 2757f2a07f40SHugh Dickins */ 2758e17f74afSLee Schermerhorn new->w.user_nodemask = nodes; 2759f2a07f40SHugh Dickins 2760926f2ae0SKOSAKI Motohiro err = 0; 276171fe804bSLee Schermerhorn 2762095f1fc4SLee Schermerhorn out: 2763095f1fc4SLee Schermerhorn /* Restore string for error message */ 2764095f1fc4SLee Schermerhorn if (nodelist) 2765095f1fc4SLee Schermerhorn *--nodelist = ':'; 2766095f1fc4SLee Schermerhorn if (flags) 2767095f1fc4SLee Schermerhorn *--flags = '='; 276871fe804bSLee Schermerhorn if (!err) 276971fe804bSLee Schermerhorn *mpol = new; 2770095f1fc4SLee Schermerhorn return err; 2771095f1fc4SLee Schermerhorn } 2772095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */ 2773095f1fc4SLee Schermerhorn 277471fe804bSLee Schermerhorn /** 277571fe804bSLee Schermerhorn * mpol_to_str - format a mempolicy structure for printing 277671fe804bSLee Schermerhorn * @buffer: to contain formatted mempolicy string 277771fe804bSLee Schermerhorn * @maxlen: length of @buffer 277871fe804bSLee Schermerhorn * @pol: pointer to mempolicy to be formatted 277971fe804bSLee Schermerhorn * 2780948927eeSDavid Rientjes * Convert @pol into a string. If @buffer is too short, truncate the string. 2781948927eeSDavid Rientjes * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the 2782948927eeSDavid Rientjes * longest flag, "relative", and to display at least a few node ids. 
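 *
 * Illustrative output: "interleave=relative:0-3".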
27831a75a6c8SChristoph Lameter */ 2784948927eeSDavid Rientjes void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol) 27851a75a6c8SChristoph Lameter { 27861a75a6c8SChristoph Lameter char *p = buffer; 2787948927eeSDavid Rientjes nodemask_t nodes = NODE_MASK_NONE; 2788948927eeSDavid Rientjes unsigned short mode = MPOL_DEFAULT; 2789948927eeSDavid Rientjes unsigned short flags = 0; 27901a75a6c8SChristoph Lameter 27918790c71aSDavid Rientjes if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) { 2792bea904d5SLee Schermerhorn mode = pol->mode; 2793948927eeSDavid Rientjes flags = pol->flags; 2794948927eeSDavid Rientjes } 2795bea904d5SLee Schermerhorn 27961a75a6c8SChristoph Lameter switch (mode) { 27971a75a6c8SChristoph Lameter case MPOL_DEFAULT: 27981a75a6c8SChristoph Lameter break; 27991a75a6c8SChristoph Lameter case MPOL_PREFERRED: 2800fc36b8d3SLee Schermerhorn if (flags & MPOL_F_LOCAL) 2801f2a07f40SHugh Dickins mode = MPOL_LOCAL; 280253f2556bSLee Schermerhorn else 2803fc36b8d3SLee Schermerhorn node_set(pol->v.preferred_node, nodes); 28041a75a6c8SChristoph Lameter break; 28051a75a6c8SChristoph Lameter case MPOL_BIND: 28061a75a6c8SChristoph Lameter case MPOL_INTERLEAVE: 28071a75a6c8SChristoph Lameter nodes = pol->v.nodes; 28081a75a6c8SChristoph Lameter break; 28091a75a6c8SChristoph Lameter default: 2810948927eeSDavid Rientjes WARN_ON_ONCE(1); 2811948927eeSDavid Rientjes snprintf(p, maxlen, "unknown"); 2812948927eeSDavid Rientjes return; 28131a75a6c8SChristoph Lameter } 28141a75a6c8SChristoph Lameter 2815b7a9f420SDavid Rientjes p += snprintf(p, maxlen, "%s", policy_modes[mode]); 28161a75a6c8SChristoph Lameter 2817fc36b8d3SLee Schermerhorn if (flags & MPOL_MODE_FLAGS) { 2818948927eeSDavid Rientjes p += snprintf(p, buffer + maxlen - p, "="); 2819f5b087b5SDavid Rientjes 28202291990aSLee Schermerhorn /* 28212291990aSLee Schermerhorn * Currently, the only defined flags are mutually exclusive 28222291990aSLee Schermerhorn */ 2823f5b087b5SDavid Rientjes if (flags & MPOL_F_STATIC_NODES) 28242291990aSLee Schermerhorn p += snprintf(p, buffer + maxlen - p, "static"); 28252291990aSLee Schermerhorn else if (flags & MPOL_F_RELATIVE_NODES) 28262291990aSLee Schermerhorn p += snprintf(p, buffer + maxlen - p, "relative"); 2827f5b087b5SDavid Rientjes } 2828f5b087b5SDavid Rientjes 28299e763e0fSTejun Heo if (!nodes_empty(nodes)) 28309e763e0fSTejun Heo p += scnprintf(p, buffer + maxlen - p, ":%*pbl", 28319e763e0fSTejun Heo nodemask_pr_args(&nodes)); 28321a75a6c8SChristoph Lameter } 2833
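
/*
 * Illustrative userspace sketch (an example, not part of the kernel
 * source above): exercising the process and VMA policies implemented
 * in this file through the raw set_mempolicy()/mbind() syscalls.
 * Assumes <numaif.h> from libnuma (link with -lnuma) and a machine
 * with at least two NUMA nodes; error handling trimmed for brevity.
 */
#include <numaif.h>		/* set_mempolicy(), mbind(), MPOL_* */
#include <sys/mman.h>
#include <stdio.h>

int main(void)
{
	unsigned long nodemask = 0x3;	/* nodes 0 and 1 */
	size_t len = 16 * 4096;
	void *buf;

	/* Process policy: interleave new allocations across nodes 0-1 */
	if (set_mempolicy(MPOL_INTERLEAVE, &nodemask, sizeof(nodemask) * 8))
		perror("set_mempolicy");

	/* VMA policy: bind one anonymous mapping to node 0 only */
	buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	nodemask = 0x1;
	if (mbind(buf, len, MPOL_BIND, &nodemask, sizeof(nodemask) * 8, 0))
		perror("mbind");

	/* Back to the default local policy for future allocations */
	set_mempolicy(MPOL_DEFAULT, NULL, 0);
	return 0;
}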