/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Supports four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave	Allocate memory interleaved over a set of nodes,
 *		with normal fallback if it fails.
 *		For VMA based allocations this interleaves based on the
 *		offset into the backing object or offset into the mapping
 *		for anonymous memory. For process policy a process counter
 *		is used.
 *
 * bind		Only allocate memory on a specific set of nodes,
 *		no fallback.
 *		FIXME: memory is allocated starting with the first node
 *		to the last. It would be better if bind would truly restrict
 *		the allocation to memory nodes instead
 *
 * preferred	Try a specific node first before normal fallback.
 *		As a special case NUMA_NO_NODE here means do the allocation
 *		on the local CPU. This is normally identical to default,
 *		but useful to set in a VMA when you have a non default
 *		process policy.
 *
 * default	Allocate on the local node first, or when on a VMA
 *		use the process policy. This is what Linux always did
 *		in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
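/*
 * Illustration only (user-space view, not part of this file): the policies
 * above are installed with the set_mempolicy() and mbind() system calls.
 * A minimal sketch using the <numaif.h> wrappers from libnuma (link with
 * -lnuma), assuming nodes 0 and 1 exist and with error handling omitted:
 *
 *	#include <numaif.h>
 *	#include <sys/mman.h>
 *
 *	int main(void)
 *	{
 *		unsigned long both = 0x3;	// nodemask with nodes 0 and 1
 *		unsigned long node0 = 0x1;	// nodemask with node 0 only
 *		size_t len = 1 << 20;
 *		void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *		// process policy: interleave future allocations over 0,1
 *		set_mempolicy(MPOL_INTERLEAVE, &both, 8 * sizeof(both));
 *		// VMA policy: restrict this mapping to node 0 only;
 *		// it takes priority over the process policy for this range
 *		mbind(buf, len, MPOL_BIND, &node0, 8 * sizeof(node0), 0);
 *		return 0;
 *	}
 */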

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always graceful with that.
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

struct mempolicy *get_task_policy(struct task_struct *p)
{
	struct mempolicy *pol = p->mempolicy;
	int node;

	if (pol)
		return pol;

	node = numa_node_id();
	if (node != NUMA_NO_NODE) {
		pol = &preferred_node_policy[node];
		/* preferred_node_policy is not initialised early in boot */
		if (pol->mode)
			return pol;
	}

	return &default_policy;
}
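/*
 * Note on get_task_policy() above: the effective policy is resolved in
 * order -- the task's own mempolicy if it has one, then the per-node
 * preferred_node_policy[] entry for the local node once that array has
 * been initialised (pol->mode != 0), and finally the system-wide
 * default_policy, i.e. MPOL_PREFERRED | MPOL_F_LOCAL (local allocation).
 */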
static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	/*
	 * If the read-side task has no lock to protect task->mempolicy,
	 * the write-side task will rebind the task->mempolicy in two
	 * steps. The first step is setting all the newly allowed nodes,
	 * and the second step is clearing all the disallowed nodes. In
	 * this way, we can avoid ending up with no node to allocate
	 * pages from.
	 * If we have a lock to protect task->mempolicy in the read-side,
	 * we do the rebind directly.
	 *
	 * step:
	 *	MPOL_REBIND_ONCE  - do rebind work at once
	 *	MPOL_REBIND_STEP1 - set all the newly allowed nodes
	 *	MPOL_REBIND_STEP2 - clean all the disallowed nodes
	 */
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
			enum mpol_rebind_step step);
} mpol_ops[MPOL_MAX];

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}
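/*
 * For instance (based on the nodes_fold()/nodes_onto() semantics): a
 * user-supplied relative mask of {0,2} applied to an allowed set of
 * {3,5,7} first folds node numbers modulo nodes_weight(rel) = 3 (a
 * no-op here) and then maps bit n onto the n-th set bit of the allowed
 * set, yielding {3,7}.  A relative node 4 would fold to 1 and map to
 * node 5.
 */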
static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!nodes)
		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
	else if (nodes_empty(*nodes))
		return -EINVAL;			/* no allowed nodes */
	else
		pol->v.preferred_node = first_node(*nodes);
	return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy.  mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.  But, we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy.  May also be called holding the mmap_semaphore for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
	if (pol == NULL)
		return 0;
	/* Check N_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_MEMORY]);

	VM_BUG_ON(!nodes);
	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
		nodes = NULL;	/* explicit local allocation */
	else {
		if (pol->flags & MPOL_F_RELATIVE_NODES)
			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
		else
			nodes_and(nsc->mask2, *nodes, nsc->mask1);

		if (mpol_store_user_nodemask(pol))
			pol->w.user_nodemask = *nodes;
		else
			pol->w.cpuset_mems_allowed =
						cpuset_current_mems_allowed;
	}

	if (nodes)
		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	else
		ret = mpol_ops[pol->mode].create(pol, NULL);
	return ret;
}
/*
 * This function just creates a new policy, does some checks and simple
 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);
		}
	} else if (mode == MPOL_LOCAL) {
		if (!nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		mode = MPOL_PREFERRED;
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	return policy;
}

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
				enum mpol_rebind_step step)
{
}
/*
 * step:
 *	MPOL_REBIND_ONCE  - do rebind work at once
 *	MPOL_REBIND_STEP1 - set all the newly allowed nodes
 *	MPOL_REBIND_STEP2 - clean all the disallowed nodes
 */
static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
				 enum mpol_rebind_step step)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		/*
		 * if step == 1, we use ->w.cpuset_mems_allowed to cache the
		 * result
		 */
		if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
			nodes_remap(tmp, pol->v.nodes,
					pol->w.cpuset_mems_allowed, *nodes);
			pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
		} else if (step == MPOL_REBIND_STEP2) {
			tmp = pol->w.cpuset_mems_allowed;
			pol->w.cpuset_mems_allowed = *nodes;
		} else
			BUG();
	}

	if (nodes_empty(tmp))
		tmp = *nodes;

	if (step == MPOL_REBIND_STEP1)
		nodes_or(pol->v.nodes, pol->v.nodes, tmp);
	else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
		pol->v.nodes = tmp;
	else
		BUG();

	if (!node_isset(current->il_next, tmp)) {
		current->il_next = next_node_in(current->il_next, tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = numa_node_id();
	}
}
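/*
 * Worked example for the two-step rebind above: an MPOL_INTERLEAVE policy
 * over nodes {0,1} whose cpuset is moved to nodes {2,3}.  STEP1 remaps
 * {0,1} to {2,3} and ORs it in, so v.nodes becomes {0,1,2,3}; STEP2 then
 * installs the cached result, leaving v.nodes = {2,3}.  A lockless reader
 * therefore always sees a non-empty (if temporarily too-wide) nodemask
 * between the two steps.
 */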
static void mpol_rebind_preferred(struct mempolicy *pol,
				  const nodemask_t *nodes,
				  enum mpol_rebind_step step)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES) {
		int node = first_node(pol->w.user_nodemask);

		if (node_isset(node, *nodes)) {
			pol->v.preferred_node = node;
			pol->flags &= ~MPOL_F_LOCAL;
		} else
			pol->flags |= MPOL_F_LOCAL;
	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
		pol->v.preferred_node = first_node(tmp);
	} else if (!(pol->flags & MPOL_F_LOCAL)) {
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						   pol->w.cpuset_mems_allowed,
						   *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}
}

/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * If the read-side task has no lock to protect task->mempolicy, the
 * write-side task will rebind the task->mempolicy in two steps. The
 * first step is setting all the newly allowed nodes, and the second
 * step is clearing all the disallowed nodes. In this way, we can avoid
 * ending up with no node to allocate pages from.
 * If we have a lock to protect task->mempolicy in the read-side, we do
 * the rebind directly.
 *
 * step:
 *	MPOL_REBIND_ONCE  - do rebind work at once
 *	MPOL_REBIND_STEP1 - set all the newly allowed nodes
 *	MPOL_REBIND_STEP2 - clean all the disallowed nodes
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
				enum mpol_rebind_step step)
{
	if (!pol)
		return;
	if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;

	if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
		return;

	if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
		BUG();

	if (step == MPOL_REBIND_STEP1)
		pol->flags |= MPOL_F_REBINDING;
	else if (step == MPOL_REBIND_STEP2)
		pol->flags &= ~MPOL_F_REBINDING;
	else if (step >= MPOL_REBIND_NSTEP)
		BUG();

	mpol_ops[pol->mode].rebind(pol, newmask, step);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */
void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
			enum mpol_rebind_step step)
{
	mpol_rebind_policy(tsk->mempolicy, new, step);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
 */
void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
	up_write(&mm->mmap_sem);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_interleave,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,
	},
};

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

struct queue_pages {
	struct list_head *pagelist;
	unsigned long flags;
	nodemask_t *nmask;
	struct vm_area_struct *prev;
};

/*
 * Scan through pages checking if pages follow certain conditions,
 * and move them to the pagelist if they do.
 */
static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct page *page;
	struct queue_pages *qp = walk->private;
	unsigned long flags = qp->flags;
	int nid, ret;
	pte_t *pte;
	spinlock_t *ptl;

	if (pmd_trans_huge(*pmd)) {
		ptl = pmd_lock(walk->mm, pmd);
		if (pmd_trans_huge(*pmd)) {
			page = pmd_page(*pmd);
			if (is_huge_zero_page(page)) {
				spin_unlock(ptl);
				split_huge_pmd(vma, pmd, addr);
			} else {
				get_page(page);
				spin_unlock(ptl);
				lock_page(page);
				ret = split_huge_page(page);
				unlock_page(page);
				put_page(page);
				if (ret)
					return 0;
			}
		} else {
			spin_unlock(ptl);
		}
	}

	if (pmd_trans_unstable(pmd))
		return 0;
retry:
	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * vm_normal_page() filters out zero pages, but there might
		 * still be PageReserved pages to skip, perhaps in a VDSO.
		 */
		if (PageReserved(page))
			continue;
		nid = page_to_nid(page);
		if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
			continue;
		if (PageTransCompound(page)) {
			get_page(page);
			pte_unmap_unlock(pte, ptl);
			lock_page(page);
			ret = split_huge_page(page);
			unlock_page(page);
			put_page(page);
			/* Failed to split -- skip. */
			if (ret) {
				pte = pte_offset_map_lock(walk->mm, pmd,
						addr, &ptl);
				continue;
			}
			goto retry;
		}

		migrate_page_add(page, qp->pagelist, flags);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}
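/*
 * Note on the pte range scan above: transparent huge pages are not queued
 * as a single unit here.  A huge zero page is simply split at the pmd
 * level, while a regular THP is split into base pages with
 * split_huge_page() and the scan is retried; pages that fail to split are
 * skipped rather than queued for migration.
 */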
static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
			       unsigned long addr, unsigned long end,
			       struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
	struct queue_pages *qp = walk->private;
	unsigned long flags = qp->flags;
	int nid;
	struct page *page;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry))
		goto unlock;
	page = pte_page(entry);
	nid = page_to_nid(page);
	if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
		goto unlock;
	/* With MPOL_MF_MOVE, we migrate only unshared hugepages. */
	if (flags & (MPOL_MF_MOVE_ALL) ||
	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
		isolate_huge_page(page, qp->pagelist);
unlock:
	spin_unlock(ptl);
#else
	BUG();
#endif
	return 0;
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * This is used to mark a range of virtual addresses as inaccessible.
 * These are later cleared by a NUMA hinting fault. Depending on these
 * faults, pages may be migrated for better NUMA placement.
 *
 * This is assuming that NUMA faults are handled using PROT_NONE. If
 * an architecture makes a different choice, it will need further
 * changes to the core.
 */
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	int nr_updated;

	nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
	if (nr_updated)
		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);

	return nr_updated;
}
#else
static unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */
static int queue_pages_test_walk(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct queue_pages *qp = walk->private;
	unsigned long endvma = vma->vm_end;
	unsigned long flags = qp->flags;

	if (!vma_migratable(vma))
		return 1;

	if (endvma > end)
		endvma = end;
	if (vma->vm_start > start)
		start = vma->vm_start;

	if (!(flags & MPOL_MF_DISCONTIG_OK)) {
		if (!vma->vm_next && vma->vm_end < end)
			return -EFAULT;
		if (qp->prev && qp->prev->vm_end < vma->vm_start)
			return -EFAULT;
	}

	qp->prev = vma;

	if (flags & MPOL_MF_LAZY) {
		/* Similar to task_numa_work, skip inaccessible VMAs */
		if (!is_vm_hugetlb_page(vma) &&
			(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) &&
			!(vma->vm_flags & VM_MIXEDMAP))
			change_prot_numa(vma, start, endvma);
		return 1;
	}

	/* queue pages from current vma */
	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
		return 0;
	return 1;
}

/*
 * Walk through page tables and collect pages to be migrated.
 *
 * If pages found in a given range are on a set of nodes (determined by
 * @nodes and @flags), they are isolated and queued to the pagelist that
 * is passed via @pagelist.
 */
static int
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		nodemask_t *nodes, unsigned long flags,
		struct list_head *pagelist)
{
	struct queue_pages qp = {
		.pagelist = pagelist,
		.flags = flags,
		.nmask = nodes,
		.prev = NULL,
	};
	struct mm_walk queue_pages_walk = {
		.hugetlb_entry = queue_pages_hugetlb,
		.pmd_entry = queue_pages_pte_range,
		.test_walk = queue_pages_test_walk,
		.mm = mm,
		.private = &qp,
	};

	return walk_page_range(start, end, &queue_pages_walk);
}
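/*
 * Note on the nodemask test used by the walk entries above: with
 * MPOL_MF_INVERT set (as do_mbind() does), pages whose node *is* in the
 * nodemask are skipped, so only misplaced pages are queued; without it
 * (as migrate_to_node() does with a single-node mask), only pages that
 * are on the given nodes are queued.
 */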
/*
 * Apply policy to a single VMA
 * This must be called with the mmap_sem held for writing.
 */
static int vma_replace_policy(struct vm_area_struct *vma,
						struct mempolicy *pol)
{
	int err;
	struct mempolicy *old;
	struct mempolicy *new;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	new = mpol_dup(pol);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (vma->vm_ops && vma->vm_ops->set_policy) {
		err = vma->vm_ops->set_policy(vma, new);
		if (err)
			goto err_out;
	}

	old = vma->vm_policy;
	vma->vm_policy = new; /* protected by mmap_sem */
	mpol_put(old);

	return 0;
 err_out:
	mpol_put(new);
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end, struct mempolicy *new_pol)
{
	struct vm_area_struct *next;
	struct vm_area_struct *prev;
	struct vm_area_struct *vma;
	int err = 0;
	pgoff_t pgoff;
	unsigned long vmstart;
	unsigned long vmend;

	vma = find_vma(mm, start);
	if (!vma || vma->vm_start > start)
		return -EFAULT;

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
		next = vma->vm_next;
		vmstart = max(start, vma->vm_start);
		vmend = min(end, vma->vm_end);

		if (mpol_equal(vma_policy(vma), new_pol))
			continue;

		pgoff = vma->vm_pgoff +
			((vmstart - vma->vm_start) >> PAGE_SHIFT);
		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
				 vma->anon_vma, vma->vm_file, pgoff,
				 new_pol, vma->vm_userfaultfd_ctx);
		if (prev) {
			vma = prev;
			next = vma->vm_next;
			if (mpol_equal(vma_policy(vma), new_pol))
				continue;
			/* vma_merge() joined vma && vma->next, case 8 */
			goto replace;
		}
		if (vma->vm_start != vmstart) {
			err = split_vma(vma->vm_mm, vma, vmstart, 1);
			if (err)
				goto out;
		}
		if (vma->vm_end != vmend) {
			err = split_vma(vma->vm_mm, vma, vmend, 0);
			if (err)
				goto out;
		}
 replace:
		err = vma_replace_policy(vma, new_pol);
		if (err)
			goto out;
	}

 out:
	return err;
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new, *old;
	NODEMASK_SCRATCH(scratch);
	int ret;

	if (!scratch)
		return -ENOMEM;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new)) {
		ret = PTR_ERR(new);
		goto out;
	}

	task_lock(current);
	ret = mpol_set_nodemask(new, nodes, scratch);
	if (ret) {
		task_unlock(current);
		mpol_put(new);
		goto out;
	}
	old = current->mempolicy;
	current->mempolicy = new;
	if (new && new->mode == MPOL_INTERLEAVE &&
	    nodes_weight(new->v.nodes))
		current->il_next = first_node(new->v.nodes);
	task_unlock(current);
	mpol_put(old);
	ret = 0;
out:
	NODEMASK_SCRATCH_FREE(scratch);
	return ret;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (p == &default_policy)
		return;

	switch (p->mode) {
	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		if (!(p->flags & MPOL_F_LOCAL))
			node_set(p->v.preferred_node, *nodes);
		/* else return empty node mask for local allocation */
		break;
	default:
		BUG();
	}
}

static int lookup_node(unsigned long addr)
{
	struct page *p;
	int err;

	err = get_user_pages(addr & PAGE_MASK, 1, 0, 0, &p, NULL);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL.  We
		 * want to return MPOL_DEFAULT in this case.
		 */
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = current->il_next;
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	if (vma) {
		up_read(&current->mm->mmap_sem);
		vma = NULL;
	}

	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {
			*nmask = pol->w.user_nodemask;
		} else {
			task_lock(current);
			get_policy_nodemask(pol, nmask);
			task_unlock(current);
		}
	}

 out:
	mpol_cond_put(pol);
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}
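/*
 * Illustration only (user-space view): the MPOL_F_ADDR | MPOL_F_NODE
 * combination handled above reports which node actually backs a given
 * address.  A minimal sketch using the <numaif.h> wrapper from libnuma,
 * typically called after the page has been touched, with error handling
 * omitted:
 *
 *	#include <numaif.h>
 *
 *	int node_of(void *addr)
 *	{
 *		int node = -1;
 *
 *		get_mempolicy(&node, NULL, 0, addr,
 *			      MPOL_F_NODE | MPOL_F_ADDR);
 *		return node;
 *	}
 */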
#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
		if (!isolate_lru_page(page)) {
			list_add_tail(&page->lru, pagelist);
			inc_node_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));
		}
	}
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
	if (PageHuge(page))
		return alloc_huge_page_node(page_hstate(compound_head(page)),
					node);
	else
		return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
						__GFP_THISNODE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;

	nodes_clear(nmask);
	node_set(source, nmask);

	/*
	 * This does not "check" the range but isolates all pages that
	 * need migration.  Between passing in the full user address
	 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
	 */
	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, new_node_page, NULL, dest,
					MIGRATE_SYNC, MR_SYSCALL);
		if (err)
			putback_movable_pages(&pagelist);
	}

	return err;
}
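/*
 * For example, a call such as migrate_to_node(mm, 0, 3, MPOL_MF_MOVE)
 * queues the task's (unshared, under MPOL_MF_MOVE) pages that currently
 * sit on node 0 -- the single-node nodemask plus MPOL_MF_DISCONTIG_OK
 * covers the whole address space -- and replaces each one with a page
 * allocated on node 3 via new_node_page()'s __GFP_THISNODE allocation.
 */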
/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	int busy = 0;
	int err;
	nodemask_t tmp;

	err = migrate_prep();
	if (err)
		return err;

	down_read(&mm->mmap_sem);

	/*
	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
	 * bit in 'tmp', and return that <source, dest> pair for migration.
	 * The pair of nodemasks 'to' and 'from' define the map.
	 *
	 * If no pair of bits is found that way, fallback to picking some
	 * pair of 'source' and 'dest' bits that are not the same.  If the
	 * 'source' and 'dest' bits are the same, this represents a node
	 * that will be migrating to itself, so no pages need move.
	 *
	 * If no bits are left in 'tmp', or if all remaining bits left
	 * in 'tmp' correspond to the same bit in 'to', return false
	 * (nothing left to migrate).
	 *
	 * This lets us pick a pair of nodes to migrate between, such that
	 * if possible the dest node is not already occupied by some other
	 * source node, minimizing the risk of overloading the memory on a
	 * node that would happen if we migrated incoming memory to a node
	 * before migrating outgoing memory source that same node.
	 *
	 * A single scan of tmp is sufficient.  As we go, we remember the
	 * most recent <s, d> pair that moved (s != d).  If we find a pair
	 * that not only moved, but what's better, moved to an empty slot
	 * (d is not set in tmp), then we break out then, with that pair.
	 * Otherwise when we finish scanning tmp, we at least have the
	 * most recent <s, d> pair that moved.  If we get all the way through
	 * the scan of tmp without finding any node that moved, much less
	 * moved to an empty node, then there is nothing left worth migrating.
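	 *
	 * For example, moving from nodes {0,1} to {1,2} first picks the
	 * pair (1 -> 2), because node 2 is an "empty slot" not in the
	 * remaining source set, and only then (0 -> 1); node 1 is thus
	 * drained before pages from node 0 land on it.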
	 */

	tmp = *from;
	while (!nodes_empty(tmp)) {
		int s,d;
		int source = NUMA_NO_NODE;
		int dest = 0;

		for_each_node_mask(s, tmp) {

			/*
			 * do_migrate_pages() tries to maintain the relative
			 * node relationship of the pages established between
			 * threads and memory areas.
			 *
			 * However if the number of source nodes is not equal to
			 * the number of destination nodes we can not preserve
			 * this node relative relationship.  In that case, skip
			 * copying memory from a node that is in the destination
			 * mask.
			 *
			 * Example: [2,3,4] -> [3,4,5] moves everything.
			 *	    [0-7] - > [3,4,5] moves only 0,1,2,6,7.
			 */

			if ((nodes_weight(*from) != nodes_weight(*to)) &&
						(node_isset(s, *to)))
				continue;

			d = node_remap(s, *from, *to);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
				break;
		}
		if (source == NUMA_NO_NODE)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;
		if (err < 0)
			break;
	}
	up_read(&mm->mmap_sem);
	if (err < 0)
		return err;
	return busy;

}

/*
 * Allocate a new page for page migration based on vma policy.
 * Start by assuming the page is mapped by the same vma as contains @start.
 * Search forward from there, if not.  N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
 */
11253ad33b24SLee Schermerhorn */ 1126d05f0cdcSHugh Dickins static struct page *new_page(struct page *page, unsigned long start, int **x) 112795a402c3SChristoph Lameter { 1128d05f0cdcSHugh Dickins struct vm_area_struct *vma; 11293ad33b24SLee Schermerhorn unsigned long uninitialized_var(address); 113095a402c3SChristoph Lameter 1131d05f0cdcSHugh Dickins vma = find_vma(current->mm, start); 11323ad33b24SLee Schermerhorn while (vma) { 11333ad33b24SLee Schermerhorn address = page_address_in_vma(page, vma); 11343ad33b24SLee Schermerhorn if (address != -EFAULT) 11353ad33b24SLee Schermerhorn break; 11363ad33b24SLee Schermerhorn vma = vma->vm_next; 11373ad33b24SLee Schermerhorn } 11383ad33b24SLee Schermerhorn 113911c731e8SWanpeng Li if (PageHuge(page)) { 1140cc81717eSMichal Hocko BUG_ON(!vma); 114174060e4dSNaoya Horiguchi return alloc_huge_page_noerr(vma, address, 1); 114211c731e8SWanpeng Li } 114311c731e8SWanpeng Li /* 114411c731e8SWanpeng Li * if !vma, alloc_page_vma() will use task or system default policy 114511c731e8SWanpeng Li */ 11463ad33b24SLee Schermerhorn return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); 114795a402c3SChristoph Lameter } 1148b20a3503SChristoph Lameter #else 1149b20a3503SChristoph Lameter 1150b20a3503SChristoph Lameter static void migrate_page_add(struct page *page, struct list_head *pagelist, 1151b20a3503SChristoph Lameter unsigned long flags) 1152b20a3503SChristoph Lameter { 1153b20a3503SChristoph Lameter } 1154b20a3503SChristoph Lameter 11550ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, 11560ce72d4fSAndrew Morton const nodemask_t *to, int flags) 1157b20a3503SChristoph Lameter { 1158b20a3503SChristoph Lameter return -ENOSYS; 1159b20a3503SChristoph Lameter } 116095a402c3SChristoph Lameter 1161d05f0cdcSHugh Dickins static struct page *new_page(struct page *page, unsigned long start, int **x) 116295a402c3SChristoph Lameter { 116395a402c3SChristoph Lameter return NULL; 116495a402c3SChristoph Lameter } 1165b20a3503SChristoph Lameter #endif 1166b20a3503SChristoph Lameter 1167dbcb0f19SAdrian Bunk static long do_mbind(unsigned long start, unsigned long len, 1168028fec41SDavid Rientjes unsigned short mode, unsigned short mode_flags, 1169028fec41SDavid Rientjes nodemask_t *nmask, unsigned long flags) 11706ce3c4c0SChristoph Lameter { 11716ce3c4c0SChristoph Lameter struct mm_struct *mm = current->mm; 11726ce3c4c0SChristoph Lameter struct mempolicy *new; 11736ce3c4c0SChristoph Lameter unsigned long end; 11746ce3c4c0SChristoph Lameter int err; 11756ce3c4c0SChristoph Lameter LIST_HEAD(pagelist); 11766ce3c4c0SChristoph Lameter 1177b24f53a0SLee Schermerhorn if (flags & ~(unsigned long)MPOL_MF_VALID) 11786ce3c4c0SChristoph Lameter return -EINVAL; 117974c00241SChristoph Lameter if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE)) 11806ce3c4c0SChristoph Lameter return -EPERM; 11816ce3c4c0SChristoph Lameter 11826ce3c4c0SChristoph Lameter if (start & ~PAGE_MASK) 11836ce3c4c0SChristoph Lameter return -EINVAL; 11846ce3c4c0SChristoph Lameter 11856ce3c4c0SChristoph Lameter if (mode == MPOL_DEFAULT) 11866ce3c4c0SChristoph Lameter flags &= ~MPOL_MF_STRICT; 11876ce3c4c0SChristoph Lameter 11886ce3c4c0SChristoph Lameter len = (len + PAGE_SIZE - 1) & PAGE_MASK; 11896ce3c4c0SChristoph Lameter end = start + len; 11906ce3c4c0SChristoph Lameter 11916ce3c4c0SChristoph Lameter if (end < start) 11926ce3c4c0SChristoph Lameter return -EINVAL; 11936ce3c4c0SChristoph Lameter if (end == start) 11946ce3c4c0SChristoph Lameter return 0; 11956ce3c4c0SChristoph 
Lameter 1196028fec41SDavid Rientjes new = mpol_new(mode, mode_flags, nmask); 11976ce3c4c0SChristoph Lameter if (IS_ERR(new)) 11986ce3c4c0SChristoph Lameter return PTR_ERR(new); 11996ce3c4c0SChristoph Lameter 1200b24f53a0SLee Schermerhorn if (flags & MPOL_MF_LAZY) 1201b24f53a0SLee Schermerhorn new->flags |= MPOL_F_MOF; 1202b24f53a0SLee Schermerhorn 12036ce3c4c0SChristoph Lameter /* 12046ce3c4c0SChristoph Lameter * If we are using the default policy then operation 12056ce3c4c0SChristoph Lameter * on discontinuous address spaces is okay after all 12066ce3c4c0SChristoph Lameter */ 12076ce3c4c0SChristoph Lameter if (!new) 12086ce3c4c0SChristoph Lameter flags |= MPOL_MF_DISCONTIG_OK; 12096ce3c4c0SChristoph Lameter 1210028fec41SDavid Rientjes pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n", 1211028fec41SDavid Rientjes start, start + len, mode, mode_flags, 121200ef2d2fSDavid Rientjes nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE); 12136ce3c4c0SChristoph Lameter 12140aedadf9SChristoph Lameter if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 12150aedadf9SChristoph Lameter 12160aedadf9SChristoph Lameter err = migrate_prep(); 12170aedadf9SChristoph Lameter if (err) 1218b05ca738SKOSAKI Motohiro goto mpol_out; 12190aedadf9SChristoph Lameter } 12204bfc4495SKAMEZAWA Hiroyuki { 12214bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 12224bfc4495SKAMEZAWA Hiroyuki if (scratch) { 12236ce3c4c0SChristoph Lameter down_write(&mm->mmap_sem); 122458568d2aSMiao Xie task_lock(current); 12254bfc4495SKAMEZAWA Hiroyuki err = mpol_set_nodemask(new, nmask, scratch); 122658568d2aSMiao Xie task_unlock(current); 12274bfc4495SKAMEZAWA Hiroyuki if (err) 122858568d2aSMiao Xie up_write(&mm->mmap_sem); 12294bfc4495SKAMEZAWA Hiroyuki } else 12304bfc4495SKAMEZAWA Hiroyuki err = -ENOMEM; 12314bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 12324bfc4495SKAMEZAWA Hiroyuki } 1233b05ca738SKOSAKI Motohiro if (err) 1234b05ca738SKOSAKI Motohiro goto mpol_out; 1235b05ca738SKOSAKI Motohiro 1236d05f0cdcSHugh Dickins err = queue_pages_range(mm, start, end, nmask, 12376ce3c4c0SChristoph Lameter flags | MPOL_MF_INVERT, &pagelist); 1238d05f0cdcSHugh Dickins if (!err) 12399d8cebd4SKOSAKI Motohiro err = mbind_range(mm, start, end, new); 12407e2ab150SChristoph Lameter 1241b24f53a0SLee Schermerhorn if (!err) { 1242b24f53a0SLee Schermerhorn int nr_failed = 0; 1243b24f53a0SLee Schermerhorn 1244cf608ac1SMinchan Kim if (!list_empty(&pagelist)) { 1245b24f53a0SLee Schermerhorn WARN_ON_ONCE(flags & MPOL_MF_LAZY); 1246d05f0cdcSHugh Dickins nr_failed = migrate_pages(&pagelist, new_page, NULL, 1247d05f0cdcSHugh Dickins start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND); 1248cf608ac1SMinchan Kim if (nr_failed) 124974060e4dSNaoya Horiguchi putback_movable_pages(&pagelist); 1250cf608ac1SMinchan Kim } 12516ce3c4c0SChristoph Lameter 1252b24f53a0SLee Schermerhorn if (nr_failed && (flags & MPOL_MF_STRICT)) 12536ce3c4c0SChristoph Lameter err = -EIO; 1254ab8a3e14SKOSAKI Motohiro } else 1255b0e5fd73SJoonsoo Kim putback_movable_pages(&pagelist); 1256b20a3503SChristoph Lameter 12576ce3c4c0SChristoph Lameter up_write(&mm->mmap_sem); 1258b05ca738SKOSAKI Motohiro mpol_out: 1259f0be3d32SLee Schermerhorn mpol_put(new); 12606ce3c4c0SChristoph Lameter return err; 12616ce3c4c0SChristoph Lameter } 12626ce3c4c0SChristoph Lameter 126339743889SChristoph Lameter /* 12648bccd85fSChristoph Lameter * User space interface with variable sized bitmaps for nodelists. 
12658bccd85fSChristoph Lameter */ 12668bccd85fSChristoph Lameter 12678bccd85fSChristoph Lameter /* Copy a node mask from user space. */ 126839743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask, 12698bccd85fSChristoph Lameter unsigned long maxnode) 12708bccd85fSChristoph Lameter { 12718bccd85fSChristoph Lameter unsigned long k; 12728bccd85fSChristoph Lameter unsigned long nlongs; 12738bccd85fSChristoph Lameter unsigned long endmask; 12748bccd85fSChristoph Lameter 12758bccd85fSChristoph Lameter --maxnode; 12768bccd85fSChristoph Lameter nodes_clear(*nodes); 12778bccd85fSChristoph Lameter if (maxnode == 0 || !nmask) 12788bccd85fSChristoph Lameter return 0; 1279a9c930baSAndi Kleen if (maxnode > PAGE_SIZE*BITS_PER_BYTE) 1280636f13c1SChris Wright return -EINVAL; 12818bccd85fSChristoph Lameter 12828bccd85fSChristoph Lameter nlongs = BITS_TO_LONGS(maxnode); 12838bccd85fSChristoph Lameter if ((maxnode % BITS_PER_LONG) == 0) 12848bccd85fSChristoph Lameter endmask = ~0UL; 12858bccd85fSChristoph Lameter else 12868bccd85fSChristoph Lameter endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1; 12878bccd85fSChristoph Lameter 12888bccd85fSChristoph Lameter /* When the user specified more nodes than supported just check 12898bccd85fSChristoph Lameter if the non supported part is all zero. */ 12908bccd85fSChristoph Lameter if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) { 12918bccd85fSChristoph Lameter if (nlongs > PAGE_SIZE/sizeof(long)) 12928bccd85fSChristoph Lameter return -EINVAL; 12938bccd85fSChristoph Lameter for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) { 12948bccd85fSChristoph Lameter unsigned long t; 12958bccd85fSChristoph Lameter if (get_user(t, nmask + k)) 12968bccd85fSChristoph Lameter return -EFAULT; 12978bccd85fSChristoph Lameter if (k == nlongs - 1) { 12988bccd85fSChristoph Lameter if (t & endmask) 12998bccd85fSChristoph Lameter return -EINVAL; 13008bccd85fSChristoph Lameter } else if (t) 13018bccd85fSChristoph Lameter return -EINVAL; 13028bccd85fSChristoph Lameter } 13038bccd85fSChristoph Lameter nlongs = BITS_TO_LONGS(MAX_NUMNODES); 13048bccd85fSChristoph Lameter endmask = ~0UL; 13058bccd85fSChristoph Lameter } 13068bccd85fSChristoph Lameter 13078bccd85fSChristoph Lameter if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long))) 13088bccd85fSChristoph Lameter return -EFAULT; 13098bccd85fSChristoph Lameter nodes_addr(*nodes)[nlongs-1] &= endmask; 13108bccd85fSChristoph Lameter return 0; 13118bccd85fSChristoph Lameter } 13128bccd85fSChristoph Lameter 13138bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */ 13148bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode, 13158bccd85fSChristoph Lameter nodemask_t *nodes) 13168bccd85fSChristoph Lameter { 13178bccd85fSChristoph Lameter unsigned long copy = ALIGN(maxnode-1, 64) / 8; 13188bccd85fSChristoph Lameter const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long); 13198bccd85fSChristoph Lameter 13208bccd85fSChristoph Lameter if (copy > nbytes) { 13218bccd85fSChristoph Lameter if (copy > PAGE_SIZE) 13228bccd85fSChristoph Lameter return -EINVAL; 13238bccd85fSChristoph Lameter if (clear_user((char __user *)mask + nbytes, copy - nbytes)) 13248bccd85fSChristoph Lameter return -EFAULT; 13258bccd85fSChristoph Lameter copy = nbytes; 13268bccd85fSChristoph Lameter } 13278bccd85fSChristoph Lameter return copy_to_user(mask, nodes_addr(*nodes), copy) ? 
-EFAULT : 0; 13288bccd85fSChristoph Lameter } 13298bccd85fSChristoph Lameter 1330938bb9f5SHeiko Carstens SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len, 1331f7f28ca9SRasmus Villemoes unsigned long, mode, const unsigned long __user *, nmask, 1332938bb9f5SHeiko Carstens unsigned long, maxnode, unsigned, flags) 13338bccd85fSChristoph Lameter { 13348bccd85fSChristoph Lameter nodemask_t nodes; 13358bccd85fSChristoph Lameter int err; 1336028fec41SDavid Rientjes unsigned short mode_flags; 13378bccd85fSChristoph Lameter 1338028fec41SDavid Rientjes mode_flags = mode & MPOL_MODE_FLAGS; 1339028fec41SDavid Rientjes mode &= ~MPOL_MODE_FLAGS; 1340a3b51e01SDavid Rientjes if (mode >= MPOL_MAX) 1341a3b51e01SDavid Rientjes return -EINVAL; 13424c50bc01SDavid Rientjes if ((mode_flags & MPOL_F_STATIC_NODES) && 13434c50bc01SDavid Rientjes (mode_flags & MPOL_F_RELATIVE_NODES)) 13444c50bc01SDavid Rientjes return -EINVAL; 13458bccd85fSChristoph Lameter err = get_nodes(&nodes, nmask, maxnode); 13468bccd85fSChristoph Lameter if (err) 13478bccd85fSChristoph Lameter return err; 1348028fec41SDavid Rientjes return do_mbind(start, len, mode, mode_flags, &nodes, flags); 13498bccd85fSChristoph Lameter } 13508bccd85fSChristoph Lameter 13518bccd85fSChristoph Lameter /* Set the process memory policy */ 135223c8902dSRasmus Villemoes SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask, 1353938bb9f5SHeiko Carstens unsigned long, maxnode) 13548bccd85fSChristoph Lameter { 13558bccd85fSChristoph Lameter int err; 13568bccd85fSChristoph Lameter nodemask_t nodes; 1357028fec41SDavid Rientjes unsigned short flags; 13588bccd85fSChristoph Lameter 1359028fec41SDavid Rientjes flags = mode & MPOL_MODE_FLAGS; 1360028fec41SDavid Rientjes mode &= ~MPOL_MODE_FLAGS; 1361028fec41SDavid Rientjes if ((unsigned int)mode >= MPOL_MAX) 13628bccd85fSChristoph Lameter return -EINVAL; 13634c50bc01SDavid Rientjes if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES)) 13644c50bc01SDavid Rientjes return -EINVAL; 13658bccd85fSChristoph Lameter err = get_nodes(&nodes, nmask, maxnode); 13668bccd85fSChristoph Lameter if (err) 13678bccd85fSChristoph Lameter return err; 1368028fec41SDavid Rientjes return do_set_mempolicy(mode, flags, &nodes); 13698bccd85fSChristoph Lameter } 13708bccd85fSChristoph Lameter 1371938bb9f5SHeiko Carstens SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode, 1372938bb9f5SHeiko Carstens const unsigned long __user *, old_nodes, 1373938bb9f5SHeiko Carstens const unsigned long __user *, new_nodes) 137439743889SChristoph Lameter { 1375c69e8d9cSDavid Howells const struct cred *cred = current_cred(), *tcred; 1376596d7cfaSKOSAKI Motohiro struct mm_struct *mm = NULL; 137739743889SChristoph Lameter struct task_struct *task; 137839743889SChristoph Lameter nodemask_t task_nodes; 137939743889SChristoph Lameter int err; 1380596d7cfaSKOSAKI Motohiro nodemask_t *old; 1381596d7cfaSKOSAKI Motohiro nodemask_t *new; 1382596d7cfaSKOSAKI Motohiro NODEMASK_SCRATCH(scratch); 138339743889SChristoph Lameter 1384596d7cfaSKOSAKI Motohiro if (!scratch) 1385596d7cfaSKOSAKI Motohiro return -ENOMEM; 138639743889SChristoph Lameter 1387596d7cfaSKOSAKI Motohiro old = &scratch->mask1; 1388596d7cfaSKOSAKI Motohiro new = &scratch->mask2; 1389596d7cfaSKOSAKI Motohiro 1390596d7cfaSKOSAKI Motohiro err = get_nodes(old, old_nodes, maxnode); 139139743889SChristoph Lameter if (err) 1392596d7cfaSKOSAKI Motohiro goto out; 1393596d7cfaSKOSAKI Motohiro 1394596d7cfaSKOSAKI Motohiro err = get_nodes(new, new_nodes, 
maxnode); 1395596d7cfaSKOSAKI Motohiro if (err) 1396596d7cfaSKOSAKI Motohiro goto out; 139739743889SChristoph Lameter 139839743889SChristoph Lameter /* Find the mm_struct */ 139955cfaa3cSZeng Zhaoming rcu_read_lock(); 1400228ebcbeSPavel Emelyanov task = pid ? find_task_by_vpid(pid) : current; 140139743889SChristoph Lameter if (!task) { 140255cfaa3cSZeng Zhaoming rcu_read_unlock(); 1403596d7cfaSKOSAKI Motohiro err = -ESRCH; 1404596d7cfaSKOSAKI Motohiro goto out; 140539743889SChristoph Lameter } 14063268c63eSChristoph Lameter get_task_struct(task); 140739743889SChristoph Lameter 1408596d7cfaSKOSAKI Motohiro err = -EINVAL; 140939743889SChristoph Lameter 141039743889SChristoph Lameter /* 141139743889SChristoph Lameter * Check if this process has the right to modify the specified 141239743889SChristoph Lameter * process. The right exists if the process has administrative 14137f927fccSAlexey Dobriyan * capabilities, superuser privileges or the same 141439743889SChristoph Lameter * userid as the target process. 141539743889SChristoph Lameter */ 1416c69e8d9cSDavid Howells tcred = __task_cred(task); 1417b38a86ebSEric W. Biederman if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) && 1418b38a86ebSEric W. Biederman !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) && 141974c00241SChristoph Lameter !capable(CAP_SYS_NICE)) { 1420c69e8d9cSDavid Howells rcu_read_unlock(); 142139743889SChristoph Lameter err = -EPERM; 14223268c63eSChristoph Lameter goto out_put; 142339743889SChristoph Lameter } 1424c69e8d9cSDavid Howells rcu_read_unlock(); 142539743889SChristoph Lameter 142639743889SChristoph Lameter task_nodes = cpuset_mems_allowed(task); 142739743889SChristoph Lameter /* Is the user allowed to access the target nodes? */ 1428596d7cfaSKOSAKI Motohiro if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) { 142939743889SChristoph Lameter err = -EPERM; 14303268c63eSChristoph Lameter goto out_put; 143139743889SChristoph Lameter } 143239743889SChristoph Lameter 143301f13bd6SLai Jiangshan if (!nodes_subset(*new, node_states[N_MEMORY])) { 14343b42d28bSChristoph Lameter err = -EINVAL; 14353268c63eSChristoph Lameter goto out_put; 14363b42d28bSChristoph Lameter } 14373b42d28bSChristoph Lameter 143886c3a764SDavid Quigley err = security_task_movememory(task); 143986c3a764SDavid Quigley if (err) 14403268c63eSChristoph Lameter goto out_put; 144186c3a764SDavid Quigley 14423268c63eSChristoph Lameter mm = get_task_mm(task); 14433268c63eSChristoph Lameter put_task_struct(task); 1444f2a9ef88SSasha Levin 1445f2a9ef88SSasha Levin if (!mm) { 1446f2a9ef88SSasha Levin err = -EINVAL; 1447f2a9ef88SSasha Levin goto out; 1448f2a9ef88SSasha Levin } 1449f2a9ef88SSasha Levin 1450596d7cfaSKOSAKI Motohiro err = do_migrate_pages(mm, old, new, 145174c00241SChristoph Lameter capable(CAP_SYS_NICE) ? 
MPOL_MF_MOVE_ALL : MPOL_MF_MOVE); 14523268c63eSChristoph Lameter 145339743889SChristoph Lameter mmput(mm); 14543268c63eSChristoph Lameter out: 1455596d7cfaSKOSAKI Motohiro NODEMASK_SCRATCH_FREE(scratch); 1456596d7cfaSKOSAKI Motohiro 145739743889SChristoph Lameter return err; 14583268c63eSChristoph Lameter 14593268c63eSChristoph Lameter out_put: 14603268c63eSChristoph Lameter put_task_struct(task); 14613268c63eSChristoph Lameter goto out; 14623268c63eSChristoph Lameter 146339743889SChristoph Lameter } 146439743889SChristoph Lameter 146539743889SChristoph Lameter 14668bccd85fSChristoph Lameter /* Retrieve NUMA policy */ 1467938bb9f5SHeiko Carstens SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, 1468938bb9f5SHeiko Carstens unsigned long __user *, nmask, unsigned long, maxnode, 1469938bb9f5SHeiko Carstens unsigned long, addr, unsigned long, flags) 14708bccd85fSChristoph Lameter { 1471dbcb0f19SAdrian Bunk int err; 1472dbcb0f19SAdrian Bunk int uninitialized_var(pval); 14738bccd85fSChristoph Lameter nodemask_t nodes; 14748bccd85fSChristoph Lameter 14758bccd85fSChristoph Lameter if (nmask != NULL && maxnode < MAX_NUMNODES) 14768bccd85fSChristoph Lameter return -EINVAL; 14778bccd85fSChristoph Lameter 14788bccd85fSChristoph Lameter err = do_get_mempolicy(&pval, &nodes, addr, flags); 14798bccd85fSChristoph Lameter 14808bccd85fSChristoph Lameter if (err) 14818bccd85fSChristoph Lameter return err; 14828bccd85fSChristoph Lameter 14838bccd85fSChristoph Lameter if (policy && put_user(pval, policy)) 14848bccd85fSChristoph Lameter return -EFAULT; 14858bccd85fSChristoph Lameter 14868bccd85fSChristoph Lameter if (nmask) 14878bccd85fSChristoph Lameter err = copy_nodes_to_user(nmask, maxnode, &nodes); 14888bccd85fSChristoph Lameter 14898bccd85fSChristoph Lameter return err; 14908bccd85fSChristoph Lameter } 14918bccd85fSChristoph Lameter 14921da177e4SLinus Torvalds #ifdef CONFIG_COMPAT 14931da177e4SLinus Torvalds 1494c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, 1495c93e0f6cSHeiko Carstens compat_ulong_t __user *, nmask, 1496c93e0f6cSHeiko Carstens compat_ulong_t, maxnode, 1497c93e0f6cSHeiko Carstens compat_ulong_t, addr, compat_ulong_t, flags) 14981da177e4SLinus Torvalds { 14991da177e4SLinus Torvalds long err; 15001da177e4SLinus Torvalds unsigned long __user *nm = NULL; 15011da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 15021da177e4SLinus Torvalds DECLARE_BITMAP(bm, MAX_NUMNODES); 15031da177e4SLinus Torvalds 15041da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 15051da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 15061da177e4SLinus Torvalds 15071da177e4SLinus Torvalds if (nmask) 15081da177e4SLinus Torvalds nm = compat_alloc_user_space(alloc_size); 15091da177e4SLinus Torvalds 15101da177e4SLinus Torvalds err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags); 15111da177e4SLinus Torvalds 15121da177e4SLinus Torvalds if (!err && nmask) { 15132bbff6c7SKAMEZAWA Hiroyuki unsigned long copy_size; 15142bbff6c7SKAMEZAWA Hiroyuki copy_size = min_t(unsigned long, sizeof(bm), alloc_size); 15152bbff6c7SKAMEZAWA Hiroyuki err = copy_from_user(bm, nm, copy_size); 15161da177e4SLinus Torvalds /* ensure entire bitmap is zeroed */ 15171da177e4SLinus Torvalds err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8); 15181da177e4SLinus Torvalds err |= compat_put_bitmap(nmask, bm, nr_bits); 15191da177e4SLinus Torvalds } 15201da177e4SLinus Torvalds 15211da177e4SLinus Torvalds return err; 15221da177e4SLinus Torvalds } 
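/*
 * Illustrative user-space sketch (not part of the kernel sources): one
 * plausible way to drive the mbind()/set_mempolicy()/get_mempolicy()
 * entry points above through the libnuma <numaif.h> wrappers.  The node
 * numbers are arbitrary and assume nodes 0 and 1 are online; link the
 * program against -lnuma.  Guarded by #if 0 so it is never compiled here.
 */
#if 0
#include <numaif.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned long nodemask = (1UL << 0) | (1UL << 1);	/* nodes 0 and 1 */
	size_t len = 2UL << 20;					/* 2MB, page aligned */
	void *buf = aligned_alloc(4096, len);
	int mode;

	/* Interleave future faults in [buf, buf + len) across nodes 0 and 1. */
	if (mbind(buf, len, MPOL_INTERLEAVE, &nodemask,
		  8 * sizeof(nodemask), 0))
		perror("mbind");

	/* Restrict all further allocations of this task to node 0. */
	nodemask = 1UL << 0;
	if (set_mempolicy(MPOL_BIND, &nodemask, 8 * sizeof(nodemask)))
		perror("set_mempolicy");

	/* Read the task policy back; mode should now be MPOL_BIND. */
	if (get_mempolicy(&mode, NULL, 0, NULL, 0) == 0)
		printf("process policy mode: %d\n", mode);

	free(buf);
	return 0;
}
#endif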
15231da177e4SLinus Torvalds 1524c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask, 1525c93e0f6cSHeiko Carstens compat_ulong_t, maxnode) 15261da177e4SLinus Torvalds { 15271da177e4SLinus Torvalds long err = 0; 15281da177e4SLinus Torvalds unsigned long __user *nm = NULL; 15291da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 15301da177e4SLinus Torvalds DECLARE_BITMAP(bm, MAX_NUMNODES); 15311da177e4SLinus Torvalds 15321da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 15331da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 15341da177e4SLinus Torvalds 15351da177e4SLinus Torvalds if (nmask) { 15361da177e4SLinus Torvalds err = compat_get_bitmap(bm, nmask, nr_bits); 15371da177e4SLinus Torvalds nm = compat_alloc_user_space(alloc_size); 15381da177e4SLinus Torvalds err |= copy_to_user(nm, bm, alloc_size); 15391da177e4SLinus Torvalds } 15401da177e4SLinus Torvalds 15411da177e4SLinus Torvalds if (err) 15421da177e4SLinus Torvalds return -EFAULT; 15431da177e4SLinus Torvalds 15441da177e4SLinus Torvalds return sys_set_mempolicy(mode, nm, nr_bits+1); 15451da177e4SLinus Torvalds } 15461da177e4SLinus Torvalds 1547c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len, 1548c93e0f6cSHeiko Carstens compat_ulong_t, mode, compat_ulong_t __user *, nmask, 1549c93e0f6cSHeiko Carstens compat_ulong_t, maxnode, compat_ulong_t, flags) 15501da177e4SLinus Torvalds { 15511da177e4SLinus Torvalds long err = 0; 15521da177e4SLinus Torvalds unsigned long __user *nm = NULL; 15531da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 1554dfcd3c0dSAndi Kleen nodemask_t bm; 15551da177e4SLinus Torvalds 15561da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 15571da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 15581da177e4SLinus Torvalds 15591da177e4SLinus Torvalds if (nmask) { 1560dfcd3c0dSAndi Kleen err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits); 15611da177e4SLinus Torvalds nm = compat_alloc_user_space(alloc_size); 1562dfcd3c0dSAndi Kleen err |= copy_to_user(nm, nodes_addr(bm), alloc_size); 15631da177e4SLinus Torvalds } 15641da177e4SLinus Torvalds 15651da177e4SLinus Torvalds if (err) 15661da177e4SLinus Torvalds return -EFAULT; 15671da177e4SLinus Torvalds 15681da177e4SLinus Torvalds return sys_mbind(start, len, mode, nm, nr_bits+1, flags); 15691da177e4SLinus Torvalds } 15701da177e4SLinus Torvalds 15711da177e4SLinus Torvalds #endif 15721da177e4SLinus Torvalds 157374d2c3a0SOleg Nesterov struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, 157474d2c3a0SOleg Nesterov unsigned long addr) 15751da177e4SLinus Torvalds { 15768d90274bSOleg Nesterov struct mempolicy *pol = NULL; 15771da177e4SLinus Torvalds 15781da177e4SLinus Torvalds if (vma) { 1579480eccf9SLee Schermerhorn if (vma->vm_ops && vma->vm_ops->get_policy) { 15808d90274bSOleg Nesterov pol = vma->vm_ops->get_policy(vma, addr); 158100442ad0SMel Gorman } else if (vma->vm_policy) { 15821da177e4SLinus Torvalds pol = vma->vm_policy; 158300442ad0SMel Gorman 158400442ad0SMel Gorman /* 158500442ad0SMel Gorman * shmem_alloc_page() passes MPOL_F_SHARED policy with 158600442ad0SMel Gorman * a pseudo vma whose vma->vm_ops=NULL. 
Take a reference
158700442ad0SMel Gorman 			 * count on these policies which will be dropped by
158800442ad0SMel Gorman 			 * mpol_cond_put() later
158900442ad0SMel Gorman 			 */
159000442ad0SMel Gorman 			if (mpol_needs_cond_ref(pol))
159100442ad0SMel Gorman 				mpol_get(pol);
159200442ad0SMel Gorman 		}
15931da177e4SLinus Torvalds 	}
1594f15ca78eSOleg Nesterov 
159574d2c3a0SOleg Nesterov 	return pol;
159674d2c3a0SOleg Nesterov }
159774d2c3a0SOleg Nesterov 
159874d2c3a0SOleg Nesterov /*
1599dd6eecb9SOleg Nesterov  * get_vma_policy(@vma, @addr)
160074d2c3a0SOleg Nesterov  * @vma: virtual memory area whose policy is sought
160174d2c3a0SOleg Nesterov  * @addr: address in @vma for shared policy lookup
160274d2c3a0SOleg Nesterov  *
160374d2c3a0SOleg Nesterov  * Returns effective policy for a VMA at specified address.
1604dd6eecb9SOleg Nesterov  * Falls back to current->mempolicy or system default policy, as necessary.
160574d2c3a0SOleg Nesterov  * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
160674d2c3a0SOleg Nesterov  * count--added by the get_policy() vm_op, as appropriate--to protect against
160774d2c3a0SOleg Nesterov  * freeing by another task. It is the caller's responsibility to free the
160874d2c3a0SOleg Nesterov  * extra reference for shared policies.
160974d2c3a0SOleg Nesterov  */
1610dd6eecb9SOleg Nesterov static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1611dd6eecb9SOleg Nesterov 						unsigned long addr)
161274d2c3a0SOleg Nesterov {
161374d2c3a0SOleg Nesterov 	struct mempolicy *pol = __get_vma_policy(vma, addr);
161474d2c3a0SOleg Nesterov 
16158d90274bSOleg Nesterov 	if (!pol)
1616dd6eecb9SOleg Nesterov 		pol = get_task_policy(current);
16178d90274bSOleg Nesterov 
16181da177e4SLinus Torvalds 	return pol;
16191da177e4SLinus Torvalds }
16201da177e4SLinus Torvalds 
16216b6482bbSOleg Nesterov bool vma_policy_mof(struct vm_area_struct *vma)
1622fc314724SMel Gorman {
16236b6482bbSOleg Nesterov 	struct mempolicy *pol;
1624f15ca78eSOleg Nesterov 
1625fc314724SMel Gorman 	if (vma->vm_ops && vma->vm_ops->get_policy) {
1626fc314724SMel Gorman 		bool ret = false;
1627fc314724SMel Gorman 
1628fc314724SMel Gorman 		pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1629fc314724SMel Gorman 		if (pol && (pol->flags & MPOL_F_MOF))
1630fc314724SMel Gorman 			ret = true;
1631fc314724SMel Gorman 		mpol_cond_put(pol);
1632fc314724SMel Gorman 
1633fc314724SMel Gorman 		return ret;
16348d90274bSOleg Nesterov 	}
16358d90274bSOleg Nesterov 
1636fc314724SMel Gorman 	pol = vma->vm_policy;
16378d90274bSOleg Nesterov 	if (!pol)
16386b6482bbSOleg Nesterov 		pol = get_task_policy(current);
1639fc314724SMel Gorman 
1640fc314724SMel Gorman 	return pol->flags & MPOL_F_MOF;
1641fc314724SMel Gorman }
1642fc314724SMel Gorman 
1643d3eb1570SLai Jiangshan static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1644d3eb1570SLai Jiangshan {
1645d3eb1570SLai Jiangshan 	enum zone_type dynamic_policy_zone = policy_zone;
1646d3eb1570SLai Jiangshan 
1647d3eb1570SLai Jiangshan 	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1648d3eb1570SLai Jiangshan 
1649d3eb1570SLai Jiangshan 	/*
1650d3eb1570SLai Jiangshan 	 * if policy->v.nodes has movable memory only,
1651d3eb1570SLai Jiangshan 	 * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
1652d3eb1570SLai Jiangshan 	 *
1653d3eb1570SLai Jiangshan 	 * policy->v.nodes is intersected with node_states[N_MEMORY],
1654d3eb1570SLai Jiangshan 	 * so if the following test fails, it implies
1655d3eb1570SLai Jiangshan 	 * policy->v.nodes has movable memory only.
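	 *
	 * For example, if every node in the policy has ZONE_MOVABLE memory
	 * only (e.g. hot-added memory onlined as movable), a GFP_KERNEL
	 * allocation (gfp_zone() == ZONE_NORMAL) ignores the nodemask,
	 * while a GFP_HIGHUSER_MOVABLE allocation (gfp_zone() ==
	 * ZONE_MOVABLE) honours it.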
1656d3eb1570SLai Jiangshan */ 1657d3eb1570SLai Jiangshan if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY])) 1658d3eb1570SLai Jiangshan dynamic_policy_zone = ZONE_MOVABLE; 1659d3eb1570SLai Jiangshan 1660d3eb1570SLai Jiangshan return zone >= dynamic_policy_zone; 1661d3eb1570SLai Jiangshan } 1662d3eb1570SLai Jiangshan 166352cd3b07SLee Schermerhorn /* 166452cd3b07SLee Schermerhorn * Return a nodemask representing a mempolicy for filtering nodes for 166552cd3b07SLee Schermerhorn * page allocation 166652cd3b07SLee Schermerhorn */ 166752cd3b07SLee Schermerhorn static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy) 166819770b32SMel Gorman { 166919770b32SMel Gorman /* Lower zones don't get a nodemask applied for MPOL_BIND */ 167045c4745aSLee Schermerhorn if (unlikely(policy->mode == MPOL_BIND) && 1671d3eb1570SLai Jiangshan apply_policy_zone(policy, gfp_zone(gfp)) && 167219770b32SMel Gorman cpuset_nodemask_valid_mems_allowed(&policy->v.nodes)) 167319770b32SMel Gorman return &policy->v.nodes; 167419770b32SMel Gorman 167519770b32SMel Gorman return NULL; 167619770b32SMel Gorman } 167719770b32SMel Gorman 167852cd3b07SLee Schermerhorn /* Return a zonelist indicated by gfp for node representing a mempolicy */ 16792f5f9486SAndi Kleen static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy, 16802f5f9486SAndi Kleen int nd) 16811da177e4SLinus Torvalds { 168245c4745aSLee Schermerhorn switch (policy->mode) { 16831da177e4SLinus Torvalds case MPOL_PREFERRED: 1684fc36b8d3SLee Schermerhorn if (!(policy->flags & MPOL_F_LOCAL)) 16851da177e4SLinus Torvalds nd = policy->v.preferred_node; 16861da177e4SLinus Torvalds break; 16871da177e4SLinus Torvalds case MPOL_BIND: 168819770b32SMel Gorman /* 168952cd3b07SLee Schermerhorn * Normally, MPOL_BIND allocations are node-local within the 169052cd3b07SLee Schermerhorn * allowed nodemask. However, if __GFP_THISNODE is set and the 16916eb27e1fSBob Liu * current node isn't part of the mask, we use the zonelist for 169252cd3b07SLee Schermerhorn * the first node in the mask instead. 169319770b32SMel Gorman */ 169419770b32SMel Gorman if (unlikely(gfp & __GFP_THISNODE) && 169519770b32SMel Gorman unlikely(!node_isset(nd, policy->v.nodes))) 169619770b32SMel Gorman nd = first_node(policy->v.nodes); 169719770b32SMel Gorman break; 16981da177e4SLinus Torvalds default: 16991da177e4SLinus Torvalds BUG(); 17001da177e4SLinus Torvalds } 17010e88460dSMel Gorman return node_zonelist(nd, gfp); 17021da177e4SLinus Torvalds } 17031da177e4SLinus Torvalds 17041da177e4SLinus Torvalds /* Do dynamic interleaving for a process */ 17051da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy) 17061da177e4SLinus Torvalds { 17071da177e4SLinus Torvalds unsigned nid, next; 17081da177e4SLinus Torvalds struct task_struct *me = current; 17091da177e4SLinus Torvalds 17101da177e4SLinus Torvalds nid = me->il_next; 17110edaf86cSAndrew Morton next = next_node_in(nid, policy->v.nodes); 1712f5b087b5SDavid Rientjes if (next < MAX_NUMNODES) 17131da177e4SLinus Torvalds me->il_next = next; 17141da177e4SLinus Torvalds return nid; 17151da177e4SLinus Torvalds } 17161da177e4SLinus Torvalds 1717dc85da15SChristoph Lameter /* 1718dc85da15SChristoph Lameter * Depending on the memory policy provide a node from which to allocate the 1719dc85da15SChristoph Lameter * next slab entry. 
1720dc85da15SChristoph Lameter */ 17212a389610SDavid Rientjes unsigned int mempolicy_slab_node(void) 1722dc85da15SChristoph Lameter { 1723e7b691b0SAndi Kleen struct mempolicy *policy; 17242a389610SDavid Rientjes int node = numa_mem_id(); 1725e7b691b0SAndi Kleen 1726e7b691b0SAndi Kleen if (in_interrupt()) 17272a389610SDavid Rientjes return node; 1728e7b691b0SAndi Kleen 1729e7b691b0SAndi Kleen policy = current->mempolicy; 1730fc36b8d3SLee Schermerhorn if (!policy || policy->flags & MPOL_F_LOCAL) 17312a389610SDavid Rientjes return node; 1732765c4507SChristoph Lameter 1733bea904d5SLee Schermerhorn switch (policy->mode) { 1734bea904d5SLee Schermerhorn case MPOL_PREFERRED: 1735fc36b8d3SLee Schermerhorn /* 1736fc36b8d3SLee Schermerhorn * handled MPOL_F_LOCAL above 1737fc36b8d3SLee Schermerhorn */ 1738bea904d5SLee Schermerhorn return policy->v.preferred_node; 1739bea904d5SLee Schermerhorn 1740dc85da15SChristoph Lameter case MPOL_INTERLEAVE: 1741dc85da15SChristoph Lameter return interleave_nodes(policy); 1742dc85da15SChristoph Lameter 1743dd1a239fSMel Gorman case MPOL_BIND: { 1744c33d6c06SMel Gorman struct zoneref *z; 1745c33d6c06SMel Gorman 1746dc85da15SChristoph Lameter /* 1747dc85da15SChristoph Lameter * Follow bind policy behavior and start allocation at the 1748dc85da15SChristoph Lameter * first node. 1749dc85da15SChristoph Lameter */ 175019770b32SMel Gorman struct zonelist *zonelist; 175119770b32SMel Gorman enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL); 1752*c9634cf0SAneesh Kumar K.V zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK]; 1753c33d6c06SMel Gorman z = first_zones_zonelist(zonelist, highest_zoneidx, 1754c33d6c06SMel Gorman &policy->v.nodes); 1755c33d6c06SMel Gorman return z->zone ? z->zone->node : node; 1756dd1a239fSMel Gorman } 1757dc85da15SChristoph Lameter 1758dc85da15SChristoph Lameter default: 1759bea904d5SLee Schermerhorn BUG(); 1760dc85da15SChristoph Lameter } 1761dc85da15SChristoph Lameter } 1762dc85da15SChristoph Lameter 1763fee83b3aSAndrew Morton /* 1764fee83b3aSAndrew Morton * Do static interleaving for a VMA with known offset @n. Returns the n'th 1765fee83b3aSAndrew Morton * node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the 1766fee83b3aSAndrew Morton * number of present nodes. 
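 *
 * For example, with pol->v.nodes = {1,3,5} and n = 7: nnodes is 3,
 * 7 % 3 == 1, so the walk starts at node 1, advances once, and
 * returns node 3.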
1767fee83b3aSAndrew Morton */ 17681da177e4SLinus Torvalds static unsigned offset_il_node(struct mempolicy *pol, 1769fee83b3aSAndrew Morton struct vm_area_struct *vma, unsigned long n) 17701da177e4SLinus Torvalds { 1771dfcd3c0dSAndi Kleen unsigned nnodes = nodes_weight(pol->v.nodes); 1772f5b087b5SDavid Rientjes unsigned target; 1773fee83b3aSAndrew Morton int i; 1774fee83b3aSAndrew Morton int nid; 17751da177e4SLinus Torvalds 1776f5b087b5SDavid Rientjes if (!nnodes) 1777f5b087b5SDavid Rientjes return numa_node_id(); 1778fee83b3aSAndrew Morton target = (unsigned int)n % nnodes; 1779fee83b3aSAndrew Morton nid = first_node(pol->v.nodes); 1780fee83b3aSAndrew Morton for (i = 0; i < target; i++) 1781dfcd3c0dSAndi Kleen nid = next_node(nid, pol->v.nodes); 17821da177e4SLinus Torvalds return nid; 17831da177e4SLinus Torvalds } 17841da177e4SLinus Torvalds 17855da7ca86SChristoph Lameter /* Determine a node number for interleave */ 17865da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol, 17875da7ca86SChristoph Lameter struct vm_area_struct *vma, unsigned long addr, int shift) 17885da7ca86SChristoph Lameter { 17895da7ca86SChristoph Lameter if (vma) { 17905da7ca86SChristoph Lameter unsigned long off; 17915da7ca86SChristoph Lameter 17923b98b087SNishanth Aravamudan /* 17933b98b087SNishanth Aravamudan * for small pages, there is no difference between 17943b98b087SNishanth Aravamudan * shift and PAGE_SHIFT, so the bit-shift is safe. 17953b98b087SNishanth Aravamudan * for huge pages, since vm_pgoff is in units of small 17963b98b087SNishanth Aravamudan * pages, we need to shift off the always 0 bits to get 17973b98b087SNishanth Aravamudan * a useful offset. 17983b98b087SNishanth Aravamudan */ 17993b98b087SNishanth Aravamudan BUG_ON(shift < PAGE_SHIFT); 18003b98b087SNishanth Aravamudan off = vma->vm_pgoff >> (shift - PAGE_SHIFT); 18015da7ca86SChristoph Lameter off += (addr - vma->vm_start) >> shift; 18025da7ca86SChristoph Lameter return offset_il_node(pol, vma, off); 18035da7ca86SChristoph Lameter } else 18045da7ca86SChristoph Lameter return interleave_nodes(pol); 18055da7ca86SChristoph Lameter } 18065da7ca86SChristoph Lameter 180700ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS 1808480eccf9SLee Schermerhorn /* 1809480eccf9SLee Schermerhorn * huge_zonelist(@vma, @addr, @gfp_flags, @mpol) 1810b46e14acSFabian Frederick * @vma: virtual memory area whose policy is sought 1811b46e14acSFabian Frederick * @addr: address in @vma for shared policy lookup and interleave policy 1812b46e14acSFabian Frederick * @gfp_flags: for requested zone 1813b46e14acSFabian Frederick * @mpol: pointer to mempolicy pointer for reference counted mempolicy 1814b46e14acSFabian Frederick * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask 1815480eccf9SLee Schermerhorn * 181652cd3b07SLee Schermerhorn * Returns a zonelist suitable for a huge page allocation and a pointer 181752cd3b07SLee Schermerhorn * to the struct mempolicy for conditional unref after allocation. 181852cd3b07SLee Schermerhorn * If the effective policy is 'BIND, returns a pointer to the mempolicy's 181952cd3b07SLee Schermerhorn * @nodemask for filtering the zonelist. 
1820c0ff7453SMiao Xie  *
1821d26914d1SMel Gorman  * Must be protected by read_mems_allowed_begin()
1822480eccf9SLee Schermerhorn  */
1823396faf03SMel Gorman struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
182419770b32SMel Gorman 				gfp_t gfp_flags, struct mempolicy **mpol,
182519770b32SMel Gorman 				nodemask_t **nodemask)
18265da7ca86SChristoph Lameter {
1827480eccf9SLee Schermerhorn 	struct zonelist *zl;
18285da7ca86SChristoph Lameter 
1829dd6eecb9SOleg Nesterov 	*mpol = get_vma_policy(vma, addr);
183019770b32SMel Gorman 	*nodemask = NULL;	/* assume !MPOL_BIND */
18315da7ca86SChristoph Lameter 
183252cd3b07SLee Schermerhorn 	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
183352cd3b07SLee Schermerhorn 		zl = node_zonelist(interleave_nid(*mpol, vma, addr,
1834a5516438SAndi Kleen 				huge_page_shift(hstate_vma(vma))), gfp_flags);
183552cd3b07SLee Schermerhorn 	} else {
18362f5f9486SAndi Kleen 		zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
183752cd3b07SLee Schermerhorn 		if ((*mpol)->mode == MPOL_BIND)
183852cd3b07SLee Schermerhorn 			*nodemask = &(*mpol)->v.nodes;
1839480eccf9SLee Schermerhorn 	}
1840480eccf9SLee Schermerhorn 	return zl;
18415da7ca86SChristoph Lameter }
184206808b08SLee Schermerhorn 
184306808b08SLee Schermerhorn /*
184406808b08SLee Schermerhorn  * init_nodemask_of_mempolicy
184506808b08SLee Schermerhorn  *
184606808b08SLee Schermerhorn  * If the current task's mempolicy is "default" [NULL], return 'false'
184706808b08SLee Schermerhorn  * to indicate default policy.  Otherwise, extract the policy nodemask
184806808b08SLee Schermerhorn  * for 'bind' or 'interleave' policy into the argument nodemask, or
184906808b08SLee Schermerhorn  * initialize the argument nodemask to contain the single node for
185006808b08SLee Schermerhorn  * 'preferred' or 'local' policy and return 'true' to indicate presence
185106808b08SLee Schermerhorn  * of non-default mempolicy.
185206808b08SLee Schermerhorn  *
185306808b08SLee Schermerhorn  * We don't bother with reference counting the mempolicy [mpol_get/put]
185406808b08SLee Schermerhorn  * because the current task is examining its own mempolicy and a task's
185506808b08SLee Schermerhorn  * mempolicy is only ever changed by the task itself.
185606808b08SLee Schermerhorn  *
185706808b08SLee Schermerhorn  * N.B., it is the caller's responsibility to free a returned nodemask.
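 *
 * For example, MPOL_PREFERRED on node 2 yields a mask of just {2}, while
 * MPOL_INTERLEAVE over nodes {0,1} copies {0,1} into the mask.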
185806808b08SLee Schermerhorn  */
185906808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask)
186006808b08SLee Schermerhorn {
186106808b08SLee Schermerhorn 	struct mempolicy *mempolicy;
186206808b08SLee Schermerhorn 	int nid;
186306808b08SLee Schermerhorn 
186406808b08SLee Schermerhorn 	if (!(mask && current->mempolicy))
186506808b08SLee Schermerhorn 		return false;
186606808b08SLee Schermerhorn 
1867c0ff7453SMiao Xie 	task_lock(current);
186806808b08SLee Schermerhorn 	mempolicy = current->mempolicy;
186906808b08SLee Schermerhorn 	switch (mempolicy->mode) {
187006808b08SLee Schermerhorn 	case MPOL_PREFERRED:
187106808b08SLee Schermerhorn 		if (mempolicy->flags & MPOL_F_LOCAL)
187206808b08SLee Schermerhorn 			nid = numa_node_id();
187306808b08SLee Schermerhorn 		else
187406808b08SLee Schermerhorn 			nid = mempolicy->v.preferred_node;
187506808b08SLee Schermerhorn 		init_nodemask_of_node(mask, nid);
187606808b08SLee Schermerhorn 		break;
187706808b08SLee Schermerhorn 
187806808b08SLee Schermerhorn 	case MPOL_BIND:
187906808b08SLee Schermerhorn 		/* Fall through */
188006808b08SLee Schermerhorn 	case MPOL_INTERLEAVE:
188106808b08SLee Schermerhorn 		*mask =  mempolicy->v.nodes;
188206808b08SLee Schermerhorn 		break;
188306808b08SLee Schermerhorn 
188406808b08SLee Schermerhorn 	default:
188506808b08SLee Schermerhorn 		BUG();
188606808b08SLee Schermerhorn 	}
1887c0ff7453SMiao Xie 	task_unlock(current);
188806808b08SLee Schermerhorn 
188906808b08SLee Schermerhorn 	return true;
189006808b08SLee Schermerhorn }
189100ac59adSChen, Kenneth W #endif
18925da7ca86SChristoph Lameter 
18936f48d0ebSDavid Rientjes /*
18946f48d0ebSDavid Rientjes  * mempolicy_nodemask_intersects
18956f48d0ebSDavid Rientjes  *
18966f48d0ebSDavid Rientjes  * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
18976f48d0ebSDavid Rientjes  * policy.  Otherwise, check for intersection between mask and the policy
18986f48d0ebSDavid Rientjes  * nodemask for 'bind' or 'interleave' policy.  For 'preferred' or 'local'
18996f48d0ebSDavid Rientjes  * policy, always return true since it may allocate elsewhere on fallback.
19006f48d0ebSDavid Rientjes  *
19016f48d0ebSDavid Rientjes  * Takes task_lock(tsk) to prevent freeing of its mempolicy.
19026f48d0ebSDavid Rientjes  */
19036f48d0ebSDavid Rientjes bool mempolicy_nodemask_intersects(struct task_struct *tsk,
19046f48d0ebSDavid Rientjes 					const nodemask_t *mask)
19056f48d0ebSDavid Rientjes {
19066f48d0ebSDavid Rientjes 	struct mempolicy *mempolicy;
19076f48d0ebSDavid Rientjes 	bool ret = true;
19086f48d0ebSDavid Rientjes 
19096f48d0ebSDavid Rientjes 	if (!mask)
19106f48d0ebSDavid Rientjes 		return ret;
19116f48d0ebSDavid Rientjes 	task_lock(tsk);
19126f48d0ebSDavid Rientjes 	mempolicy = tsk->mempolicy;
19136f48d0ebSDavid Rientjes 	if (!mempolicy)
19146f48d0ebSDavid Rientjes 		goto out;
19156f48d0ebSDavid Rientjes 
19166f48d0ebSDavid Rientjes 	switch (mempolicy->mode) {
19176f48d0ebSDavid Rientjes 	case MPOL_PREFERRED:
19186f48d0ebSDavid Rientjes 		/*
19196f48d0ebSDavid Rientjes 		 * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to
19206f48d0ebSDavid Rientjes 		 * allocate from, they may fall back to other nodes when oom.
19216f48d0ebSDavid Rientjes 		 * Thus, it's possible for tsk to have allocated memory from
19226f48d0ebSDavid Rientjes 		 * nodes in mask.
19236f48d0ebSDavid Rientjes */ 19246f48d0ebSDavid Rientjes break; 19256f48d0ebSDavid Rientjes case MPOL_BIND: 19266f48d0ebSDavid Rientjes case MPOL_INTERLEAVE: 19276f48d0ebSDavid Rientjes ret = nodes_intersects(mempolicy->v.nodes, *mask); 19286f48d0ebSDavid Rientjes break; 19296f48d0ebSDavid Rientjes default: 19306f48d0ebSDavid Rientjes BUG(); 19316f48d0ebSDavid Rientjes } 19326f48d0ebSDavid Rientjes out: 19336f48d0ebSDavid Rientjes task_unlock(tsk); 19346f48d0ebSDavid Rientjes return ret; 19356f48d0ebSDavid Rientjes } 19366f48d0ebSDavid Rientjes 19371da177e4SLinus Torvalds /* Allocate a page in interleaved policy. 19381da177e4SLinus Torvalds Own path because it needs to do special accounting. */ 1939662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, 1940662f3a0bSAndi Kleen unsigned nid) 19411da177e4SLinus Torvalds { 19421da177e4SLinus Torvalds struct zonelist *zl; 19431da177e4SLinus Torvalds struct page *page; 19441da177e4SLinus Torvalds 19450e88460dSMel Gorman zl = node_zonelist(nid, gfp); 19461da177e4SLinus Torvalds page = __alloc_pages(gfp, order, zl); 1947dd1a239fSMel Gorman if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0])) 1948ca889e6cSChristoph Lameter inc_zone_page_state(page, NUMA_INTERLEAVE_HIT); 19491da177e4SLinus Torvalds return page; 19501da177e4SLinus Torvalds } 19511da177e4SLinus Torvalds 19521da177e4SLinus Torvalds /** 19530bbbc0b3SAndrea Arcangeli * alloc_pages_vma - Allocate a page for a VMA. 19541da177e4SLinus Torvalds * 19551da177e4SLinus Torvalds * @gfp: 19561da177e4SLinus Torvalds * %GFP_USER user allocation. 19571da177e4SLinus Torvalds * %GFP_KERNEL kernel allocations, 19581da177e4SLinus Torvalds * %GFP_HIGHMEM highmem/user allocations, 19591da177e4SLinus Torvalds * %GFP_FS allocation should not call back into a file system. 19601da177e4SLinus Torvalds * %GFP_ATOMIC don't sleep. 19611da177e4SLinus Torvalds * 19620bbbc0b3SAndrea Arcangeli * @order:Order of the GFP allocation. 19631da177e4SLinus Torvalds * @vma: Pointer to VMA or NULL if not available. 19641da177e4SLinus Torvalds * @addr: Virtual Address of the allocation. Must be inside the VMA. 1965be97a41bSVlastimil Babka * @node: Which node to prefer for allocation (modulo policy). 1966be97a41bSVlastimil Babka * @hugepage: for hugepages try only the preferred node if possible 19671da177e4SLinus Torvalds * 19681da177e4SLinus Torvalds * This function allocates a page from the kernel page pool and applies 19691da177e4SLinus Torvalds * a NUMA policy associated with the VMA or the current process. 19701da177e4SLinus Torvalds * When VMA is not NULL caller must hold down_read on the mmap_sem of the 19711da177e4SLinus Torvalds * mm_struct of the VMA to prevent it from going away. Should be used for 1972be97a41bSVlastimil Babka * all allocations for pages that will be mapped into user space. Returns 1973be97a41bSVlastimil Babka * NULL when no page can be allocated. 
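 *
 * For instance, new_page() above uses the order-0 wrapper
 * alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address) to allocate
 * migration target pages for mbind().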
19741da177e4SLinus Torvalds */ 19751da177e4SLinus Torvalds struct page * 19760bbbc0b3SAndrea Arcangeli alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, 1977be97a41bSVlastimil Babka unsigned long addr, int node, bool hugepage) 19781da177e4SLinus Torvalds { 1979cc9a6c87SMel Gorman struct mempolicy *pol; 1980c0ff7453SMiao Xie struct page *page; 1981cc9a6c87SMel Gorman unsigned int cpuset_mems_cookie; 1982be97a41bSVlastimil Babka struct zonelist *zl; 1983be97a41bSVlastimil Babka nodemask_t *nmask; 19841da177e4SLinus Torvalds 1985cc9a6c87SMel Gorman retry_cpuset: 1986dd6eecb9SOleg Nesterov pol = get_vma_policy(vma, addr); 1987d26914d1SMel Gorman cpuset_mems_cookie = read_mems_allowed_begin(); 1988cc9a6c87SMel Gorman 1989be97a41bSVlastimil Babka if (pol->mode == MPOL_INTERLEAVE) { 19901da177e4SLinus Torvalds unsigned nid; 19915da7ca86SChristoph Lameter 19928eac563cSAndi Kleen nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order); 199352cd3b07SLee Schermerhorn mpol_cond_put(pol); 19940bbbc0b3SAndrea Arcangeli page = alloc_page_interleave(gfp, order, nid); 1995be97a41bSVlastimil Babka goto out; 19961da177e4SLinus Torvalds } 19971da177e4SLinus Torvalds 19980867a57cSVlastimil Babka if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) { 19990867a57cSVlastimil Babka int hpage_node = node; 20000867a57cSVlastimil Babka 20010867a57cSVlastimil Babka /* 20020867a57cSVlastimil Babka * For hugepage allocation and non-interleave policy which 20030867a57cSVlastimil Babka * allows the current node (or other explicitly preferred 20040867a57cSVlastimil Babka * node) we only try to allocate from the current/preferred 20050867a57cSVlastimil Babka * node and don't fall back to other nodes, as the cost of 20060867a57cSVlastimil Babka * remote accesses would likely offset THP benefits. 20070867a57cSVlastimil Babka * 20080867a57cSVlastimil Babka * If the policy is interleave, or does not allow the current 20090867a57cSVlastimil Babka * node in its nodemask, we allocate the standard way. 20100867a57cSVlastimil Babka */ 20110867a57cSVlastimil Babka if (pol->mode == MPOL_PREFERRED && 20120867a57cSVlastimil Babka !(pol->flags & MPOL_F_LOCAL)) 20130867a57cSVlastimil Babka hpage_node = pol->v.preferred_node; 20140867a57cSVlastimil Babka 20150867a57cSVlastimil Babka nmask = policy_nodemask(gfp, pol); 20160867a57cSVlastimil Babka if (!nmask || node_isset(hpage_node, *nmask)) { 20170867a57cSVlastimil Babka mpol_cond_put(pol); 201896db800fSVlastimil Babka page = __alloc_pages_node(hpage_node, 20190867a57cSVlastimil Babka gfp | __GFP_THISNODE, order); 20200867a57cSVlastimil Babka goto out; 20210867a57cSVlastimil Babka } 20220867a57cSVlastimil Babka } 20230867a57cSVlastimil Babka 2024077fcf11SAneesh Kumar K.V nmask = policy_nodemask(gfp, pol); 2025be97a41bSVlastimil Babka zl = policy_zonelist(gfp, pol, node); 2026077fcf11SAneesh Kumar K.V mpol_cond_put(pol); 2027be97a41bSVlastimil Babka page = __alloc_pages_nodemask(gfp, order, zl, nmask); 2028be97a41bSVlastimil Babka out: 2029be97a41bSVlastimil Babka if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) 2030077fcf11SAneesh Kumar K.V goto retry_cpuset; 2031077fcf11SAneesh Kumar K.V return page; 2032077fcf11SAneesh Kumar K.V } 2033077fcf11SAneesh Kumar K.V 20341da177e4SLinus Torvalds /** 20351da177e4SLinus Torvalds * alloc_pages_current - Allocate pages. 
20361da177e4SLinus Torvalds * 20371da177e4SLinus Torvalds * @gfp: 20381da177e4SLinus Torvalds * %GFP_USER user allocation, 20391da177e4SLinus Torvalds * %GFP_KERNEL kernel allocation, 20401da177e4SLinus Torvalds * %GFP_HIGHMEM highmem allocation, 20411da177e4SLinus Torvalds * %GFP_FS don't call back into a file system. 20421da177e4SLinus Torvalds * %GFP_ATOMIC don't sleep. 20431da177e4SLinus Torvalds * @order: Power of two of allocation size in pages. 0 is a single page. 20441da177e4SLinus Torvalds * 20451da177e4SLinus Torvalds * Allocate a page from the kernel page pool. When not in 20461da177e4SLinus Torvalds * interrupt context and apply the current process NUMA policy. 20471da177e4SLinus Torvalds * Returns NULL when no page can be allocated. 20481da177e4SLinus Torvalds * 2049cf2a473cSPaul Jackson * Don't call cpuset_update_task_memory_state() unless 20501da177e4SLinus Torvalds * 1) it's ok to take cpuset_sem (can WAIT), and 20511da177e4SLinus Torvalds * 2) allocating for current task (not interrupt). 20521da177e4SLinus Torvalds */ 2053dd0fc66fSAl Viro struct page *alloc_pages_current(gfp_t gfp, unsigned order) 20541da177e4SLinus Torvalds { 20558d90274bSOleg Nesterov struct mempolicy *pol = &default_policy; 2056c0ff7453SMiao Xie struct page *page; 2057cc9a6c87SMel Gorman unsigned int cpuset_mems_cookie; 20581da177e4SLinus Torvalds 20598d90274bSOleg Nesterov if (!in_interrupt() && !(gfp & __GFP_THISNODE)) 20608d90274bSOleg Nesterov pol = get_task_policy(current); 206152cd3b07SLee Schermerhorn 2062cc9a6c87SMel Gorman retry_cpuset: 2063d26914d1SMel Gorman cpuset_mems_cookie = read_mems_allowed_begin(); 2064cc9a6c87SMel Gorman 206552cd3b07SLee Schermerhorn /* 206652cd3b07SLee Schermerhorn * No reference counting needed for current->mempolicy 206752cd3b07SLee Schermerhorn * nor system default_policy 206852cd3b07SLee Schermerhorn */ 206945c4745aSLee Schermerhorn if (pol->mode == MPOL_INTERLEAVE) 2070c0ff7453SMiao Xie page = alloc_page_interleave(gfp, order, interleave_nodes(pol)); 2071c0ff7453SMiao Xie else 2072c0ff7453SMiao Xie page = __alloc_pages_nodemask(gfp, order, 20735c4b4be3SAndi Kleen policy_zonelist(gfp, pol, numa_node_id()), 20745c4b4be3SAndi Kleen policy_nodemask(gfp, pol)); 2075cc9a6c87SMel Gorman 2076d26914d1SMel Gorman if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) 2077cc9a6c87SMel Gorman goto retry_cpuset; 2078cc9a6c87SMel Gorman 2079c0ff7453SMiao Xie return page; 20801da177e4SLinus Torvalds } 20811da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_pages_current); 20821da177e4SLinus Torvalds 2083ef0855d3SOleg Nesterov int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst) 2084ef0855d3SOleg Nesterov { 2085ef0855d3SOleg Nesterov struct mempolicy *pol = mpol_dup(vma_policy(src)); 2086ef0855d3SOleg Nesterov 2087ef0855d3SOleg Nesterov if (IS_ERR(pol)) 2088ef0855d3SOleg Nesterov return PTR_ERR(pol); 2089ef0855d3SOleg Nesterov dst->vm_policy = pol; 2090ef0855d3SOleg Nesterov return 0; 2091ef0855d3SOleg Nesterov } 2092ef0855d3SOleg Nesterov 20934225399aSPaul Jackson /* 2094846a16bfSLee Schermerhorn * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it 20954225399aSPaul Jackson * rebinds the mempolicy its copying by calling mpol_rebind_policy() 20964225399aSPaul Jackson * with the mems_allowed returned by cpuset_mems_allowed(). This 20974225399aSPaul Jackson * keeps mempolicies cpuset relative after its cpuset moves. See 20984225399aSPaul Jackson * further kernel/cpuset.c update_nodemask(). 
2099708c1bbcSMiao Xie * 2100708c1bbcSMiao Xie * current's mempolicy may be rebinded by the other task(the task that changes 2101708c1bbcSMiao Xie * cpuset's mems), so we needn't do rebind work for current task. 21024225399aSPaul Jackson */ 21034225399aSPaul Jackson 2104846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */ 2105846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old) 21061da177e4SLinus Torvalds { 21071da177e4SLinus Torvalds struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL); 21081da177e4SLinus Torvalds 21091da177e4SLinus Torvalds if (!new) 21101da177e4SLinus Torvalds return ERR_PTR(-ENOMEM); 2111708c1bbcSMiao Xie 2112708c1bbcSMiao Xie /* task's mempolicy is protected by alloc_lock */ 2113708c1bbcSMiao Xie if (old == current->mempolicy) { 2114708c1bbcSMiao Xie task_lock(current); 2115708c1bbcSMiao Xie *new = *old; 2116708c1bbcSMiao Xie task_unlock(current); 2117708c1bbcSMiao Xie } else 2118708c1bbcSMiao Xie *new = *old; 2119708c1bbcSMiao Xie 21204225399aSPaul Jackson if (current_cpuset_is_being_rebound()) { 21214225399aSPaul Jackson nodemask_t mems = cpuset_mems_allowed(current); 2122708c1bbcSMiao Xie if (new->flags & MPOL_F_REBINDING) 2123708c1bbcSMiao Xie mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2); 2124708c1bbcSMiao Xie else 2125708c1bbcSMiao Xie mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE); 21264225399aSPaul Jackson } 21271da177e4SLinus Torvalds atomic_set(&new->refcnt, 1); 21281da177e4SLinus Torvalds return new; 21291da177e4SLinus Torvalds } 21301da177e4SLinus Torvalds 21311da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */ 2132fcfb4dccSKOSAKI Motohiro bool __mpol_equal(struct mempolicy *a, struct mempolicy *b) 21331da177e4SLinus Torvalds { 21341da177e4SLinus Torvalds if (!a || !b) 2135fcfb4dccSKOSAKI Motohiro return false; 213645c4745aSLee Schermerhorn if (a->mode != b->mode) 2137fcfb4dccSKOSAKI Motohiro return false; 213819800502SBob Liu if (a->flags != b->flags) 2139fcfb4dccSKOSAKI Motohiro return false; 214019800502SBob Liu if (mpol_store_user_nodemask(a)) 214119800502SBob Liu if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask)) 2142fcfb4dccSKOSAKI Motohiro return false; 214319800502SBob Liu 214445c4745aSLee Schermerhorn switch (a->mode) { 214519770b32SMel Gorman case MPOL_BIND: 214619770b32SMel Gorman /* Fall through */ 21471da177e4SLinus Torvalds case MPOL_INTERLEAVE: 2148fcfb4dccSKOSAKI Motohiro return !!nodes_equal(a->v.nodes, b->v.nodes); 21491da177e4SLinus Torvalds case MPOL_PREFERRED: 215075719661SNamhyung Kim return a->v.preferred_node == b->v.preferred_node; 21511da177e4SLinus Torvalds default: 21521da177e4SLinus Torvalds BUG(); 2153fcfb4dccSKOSAKI Motohiro return false; 21541da177e4SLinus Torvalds } 21551da177e4SLinus Torvalds } 21561da177e4SLinus Torvalds 21571da177e4SLinus Torvalds /* 21581da177e4SLinus Torvalds * Shared memory backing store policy support. 21591da177e4SLinus Torvalds * 21601da177e4SLinus Torvalds * Remember policies even when nobody has shared memory mapped. 21611da177e4SLinus Torvalds * The policies are kept in Red-Black tree linked from the inode. 21624a8c7bb5SNathan Zimmer * They are protected by the sp->lock rwlock, which should be held 21631da177e4SLinus Torvalds * for any accesses to the tree. 21641da177e4SLinus Torvalds */ 21651da177e4SLinus Torvalds 21664a8c7bb5SNathan Zimmer /* 21674a8c7bb5SNathan Zimmer * lookup first element intersecting start-end. 
Caller holds sp->lock for 21684a8c7bb5SNathan Zimmer * reading or for writing 21694a8c7bb5SNathan Zimmer */ 21701da177e4SLinus Torvalds static struct sp_node * 21711da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end) 21721da177e4SLinus Torvalds { 21731da177e4SLinus Torvalds struct rb_node *n = sp->root.rb_node; 21741da177e4SLinus Torvalds 21751da177e4SLinus Torvalds while (n) { 21761da177e4SLinus Torvalds struct sp_node *p = rb_entry(n, struct sp_node, nd); 21771da177e4SLinus Torvalds 21781da177e4SLinus Torvalds if (start >= p->end) 21791da177e4SLinus Torvalds n = n->rb_right; 21801da177e4SLinus Torvalds else if (end <= p->start) 21811da177e4SLinus Torvalds n = n->rb_left; 21821da177e4SLinus Torvalds else 21831da177e4SLinus Torvalds break; 21841da177e4SLinus Torvalds } 21851da177e4SLinus Torvalds if (!n) 21861da177e4SLinus Torvalds return NULL; 21871da177e4SLinus Torvalds for (;;) { 21881da177e4SLinus Torvalds struct sp_node *w = NULL; 21891da177e4SLinus Torvalds struct rb_node *prev = rb_prev(n); 21901da177e4SLinus Torvalds if (!prev) 21911da177e4SLinus Torvalds break; 21921da177e4SLinus Torvalds w = rb_entry(prev, struct sp_node, nd); 21931da177e4SLinus Torvalds if (w->end <= start) 21941da177e4SLinus Torvalds break; 21951da177e4SLinus Torvalds n = prev; 21961da177e4SLinus Torvalds } 21971da177e4SLinus Torvalds return rb_entry(n, struct sp_node, nd); 21981da177e4SLinus Torvalds } 21991da177e4SLinus Torvalds 22004a8c7bb5SNathan Zimmer /* 22014a8c7bb5SNathan Zimmer * Insert a new shared policy into the list. Caller holds sp->lock for 22024a8c7bb5SNathan Zimmer * writing. 22034a8c7bb5SNathan Zimmer */ 22041da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new) 22051da177e4SLinus Torvalds { 22061da177e4SLinus Torvalds struct rb_node **p = &sp->root.rb_node; 22071da177e4SLinus Torvalds struct rb_node *parent = NULL; 22081da177e4SLinus Torvalds struct sp_node *nd; 22091da177e4SLinus Torvalds 22101da177e4SLinus Torvalds while (*p) { 22111da177e4SLinus Torvalds parent = *p; 22121da177e4SLinus Torvalds nd = rb_entry(parent, struct sp_node, nd); 22131da177e4SLinus Torvalds if (new->start < nd->start) 22141da177e4SLinus Torvalds p = &(*p)->rb_left; 22151da177e4SLinus Torvalds else if (new->end > nd->end) 22161da177e4SLinus Torvalds p = &(*p)->rb_right; 22171da177e4SLinus Torvalds else 22181da177e4SLinus Torvalds BUG(); 22191da177e4SLinus Torvalds } 22201da177e4SLinus Torvalds rb_link_node(&new->nd, parent, p); 22211da177e4SLinus Torvalds rb_insert_color(&new->nd, &sp->root); 2222140d5a49SPaul Mundt pr_debug("inserting %lx-%lx: %d\n", new->start, new->end, 222345c4745aSLee Schermerhorn new->policy ? 
new->policy->mode : 0); 22241da177e4SLinus Torvalds } 22251da177e4SLinus Torvalds 22261da177e4SLinus Torvalds /* Find shared policy intersecting idx */ 22271da177e4SLinus Torvalds struct mempolicy * 22281da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx) 22291da177e4SLinus Torvalds { 22301da177e4SLinus Torvalds struct mempolicy *pol = NULL; 22311da177e4SLinus Torvalds struct sp_node *sn; 22321da177e4SLinus Torvalds 22331da177e4SLinus Torvalds if (!sp->root.rb_node) 22341da177e4SLinus Torvalds return NULL; 22354a8c7bb5SNathan Zimmer read_lock(&sp->lock); 22361da177e4SLinus Torvalds sn = sp_lookup(sp, idx, idx+1); 22371da177e4SLinus Torvalds if (sn) { 22381da177e4SLinus Torvalds mpol_get(sn->policy); 22391da177e4SLinus Torvalds pol = sn->policy; 22401da177e4SLinus Torvalds } 22414a8c7bb5SNathan Zimmer read_unlock(&sp->lock); 22421da177e4SLinus Torvalds return pol; 22431da177e4SLinus Torvalds } 22441da177e4SLinus Torvalds 224563f74ca2SKOSAKI Motohiro static void sp_free(struct sp_node *n) 224663f74ca2SKOSAKI Motohiro { 224763f74ca2SKOSAKI Motohiro mpol_put(n->policy); 224863f74ca2SKOSAKI Motohiro kmem_cache_free(sn_cache, n); 224963f74ca2SKOSAKI Motohiro } 225063f74ca2SKOSAKI Motohiro 2251771fb4d8SLee Schermerhorn /** 2252771fb4d8SLee Schermerhorn * mpol_misplaced - check whether current page node is valid in policy 2253771fb4d8SLee Schermerhorn * 2254b46e14acSFabian Frederick * @page: page to be checked 2255b46e14acSFabian Frederick * @vma: vm area where page mapped 2256b46e14acSFabian Frederick * @addr: virtual address where page mapped 2257771fb4d8SLee Schermerhorn * 2258771fb4d8SLee Schermerhorn * Lookup current policy node id for vma,addr and "compare to" page's 2259771fb4d8SLee Schermerhorn * node id. 2260771fb4d8SLee Schermerhorn * 2261771fb4d8SLee Schermerhorn * Returns: 2262771fb4d8SLee Schermerhorn * -1 - not misplaced, page is in the right node 2263771fb4d8SLee Schermerhorn * node - node id where the page should be 2264771fb4d8SLee Schermerhorn * 2265771fb4d8SLee Schermerhorn * Policy determination "mimics" alloc_page_vma(). 2266771fb4d8SLee Schermerhorn * Called from fault path where we know the vma and faulting address. 
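 * For example: if the policy says the page should live on node 2 but it currently sits on node 0, this returns 2; if the page already sits on node 2, it returns -1.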
2267771fb4d8SLee Schermerhorn */ 2268771fb4d8SLee Schermerhorn int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr) 2269771fb4d8SLee Schermerhorn { 2270771fb4d8SLee Schermerhorn struct mempolicy *pol; 2271c33d6c06SMel Gorman struct zoneref *z; 2272771fb4d8SLee Schermerhorn int curnid = page_to_nid(page); 2273771fb4d8SLee Schermerhorn unsigned long pgoff; 227490572890SPeter Zijlstra int thiscpu = raw_smp_processor_id(); 227590572890SPeter Zijlstra int thisnid = cpu_to_node(thiscpu); 2276771fb4d8SLee Schermerhorn int polnid = -1; 2277771fb4d8SLee Schermerhorn int ret = -1; 2278771fb4d8SLee Schermerhorn 2279771fb4d8SLee Schermerhorn BUG_ON(!vma); 2280771fb4d8SLee Schermerhorn 2281dd6eecb9SOleg Nesterov pol = get_vma_policy(vma, addr); 2282771fb4d8SLee Schermerhorn if (!(pol->flags & MPOL_F_MOF)) 2283771fb4d8SLee Schermerhorn goto out; 2284771fb4d8SLee Schermerhorn 2285771fb4d8SLee Schermerhorn switch (pol->mode) { 2286771fb4d8SLee Schermerhorn case MPOL_INTERLEAVE: 2287771fb4d8SLee Schermerhorn BUG_ON(addr >= vma->vm_end); 2288771fb4d8SLee Schermerhorn BUG_ON(addr < vma->vm_start); 2289771fb4d8SLee Schermerhorn 2290771fb4d8SLee Schermerhorn pgoff = vma->vm_pgoff; 2291771fb4d8SLee Schermerhorn pgoff += (addr - vma->vm_start) >> PAGE_SHIFT; 2292771fb4d8SLee Schermerhorn polnid = offset_il_node(pol, vma, pgoff); 2293771fb4d8SLee Schermerhorn break; 2294771fb4d8SLee Schermerhorn 2295771fb4d8SLee Schermerhorn case MPOL_PREFERRED: 2296771fb4d8SLee Schermerhorn if (pol->flags & MPOL_F_LOCAL) 2297771fb4d8SLee Schermerhorn polnid = numa_node_id(); 2298771fb4d8SLee Schermerhorn else 2299771fb4d8SLee Schermerhorn polnid = pol->v.preferred_node; 2300771fb4d8SLee Schermerhorn break; 2301771fb4d8SLee Schermerhorn 2302771fb4d8SLee Schermerhorn case MPOL_BIND: 2303c33d6c06SMel Gorman 2304771fb4d8SLee Schermerhorn /* 2305771fb4d8SLee Schermerhorn * allows binding to multiple nodes. 2306771fb4d8SLee Schermerhorn * use current page if in policy nodemask, 2307771fb4d8SLee Schermerhorn * else select nearest allowed node, if any. 2308771fb4d8SLee Schermerhorn * If no allowed nodes, use current [!misplaced]. 
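 * The "nearest" allowed node is taken from the local node's zonelist restricted to the policy nodemask, via first_zones_zonelist() below.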
2309771fb4d8SLee Schermerhorn */ 2310771fb4d8SLee Schermerhorn if (node_isset(curnid, pol->v.nodes)) 2311771fb4d8SLee Schermerhorn goto out; 2312c33d6c06SMel Gorman z = first_zones_zonelist( 2313771fb4d8SLee Schermerhorn node_zonelist(numa_node_id(), GFP_HIGHUSER), 2314771fb4d8SLee Schermerhorn gfp_zone(GFP_HIGHUSER), 2315c33d6c06SMel Gorman &pol->v.nodes); 2316c33d6c06SMel Gorman polnid = z->zone->node; 2317771fb4d8SLee Schermerhorn break; 2318771fb4d8SLee Schermerhorn 2319771fb4d8SLee Schermerhorn default: 2320771fb4d8SLee Schermerhorn BUG(); 2321771fb4d8SLee Schermerhorn } 23225606e387SMel Gorman 23235606e387SMel Gorman /* Migrate the page towards the node whose CPU is referencing it */ 2324e42c8ff2SMel Gorman if (pol->flags & MPOL_F_MORON) { 232590572890SPeter Zijlstra polnid = thisnid; 23265606e387SMel Gorman 232710f39042SRik van Riel if (!should_numa_migrate_memory(current, page, curnid, thiscpu)) 2328de1c9ce6SRik van Riel goto out; 2329de1c9ce6SRik van Riel } 2330e42c8ff2SMel Gorman 2331771fb4d8SLee Schermerhorn if (curnid != polnid) 2332771fb4d8SLee Schermerhorn ret = polnid; 2333771fb4d8SLee Schermerhorn out: 2334771fb4d8SLee Schermerhorn mpol_cond_put(pol); 2335771fb4d8SLee Schermerhorn 2336771fb4d8SLee Schermerhorn return ret; 2337771fb4d8SLee Schermerhorn } 2338771fb4d8SLee Schermerhorn 2339c11600e4SDavid Rientjes /* 2340c11600e4SDavid Rientjes * Drop the (possibly final) reference to task->mempolicy. It needs to be 2341c11600e4SDavid Rientjes * dropped after task->mempolicy is set to NULL so that any allocation done as 2342c11600e4SDavid Rientjes * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed 2343c11600e4SDavid Rientjes * policy. 2344c11600e4SDavid Rientjes */ 2345c11600e4SDavid Rientjes void mpol_put_task_policy(struct task_struct *task) 2346c11600e4SDavid Rientjes { 2347c11600e4SDavid Rientjes struct mempolicy *pol; 2348c11600e4SDavid Rientjes 2349c11600e4SDavid Rientjes task_lock(task); 2350c11600e4SDavid Rientjes pol = task->mempolicy; 2351c11600e4SDavid Rientjes task->mempolicy = NULL; 2352c11600e4SDavid Rientjes task_unlock(task); 2353c11600e4SDavid Rientjes mpol_put(pol); 2354c11600e4SDavid Rientjes } 2355c11600e4SDavid Rientjes 23561da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n) 23571da177e4SLinus Torvalds { 2358140d5a49SPaul Mundt pr_debug("deleting %lx-%lx\n", n->start, n->end); 23591da177e4SLinus Torvalds rb_erase(&n->nd, &sp->root); 236063f74ca2SKOSAKI Motohiro sp_free(n); 23611da177e4SLinus Torvalds } 23621da177e4SLinus Torvalds 236342288fe3SMel Gorman static void sp_node_init(struct sp_node *node, unsigned long start, 236442288fe3SMel Gorman unsigned long end, struct mempolicy *pol) 236542288fe3SMel Gorman { 236642288fe3SMel Gorman node->start = start; 236742288fe3SMel Gorman node->end = end; 236842288fe3SMel Gorman node->policy = pol; 236942288fe3SMel Gorman } 237042288fe3SMel Gorman 2371dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end, 2372dbcb0f19SAdrian Bunk struct mempolicy *pol) 23731da177e4SLinus Torvalds { 2374869833f2SKOSAKI Motohiro struct sp_node *n; 2375869833f2SKOSAKI Motohiro struct mempolicy *newpol; 23761da177e4SLinus Torvalds 2377869833f2SKOSAKI Motohiro n = kmem_cache_alloc(sn_cache, GFP_KERNEL); 23781da177e4SLinus Torvalds if (!n) 23791da177e4SLinus Torvalds return NULL; 2380869833f2SKOSAKI Motohiro 2381869833f2SKOSAKI Motohiro newpol = mpol_dup(pol); 2382869833f2SKOSAKI Motohiro if (IS_ERR(newpol)) { 2383869833f2SKOSAKI Motohiro
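/* mpol_dup() failed (e.g. -ENOMEM): undo the sn_cache allocation and fail */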
kmem_cache_free(sn_cache, n); 2384869833f2SKOSAKI Motohiro return NULL; 2385869833f2SKOSAKI Motohiro } 2386869833f2SKOSAKI Motohiro newpol->flags |= MPOL_F_SHARED; 238742288fe3SMel Gorman sp_node_init(n, start, end, newpol); 2388869833f2SKOSAKI Motohiro 23891da177e4SLinus Torvalds return n; 23901da177e4SLinus Torvalds } 23911da177e4SLinus Torvalds 23921da177e4SLinus Torvalds /* Replace a policy range. */ 23931da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start, 23941da177e4SLinus Torvalds unsigned long end, struct sp_node *new) 23951da177e4SLinus Torvalds { 2396b22d127aSMel Gorman struct sp_node *n; 239742288fe3SMel Gorman struct sp_node *n_new = NULL; 239842288fe3SMel Gorman struct mempolicy *mpol_new = NULL; 2399b22d127aSMel Gorman int ret = 0; 24001da177e4SLinus Torvalds 240142288fe3SMel Gorman restart: 24024a8c7bb5SNathan Zimmer write_lock(&sp->lock); 24031da177e4SLinus Torvalds n = sp_lookup(sp, start, end); 24041da177e4SLinus Torvalds /* Take care of old policies in the same range. */ 24051da177e4SLinus Torvalds while (n && n->start < end) { 24061da177e4SLinus Torvalds struct rb_node *next = rb_next(&n->nd); 24071da177e4SLinus Torvalds if (n->start >= start) { 24081da177e4SLinus Torvalds if (n->end <= end) 24091da177e4SLinus Torvalds sp_delete(sp, n); 24101da177e4SLinus Torvalds else 24111da177e4SLinus Torvalds n->start = end; 24121da177e4SLinus Torvalds } else { 24131da177e4SLinus Torvalds /* Old policy spanning whole new range. */ 24141da177e4SLinus Torvalds if (n->end > end) { 241542288fe3SMel Gorman if (!n_new) 241642288fe3SMel Gorman goto alloc_new; 241742288fe3SMel Gorman 241842288fe3SMel Gorman *mpol_new = *n->policy; 241942288fe3SMel Gorman atomic_set(&mpol_new->refcnt, 1); 24207880639cSKOSAKI Motohiro sp_node_init(n_new, end, n->end, mpol_new); 24211da177e4SLinus Torvalds n->end = start; 24225ca39575SHillf Danton sp_insert(sp, n_new); 242342288fe3SMel Gorman n_new = NULL; 242442288fe3SMel Gorman mpol_new = NULL; 24251da177e4SLinus Torvalds break; 24261da177e4SLinus Torvalds } else 24271da177e4SLinus Torvalds n->end = start; 24281da177e4SLinus Torvalds } 24291da177e4SLinus Torvalds if (!next) 24301da177e4SLinus Torvalds break; 24311da177e4SLinus Torvalds n = rb_entry(next, struct sp_node, nd); 24321da177e4SLinus Torvalds } 24331da177e4SLinus Torvalds if (new) 24341da177e4SLinus Torvalds sp_insert(sp, new); 24354a8c7bb5SNathan Zimmer write_unlock(&sp->lock); 243642288fe3SMel Gorman ret = 0; 243742288fe3SMel Gorman 243842288fe3SMel Gorman err_out: 243942288fe3SMel Gorman if (mpol_new) 244042288fe3SMel Gorman mpol_put(mpol_new); 244142288fe3SMel Gorman if (n_new) 244242288fe3SMel Gorman kmem_cache_free(sn_cache, n_new); 244342288fe3SMel Gorman 2444b22d127aSMel Gorman return ret; 244542288fe3SMel Gorman 244642288fe3SMel Gorman alloc_new: 24474a8c7bb5SNathan Zimmer write_unlock(&sp->lock); 244842288fe3SMel Gorman ret = -ENOMEM; 244942288fe3SMel Gorman n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL); 245042288fe3SMel Gorman if (!n_new) 245142288fe3SMel Gorman goto err_out; 245242288fe3SMel Gorman mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL); 245342288fe3SMel Gorman if (!mpol_new) 245442288fe3SMel Gorman goto err_out; 245542288fe3SMel Gorman goto restart; 24561da177e4SLinus Torvalds } 24571da177e4SLinus Torvalds 245871fe804bSLee Schermerhorn /** 245971fe804bSLee Schermerhorn * mpol_shared_policy_init - initialize shared policy for inode 246071fe804bSLee Schermerhorn * @sp: pointer to inode shared policy 246171fe804bSLee 
Schermerhorn * @mpol: struct mempolicy to install 246271fe804bSLee Schermerhorn * 246371fe804bSLee Schermerhorn * Install non-NULL @mpol in inode's shared policy rb-tree. 246471fe804bSLee Schermerhorn * On entry, the current task has a reference on a non-NULL @mpol. 246571fe804bSLee Schermerhorn * This must be released on exit. 24664bfc4495SKAMEZAWA Hiroyuki * This is called from get_inode() calls, so we can use GFP_KERNEL. 246771fe804bSLee Schermerhorn */ 246871fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) 24697339ff83SRobin Holt { 247058568d2aSMiao Xie int ret; 247158568d2aSMiao Xie 247271fe804bSLee Schermerhorn sp->root = RB_ROOT; /* empty tree == default mempolicy */ 24734a8c7bb5SNathan Zimmer rwlock_init(&sp->lock); 24747339ff83SRobin Holt 247571fe804bSLee Schermerhorn if (mpol) { 24767339ff83SRobin Holt struct vm_area_struct pvma; 247771fe804bSLee Schermerhorn struct mempolicy *new; 24784bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 24797339ff83SRobin Holt 24804bfc4495SKAMEZAWA Hiroyuki if (!scratch) 24815c0c1654SLee Schermerhorn goto put_mpol; 248271fe804bSLee Schermerhorn /* contextualize the tmpfs mount point mempolicy */ 248371fe804bSLee Schermerhorn new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask); 248415d77835SLee Schermerhorn if (IS_ERR(new)) 24850cae3457SDan Carpenter goto free_scratch; /* no valid nodemask intersection */ 248658568d2aSMiao Xie 248758568d2aSMiao Xie task_lock(current); 24884bfc4495SKAMEZAWA Hiroyuki ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch); 248958568d2aSMiao Xie task_unlock(current); 249015d77835SLee Schermerhorn if (ret) 24915c0c1654SLee Schermerhorn goto put_new; 249271fe804bSLee Schermerhorn 249371fe804bSLee Schermerhorn /* Create pseudo-vma that contains just the policy */ 24947339ff83SRobin Holt memset(&pvma, 0, sizeof(struct vm_area_struct)); 249571fe804bSLee Schermerhorn pvma.vm_end = TASK_SIZE; /* policy covers entire file */ 249671fe804bSLee Schermerhorn mpol_set_shared_policy(sp, &pvma, new); /* adds ref */ 249715d77835SLee Schermerhorn 24985c0c1654SLee Schermerhorn put_new: 249971fe804bSLee Schermerhorn mpol_put(new); /* drop initial ref */ 25000cae3457SDan Carpenter free_scratch: 25014bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 25025c0c1654SLee Schermerhorn put_mpol: 25035c0c1654SLee Schermerhorn mpol_put(mpol); /* drop our incoming ref on sb mpol */ 25047339ff83SRobin Holt } 25057339ff83SRobin Holt } 25067339ff83SRobin Holt 25071da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info, 25081da177e4SLinus Torvalds struct vm_area_struct *vma, struct mempolicy *npol) 25091da177e4SLinus Torvalds { 25101da177e4SLinus Torvalds int err; 25111da177e4SLinus Torvalds struct sp_node *new = NULL; 25121da177e4SLinus Torvalds unsigned long sz = vma_pages(vma); 25131da177e4SLinus Torvalds 2514028fec41SDavid Rientjes pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n", 25151da177e4SLinus Torvalds vma->vm_pgoff, 251645c4745aSLee Schermerhorn sz, npol ? npol->mode : -1, 2517028fec41SDavid Rientjes npol ? npol->flags : -1, 251800ef2d2fSDavid Rientjes npol ?
nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE); 25191da177e4SLinus Torvalds 25201da177e4SLinus Torvalds if (npol) { 25211da177e4SLinus Torvalds new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol); 25221da177e4SLinus Torvalds if (!new) 25231da177e4SLinus Torvalds return -ENOMEM; 25241da177e4SLinus Torvalds } 25251da177e4SLinus Torvalds err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new); 25261da177e4SLinus Torvalds if (err && new) 252763f74ca2SKOSAKI Motohiro sp_free(new); 25281da177e4SLinus Torvalds return err; 25291da177e4SLinus Torvalds } 25301da177e4SLinus Torvalds 25311da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */ 25321da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p) 25331da177e4SLinus Torvalds { 25341da177e4SLinus Torvalds struct sp_node *n; 25351da177e4SLinus Torvalds struct rb_node *next; 25361da177e4SLinus Torvalds 25371da177e4SLinus Torvalds if (!p->root.rb_node) 25381da177e4SLinus Torvalds return; 25394a8c7bb5SNathan Zimmer write_lock(&p->lock); 25401da177e4SLinus Torvalds next = rb_first(&p->root); 25411da177e4SLinus Torvalds while (next) { 25421da177e4SLinus Torvalds n = rb_entry(next, struct sp_node, nd); 25431da177e4SLinus Torvalds next = rb_next(&n->nd); 254463f74ca2SKOSAKI Motohiro sp_delete(p, n); 25451da177e4SLinus Torvalds } 25464a8c7bb5SNathan Zimmer write_unlock(&p->lock); 25471da177e4SLinus Torvalds } 25481da177e4SLinus Torvalds 25491a687c2eSMel Gorman #ifdef CONFIG_NUMA_BALANCING 2550c297663cSMel Gorman static int __initdata numabalancing_override; 25511a687c2eSMel Gorman 25521a687c2eSMel Gorman static void __init check_numabalancing_enable(void) 25531a687c2eSMel Gorman { 25541a687c2eSMel Gorman bool numabalancing_default = false; 25551a687c2eSMel Gorman 25561a687c2eSMel Gorman if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED)) 25571a687c2eSMel Gorman numabalancing_default = true; 25581a687c2eSMel Gorman 2559c297663cSMel Gorman /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */ 2560c297663cSMel Gorman if (numabalancing_override) 2561c297663cSMel Gorman set_numabalancing_state(numabalancing_override == 1); 2562c297663cSMel Gorman 2563b0dc2b9bSMel Gorman if (num_online_nodes() > 1 && !numabalancing_override) { 2564756a025fSJoe Perches pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n", 2565c297663cSMel Gorman numabalancing_default ? 
"Enabling" : "Disabling"); 25661a687c2eSMel Gorman set_numabalancing_state(numabalancing_default); 25671a687c2eSMel Gorman } 25681a687c2eSMel Gorman } 25691a687c2eSMel Gorman 25701a687c2eSMel Gorman static int __init setup_numabalancing(char *str) 25711a687c2eSMel Gorman { 25721a687c2eSMel Gorman int ret = 0; 25731a687c2eSMel Gorman if (!str) 25741a687c2eSMel Gorman goto out; 25751a687c2eSMel Gorman 25761a687c2eSMel Gorman if (!strcmp(str, "enable")) { 2577c297663cSMel Gorman numabalancing_override = 1; 25781a687c2eSMel Gorman ret = 1; 25791a687c2eSMel Gorman } else if (!strcmp(str, "disable")) { 2580c297663cSMel Gorman numabalancing_override = -1; 25811a687c2eSMel Gorman ret = 1; 25821a687c2eSMel Gorman } 25831a687c2eSMel Gorman out: 25841a687c2eSMel Gorman if (!ret) 25854a404beaSAndrew Morton pr_warn("Unable to parse numa_balancing=\n"); 25861a687c2eSMel Gorman 25871a687c2eSMel Gorman return ret; 25881a687c2eSMel Gorman } 25891a687c2eSMel Gorman __setup("numa_balancing=", setup_numabalancing); 25901a687c2eSMel Gorman #else 25911a687c2eSMel Gorman static inline void __init check_numabalancing_enable(void) 25921a687c2eSMel Gorman { 25931a687c2eSMel Gorman } 25941a687c2eSMel Gorman #endif /* CONFIG_NUMA_BALANCING */ 25951a687c2eSMel Gorman 25961da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */ 25971da177e4SLinus Torvalds void __init numa_policy_init(void) 25981da177e4SLinus Torvalds { 2599b71636e2SPaul Mundt nodemask_t interleave_nodes; 2600b71636e2SPaul Mundt unsigned long largest = 0; 2601b71636e2SPaul Mundt int nid, prefer = 0; 2602b71636e2SPaul Mundt 26031da177e4SLinus Torvalds policy_cache = kmem_cache_create("numa_policy", 26041da177e4SLinus Torvalds sizeof(struct mempolicy), 260520c2df83SPaul Mundt 0, SLAB_PANIC, NULL); 26061da177e4SLinus Torvalds 26071da177e4SLinus Torvalds sn_cache = kmem_cache_create("shared_policy_node", 26081da177e4SLinus Torvalds sizeof(struct sp_node), 260920c2df83SPaul Mundt 0, SLAB_PANIC, NULL); 26101da177e4SLinus Torvalds 26115606e387SMel Gorman for_each_node(nid) { 26125606e387SMel Gorman preferred_node_policy[nid] = (struct mempolicy) { 26135606e387SMel Gorman .refcnt = ATOMIC_INIT(1), 26145606e387SMel Gorman .mode = MPOL_PREFERRED, 26155606e387SMel Gorman .flags = MPOL_F_MOF | MPOL_F_MORON, 26165606e387SMel Gorman .v = { .preferred_node = nid, }, 26175606e387SMel Gorman }; 26185606e387SMel Gorman } 26195606e387SMel Gorman 2620b71636e2SPaul Mundt /* 2621b71636e2SPaul Mundt * Set interleaving policy for system init. Interleaving is only 2622b71636e2SPaul Mundt * enabled across suitably sized nodes (default is >= 16MB), or 2623b71636e2SPaul Mundt * fall back to the largest node if they're all smaller. 2624b71636e2SPaul Mundt */ 2625b71636e2SPaul Mundt nodes_clear(interleave_nodes); 262601f13bd6SLai Jiangshan for_each_node_state(nid, N_MEMORY) { 2627b71636e2SPaul Mundt unsigned long total_pages = node_present_pages(nid); 26281da177e4SLinus Torvalds 2629b71636e2SPaul Mundt /* Preserve the largest node */ 2630b71636e2SPaul Mundt if (largest < total_pages) { 2631b71636e2SPaul Mundt largest = total_pages; 2632b71636e2SPaul Mundt prefer = nid; 2633b71636e2SPaul Mundt } 2634b71636e2SPaul Mundt 2635b71636e2SPaul Mundt /* Interleave this node? 
*/ 2636b71636e2SPaul Mundt if ((total_pages << PAGE_SHIFT) >= (16 << 20)) 2637b71636e2SPaul Mundt node_set(nid, interleave_nodes); 2638b71636e2SPaul Mundt } 2639b71636e2SPaul Mundt 2640b71636e2SPaul Mundt /* All too small, use the largest */ 2641b71636e2SPaul Mundt if (unlikely(nodes_empty(interleave_nodes))) 2642b71636e2SPaul Mundt node_set(prefer, interleave_nodes); 2643b71636e2SPaul Mundt 2644028fec41SDavid Rientjes if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes)) 2645b1de0d13SMitchel Humpherys pr_err("%s: interleaving failed\n", __func__); 26461a687c2eSMel Gorman 26471a687c2eSMel Gorman check_numabalancing_enable(); 26481da177e4SLinus Torvalds } 26491da177e4SLinus Torvalds 26508bccd85fSChristoph Lameter /* Reset policy of current process to default */ 26511da177e4SLinus Torvalds void numa_default_policy(void) 26521da177e4SLinus Torvalds { 2653028fec41SDavid Rientjes do_set_mempolicy(MPOL_DEFAULT, 0, NULL); 26541da177e4SLinus Torvalds } 265568860ec1SPaul Jackson 26564225399aSPaul Jackson /* 2657095f1fc4SLee Schermerhorn * Parse and format mempolicy from/to strings 2658095f1fc4SLee Schermerhorn */ 2659095f1fc4SLee Schermerhorn 2660095f1fc4SLee Schermerhorn /* 2661f2a07f40SHugh Dickins * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag. 26621a75a6c8SChristoph Lameter */ 2663345ace9cSLee Schermerhorn static const char * const policy_modes[] = 2664345ace9cSLee Schermerhorn { 2665345ace9cSLee Schermerhorn [MPOL_DEFAULT] = "default", 2666345ace9cSLee Schermerhorn [MPOL_PREFERRED] = "prefer", 2667345ace9cSLee Schermerhorn [MPOL_BIND] = "bind", 2668345ace9cSLee Schermerhorn [MPOL_INTERLEAVE] = "interleave", 2669d3a71033SLee Schermerhorn [MPOL_LOCAL] = "local", 2670345ace9cSLee Schermerhorn }; 26711a75a6c8SChristoph Lameter 2672095f1fc4SLee Schermerhorn 2673095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS 2674095f1fc4SLee Schermerhorn /** 2675f2a07f40SHugh Dickins * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option. 2676095f1fc4SLee Schermerhorn * @str: string containing mempolicy to parse 267771fe804bSLee Schermerhorn * @mpol: pointer to struct mempolicy pointer, returned on success. 
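 * Typical inputs (illustrative) include "interleave:0-3", "prefer=static:2" and "local"; the general format is described below.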
2678095f1fc4SLee Schermerhorn * 2679095f1fc4SLee Schermerhorn * Format of input: 2680095f1fc4SLee Schermerhorn * <mode>[=<flags>][:<nodelist>] 2681095f1fc4SLee Schermerhorn * 268271fe804bSLee Schermerhorn * On success, returns 0, else 1 2683095f1fc4SLee Schermerhorn */ 2684a7a88b23SHugh Dickins int mpol_parse_str(char *str, struct mempolicy **mpol) 2685095f1fc4SLee Schermerhorn { 268671fe804bSLee Schermerhorn struct mempolicy *new = NULL; 2687b4652e84SLee Schermerhorn unsigned short mode; 2688f2a07f40SHugh Dickins unsigned short mode_flags; 268971fe804bSLee Schermerhorn nodemask_t nodes; 2690095f1fc4SLee Schermerhorn char *nodelist = strchr(str, ':'); 2691095f1fc4SLee Schermerhorn char *flags = strchr(str, '='); 2692095f1fc4SLee Schermerhorn int err = 1; 2693095f1fc4SLee Schermerhorn 2694095f1fc4SLee Schermerhorn if (nodelist) { 2695095f1fc4SLee Schermerhorn /* NUL-terminate mode or flags string */ 2696095f1fc4SLee Schermerhorn *nodelist++ = '\0'; 269771fe804bSLee Schermerhorn if (nodelist_parse(nodelist, nodes)) 2698095f1fc4SLee Schermerhorn goto out; 269901f13bd6SLai Jiangshan if (!nodes_subset(nodes, node_states[N_MEMORY])) 2700095f1fc4SLee Schermerhorn goto out; 270171fe804bSLee Schermerhorn } else 270271fe804bSLee Schermerhorn nodes_clear(nodes); 270371fe804bSLee Schermerhorn 2704095f1fc4SLee Schermerhorn if (flags) 2705095f1fc4SLee Schermerhorn *flags++ = '\0'; /* terminate mode string */ 2706095f1fc4SLee Schermerhorn 2707479e2802SPeter Zijlstra for (mode = 0; mode < MPOL_MAX; mode++) { 2708345ace9cSLee Schermerhorn if (!strcmp(str, policy_modes[mode])) { 2709095f1fc4SLee Schermerhorn break; 2710095f1fc4SLee Schermerhorn } 2711095f1fc4SLee Schermerhorn } 2712a720094dSMel Gorman if (mode >= MPOL_MAX) 2713095f1fc4SLee Schermerhorn goto out; 2714095f1fc4SLee Schermerhorn 271571fe804bSLee Schermerhorn switch (mode) { 2716095f1fc4SLee Schermerhorn case MPOL_PREFERRED: 271771fe804bSLee Schermerhorn /* 271871fe804bSLee Schermerhorn * Insist on a nodelist of one node only 271971fe804bSLee Schermerhorn */ 2720095f1fc4SLee Schermerhorn if (nodelist) { 2721095f1fc4SLee Schermerhorn char *rest = nodelist; 2722095f1fc4SLee Schermerhorn while (isdigit(*rest)) 2723095f1fc4SLee Schermerhorn rest++; 2724926f2ae0SKOSAKI Motohiro if (*rest) 2725926f2ae0SKOSAKI Motohiro goto out; 2726095f1fc4SLee Schermerhorn } 2727095f1fc4SLee Schermerhorn break; 2728095f1fc4SLee Schermerhorn case MPOL_INTERLEAVE: 2729095f1fc4SLee Schermerhorn /* 2730095f1fc4SLee Schermerhorn * Default to online nodes with memory if no nodelist 2731095f1fc4SLee Schermerhorn */ 2732095f1fc4SLee Schermerhorn if (!nodelist) 273301f13bd6SLai Jiangshan nodes = node_states[N_MEMORY]; 27343f226aa1SLee Schermerhorn break; 273571fe804bSLee Schermerhorn case MPOL_LOCAL: 27363f226aa1SLee Schermerhorn /* 273771fe804bSLee Schermerhorn * Don't allow a nodelist; mpol_new() checks flags 27383f226aa1SLee Schermerhorn */ 273971fe804bSLee Schermerhorn if (nodelist) 27403f226aa1SLee Schermerhorn goto out; 274171fe804bSLee Schermerhorn mode = MPOL_PREFERRED; 27423f226aa1SLee Schermerhorn break; 2743413b43deSRavikiran G Thirumalai case MPOL_DEFAULT: 2744413b43deSRavikiran G Thirumalai /* 2745413b43deSRavikiran G Thirumalai * Insist on an empty nodelist 2746413b43deSRavikiran G Thirumalai */ 2747413b43deSRavikiran G Thirumalai if (!nodelist) 2748413b43deSRavikiran G Thirumalai err = 0; 2749413b43deSRavikiran G Thirumalai goto out; 2750d69b2e63SKOSAKI Motohiro case MPOL_BIND: 275171fe804bSLee Schermerhorn /* 2752d69b2e63SKOSAKI Motohiro * Insist on a nodelist
275371fe804bSLee Schermerhorn */ 2754d69b2e63SKOSAKI Motohiro if (!nodelist) 2755d69b2e63SKOSAKI Motohiro goto out; 2756095f1fc4SLee Schermerhorn } 2757095f1fc4SLee Schermerhorn 275871fe804bSLee Schermerhorn mode_flags = 0; 2759095f1fc4SLee Schermerhorn if (flags) { 2760095f1fc4SLee Schermerhorn /* 2761095f1fc4SLee Schermerhorn * Currently, we only support two mutually exclusive 2762095f1fc4SLee Schermerhorn * mode flags. 2763095f1fc4SLee Schermerhorn */ 2764095f1fc4SLee Schermerhorn if (!strcmp(flags, "static")) 276571fe804bSLee Schermerhorn mode_flags |= MPOL_F_STATIC_NODES; 2766095f1fc4SLee Schermerhorn else if (!strcmp(flags, "relative")) 276771fe804bSLee Schermerhorn mode_flags |= MPOL_F_RELATIVE_NODES; 2768095f1fc4SLee Schermerhorn else 2769926f2ae0SKOSAKI Motohiro goto out; 2770095f1fc4SLee Schermerhorn } 277171fe804bSLee Schermerhorn 277271fe804bSLee Schermerhorn new = mpol_new(mode, mode_flags, &nodes); 277371fe804bSLee Schermerhorn if (IS_ERR(new)) 2774926f2ae0SKOSAKI Motohiro goto out; 2775926f2ae0SKOSAKI Motohiro 2776f2a07f40SHugh Dickins /* 2777f2a07f40SHugh Dickins * Save nodes for mpol_to_str() to show the tmpfs mount options 2778f2a07f40SHugh Dickins * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo. 2779f2a07f40SHugh Dickins */ 2780f2a07f40SHugh Dickins if (mode != MPOL_PREFERRED) 2781f2a07f40SHugh Dickins new->v.nodes = nodes; 2782f2a07f40SHugh Dickins else if (nodelist) 2783f2a07f40SHugh Dickins new->v.preferred_node = first_node(nodes); 2784f2a07f40SHugh Dickins else 2785f2a07f40SHugh Dickins new->flags |= MPOL_F_LOCAL; 2786f2a07f40SHugh Dickins 2787f2a07f40SHugh Dickins /* 2788f2a07f40SHugh Dickins * Save nodes for contextualization: this will be used to "clone" 2789f2a07f40SHugh Dickins * the mempolicy in a specific context [cpuset] at a later time. 2790f2a07f40SHugh Dickins */ 2791e17f74afSLee Schermerhorn new->w.user_nodemask = nodes; 2792f2a07f40SHugh Dickins 2793926f2ae0SKOSAKI Motohiro err = 0; 279471fe804bSLee Schermerhorn 2795095f1fc4SLee Schermerhorn out: 2796095f1fc4SLee Schermerhorn /* Restore string for error message */ 2797095f1fc4SLee Schermerhorn if (nodelist) 2798095f1fc4SLee Schermerhorn *--nodelist = ':'; 2799095f1fc4SLee Schermerhorn if (flags) 2800095f1fc4SLee Schermerhorn *--flags = '='; 280171fe804bSLee Schermerhorn if (!err) 280271fe804bSLee Schermerhorn *mpol = new; 2803095f1fc4SLee Schermerhorn return err; 2804095f1fc4SLee Schermerhorn } 2805095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */ 2806095f1fc4SLee Schermerhorn 280771fe804bSLee Schermerhorn /** 280871fe804bSLee Schermerhorn * mpol_to_str - format a mempolicy structure for printing 280971fe804bSLee Schermerhorn * @buffer: to contain formatted mempolicy string 281071fe804bSLee Schermerhorn * @maxlen: length of @buffer 281171fe804bSLee Schermerhorn * @pol: pointer to mempolicy to be formatted 281271fe804bSLee Schermerhorn * 2813948927eeSDavid Rientjes * Convert @pol into a string. If @buffer is too short, truncate the string. 2814948927eeSDavid Rientjes * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the 2815948927eeSDavid Rientjes * longest flag, "relative", and to display at least a few node ids. 
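 * For example (illustrative), an interleave policy over nodes 0-3 with MPOL_F_STATIC_NODES is formatted as "interleave=static:0-3".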
28161a75a6c8SChristoph Lameter */ 2817948927eeSDavid Rientjes void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol) 28181a75a6c8SChristoph Lameter { 28191a75a6c8SChristoph Lameter char *p = buffer; 2820948927eeSDavid Rientjes nodemask_t nodes = NODE_MASK_NONE; 2821948927eeSDavid Rientjes unsigned short mode = MPOL_DEFAULT; 2822948927eeSDavid Rientjes unsigned short flags = 0; 28231a75a6c8SChristoph Lameter 28248790c71aSDavid Rientjes if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) { 2825bea904d5SLee Schermerhorn mode = pol->mode; 2826948927eeSDavid Rientjes flags = pol->flags; 2827948927eeSDavid Rientjes } 2828bea904d5SLee Schermerhorn 28291a75a6c8SChristoph Lameter switch (mode) { 28301a75a6c8SChristoph Lameter case MPOL_DEFAULT: 28311a75a6c8SChristoph Lameter break; 28321a75a6c8SChristoph Lameter case MPOL_PREFERRED: 2833fc36b8d3SLee Schermerhorn if (flags & MPOL_F_LOCAL) 2834f2a07f40SHugh Dickins mode = MPOL_LOCAL; 283553f2556bSLee Schermerhorn else 2836fc36b8d3SLee Schermerhorn node_set(pol->v.preferred_node, nodes); 28371a75a6c8SChristoph Lameter break; 28381a75a6c8SChristoph Lameter case MPOL_BIND: 28391a75a6c8SChristoph Lameter case MPOL_INTERLEAVE: 28401a75a6c8SChristoph Lameter nodes = pol->v.nodes; 28411a75a6c8SChristoph Lameter break; 28421a75a6c8SChristoph Lameter default: 2843948927eeSDavid Rientjes WARN_ON_ONCE(1); 2844948927eeSDavid Rientjes snprintf(p, maxlen, "unknown"); 2845948927eeSDavid Rientjes return; 28461a75a6c8SChristoph Lameter } 28471a75a6c8SChristoph Lameter 2848b7a9f420SDavid Rientjes p += snprintf(p, maxlen, "%s", policy_modes[mode]); 28491a75a6c8SChristoph Lameter 2850fc36b8d3SLee Schermerhorn if (flags & MPOL_MODE_FLAGS) { 2851948927eeSDavid Rientjes p += snprintf(p, buffer + maxlen - p, "="); 2852f5b087b5SDavid Rientjes 28532291990aSLee Schermerhorn /* 28542291990aSLee Schermerhorn * Currently, the only defined flags are mutually exclusive 28552291990aSLee Schermerhorn */ 2856f5b087b5SDavid Rientjes if (flags & MPOL_F_STATIC_NODES) 28572291990aSLee Schermerhorn p += snprintf(p, buffer + maxlen - p, "static"); 28582291990aSLee Schermerhorn else if (flags & MPOL_F_RELATIVE_NODES) 28592291990aSLee Schermerhorn p += snprintf(p, buffer + maxlen - p, "relative"); 2860f5b087b5SDavid Rientjes } 2861f5b087b5SDavid Rientjes 28629e763e0fSTejun Heo if (!nodes_empty(nodes)) 28639e763e0fSTejun Heo p += scnprintf(p, buffer + maxlen - p, ":%*pbl", 28649e763e0fSTejun Heo nodemask_pr_args(&nodes)); 28651a75a6c8SChristoph Lameter } 2866
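/*
 * Illustrative sketch (not part of the kernel source): one possible way to
 * exercise mpol_parse_str() and mpol_to_str() as a pair, e.g. to sanity-check
 * how a tmpfs-style "mpol=" option string round-trips. The function name
 * mpol_demo_roundtrip() and the option string are hypothetical; only the
 * mpol_parse_str()/mpol_to_str()/mpol_put() calls are interfaces from this
 * file, and parsing succeeds only if the named nodes have memory on the
 * running system.
 */
#ifdef CONFIG_TMPFS
static void __maybe_unused mpol_demo_roundtrip(void)
{
	char opt[] = "interleave:0-3";		/* <mode>[=<flags>][:<nodelist>] */
	char buf[64];
	struct mempolicy *pol = NULL;

	if (mpol_parse_str(opt, &pol))		/* returns 0 on success, 1 on error */
		return;

	mpol_to_str(buf, sizeof(buf), pol);	/* expect "interleave:0-3" back */
	pr_debug("parsed mempolicy: %s\n", buf);

	mpol_put(pol);				/* drop the ref taken by mpol_parse_str() */
}
#endif /* CONFIG_TMPFS */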