146aeb7e6SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only 21da177e4SLinus Torvalds /* 31da177e4SLinus Torvalds * Simple NUMA memory policy for the Linux kernel. 41da177e4SLinus Torvalds * 51da177e4SLinus Torvalds * Copyright 2003,2004 Andi Kleen, SuSE Labs. 68bccd85fSChristoph Lameter * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc. 71da177e4SLinus Torvalds * 81da177e4SLinus Torvalds * NUMA policy allows the user to give hints in which node(s) memory should 91da177e4SLinus Torvalds * be allocated. 101da177e4SLinus Torvalds * 111da177e4SLinus Torvalds * Support four policies per VMA and per process: 121da177e4SLinus Torvalds * 131da177e4SLinus Torvalds * The VMA policy has priority over the process policy for a page fault. 141da177e4SLinus Torvalds * 151da177e4SLinus Torvalds * interleave Allocate memory interleaved over a set of nodes, 161da177e4SLinus Torvalds * with normal fallback if it fails. 171da177e4SLinus Torvalds * For VMA based allocations this interleaves based on the 181da177e4SLinus Torvalds * offset into the backing object or offset into the mapping 191da177e4SLinus Torvalds * for anonymous memory. For process policy a process counter 201da177e4SLinus Torvalds * is used. 218bccd85fSChristoph Lameter * 221da177e4SLinus Torvalds * bind Only allocate memory on a specific set of nodes, 231da177e4SLinus Torvalds * no fallback. 248bccd85fSChristoph Lameter * FIXME: memory is allocated starting with the first node 258bccd85fSChristoph Lameter * to the last. It would be better if bind would truly restrict 268bccd85fSChristoph Lameter * the allocation to memory nodes instead 278bccd85fSChristoph Lameter * 281da177e4SLinus Torvalds * preferred Try a specific node first before normal fallback. 2900ef2d2fSDavid Rientjes * As a special case NUMA_NO_NODE here means do the allocation 301da177e4SLinus Torvalds * on the local CPU. This is normally identical to default, 311da177e4SLinus Torvalds * but useful to set in a VMA when you have a non default 321da177e4SLinus Torvalds * process policy. 338bccd85fSChristoph Lameter * 341da177e4SLinus Torvalds * default Allocate on the local node first, or when on a VMA 351da177e4SLinus Torvalds * use the process policy. This is what Linux always did 361da177e4SLinus Torvalds * in a NUMA aware kernel and still does by, ahem, default. 371da177e4SLinus Torvalds * 381da177e4SLinus Torvalds * The process policy is applied for most non interrupt memory allocations 391da177e4SLinus Torvalds * in that process' context. Interrupts ignore the policies and always 401da177e4SLinus Torvalds * try to allocate on the local CPU. The VMA policy is only applied for memory 411da177e4SLinus Torvalds * allocations for a VMA in the VM. 421da177e4SLinus Torvalds * 431da177e4SLinus Torvalds * Currently there are a few corner cases in swapping where the policy 441da177e4SLinus Torvalds * is not applied, but the majority should be handled. When process policy 451da177e4SLinus Torvalds * is used it is not remembered over swap outs/swap ins. 461da177e4SLinus Torvalds * 471da177e4SLinus Torvalds * Only the highest zone in the zone hierarchy gets policied. Allocations 481da177e4SLinus Torvalds * requesting a lower zone just use default policy. This implies that 491da177e4SLinus Torvalds * on systems with highmem kernel lowmem allocations don't get policied. 501da177e4SLinus Torvalds * Same with GFP_DMA allocations.
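 *
 * Illustrative userspace sketch (not part of the kernel source; buf/len are
 * placeholders for an existing mapping, error handling omitted): the modes
 * above are selected through the set_mempolicy(2) and mbind(2) system calls,
 * using the MPOL_* constants from <numaif.h> as shipped by numactl/libnuma:
 *
 *	unsigned long imask = (1UL << 0) | (1UL << 1);
 *	set_mempolicy(MPOL_INTERLEAVE, &imask, sizeof(imask) * 8);
 *		- process policy: interleave new allocations over nodes 0-1
 *
 *	unsigned long bmask = 1UL << 1;
 *	mbind(buf, len, MPOL_BIND, &bmask, sizeof(bmask) * 8, MPOL_MF_STRICT);
 *		- VMA policy: restrict this mapping to node 1, no fallback
 *
 *	set_mempolicy(MPOL_DEFAULT, NULL, 0);
 *		- drop back to the default local-first behaviour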
511da177e4SLinus Torvalds * 521da177e4SLinus Torvalds * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between 531da177e4SLinus Torvalds * all users and remembered even when nobody has memory mapped. 541da177e4SLinus Torvalds */ 551da177e4SLinus Torvalds 561da177e4SLinus Torvalds /* Notebook: 571da177e4SLinus Torvalds fix mmap readahead to honour policy and enable policy for any page cache 581da177e4SLinus Torvalds object 591da177e4SLinus Torvalds statistics for bigpages 601da177e4SLinus Torvalds global policy for page cache? currently it uses process policy. Requires 611da177e4SLinus Torvalds first item above. 621da177e4SLinus Torvalds handle mremap for shared memory (currently ignored for the policy) 631da177e4SLinus Torvalds grows down? 641da177e4SLinus Torvalds make bind policy root only? It can trigger oom much faster and the 651da177e4SLinus Torvalds kernel is not always grateful with that. 661da177e4SLinus Torvalds */ 671da177e4SLinus Torvalds 68b1de0d13SMitchel Humpherys #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 69b1de0d13SMitchel Humpherys 701da177e4SLinus Torvalds #include <linux/mempolicy.h> 71a520110eSChristoph Hellwig #include <linux/pagewalk.h> 721da177e4SLinus Torvalds #include <linux/highmem.h> 731da177e4SLinus Torvalds #include <linux/hugetlb.h> 741da177e4SLinus Torvalds #include <linux/kernel.h> 751da177e4SLinus Torvalds #include <linux/sched.h> 766e84f315SIngo Molnar #include <linux/sched/mm.h> 776a3827d7SIngo Molnar #include <linux/sched/numa_balancing.h> 78f719ff9bSIngo Molnar #include <linux/sched/task.h> 791da177e4SLinus Torvalds #include <linux/nodemask.h> 801da177e4SLinus Torvalds #include <linux/cpuset.h> 811da177e4SLinus Torvalds #include <linux/slab.h> 821da177e4SLinus Torvalds #include <linux/string.h> 83b95f1b31SPaul Gortmaker #include <linux/export.h> 84b488893aSPavel Emelyanov #include <linux/nsproxy.h> 851da177e4SLinus Torvalds #include <linux/interrupt.h> 861da177e4SLinus Torvalds #include <linux/init.h> 871da177e4SLinus Torvalds #include <linux/compat.h> 8831367466SOtto Ebeling #include <linux/ptrace.h> 89dc9aa5b9SChristoph Lameter #include <linux/swap.h> 901a75a6c8SChristoph Lameter #include <linux/seq_file.h> 911a75a6c8SChristoph Lameter #include <linux/proc_fs.h> 92b20a3503SChristoph Lameter #include <linux/migrate.h> 9362b61f61SHugh Dickins #include <linux/ksm.h> 9495a402c3SChristoph Lameter #include <linux/rmap.h> 9586c3a764SDavid Quigley #include <linux/security.h> 96dbcb0f19SAdrian Bunk #include <linux/syscalls.h> 97095f1fc4SLee Schermerhorn #include <linux/ctype.h> 986d9c285aSKOSAKI Motohiro #include <linux/mm_inline.h> 99b24f53a0SLee Schermerhorn #include <linux/mmu_notifier.h> 100b1de0d13SMitchel Humpherys #include <linux/printk.h> 101c8633798SNaoya Horiguchi #include <linux/swapops.h> 102dc9aa5b9SChristoph Lameter 1031da177e4SLinus Torvalds #include <asm/tlbflush.h> 1047c0f6ba6SLinus Torvalds #include <linux/uaccess.h> 1051da177e4SLinus Torvalds 10662695a84SNick Piggin #include "internal.h" 10762695a84SNick Piggin 10838e35860SChristoph Lameter /* Internal flags */ 109dc9aa5b9SChristoph Lameter #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for continuous vmas */ 11038e35860SChristoph Lameter #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */ 111dc9aa5b9SChristoph Lameter 112fcc234f8SPekka Enberg static struct kmem_cache *policy_cache; 113fcc234f8SPekka Enberg static struct kmem_cache *sn_cache; 1141da177e4SLinus Torvalds 1151da177e4SLinus Torvalds /* Highest zone. 
A specific allocation for a zone below that is not 1161da177e4SLinus Torvalds policied. */ 1176267276fSChristoph Lameter enum zone_type policy_zone = 0; 1181da177e4SLinus Torvalds 119bea904d5SLee Schermerhorn /* 120bea904d5SLee Schermerhorn * run-time system-wide default policy => local allocation 121bea904d5SLee Schermerhorn */ 122e754d79dSH Hartley Sweeten static struct mempolicy default_policy = { 1231da177e4SLinus Torvalds .refcnt = ATOMIC_INIT(1), /* never free it */ 124bea904d5SLee Schermerhorn .mode = MPOL_PREFERRED, 125fc36b8d3SLee Schermerhorn .flags = MPOL_F_LOCAL, 1261da177e4SLinus Torvalds }; 1271da177e4SLinus Torvalds 1285606e387SMel Gorman static struct mempolicy preferred_node_policy[MAX_NUMNODES]; 1295606e387SMel Gorman 130b2ca916cSDan Williams /** 131b2ca916cSDan Williams * numa_map_to_online_node - Find closest online node 132f6e92f40SKrzysztof Kozlowski * @node: Node id to start the search 133b2ca916cSDan Williams * 134b2ca916cSDan Williams * Lookup the next closest node by distance if @node is not online. 135b2ca916cSDan Williams */ 136b2ca916cSDan Williams int numa_map_to_online_node(int node) 137b2ca916cSDan Williams { 1384fcbe96eSDan Williams int min_dist = INT_MAX, dist, n, min_node; 139b2ca916cSDan Williams 1404fcbe96eSDan Williams if (node == NUMA_NO_NODE || node_online(node)) 1414fcbe96eSDan Williams return node; 142b2ca916cSDan Williams 143b2ca916cSDan Williams min_node = node; 144b2ca916cSDan Williams for_each_online_node(n) { 145b2ca916cSDan Williams dist = node_distance(node, n); 146b2ca916cSDan Williams if (dist < min_dist) { 147b2ca916cSDan Williams min_dist = dist; 148b2ca916cSDan Williams min_node = n; 149b2ca916cSDan Williams } 150b2ca916cSDan Williams } 151b2ca916cSDan Williams 152b2ca916cSDan Williams return min_node; 153b2ca916cSDan Williams } 154b2ca916cSDan Williams EXPORT_SYMBOL_GPL(numa_map_to_online_node); 155b2ca916cSDan Williams 15674d2c3a0SOleg Nesterov struct mempolicy *get_task_policy(struct task_struct *p) 1575606e387SMel Gorman { 1585606e387SMel Gorman struct mempolicy *pol = p->mempolicy; 159f15ca78eSOleg Nesterov int node; 1605606e387SMel Gorman 161f15ca78eSOleg Nesterov if (pol) 162f15ca78eSOleg Nesterov return pol; 1635606e387SMel Gorman 164f15ca78eSOleg Nesterov node = numa_node_id(); 1651da6f0e1SJianguo Wu if (node != NUMA_NO_NODE) { 1661da6f0e1SJianguo Wu pol = &preferred_node_policy[node]; 167f15ca78eSOleg Nesterov /* preferred_node_policy is not initialised early in boot */ 168f15ca78eSOleg Nesterov if (pol->mode) 169f15ca78eSOleg Nesterov return pol; 1701da6f0e1SJianguo Wu } 1715606e387SMel Gorman 172f15ca78eSOleg Nesterov return &default_policy; 1735606e387SMel Gorman } 1745606e387SMel Gorman 17537012946SDavid Rientjes static const struct mempolicy_operations { 17637012946SDavid Rientjes int (*create)(struct mempolicy *pol, const nodemask_t *nodes); 177213980c0SVlastimil Babka void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes); 17837012946SDavid Rientjes } mpol_ops[MPOL_MAX]; 17937012946SDavid Rientjes 180f5b087b5SDavid Rientjes static inline int mpol_store_user_nodemask(const struct mempolicy *pol) 181f5b087b5SDavid Rientjes { 1826d556294SBob Liu return pol->flags & MPOL_MODE_FLAGS; 1834c50bc01SDavid Rientjes } 1844c50bc01SDavid Rientjes 1854c50bc01SDavid Rientjes static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig, 1864c50bc01SDavid Rientjes const nodemask_t *rel) 1874c50bc01SDavid Rientjes { 1884c50bc01SDavid Rientjes nodemask_t tmp; 1894c50bc01SDavid Rientjes nodes_fold(tmp, *orig,
nodes_weight(*rel)); 1904c50bc01SDavid Rientjes nodes_onto(*ret, tmp, *rel); 191f5b087b5SDavid Rientjes } 192f5b087b5SDavid Rientjes 19337012946SDavid Rientjes static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes) 19437012946SDavid Rientjes { 19537012946SDavid Rientjes if (nodes_empty(*nodes)) 19637012946SDavid Rientjes return -EINVAL; 19737012946SDavid Rientjes pol->v.nodes = *nodes; 19837012946SDavid Rientjes return 0; 19937012946SDavid Rientjes } 20037012946SDavid Rientjes 20137012946SDavid Rientjes static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes) 20237012946SDavid Rientjes { 20337012946SDavid Rientjes if (!nodes) 204fc36b8d3SLee Schermerhorn pol->flags |= MPOL_F_LOCAL; /* local allocation */ 20537012946SDavid Rientjes else if (nodes_empty(*nodes)) 20637012946SDavid Rientjes return -EINVAL; /* no allowed nodes */ 20737012946SDavid Rientjes else 20837012946SDavid Rientjes pol->v.preferred_node = first_node(*nodes); 20937012946SDavid Rientjes return 0; 21037012946SDavid Rientjes } 21137012946SDavid Rientjes 21237012946SDavid Rientjes static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes) 21337012946SDavid Rientjes { 214859f7ef1SZhihui Zhang if (nodes_empty(*nodes)) 21537012946SDavid Rientjes return -EINVAL; 21637012946SDavid Rientjes pol->v.nodes = *nodes; 21737012946SDavid Rientjes return 0; 21837012946SDavid Rientjes } 21937012946SDavid Rientjes 22058568d2aSMiao Xie /* 22158568d2aSMiao Xie * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if 22258568d2aSMiao Xie * any, for the new policy. mpol_new() has already validated the nodes 22358568d2aSMiao Xie * parameter with respect to the policy mode and flags. But, we need to 22458568d2aSMiao Xie * handle an empty nodemask with MPOL_PREFERRED here. 22558568d2aSMiao Xie * 22658568d2aSMiao Xie * Must be called holding task's alloc_lock to protect task's mems_allowed 227c1e8d7c6SMichel Lespinasse * and mempolicy. May also be called holding the mmap_lock for write. 22858568d2aSMiao Xie */ 2294bfc4495SKAMEZAWA Hiroyuki static int mpol_set_nodemask(struct mempolicy *pol, 2304bfc4495SKAMEZAWA Hiroyuki const nodemask_t *nodes, struct nodemask_scratch *nsc) 23158568d2aSMiao Xie { 23258568d2aSMiao Xie int ret; 23358568d2aSMiao Xie 23458568d2aSMiao Xie /* if mode is MPOL_DEFAULT, pol is NULL. This is right. 
*/ 23558568d2aSMiao Xie if (pol == NULL) 23658568d2aSMiao Xie return 0; 23701f13bd6SLai Jiangshan /* Check N_MEMORY */ 2384bfc4495SKAMEZAWA Hiroyuki nodes_and(nsc->mask1, 23901f13bd6SLai Jiangshan cpuset_current_mems_allowed, node_states[N_MEMORY]); 24058568d2aSMiao Xie 24158568d2aSMiao Xie VM_BUG_ON(!nodes); 24258568d2aSMiao Xie if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes)) 24358568d2aSMiao Xie nodes = NULL; /* explicit local allocation */ 24458568d2aSMiao Xie else { 24558568d2aSMiao Xie if (pol->flags & MPOL_F_RELATIVE_NODES) 2464bfc4495SKAMEZAWA Hiroyuki mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1); 24758568d2aSMiao Xie else 2484bfc4495SKAMEZAWA Hiroyuki nodes_and(nsc->mask2, *nodes, nsc->mask1); 2494bfc4495SKAMEZAWA Hiroyuki 25058568d2aSMiao Xie if (mpol_store_user_nodemask(pol)) 25158568d2aSMiao Xie pol->w.user_nodemask = *nodes; 25258568d2aSMiao Xie else 25358568d2aSMiao Xie pol->w.cpuset_mems_allowed = 25458568d2aSMiao Xie cpuset_current_mems_allowed; 25558568d2aSMiao Xie } 25658568d2aSMiao Xie 2574bfc4495SKAMEZAWA Hiroyuki if (nodes) 2584bfc4495SKAMEZAWA Hiroyuki ret = mpol_ops[pol->mode].create(pol, &nsc->mask2); 2594bfc4495SKAMEZAWA Hiroyuki else 2604bfc4495SKAMEZAWA Hiroyuki ret = mpol_ops[pol->mode].create(pol, NULL); 26158568d2aSMiao Xie return ret; 26258568d2aSMiao Xie } 26358568d2aSMiao Xie 26458568d2aSMiao Xie /* 26558568d2aSMiao Xie * This function just creates a new policy, does some check and simple 26658568d2aSMiao Xie * initialization. You must invoke mpol_set_nodemask() to set nodes. 26758568d2aSMiao Xie */ 268028fec41SDavid Rientjes static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags, 269028fec41SDavid Rientjes nodemask_t *nodes) 2701da177e4SLinus Torvalds { 2711da177e4SLinus Torvalds struct mempolicy *policy; 2721da177e4SLinus Torvalds 273028fec41SDavid Rientjes pr_debug("setting mode %d flags %d nodes[0] %lx\n", 27400ef2d2fSDavid Rientjes mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE); 275140d5a49SPaul Mundt 2763e1f0645SDavid Rientjes if (mode == MPOL_DEFAULT) { 2773e1f0645SDavid Rientjes if (nodes && !nodes_empty(*nodes)) 27837012946SDavid Rientjes return ERR_PTR(-EINVAL); 279d3a71033SLee Schermerhorn return NULL; 28037012946SDavid Rientjes } 2813e1f0645SDavid Rientjes VM_BUG_ON(!nodes); 2823e1f0645SDavid Rientjes 2833e1f0645SDavid Rientjes /* 2843e1f0645SDavid Rientjes * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or 2853e1f0645SDavid Rientjes * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation). 2863e1f0645SDavid Rientjes * All other modes require a valid pointer to a non-empty nodemask. 
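 *
 * For illustration (a sketch of the userspace view, not taken from this
 * file), the rules above work out as follows for set_mempolicy(2):
 *
 *	set_mempolicy(MPOL_PREFERRED, NULL, 0);
 *		- empty nodemask: explicit local allocation, accepted
 *	set_mempolicy(MPOL_PREFERRED | MPOL_F_STATIC_NODES, NULL, 0);
 *		- empty nodemask plus a nodes flag: rejected with -EINVAL
 *	set_mempolicy(MPOL_BIND, NULL, 0);
 *		- MPOL_BIND/MPOL_INTERLEAVE need a non-empty nodemask: -EINVAL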
2873e1f0645SDavid Rientjes */ 2883e1f0645SDavid Rientjes if (mode == MPOL_PREFERRED) { 2893e1f0645SDavid Rientjes if (nodes_empty(*nodes)) { 2903e1f0645SDavid Rientjes if (((flags & MPOL_F_STATIC_NODES) || 2913e1f0645SDavid Rientjes (flags & MPOL_F_RELATIVE_NODES))) 2923e1f0645SDavid Rientjes return ERR_PTR(-EINVAL); 2933e1f0645SDavid Rientjes } 294479e2802SPeter Zijlstra } else if (mode == MPOL_LOCAL) { 2958d303e44SPiotr Kwapulinski if (!nodes_empty(*nodes) || 2968d303e44SPiotr Kwapulinski (flags & MPOL_F_STATIC_NODES) || 2978d303e44SPiotr Kwapulinski (flags & MPOL_F_RELATIVE_NODES)) 298479e2802SPeter Zijlstra return ERR_PTR(-EINVAL); 299479e2802SPeter Zijlstra mode = MPOL_PREFERRED; 3003e1f0645SDavid Rientjes } else if (nodes_empty(*nodes)) 3013e1f0645SDavid Rientjes return ERR_PTR(-EINVAL); 3021da177e4SLinus Torvalds policy = kmem_cache_alloc(policy_cache, GFP_KERNEL); 3031da177e4SLinus Torvalds if (!policy) 3041da177e4SLinus Torvalds return ERR_PTR(-ENOMEM); 3051da177e4SLinus Torvalds atomic_set(&policy->refcnt, 1); 30645c4745aSLee Schermerhorn policy->mode = mode; 30737012946SDavid Rientjes policy->flags = flags; 3083e1f0645SDavid Rientjes 30937012946SDavid Rientjes return policy; 31037012946SDavid Rientjes } 31137012946SDavid Rientjes 31252cd3b07SLee Schermerhorn /* Slow path of a mpol destructor. */ 31352cd3b07SLee Schermerhorn void __mpol_put(struct mempolicy *p) 31452cd3b07SLee Schermerhorn { 31552cd3b07SLee Schermerhorn if (!atomic_dec_and_test(&p->refcnt)) 31652cd3b07SLee Schermerhorn return; 31752cd3b07SLee Schermerhorn kmem_cache_free(policy_cache, p); 31852cd3b07SLee Schermerhorn } 31952cd3b07SLee Schermerhorn 320213980c0SVlastimil Babka static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes) 32137012946SDavid Rientjes { 32237012946SDavid Rientjes } 32337012946SDavid Rientjes 324213980c0SVlastimil Babka static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes) 3251d0d2680SDavid Rientjes { 3261d0d2680SDavid Rientjes nodemask_t tmp; 3271d0d2680SDavid Rientjes 32837012946SDavid Rientjes if (pol->flags & MPOL_F_STATIC_NODES) 32937012946SDavid Rientjes nodes_and(tmp, pol->w.user_nodemask, *nodes); 33037012946SDavid Rientjes else if (pol->flags & MPOL_F_RELATIVE_NODES) 33137012946SDavid Rientjes mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes); 3321d0d2680SDavid Rientjes else { 333213980c0SVlastimil Babka nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed, 334213980c0SVlastimil Babka *nodes); 33529b190faSzhong jiang pol->w.cpuset_mems_allowed = *nodes; 3361d0d2680SDavid Rientjes } 33737012946SDavid Rientjes 338708c1bbcSMiao Xie if (nodes_empty(tmp)) 339708c1bbcSMiao Xie tmp = *nodes; 340708c1bbcSMiao Xie 3411d0d2680SDavid Rientjes pol->v.nodes = tmp; 34237012946SDavid Rientjes } 34337012946SDavid Rientjes 34437012946SDavid Rientjes static void mpol_rebind_preferred(struct mempolicy *pol, 345213980c0SVlastimil Babka const nodemask_t *nodes) 34637012946SDavid Rientjes { 34737012946SDavid Rientjes nodemask_t tmp; 34837012946SDavid Rientjes 34937012946SDavid Rientjes if (pol->flags & MPOL_F_STATIC_NODES) { 3501d0d2680SDavid Rientjes int node = first_node(pol->w.user_nodemask); 3511d0d2680SDavid Rientjes 352fc36b8d3SLee Schermerhorn if (node_isset(node, *nodes)) { 3531d0d2680SDavid Rientjes pol->v.preferred_node = node; 354fc36b8d3SLee Schermerhorn pol->flags &= ~MPOL_F_LOCAL; 355fc36b8d3SLee Schermerhorn } else 356fc36b8d3SLee Schermerhorn pol->flags |= MPOL_F_LOCAL; 35737012946SDavid Rientjes } else if (pol->flags & 
MPOL_F_RELATIVE_NODES) { 35837012946SDavid Rientjes mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes); 3591d0d2680SDavid Rientjes pol->v.preferred_node = first_node(tmp); 360fc36b8d3SLee Schermerhorn } else if (!(pol->flags & MPOL_F_LOCAL)) { 3611d0d2680SDavid Rientjes pol->v.preferred_node = node_remap(pol->v.preferred_node, 36237012946SDavid Rientjes pol->w.cpuset_mems_allowed, 36337012946SDavid Rientjes *nodes); 36437012946SDavid Rientjes pol->w.cpuset_mems_allowed = *nodes; 3651d0d2680SDavid Rientjes } 3661d0d2680SDavid Rientjes } 36737012946SDavid Rientjes 368708c1bbcSMiao Xie /* 369708c1bbcSMiao Xie * mpol_rebind_policy - Migrate a policy to a different set of nodes 370708c1bbcSMiao Xie * 371c1e8d7c6SMichel Lespinasse * Per-vma policies are protected by mmap_lock. Allocations using per-task 372213980c0SVlastimil Babka * policies are protected by task->mems_allowed_seq to prevent a premature 373213980c0SVlastimil Babka * OOM/allocation failure due to parallel nodemask modification. 374708c1bbcSMiao Xie */ 375213980c0SVlastimil Babka static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask) 37637012946SDavid Rientjes { 37737012946SDavid Rientjes if (!pol) 37837012946SDavid Rientjes return; 3792e25644eSVlastimil Babka if (!mpol_store_user_nodemask(pol) && !(pol->flags & MPOL_F_LOCAL) && 38037012946SDavid Rientjes nodes_equal(pol->w.cpuset_mems_allowed, *newmask)) 38137012946SDavid Rientjes return; 382708c1bbcSMiao Xie 383213980c0SVlastimil Babka mpol_ops[pol->mode].rebind(pol, newmask); 3841d0d2680SDavid Rientjes } 3851d0d2680SDavid Rientjes 3861d0d2680SDavid Rientjes /* 3871d0d2680SDavid Rientjes * Wrapper for mpol_rebind_policy() that just requires task 3881d0d2680SDavid Rientjes * pointer, and updates task mempolicy. 38958568d2aSMiao Xie * 39058568d2aSMiao Xie * Called with task's alloc_lock held. 3911d0d2680SDavid Rientjes */ 3921d0d2680SDavid Rientjes 393213980c0SVlastimil Babka void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new) 3941d0d2680SDavid Rientjes { 395213980c0SVlastimil Babka mpol_rebind_policy(tsk->mempolicy, new); 3961d0d2680SDavid Rientjes } 3971d0d2680SDavid Rientjes 3981d0d2680SDavid Rientjes /* 3991d0d2680SDavid Rientjes * Rebind each vma in mm to new nodemask. 4001d0d2680SDavid Rientjes * 401c1e8d7c6SMichel Lespinasse * Call holding a reference to mm. Takes mm->mmap_lock during call. 
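 *
 * A minimal caller sketch (assumed shape, mirroring the locking rules
 * stated here and in the mpol_rebind_task() comment above; this is how
 * the cpuset code drives a rebind when a task's mems_allowed changes):
 *
 *	task_lock(tsk);
 *	mpol_rebind_task(tsk, &newmems);
 *	task_unlock(tsk);
 *
 *	mmget(mm);
 *	mpol_rebind_mm(mm, &newmems);
 *	mmput(mm);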
4021d0d2680SDavid Rientjes */ 4031d0d2680SDavid Rientjes 4041d0d2680SDavid Rientjes void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new) 4051d0d2680SDavid Rientjes { 4061d0d2680SDavid Rientjes struct vm_area_struct *vma; 4071d0d2680SDavid Rientjes 408d8ed45c5SMichel Lespinasse mmap_write_lock(mm); 4091d0d2680SDavid Rientjes for (vma = mm->mmap; vma; vma = vma->vm_next) 410213980c0SVlastimil Babka mpol_rebind_policy(vma->vm_policy, new); 411d8ed45c5SMichel Lespinasse mmap_write_unlock(mm); 4121d0d2680SDavid Rientjes } 4131d0d2680SDavid Rientjes 41437012946SDavid Rientjes static const struct mempolicy_operations mpol_ops[MPOL_MAX] = { 41537012946SDavid Rientjes [MPOL_DEFAULT] = { 41637012946SDavid Rientjes .rebind = mpol_rebind_default, 41737012946SDavid Rientjes }, 41837012946SDavid Rientjes [MPOL_INTERLEAVE] = { 41937012946SDavid Rientjes .create = mpol_new_interleave, 42037012946SDavid Rientjes .rebind = mpol_rebind_nodemask, 42137012946SDavid Rientjes }, 42237012946SDavid Rientjes [MPOL_PREFERRED] = { 42337012946SDavid Rientjes .create = mpol_new_preferred, 42437012946SDavid Rientjes .rebind = mpol_rebind_preferred, 42537012946SDavid Rientjes }, 42637012946SDavid Rientjes [MPOL_BIND] = { 42737012946SDavid Rientjes .create = mpol_new_bind, 42837012946SDavid Rientjes .rebind = mpol_rebind_nodemask, 42937012946SDavid Rientjes }, 43037012946SDavid Rientjes }; 43137012946SDavid Rientjes 432a53190a4SYang Shi static int migrate_page_add(struct page *page, struct list_head *pagelist, 433fc301289SChristoph Lameter unsigned long flags); 4341a75a6c8SChristoph Lameter 4356f4576e3SNaoya Horiguchi struct queue_pages { 4366f4576e3SNaoya Horiguchi struct list_head *pagelist; 4376f4576e3SNaoya Horiguchi unsigned long flags; 4386f4576e3SNaoya Horiguchi nodemask_t *nmask; 439f18da660SLi Xinhai unsigned long start; 440f18da660SLi Xinhai unsigned long end; 441f18da660SLi Xinhai struct vm_area_struct *first; 4426f4576e3SNaoya Horiguchi }; 4436f4576e3SNaoya Horiguchi 44498094945SNaoya Horiguchi /* 44588aaa2a1SNaoya Horiguchi * Check if the page's nid is in qp->nmask. 44688aaa2a1SNaoya Horiguchi * 44788aaa2a1SNaoya Horiguchi * If MPOL_MF_INVERT is set in qp->flags, check if the nid is 44888aaa2a1SNaoya Horiguchi * in the invert of qp->nmask. 44988aaa2a1SNaoya Horiguchi */ 45088aaa2a1SNaoya Horiguchi static inline bool queue_pages_required(struct page *page, 45188aaa2a1SNaoya Horiguchi struct queue_pages *qp) 45288aaa2a1SNaoya Horiguchi { 45388aaa2a1SNaoya Horiguchi int nid = page_to_nid(page); 45488aaa2a1SNaoya Horiguchi unsigned long flags = qp->flags; 45588aaa2a1SNaoya Horiguchi 45688aaa2a1SNaoya Horiguchi return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT); 45788aaa2a1SNaoya Horiguchi } 45888aaa2a1SNaoya Horiguchi 459a7f40cfeSYang Shi /* 460d8835445SYang Shi * queue_pages_pmd() has four possible return values: 461d8835445SYang Shi * 0 - pages are placed on the right node or queued successfully. 462d8835445SYang Shi * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were 463d8835445SYang Shi * specified. 464d8835445SYang Shi * 2 - THP was split. 465d8835445SYang Shi * -EIO - is migration entry or only MPOL_MF_STRICT was specified and an 466d8835445SYang Shi * existing page was already on a node that does not follow the 467d8835445SYang Shi * policy. 
468a7f40cfeSYang Shi */ 469c8633798SNaoya Horiguchi static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr, 470c8633798SNaoya Horiguchi unsigned long end, struct mm_walk *walk) 471959a7e13SJules Irenge __releases(ptl) 472c8633798SNaoya Horiguchi { 473c8633798SNaoya Horiguchi int ret = 0; 474c8633798SNaoya Horiguchi struct page *page; 475c8633798SNaoya Horiguchi struct queue_pages *qp = walk->private; 476c8633798SNaoya Horiguchi unsigned long flags; 477c8633798SNaoya Horiguchi 478c8633798SNaoya Horiguchi if (unlikely(is_pmd_migration_entry(*pmd))) { 479a7f40cfeSYang Shi ret = -EIO; 480c8633798SNaoya Horiguchi goto unlock; 481c8633798SNaoya Horiguchi } 482c8633798SNaoya Horiguchi page = pmd_page(*pmd); 483c8633798SNaoya Horiguchi if (is_huge_zero_page(page)) { 484c8633798SNaoya Horiguchi spin_unlock(ptl); 485c8633798SNaoya Horiguchi __split_huge_pmd(walk->vma, pmd, addr, false, NULL); 486d8835445SYang Shi ret = 2; 487c8633798SNaoya Horiguchi goto out; 488c8633798SNaoya Horiguchi } 489d8835445SYang Shi if (!queue_pages_required(page, qp)) 490c8633798SNaoya Horiguchi goto unlock; 491c8633798SNaoya Horiguchi 492c8633798SNaoya Horiguchi flags = qp->flags; 493c8633798SNaoya Horiguchi /* go to thp migration */ 494a7f40cfeSYang Shi if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 495a53190a4SYang Shi if (!vma_migratable(walk->vma) || 496a53190a4SYang Shi migrate_page_add(page, qp->pagelist, flags)) { 497d8835445SYang Shi ret = 1; 498a7f40cfeSYang Shi goto unlock; 499a7f40cfeSYang Shi } 500a7f40cfeSYang Shi } else 501a7f40cfeSYang Shi ret = -EIO; 502c8633798SNaoya Horiguchi unlock: 503c8633798SNaoya Horiguchi spin_unlock(ptl); 504c8633798SNaoya Horiguchi out: 505c8633798SNaoya Horiguchi return ret; 506c8633798SNaoya Horiguchi } 507c8633798SNaoya Horiguchi 50888aaa2a1SNaoya Horiguchi /* 50998094945SNaoya Horiguchi * Scan through pages checking if pages follow certain conditions, 51098094945SNaoya Horiguchi * and move them to the pagelist if they do. 511d8835445SYang Shi * 512d8835445SYang Shi * queue_pages_pte_range() has three possible return values: 513d8835445SYang Shi * 0 - pages are placed on the right node or queued successfully. 514d8835445SYang Shi * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were 515d8835445SYang Shi * specified. 516d8835445SYang Shi * -EIO - only MPOL_MF_STRICT was specified and an existing page was already 517d8835445SYang Shi * on a node that does not follow the policy. 51898094945SNaoya Horiguchi */ 5196f4576e3SNaoya Horiguchi static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr, 5206f4576e3SNaoya Horiguchi unsigned long end, struct mm_walk *walk) 5211da177e4SLinus Torvalds { 5226f4576e3SNaoya Horiguchi struct vm_area_struct *vma = walk->vma; 5236f4576e3SNaoya Horiguchi struct page *page; 5246f4576e3SNaoya Horiguchi struct queue_pages *qp = walk->private; 5256f4576e3SNaoya Horiguchi unsigned long flags = qp->flags; 526c8633798SNaoya Horiguchi int ret; 527d8835445SYang Shi bool has_unmovable = false; 5283f088420SShijie Luo pte_t *pte, *mapped_pte; 529705e87c0SHugh Dickins spinlock_t *ptl; 530941150a3SHugh Dickins 531c8633798SNaoya Horiguchi ptl = pmd_trans_huge_lock(pmd, vma); 532c8633798SNaoya Horiguchi if (ptl) { 533c8633798SNaoya Horiguchi ret = queue_pages_pmd(pmd, ptl, addr, end, walk); 534d8835445SYang Shi if (ret != 2) 535a7f40cfeSYang Shi return ret; 536248db92dSKirill A. 
Shutemov } 537d8835445SYang Shi /* THP was split, fall through to pte walk */ 53891612e0dSHugh Dickins 539337d9abfSNaoya Horiguchi if (pmd_trans_unstable(pmd)) 540337d9abfSNaoya Horiguchi return 0; 54194723aafSMichal Hocko 5423f088420SShijie Luo mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); 5436f4576e3SNaoya Horiguchi for (; addr != end; pte++, addr += PAGE_SIZE) { 54491612e0dSHugh Dickins if (!pte_present(*pte)) 54591612e0dSHugh Dickins continue; 5466aab341eSLinus Torvalds page = vm_normal_page(vma, addr, *pte); 5476aab341eSLinus Torvalds if (!page) 54891612e0dSHugh Dickins continue; 549053837fcSNick Piggin /* 55062b61f61SHugh Dickins * vm_normal_page() filters out zero pages, but there might 55162b61f61SHugh Dickins * still be PageReserved pages to skip, perhaps in a VDSO. 552053837fcSNick Piggin */ 553b79bc0a0SHugh Dickins if (PageReserved(page)) 554f4598c8bSChristoph Lameter continue; 55588aaa2a1SNaoya Horiguchi if (!queue_pages_required(page, qp)) 55638e35860SChristoph Lameter continue; 557a7f40cfeSYang Shi if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 558d8835445SYang Shi /* MPOL_MF_STRICT must be specified if we get here */ 559d8835445SYang Shi if (!vma_migratable(vma)) { 560d8835445SYang Shi has_unmovable = true; 561a7f40cfeSYang Shi break; 562d8835445SYang Shi } 563a53190a4SYang Shi 564a53190a4SYang Shi /* 565a53190a4SYang Shi * Do not abort immediately since there may be 566a53190a4SYang Shi * temporary off LRU pages in the range. Still 567a53190a4SYang Shi * need migrate other LRU pages. 568a53190a4SYang Shi */ 569a53190a4SYang Shi if (migrate_page_add(page, qp->pagelist, flags)) 570a53190a4SYang Shi has_unmovable = true; 571a7f40cfeSYang Shi } else 572a7f40cfeSYang Shi break; 5736f4576e3SNaoya Horiguchi } 5743f088420SShijie Luo pte_unmap_unlock(mapped_pte, ptl); 5756f4576e3SNaoya Horiguchi cond_resched(); 576d8835445SYang Shi 577d8835445SYang Shi if (has_unmovable) 578d8835445SYang Shi return 1; 579d8835445SYang Shi 580a7f40cfeSYang Shi return addr != end ? -EIO : 0; 58191612e0dSHugh Dickins } 58291612e0dSHugh Dickins 5836f4576e3SNaoya Horiguchi static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask, 5846f4576e3SNaoya Horiguchi unsigned long addr, unsigned long end, 5856f4576e3SNaoya Horiguchi struct mm_walk *walk) 586e2d8cf40SNaoya Horiguchi { 587dcf17635SLi Xinhai int ret = 0; 588e2d8cf40SNaoya Horiguchi #ifdef CONFIG_HUGETLB_PAGE 5896f4576e3SNaoya Horiguchi struct queue_pages *qp = walk->private; 590dcf17635SLi Xinhai unsigned long flags = (qp->flags & MPOL_MF_VALID); 591e2d8cf40SNaoya Horiguchi struct page *page; 592cb900f41SKirill A. Shutemov spinlock_t *ptl; 593d4c54919SNaoya Horiguchi pte_t entry; 594e2d8cf40SNaoya Horiguchi 5956f4576e3SNaoya Horiguchi ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte); 5966f4576e3SNaoya Horiguchi entry = huge_ptep_get(pte); 597d4c54919SNaoya Horiguchi if (!pte_present(entry)) 598d4c54919SNaoya Horiguchi goto unlock; 599d4c54919SNaoya Horiguchi page = pte_page(entry); 60088aaa2a1SNaoya Horiguchi if (!queue_pages_required(page, qp)) 601e2d8cf40SNaoya Horiguchi goto unlock; 602dcf17635SLi Xinhai 603dcf17635SLi Xinhai if (flags == MPOL_MF_STRICT) { 604dcf17635SLi Xinhai /* 605dcf17635SLi Xinhai * STRICT alone means only detecting misplaced page and no 606dcf17635SLi Xinhai * need to further check other vma. 
607dcf17635SLi Xinhai */ 608dcf17635SLi Xinhai ret = -EIO; 609dcf17635SLi Xinhai goto unlock; 610dcf17635SLi Xinhai } 611dcf17635SLi Xinhai 612dcf17635SLi Xinhai if (!vma_migratable(walk->vma)) { 613dcf17635SLi Xinhai /* 614dcf17635SLi Xinhai * Must be STRICT with MOVE*, otherwise .test_walk() have 615dcf17635SLi Xinhai * stopped walking current vma. 616dcf17635SLi Xinhai * Detecting misplaced page but allow migrating pages which 617dcf17635SLi Xinhai * have been queued. 618dcf17635SLi Xinhai */ 619dcf17635SLi Xinhai ret = 1; 620dcf17635SLi Xinhai goto unlock; 621dcf17635SLi Xinhai } 622dcf17635SLi Xinhai 623e2d8cf40SNaoya Horiguchi /* With MPOL_MF_MOVE, we migrate only unshared hugepage. */ 624e2d8cf40SNaoya Horiguchi if (flags & (MPOL_MF_MOVE_ALL) || 625dcf17635SLi Xinhai (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) { 626dcf17635SLi Xinhai if (!isolate_huge_page(page, qp->pagelist) && 627dcf17635SLi Xinhai (flags & MPOL_MF_STRICT)) 628dcf17635SLi Xinhai /* 629dcf17635SLi Xinhai * Failed to isolate page but allow migrating pages 630dcf17635SLi Xinhai * which have been queued. 631dcf17635SLi Xinhai */ 632dcf17635SLi Xinhai ret = 1; 633dcf17635SLi Xinhai } 634e2d8cf40SNaoya Horiguchi unlock: 635cb900f41SKirill A. Shutemov spin_unlock(ptl); 636e2d8cf40SNaoya Horiguchi #else 637e2d8cf40SNaoya Horiguchi BUG(); 638e2d8cf40SNaoya Horiguchi #endif 639dcf17635SLi Xinhai return ret; 6401da177e4SLinus Torvalds } 6411da177e4SLinus Torvalds 6425877231fSAneesh Kumar K.V #ifdef CONFIG_NUMA_BALANCING 643b24f53a0SLee Schermerhorn /* 6444b10e7d5SMel Gorman * This is used to mark a range of virtual addresses to be inaccessible. 6454b10e7d5SMel Gorman * These are later cleared by a NUMA hinting fault. Depending on these 6464b10e7d5SMel Gorman * faults, pages may be migrated for better NUMA placement. 6474b10e7d5SMel Gorman * 6484b10e7d5SMel Gorman * This is assuming that NUMA faults are handled using PROT_NONE. If 6494b10e7d5SMel Gorman * an architecture makes a different choice, it will need further 6504b10e7d5SMel Gorman * changes to the core. 
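 *
 * Both the MPOL_MF_LAZY handling in queue_pages_test_walk() below and the
 * automatic NUMA balancing scanner (task_numa_work()) use this helper in
 * the same way, roughly:
 *
 *	nr_updated = change_prot_numa(vma, start, end);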
651b24f53a0SLee Schermerhorn */ 6524b10e7d5SMel Gorman unsigned long change_prot_numa(struct vm_area_struct *vma, 6534b10e7d5SMel Gorman unsigned long addr, unsigned long end) 654b24f53a0SLee Schermerhorn { 6554b10e7d5SMel Gorman int nr_updated; 656b24f53a0SLee Schermerhorn 65758705444SPeter Xu nr_updated = change_protection(vma, addr, end, PAGE_NONE, MM_CP_PROT_NUMA); 65803c5a6e1SMel Gorman if (nr_updated) 65903c5a6e1SMel Gorman count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated); 660b24f53a0SLee Schermerhorn 6614b10e7d5SMel Gorman return nr_updated; 662b24f53a0SLee Schermerhorn } 663b24f53a0SLee Schermerhorn #else 664b24f53a0SLee Schermerhorn static unsigned long change_prot_numa(struct vm_area_struct *vma, 665b24f53a0SLee Schermerhorn unsigned long addr, unsigned long end) 666b24f53a0SLee Schermerhorn { 667b24f53a0SLee Schermerhorn return 0; 668b24f53a0SLee Schermerhorn } 6695877231fSAneesh Kumar K.V #endif /* CONFIG_NUMA_BALANCING */ 670b24f53a0SLee Schermerhorn 6716f4576e3SNaoya Horiguchi static int queue_pages_test_walk(unsigned long start, unsigned long end, 6726f4576e3SNaoya Horiguchi struct mm_walk *walk) 6731da177e4SLinus Torvalds { 6746f4576e3SNaoya Horiguchi struct vm_area_struct *vma = walk->vma; 6756f4576e3SNaoya Horiguchi struct queue_pages *qp = walk->private; 6765b952b3cSAndi Kleen unsigned long endvma = vma->vm_end; 6776f4576e3SNaoya Horiguchi unsigned long flags = qp->flags; 678dc9aa5b9SChristoph Lameter 679a18b3ac2SLi Xinhai /* range check first */ 680ce33135cSMiaohe Lin VM_BUG_ON_VMA(!range_in_vma(vma, start, end), vma); 681f18da660SLi Xinhai 682f18da660SLi Xinhai if (!qp->first) { 683f18da660SLi Xinhai qp->first = vma; 684f18da660SLi Xinhai if (!(flags & MPOL_MF_DISCONTIG_OK) && 685f18da660SLi Xinhai (qp->start < vma->vm_start)) 686f18da660SLi Xinhai /* hole at head side of range */ 687a18b3ac2SLi Xinhai return -EFAULT; 688a18b3ac2SLi Xinhai } 689f18da660SLi Xinhai if (!(flags & MPOL_MF_DISCONTIG_OK) && 690f18da660SLi Xinhai ((vma->vm_end < qp->end) && 691f18da660SLi Xinhai (!vma->vm_next || vma->vm_end < vma->vm_next->vm_start))) 692f18da660SLi Xinhai /* hole at middle or tail of range */ 693f18da660SLi Xinhai return -EFAULT; 694a18b3ac2SLi Xinhai 695a7f40cfeSYang Shi /* 696a7f40cfeSYang Shi * Need check MPOL_MF_STRICT to return -EIO if possible 697a7f40cfeSYang Shi * regardless of vma_migratable 698a7f40cfeSYang Shi */ 699a7f40cfeSYang Shi if (!vma_migratable(vma) && 700a7f40cfeSYang Shi !(flags & MPOL_MF_STRICT)) 70148684a65SNaoya Horiguchi return 1; 70248684a65SNaoya Horiguchi 7035b952b3cSAndi Kleen if (endvma > end) 7045b952b3cSAndi Kleen endvma = end; 705b24f53a0SLee Schermerhorn 706b24f53a0SLee Schermerhorn if (flags & MPOL_MF_LAZY) { 7072c0346a3SMel Gorman /* Similar to task_numa_work, skip inaccessible VMAs */ 7083122e80eSAnshuman Khandual if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) && 7094355c018SLiang Chen !(vma->vm_flags & VM_MIXEDMAP)) 710b24f53a0SLee Schermerhorn change_prot_numa(vma, start, endvma); 7116f4576e3SNaoya Horiguchi return 1; 712b24f53a0SLee Schermerhorn } 713b24f53a0SLee Schermerhorn 7146f4576e3SNaoya Horiguchi /* queue pages from current vma */ 715a7f40cfeSYang Shi if (flags & MPOL_MF_VALID) 7166f4576e3SNaoya Horiguchi return 0; 7176f4576e3SNaoya Horiguchi return 1; 7186f4576e3SNaoya Horiguchi } 719b24f53a0SLee Schermerhorn 7207b86ac33SChristoph Hellwig static const struct mm_walk_ops queue_pages_walk_ops = { 7217b86ac33SChristoph Hellwig .hugetlb_entry = queue_pages_hugetlb, 7227b86ac33SChristoph Hellwig .pmd_entry = 
queue_pages_pte_range, 7237b86ac33SChristoph Hellwig .test_walk = queue_pages_test_walk, 7247b86ac33SChristoph Hellwig }; 7257b86ac33SChristoph Hellwig 7266f4576e3SNaoya Horiguchi /* 7276f4576e3SNaoya Horiguchi * Walk through page tables and collect pages to be migrated. 7286f4576e3SNaoya Horiguchi * 7296f4576e3SNaoya Horiguchi * If pages found in a given range are on a set of nodes (determined by 7306f4576e3SNaoya Horiguchi * @nodes and @flags), they are isolated and queued to the pagelist, which is 731d8835445SYang Shi * passed via @private. 732d8835445SYang Shi * 733d8835445SYang Shi * queue_pages_range() has three possible return values: 734d8835445SYang Shi * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were 735d8835445SYang Shi * specified. 736d8835445SYang Shi * 0 - queue pages successfully or no misplaced page. 737a85dfc30SYang Shi * errno - i.e. misplaced pages with MPOL_MF_STRICT specified (-EIO) or 738a85dfc30SYang Shi * memory range specified by nodemask and maxnode points outside 739a85dfc30SYang Shi * your accessible address space (-EFAULT) 7406f4576e3SNaoya Horiguchi */ 7416f4576e3SNaoya Horiguchi static int 7426f4576e3SNaoya Horiguchi queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end, 7436f4576e3SNaoya Horiguchi nodemask_t *nodes, unsigned long flags, 7446f4576e3SNaoya Horiguchi struct list_head *pagelist) 7456f4576e3SNaoya Horiguchi { 746f18da660SLi Xinhai int err; 7476f4576e3SNaoya Horiguchi struct queue_pages qp = { 7486f4576e3SNaoya Horiguchi .pagelist = pagelist, 7496f4576e3SNaoya Horiguchi .flags = flags, 7506f4576e3SNaoya Horiguchi .nmask = nodes, 751f18da660SLi Xinhai .start = start, 752f18da660SLi Xinhai .end = end, 753f18da660SLi Xinhai .first = NULL, 7546f4576e3SNaoya Horiguchi }; 7556f4576e3SNaoya Horiguchi 756f18da660SLi Xinhai err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp); 757f18da660SLi Xinhai 758f18da660SLi Xinhai if (!qp.first) 759f18da660SLi Xinhai /* whole range in hole */ 760f18da660SLi Xinhai err = -EFAULT; 761f18da660SLi Xinhai 762f18da660SLi Xinhai return err; 7631da177e4SLinus Torvalds } 7641da177e4SLinus Torvalds 765869833f2SKOSAKI Motohiro /* 766869833f2SKOSAKI Motohiro * Apply policy to a single VMA 767c1e8d7c6SMichel Lespinasse * This must be called with the mmap_lock held for writing. 768869833f2SKOSAKI Motohiro */ 769869833f2SKOSAKI Motohiro static int vma_replace_policy(struct vm_area_struct *vma, 770869833f2SKOSAKI Motohiro struct mempolicy *pol) 7718d34694cSKOSAKI Motohiro { 772869833f2SKOSAKI Motohiro int err; 773869833f2SKOSAKI Motohiro struct mempolicy *old; 774869833f2SKOSAKI Motohiro struct mempolicy *new; 7758d34694cSKOSAKI Motohiro 7768d34694cSKOSAKI Motohiro pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n", 7778d34694cSKOSAKI Motohiro vma->vm_start, vma->vm_end, vma->vm_pgoff, 7788d34694cSKOSAKI Motohiro vma->vm_ops, vma->vm_file, 7798d34694cSKOSAKI Motohiro vma->vm_ops ?
vma->vm_ops->set_policy : NULL); 7808d34694cSKOSAKI Motohiro 781869833f2SKOSAKI Motohiro new = mpol_dup(pol); 782869833f2SKOSAKI Motohiro if (IS_ERR(new)) 783869833f2SKOSAKI Motohiro return PTR_ERR(new); 784869833f2SKOSAKI Motohiro 785869833f2SKOSAKI Motohiro if (vma->vm_ops && vma->vm_ops->set_policy) { 7868d34694cSKOSAKI Motohiro err = vma->vm_ops->set_policy(vma, new); 787869833f2SKOSAKI Motohiro if (err) 788869833f2SKOSAKI Motohiro goto err_out; 7898d34694cSKOSAKI Motohiro } 790869833f2SKOSAKI Motohiro 791869833f2SKOSAKI Motohiro old = vma->vm_policy; 792c1e8d7c6SMichel Lespinasse vma->vm_policy = new; /* protected by mmap_lock */ 793869833f2SKOSAKI Motohiro mpol_put(old); 794869833f2SKOSAKI Motohiro 795869833f2SKOSAKI Motohiro return 0; 796869833f2SKOSAKI Motohiro err_out: 797869833f2SKOSAKI Motohiro mpol_put(new); 7988d34694cSKOSAKI Motohiro return err; 7998d34694cSKOSAKI Motohiro } 8008d34694cSKOSAKI Motohiro 8011da177e4SLinus Torvalds /* Step 2: apply policy to a range and do splits. */ 8029d8cebd4SKOSAKI Motohiro static int mbind_range(struct mm_struct *mm, unsigned long start, 8039d8cebd4SKOSAKI Motohiro unsigned long end, struct mempolicy *new_pol) 8041da177e4SLinus Torvalds { 8051da177e4SLinus Torvalds struct vm_area_struct *next; 8069d8cebd4SKOSAKI Motohiro struct vm_area_struct *prev; 8079d8cebd4SKOSAKI Motohiro struct vm_area_struct *vma; 8089d8cebd4SKOSAKI Motohiro int err = 0; 809e26a5114SKOSAKI Motohiro pgoff_t pgoff; 8109d8cebd4SKOSAKI Motohiro unsigned long vmstart; 8119d8cebd4SKOSAKI Motohiro unsigned long vmend; 8121da177e4SLinus Torvalds 813097d5910SLinus Torvalds vma = find_vma(mm, start); 814f18da660SLi Xinhai VM_BUG_ON(!vma); 8159d8cebd4SKOSAKI Motohiro 816097d5910SLinus Torvalds prev = vma->vm_prev; 817e26a5114SKOSAKI Motohiro if (start > vma->vm_start) 818e26a5114SKOSAKI Motohiro prev = vma; 819e26a5114SKOSAKI Motohiro 8209d8cebd4SKOSAKI Motohiro for (; vma && vma->vm_start < end; prev = vma, vma = next) { 8211da177e4SLinus Torvalds next = vma->vm_next; 8229d8cebd4SKOSAKI Motohiro vmstart = max(start, vma->vm_start); 8239d8cebd4SKOSAKI Motohiro vmend = min(end, vma->vm_end); 8249d8cebd4SKOSAKI Motohiro 825e26a5114SKOSAKI Motohiro if (mpol_equal(vma_policy(vma), new_pol)) 826e26a5114SKOSAKI Motohiro continue; 827e26a5114SKOSAKI Motohiro 828e26a5114SKOSAKI Motohiro pgoff = vma->vm_pgoff + 829e26a5114SKOSAKI Motohiro ((vmstart - vma->vm_start) >> PAGE_SHIFT); 8309d8cebd4SKOSAKI Motohiro prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags, 831e26a5114SKOSAKI Motohiro vma->anon_vma, vma->vm_file, pgoff, 83219a809afSAndrea Arcangeli new_pol, vma->vm_userfaultfd_ctx); 8339d8cebd4SKOSAKI Motohiro if (prev) { 8349d8cebd4SKOSAKI Motohiro vma = prev; 8359d8cebd4SKOSAKI Motohiro next = vma->vm_next; 8363964acd0SOleg Nesterov if (mpol_equal(vma_policy(vma), new_pol)) 8379d8cebd4SKOSAKI Motohiro continue; 8383964acd0SOleg Nesterov /* vma_merge() joined vma && vma->next, case 8 */ 8393964acd0SOleg Nesterov goto replace; 8401da177e4SLinus Torvalds } 8419d8cebd4SKOSAKI Motohiro if (vma->vm_start != vmstart) { 8429d8cebd4SKOSAKI Motohiro err = split_vma(vma->vm_mm, vma, vmstart, 1); 8439d8cebd4SKOSAKI Motohiro if (err) 8449d8cebd4SKOSAKI Motohiro goto out; 8459d8cebd4SKOSAKI Motohiro } 8469d8cebd4SKOSAKI Motohiro if (vma->vm_end != vmend) { 8479d8cebd4SKOSAKI Motohiro err = split_vma(vma->vm_mm, vma, vmend, 0); 8489d8cebd4SKOSAKI Motohiro if (err) 8499d8cebd4SKOSAKI Motohiro goto out; 8509d8cebd4SKOSAKI Motohiro } 8513964acd0SOleg Nesterov replace: 852869833f2SKOSAKI 
Motohiro err = vma_replace_policy(vma, new_pol); 8539d8cebd4SKOSAKI Motohiro if (err) 8549d8cebd4SKOSAKI Motohiro goto out; 8559d8cebd4SKOSAKI Motohiro } 8569d8cebd4SKOSAKI Motohiro 8579d8cebd4SKOSAKI Motohiro out: 8581da177e4SLinus Torvalds return err; 8591da177e4SLinus Torvalds } 8601da177e4SLinus Torvalds 8611da177e4SLinus Torvalds /* Set the process memory policy */ 862028fec41SDavid Rientjes static long do_set_mempolicy(unsigned short mode, unsigned short flags, 863028fec41SDavid Rientjes nodemask_t *nodes) 8641da177e4SLinus Torvalds { 86558568d2aSMiao Xie struct mempolicy *new, *old; 8664bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 86758568d2aSMiao Xie int ret; 8681da177e4SLinus Torvalds 8694bfc4495SKAMEZAWA Hiroyuki if (!scratch) 8704bfc4495SKAMEZAWA Hiroyuki return -ENOMEM; 871f4e53d91SLee Schermerhorn 8724bfc4495SKAMEZAWA Hiroyuki new = mpol_new(mode, flags, nodes); 8734bfc4495SKAMEZAWA Hiroyuki if (IS_ERR(new)) { 8744bfc4495SKAMEZAWA Hiroyuki ret = PTR_ERR(new); 8754bfc4495SKAMEZAWA Hiroyuki goto out; 8764bfc4495SKAMEZAWA Hiroyuki } 8772c7c3a7dSOleg Nesterov 878bda420b9SHuang Ying if (flags & MPOL_F_NUMA_BALANCING) { 879bda420b9SHuang Ying if (new && new->mode == MPOL_BIND) { 880bda420b9SHuang Ying new->flags |= (MPOL_F_MOF | MPOL_F_MORON); 881bda420b9SHuang Ying } else { 882bda420b9SHuang Ying ret = -EINVAL; 883bda420b9SHuang Ying mpol_put(new); 884bda420b9SHuang Ying goto out; 885bda420b9SHuang Ying } 886bda420b9SHuang Ying } 887bda420b9SHuang Ying 8884bfc4495SKAMEZAWA Hiroyuki ret = mpol_set_nodemask(new, nodes, scratch); 88958568d2aSMiao Xie if (ret) { 89058568d2aSMiao Xie mpol_put(new); 8914bfc4495SKAMEZAWA Hiroyuki goto out; 89258568d2aSMiao Xie } 89378b132e9SWei Yang task_lock(current); 89458568d2aSMiao Xie old = current->mempolicy; 8951da177e4SLinus Torvalds current->mempolicy = new; 89645816682SVlastimil Babka if (new && new->mode == MPOL_INTERLEAVE) 89745816682SVlastimil Babka current->il_prev = MAX_NUMNODES-1; 89858568d2aSMiao Xie task_unlock(current); 89958568d2aSMiao Xie mpol_put(old); 9004bfc4495SKAMEZAWA Hiroyuki ret = 0; 9014bfc4495SKAMEZAWA Hiroyuki out: 9024bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 9034bfc4495SKAMEZAWA Hiroyuki return ret; 9041da177e4SLinus Torvalds } 9051da177e4SLinus Torvalds 906bea904d5SLee Schermerhorn /* 907bea904d5SLee Schermerhorn * Return nodemask for policy for get_mempolicy() query 90858568d2aSMiao Xie * 90958568d2aSMiao Xie * Called with task's alloc_lock held 910bea904d5SLee Schermerhorn */ 911bea904d5SLee Schermerhorn static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes) 9121da177e4SLinus Torvalds { 913dfcd3c0dSAndi Kleen nodes_clear(*nodes); 914bea904d5SLee Schermerhorn if (p == &default_policy) 915bea904d5SLee Schermerhorn return; 916bea904d5SLee Schermerhorn 91745c4745aSLee Schermerhorn switch (p->mode) { 91819770b32SMel Gorman case MPOL_BIND: 9191da177e4SLinus Torvalds case MPOL_INTERLEAVE: 920dfcd3c0dSAndi Kleen *nodes = p->v.nodes; 9211da177e4SLinus Torvalds break; 9221da177e4SLinus Torvalds case MPOL_PREFERRED: 923fc36b8d3SLee Schermerhorn if (!(p->flags & MPOL_F_LOCAL)) 924dfcd3c0dSAndi Kleen node_set(p->v.preferred_node, *nodes); 92553f2556bSLee Schermerhorn /* else return empty node mask for local allocation */ 9261da177e4SLinus Torvalds break; 9271da177e4SLinus Torvalds default: 9281da177e4SLinus Torvalds BUG(); 9291da177e4SLinus Torvalds } 9301da177e4SLinus Torvalds } 9311da177e4SLinus Torvalds 9323b9aadf7SAndrea Arcangeli static int lookup_node(struct mm_struct *mm, unsigned 
long addr) 9331da177e4SLinus Torvalds { 934ba841078SPeter Xu struct page *p = NULL; 9351da177e4SLinus Torvalds int err; 9361da177e4SLinus Torvalds 9373b9aadf7SAndrea Arcangeli int locked = 1; 9383b9aadf7SAndrea Arcangeli err = get_user_pages_locked(addr & PAGE_MASK, 1, 0, &p, &locked); 9392d3a36a4SMichal Hocko if (err > 0) { 9401da177e4SLinus Torvalds err = page_to_nid(p); 9411da177e4SLinus Torvalds put_page(p); 9421da177e4SLinus Torvalds } 9433b9aadf7SAndrea Arcangeli if (locked) 944d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); 9451da177e4SLinus Torvalds return err; 9461da177e4SLinus Torvalds } 9471da177e4SLinus Torvalds 9481da177e4SLinus Torvalds /* Retrieve NUMA policy */ 949dbcb0f19SAdrian Bunk static long do_get_mempolicy(int *policy, nodemask_t *nmask, 9501da177e4SLinus Torvalds unsigned long addr, unsigned long flags) 9511da177e4SLinus Torvalds { 9528bccd85fSChristoph Lameter int err; 9531da177e4SLinus Torvalds struct mm_struct *mm = current->mm; 9541da177e4SLinus Torvalds struct vm_area_struct *vma = NULL; 9553b9aadf7SAndrea Arcangeli struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL; 9561da177e4SLinus Torvalds 957754af6f5SLee Schermerhorn if (flags & 958754af6f5SLee Schermerhorn ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED)) 9591da177e4SLinus Torvalds return -EINVAL; 960754af6f5SLee Schermerhorn 961754af6f5SLee Schermerhorn if (flags & MPOL_F_MEMS_ALLOWED) { 962754af6f5SLee Schermerhorn if (flags & (MPOL_F_NODE|MPOL_F_ADDR)) 963754af6f5SLee Schermerhorn return -EINVAL; 964754af6f5SLee Schermerhorn *policy = 0; /* just so it's initialized */ 96558568d2aSMiao Xie task_lock(current); 966754af6f5SLee Schermerhorn *nmask = cpuset_current_mems_allowed; 96758568d2aSMiao Xie task_unlock(current); 968754af6f5SLee Schermerhorn return 0; 969754af6f5SLee Schermerhorn } 970754af6f5SLee Schermerhorn 9711da177e4SLinus Torvalds if (flags & MPOL_F_ADDR) { 972bea904d5SLee Schermerhorn /* 973bea904d5SLee Schermerhorn * Do NOT fall back to task policy if the 974bea904d5SLee Schermerhorn * vma/shared policy at addr is NULL. We 975bea904d5SLee Schermerhorn * want to return MPOL_DEFAULT in this case. 976bea904d5SLee Schermerhorn */ 977d8ed45c5SMichel Lespinasse mmap_read_lock(mm); 97833e3575cSLiam Howlett vma = vma_lookup(mm, addr); 9791da177e4SLinus Torvalds if (!vma) { 980d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); 9811da177e4SLinus Torvalds return -EFAULT; 9821da177e4SLinus Torvalds } 9831da177e4SLinus Torvalds if (vma->vm_ops && vma->vm_ops->get_policy) 9841da177e4SLinus Torvalds pol = vma->vm_ops->get_policy(vma, addr); 9851da177e4SLinus Torvalds else 9861da177e4SLinus Torvalds pol = vma->vm_policy; 9871da177e4SLinus Torvalds } else if (addr) 9881da177e4SLinus Torvalds return -EINVAL; 9891da177e4SLinus Torvalds 9901da177e4SLinus Torvalds if (!pol) 991bea904d5SLee Schermerhorn pol = &default_policy; /* indicates default behavior */ 9921da177e4SLinus Torvalds 9931da177e4SLinus Torvalds if (flags & MPOL_F_NODE) { 9941da177e4SLinus Torvalds if (flags & MPOL_F_ADDR) { 9953b9aadf7SAndrea Arcangeli /* 9963b9aadf7SAndrea Arcangeli * Take a refcount on the mpol, lookup_node() 997baf2f90bSLu Jialin * will drop the mmap_lock, so after calling 9983b9aadf7SAndrea Arcangeli * lookup_node() only "pol" remains valid, "vma" 9993b9aadf7SAndrea Arcangeli * is stale. 
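 *
 * (Illustrative userspace counterpart, not taken from this file: the
 * branch below services queries such as
 *
 *	int nid;
 *	get_mempolicy(&nid, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR);
 *
 * which report the node currently backing the page at addr.)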
10003b9aadf7SAndrea Arcangeli */ 10013b9aadf7SAndrea Arcangeli pol_refcount = pol; 10023b9aadf7SAndrea Arcangeli vma = NULL; 10033b9aadf7SAndrea Arcangeli mpol_get(pol); 10043b9aadf7SAndrea Arcangeli err = lookup_node(mm, addr); 10051da177e4SLinus Torvalds if (err < 0) 10061da177e4SLinus Torvalds goto out; 10078bccd85fSChristoph Lameter *policy = err; 10081da177e4SLinus Torvalds } else if (pol == current->mempolicy && 100945c4745aSLee Schermerhorn pol->mode == MPOL_INTERLEAVE) { 101045816682SVlastimil Babka *policy = next_node_in(current->il_prev, pol->v.nodes); 10111da177e4SLinus Torvalds } else { 10121da177e4SLinus Torvalds err = -EINVAL; 10131da177e4SLinus Torvalds goto out; 10141da177e4SLinus Torvalds } 1015bea904d5SLee Schermerhorn } else { 1016bea904d5SLee Schermerhorn *policy = pol == &default_policy ? MPOL_DEFAULT : 1017bea904d5SLee Schermerhorn pol->mode; 1018d79df630SDavid Rientjes /* 1019d79df630SDavid Rientjes * Internal mempolicy flags must be masked off before exposing 1020d79df630SDavid Rientjes * the policy to userspace. 1021d79df630SDavid Rientjes */ 1022d79df630SDavid Rientjes *policy |= (pol->flags & MPOL_MODE_FLAGS); 1023bea904d5SLee Schermerhorn } 10241da177e4SLinus Torvalds 10251da177e4SLinus Torvalds err = 0; 102658568d2aSMiao Xie if (nmask) { 1027c6b6ef8bSLee Schermerhorn if (mpol_store_user_nodemask(pol)) { 1028c6b6ef8bSLee Schermerhorn *nmask = pol->w.user_nodemask; 1029c6b6ef8bSLee Schermerhorn } else { 103058568d2aSMiao Xie task_lock(current); 1031bea904d5SLee Schermerhorn get_policy_nodemask(pol, nmask); 103258568d2aSMiao Xie task_unlock(current); 103358568d2aSMiao Xie } 1034c6b6ef8bSLee Schermerhorn } 10351da177e4SLinus Torvalds 10361da177e4SLinus Torvalds out: 103752cd3b07SLee Schermerhorn mpol_cond_put(pol); 10381da177e4SLinus Torvalds if (vma) 1039d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); 10403b9aadf7SAndrea Arcangeli if (pol_refcount) 10413b9aadf7SAndrea Arcangeli mpol_put(pol_refcount); 10421da177e4SLinus Torvalds return err; 10431da177e4SLinus Torvalds } 10441da177e4SLinus Torvalds 1045b20a3503SChristoph Lameter #ifdef CONFIG_MIGRATION 10468bccd85fSChristoph Lameter /* 1047c8633798SNaoya Horiguchi * page migration, thp tail pages can be passed. 10486ce3c4c0SChristoph Lameter */ 1049a53190a4SYang Shi static int migrate_page_add(struct page *page, struct list_head *pagelist, 1050fc301289SChristoph Lameter unsigned long flags) 10516ce3c4c0SChristoph Lameter { 1052c8633798SNaoya Horiguchi struct page *head = compound_head(page); 10536ce3c4c0SChristoph Lameter /* 1054fc301289SChristoph Lameter * Avoid migrating a page that is shared with others. 10556ce3c4c0SChristoph Lameter */ 1056c8633798SNaoya Horiguchi if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) { 1057c8633798SNaoya Horiguchi if (!isolate_lru_page(head)) { 1058c8633798SNaoya Horiguchi list_add_tail(&head->lru, pagelist); 1059c8633798SNaoya Horiguchi mod_node_page_state(page_pgdat(head), 10609de4f22aSHuang Ying NR_ISOLATED_ANON + page_is_file_lru(head), 10616c357848SMatthew Wilcox (Oracle) thp_nr_pages(head)); 1062a53190a4SYang Shi } else if (flags & MPOL_MF_STRICT) { 1063a53190a4SYang Shi /* 1064a53190a4SYang Shi * Non-movable page may reach here. And, there may be 1065a53190a4SYang Shi * temporary off LRU pages or non-LRU movable pages. 1066a53190a4SYang Shi * Treat them as unmovable pages since they can't be 1067a53190a4SYang Shi * isolated, so they can't be moved at the moment. It 1068a53190a4SYang Shi * should return -EIO for this case too. 
1069a53190a4SYang Shi */ 1070a53190a4SYang Shi return -EIO; 107162695a84SNick Piggin } 107262695a84SNick Piggin } 1073a53190a4SYang Shi 1074a53190a4SYang Shi return 0; 10756ce3c4c0SChristoph Lameter } 10766ce3c4c0SChristoph Lameter 10776ce3c4c0SChristoph Lameter /* 10787e2ab150SChristoph Lameter * Migrate pages from one node to a target node. 10797e2ab150SChristoph Lameter * Returns error or the number of pages not migrated. 10807e2ab150SChristoph Lameter */ 1081dbcb0f19SAdrian Bunk static int migrate_to_node(struct mm_struct *mm, int source, int dest, 1082dbcb0f19SAdrian Bunk int flags) 10837e2ab150SChristoph Lameter { 10847e2ab150SChristoph Lameter nodemask_t nmask; 10857e2ab150SChristoph Lameter LIST_HEAD(pagelist); 10867e2ab150SChristoph Lameter int err = 0; 1087a0976311SJoonsoo Kim struct migration_target_control mtc = { 1088a0976311SJoonsoo Kim .nid = dest, 1089a0976311SJoonsoo Kim .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 1090a0976311SJoonsoo Kim }; 10917e2ab150SChristoph Lameter 10927e2ab150SChristoph Lameter nodes_clear(nmask); 10937e2ab150SChristoph Lameter node_set(source, nmask); 10947e2ab150SChristoph Lameter 109508270807SMinchan Kim /* 109608270807SMinchan Kim * This does not "check" the range but isolates all pages that 109708270807SMinchan Kim * need migration. Between passing in the full user address 109808270807SMinchan Kim * space range and MPOL_MF_DISCONTIG_OK, this call can not fail. 109908270807SMinchan Kim */ 110008270807SMinchan Kim VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))); 110198094945SNaoya Horiguchi queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask, 11027e2ab150SChristoph Lameter flags | MPOL_MF_DISCONTIG_OK, &pagelist); 11037e2ab150SChristoph Lameter 1104cf608ac1SMinchan Kim if (!list_empty(&pagelist)) { 1105a0976311SJoonsoo Kim err = migrate_pages(&pagelist, alloc_migration_target, NULL, 1106a0976311SJoonsoo Kim (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL); 1107cf608ac1SMinchan Kim if (err) 1108e2d8cf40SNaoya Horiguchi putback_movable_pages(&pagelist); 1109cf608ac1SMinchan Kim } 111095a402c3SChristoph Lameter 11117e2ab150SChristoph Lameter return err; 11127e2ab150SChristoph Lameter } 11137e2ab150SChristoph Lameter 11147e2ab150SChristoph Lameter /* 11157e2ab150SChristoph Lameter * Move pages between the two nodesets so as to preserve the physical 11167e2ab150SChristoph Lameter * layout as much as possible. 111739743889SChristoph Lameter * 111839743889SChristoph Lameter * Returns the number of pages that could not be moved. 111939743889SChristoph Lameter */ 11200ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, 11210ce72d4fSAndrew Morton const nodemask_t *to, int flags) 112239743889SChristoph Lameter { 11237e2ab150SChristoph Lameter int busy = 0; 1124f555befdSJan Stancek int err = 0; 11257e2ab150SChristoph Lameter nodemask_t tmp; 112639743889SChristoph Lameter 1127361a2a22SMinchan Kim lru_cache_disable(); 11280aedadf9SChristoph Lameter 1129d8ed45c5SMichel Lespinasse mmap_read_lock(mm); 1130d4984711SChristoph Lameter 11317e2ab150SChristoph Lameter /* 11327e2ab150SChristoph Lameter * Find a 'source' bit set in 'tmp' whose corresponding 'dest' 11337e2ab150SChristoph Lameter * bit in 'to' is not also set in 'tmp'. Clear the found 'source' 11347e2ab150SChristoph Lameter * bit in 'tmp', and return that <source, dest> pair for migration. 11357e2ab150SChristoph Lameter * The pair of nodemasks 'to' and 'from' define the map.
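 *
 * (For example, with 'from' = {2,3,4} and 'to' = {3,4,5}, node_remap()
 * works purely by bit position: source node 2 maps to dest 3, 3 to 4
 * and 4 to 5.)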
11367e2ab150SChristoph Lameter * 11377e2ab150SChristoph Lameter * If no pair of bits is found that way, fall back to picking some 11387e2ab150SChristoph Lameter * pair of 'source' and 'dest' bits that are not the same. If the 11397e2ab150SChristoph Lameter * 'source' and 'dest' bits are the same, this represents a node 11407e2ab150SChristoph Lameter * that will be migrating to itself, so no pages need move. 11417e2ab150SChristoph Lameter * 11427e2ab150SChristoph Lameter * If no bits are left in 'tmp', or if all remaining bits left 11437e2ab150SChristoph Lameter * in 'tmp' correspond to the same bit in 'to', return false 11447e2ab150SChristoph Lameter * (nothing left to migrate). 11457e2ab150SChristoph Lameter * 11467e2ab150SChristoph Lameter * This lets us pick a pair of nodes to migrate between, such that 11477e2ab150SChristoph Lameter * if possible the dest node is not already occupied by some other 11487e2ab150SChristoph Lameter * source node, minimizing the risk of overloading the memory on a 11497e2ab150SChristoph Lameter * node that would happen if we migrated incoming memory to a node 11507e2ab150SChristoph Lameter * before migrating outgoing memory from that same node. 11517e2ab150SChristoph Lameter * 11527e2ab150SChristoph Lameter * A single scan of tmp is sufficient. As we go, we remember the 11537e2ab150SChristoph Lameter * most recent <s, d> pair that moved (s != d). If we find a pair 11547e2ab150SChristoph Lameter * that not only moved, but what's better, moved to an empty slot 11557e2ab150SChristoph Lameter * (d is not set in tmp), then we break out with that pair. 1156ae0e47f0SJustin P. Mattock * Otherwise when we finish scanning tmp, we at least have the 11577e2ab150SChristoph Lameter * most recent <s, d> pair that moved. If we get all the way through 11587e2ab150SChristoph Lameter * the scan of tmp without finding any node that moved, much less 11597e2ab150SChristoph Lameter * moved to an empty node, then there is nothing left worth migrating. 11607e2ab150SChristoph Lameter */ 11617e2ab150SChristoph Lameter 11620ce72d4fSAndrew Morton tmp = *from; 11637e2ab150SChristoph Lameter while (!nodes_empty(tmp)) { 11647e2ab150SChristoph Lameter int s, d; 1165b76ac7e7SJianguo Wu int source = NUMA_NO_NODE; 11667e2ab150SChristoph Lameter int dest = 0; 11677e2ab150SChristoph Lameter 11687e2ab150SChristoph Lameter for_each_node_mask(s, tmp) { 11694a5b18ccSLarry Woodman 11704a5b18ccSLarry Woodman /* 11714a5b18ccSLarry Woodman * do_migrate_pages() tries to maintain the relative 11724a5b18ccSLarry Woodman * node relationship of the pages established between 11734a5b18ccSLarry Woodman * threads and memory areas. 11744a5b18ccSLarry Woodman * 11754a5b18ccSLarry Woodman * However if the number of source nodes is not equal to 11764a5b18ccSLarry Woodman * the number of destination nodes we cannot preserve 11774a5b18ccSLarry Woodman * this node relative relationship. In that case, skip 11784a5b18ccSLarry Woodman * copying memory from a node that is in the destination 11794a5b18ccSLarry Woodman * mask. 11804a5b18ccSLarry Woodman * 11814a5b18ccSLarry Woodman * Example: [2,3,4] -> [3,4,5] moves everything. 11824a5b18ccSLarry Woodman * [0-7] -> [3,4,5] moves only 0,1,2,6,7.
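 *
 * Illustrative sketch (editorial, not upstream code): the [2,3,4] -> [3,4,5]
 * case is what a user asks for through the migrate_pages(2) syscall that is
 * serviced by do_migrate_pages(). Node numbers, the single 64-bit mask word
 * and the maxnode value are assumptions for the example, and <numaif.h>
 * comes from libnuma rather than the kernel tree:
 *
 *   #include <numaif.h>                               // link with -lnuma
 *
 *   unsigned long from = (1UL << 2) | (1UL << 3) | (1UL << 4);
 *   unsigned long to   = (1UL << 3) | (1UL << 4) | (1UL << 5);
 *   long ret = migrate_pages(0, 64, &from, &to);      // pid 0 == calling task
 *
 * With equal weights nothing is skipped, so the scan above picks the pair
 * whose destination is still empty first: 4 -> 5, then 3 -> 4, then 2 -> 3,
 * one <source, dest> pair per pass of the while loop.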
11834a5b18ccSLarry Woodman */ 11844a5b18ccSLarry Woodman 11850ce72d4fSAndrew Morton if ((nodes_weight(*from) != nodes_weight(*to)) && 11860ce72d4fSAndrew Morton (node_isset(s, *to))) 11874a5b18ccSLarry Woodman continue; 11884a5b18ccSLarry Woodman 11890ce72d4fSAndrew Morton d = node_remap(s, *from, *to); 11907e2ab150SChristoph Lameter if (s == d) 11917e2ab150SChristoph Lameter continue; 11927e2ab150SChristoph Lameter 11937e2ab150SChristoph Lameter source = s; /* Node moved. Memorize */ 11947e2ab150SChristoph Lameter dest = d; 11957e2ab150SChristoph Lameter 11967e2ab150SChristoph Lameter /* dest not in remaining from nodes? */ 11977e2ab150SChristoph Lameter if (!node_isset(dest, tmp)) 11987e2ab150SChristoph Lameter break; 11997e2ab150SChristoph Lameter } 1200b76ac7e7SJianguo Wu if (source == NUMA_NO_NODE) 12017e2ab150SChristoph Lameter break; 12027e2ab150SChristoph Lameter 12037e2ab150SChristoph Lameter node_clear(source, tmp); 12047e2ab150SChristoph Lameter err = migrate_to_node(mm, source, dest, flags); 12057e2ab150SChristoph Lameter if (err > 0) 12067e2ab150SChristoph Lameter busy += err; 12077e2ab150SChristoph Lameter if (err < 0) 12087e2ab150SChristoph Lameter break; 120939743889SChristoph Lameter } 1210d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); 1211d479960eSMinchan Kim 1212361a2a22SMinchan Kim lru_cache_enable(); 12137e2ab150SChristoph Lameter if (err < 0) 12147e2ab150SChristoph Lameter return err; 12157e2ab150SChristoph Lameter return busy; 1216b20a3503SChristoph Lameter 121739743889SChristoph Lameter } 121839743889SChristoph Lameter 12193ad33b24SLee Schermerhorn /* 12203ad33b24SLee Schermerhorn * Allocate a new page for page migration based on vma policy. 1221d05f0cdcSHugh Dickins * Start by assuming the page is mapped by the same vma as contains @start. 12223ad33b24SLee Schermerhorn * Search forward from there, if not. N.B., this assumes that the 12233ad33b24SLee Schermerhorn * list of pages handed to migrate_pages()--which is how we get here-- 12243ad33b24SLee Schermerhorn * is in virtual address order. 
12253ad33b24SLee Schermerhorn */ 1226666feb21SMichal Hocko static struct page *new_page(struct page *page, unsigned long start) 122795a402c3SChristoph Lameter { 1228d05f0cdcSHugh Dickins struct vm_area_struct *vma; 12293f649ab7SKees Cook unsigned long address; 123095a402c3SChristoph Lameter 1231d05f0cdcSHugh Dickins vma = find_vma(current->mm, start); 12323ad33b24SLee Schermerhorn while (vma) { 12333ad33b24SLee Schermerhorn address = page_address_in_vma(page, vma); 12343ad33b24SLee Schermerhorn if (address != -EFAULT) 12353ad33b24SLee Schermerhorn break; 12363ad33b24SLee Schermerhorn vma = vma->vm_next; 12373ad33b24SLee Schermerhorn } 12383ad33b24SLee Schermerhorn 123911c731e8SWanpeng Li if (PageHuge(page)) { 1240389c8178SMichal Hocko return alloc_huge_page_vma(page_hstate(compound_head(page)), 1241389c8178SMichal Hocko vma, address); 124294723aafSMichal Hocko } else if (PageTransHuge(page)) { 1243c8633798SNaoya Horiguchi struct page *thp; 1244c8633798SNaoya Horiguchi 124519deb769SDavid Rientjes thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address, 124619deb769SDavid Rientjes HPAGE_PMD_ORDER); 1247c8633798SNaoya Horiguchi if (!thp) 1248c8633798SNaoya Horiguchi return NULL; 1249c8633798SNaoya Horiguchi prep_transhuge_page(thp); 1250c8633798SNaoya Horiguchi return thp; 125111c731e8SWanpeng Li } 125211c731e8SWanpeng Li /* 125311c731e8SWanpeng Li * if !vma, alloc_page_vma() will use task or system default policy 125411c731e8SWanpeng Li */ 12550f556856SMichal Hocko return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL, 12560f556856SMichal Hocko vma, address); 125795a402c3SChristoph Lameter } 1258b20a3503SChristoph Lameter #else 1259b20a3503SChristoph Lameter 1260a53190a4SYang Shi static int migrate_page_add(struct page *page, struct list_head *pagelist, 1261b20a3503SChristoph Lameter unsigned long flags) 1262b20a3503SChristoph Lameter { 1263a53190a4SYang Shi return -EIO; 1264b20a3503SChristoph Lameter } 1265b20a3503SChristoph Lameter 12660ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, 12670ce72d4fSAndrew Morton const nodemask_t *to, int flags) 1268b20a3503SChristoph Lameter { 1269b20a3503SChristoph Lameter return -ENOSYS; 1270b20a3503SChristoph Lameter } 127195a402c3SChristoph Lameter 1272666feb21SMichal Hocko static struct page *new_page(struct page *page, unsigned long start) 127395a402c3SChristoph Lameter { 127495a402c3SChristoph Lameter return NULL; 127595a402c3SChristoph Lameter } 1276b20a3503SChristoph Lameter #endif 1277b20a3503SChristoph Lameter 1278dbcb0f19SAdrian Bunk static long do_mbind(unsigned long start, unsigned long len, 1279028fec41SDavid Rientjes unsigned short mode, unsigned short mode_flags, 1280028fec41SDavid Rientjes nodemask_t *nmask, unsigned long flags) 12816ce3c4c0SChristoph Lameter { 12826ce3c4c0SChristoph Lameter struct mm_struct *mm = current->mm; 12836ce3c4c0SChristoph Lameter struct mempolicy *new; 12846ce3c4c0SChristoph Lameter unsigned long end; 12856ce3c4c0SChristoph Lameter int err; 1286d8835445SYang Shi int ret; 12876ce3c4c0SChristoph Lameter LIST_HEAD(pagelist); 12886ce3c4c0SChristoph Lameter 1289b24f53a0SLee Schermerhorn if (flags & ~(unsigned long)MPOL_MF_VALID) 12906ce3c4c0SChristoph Lameter return -EINVAL; 129174c00241SChristoph Lameter if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE)) 12926ce3c4c0SChristoph Lameter return -EPERM; 12936ce3c4c0SChristoph Lameter 12946ce3c4c0SChristoph Lameter if (start & ~PAGE_MASK) 12956ce3c4c0SChristoph Lameter return -EINVAL; 12966ce3c4c0SChristoph Lameter 
12976ce3c4c0SChristoph Lameter if (mode == MPOL_DEFAULT) 12986ce3c4c0SChristoph Lameter flags &= ~MPOL_MF_STRICT; 12996ce3c4c0SChristoph Lameter 13006ce3c4c0SChristoph Lameter len = (len + PAGE_SIZE - 1) & PAGE_MASK; 13016ce3c4c0SChristoph Lameter end = start + len; 13026ce3c4c0SChristoph Lameter 13036ce3c4c0SChristoph Lameter if (end < start) 13046ce3c4c0SChristoph Lameter return -EINVAL; 13056ce3c4c0SChristoph Lameter if (end == start) 13066ce3c4c0SChristoph Lameter return 0; 13076ce3c4c0SChristoph Lameter 1308028fec41SDavid Rientjes new = mpol_new(mode, mode_flags, nmask); 13096ce3c4c0SChristoph Lameter if (IS_ERR(new)) 13106ce3c4c0SChristoph Lameter return PTR_ERR(new); 13116ce3c4c0SChristoph Lameter 1312b24f53a0SLee Schermerhorn if (flags & MPOL_MF_LAZY) 1313b24f53a0SLee Schermerhorn new->flags |= MPOL_F_MOF; 1314b24f53a0SLee Schermerhorn 13156ce3c4c0SChristoph Lameter /* 13166ce3c4c0SChristoph Lameter * If we are using the default policy then operation 13176ce3c4c0SChristoph Lameter * on discontinuous address spaces is okay after all 13186ce3c4c0SChristoph Lameter */ 13196ce3c4c0SChristoph Lameter if (!new) 13206ce3c4c0SChristoph Lameter flags |= MPOL_MF_DISCONTIG_OK; 13216ce3c4c0SChristoph Lameter 1322028fec41SDavid Rientjes pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n", 1323028fec41SDavid Rientjes start, start + len, mode, mode_flags, 132400ef2d2fSDavid Rientjes nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE); 13256ce3c4c0SChristoph Lameter 13260aedadf9SChristoph Lameter if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 13270aedadf9SChristoph Lameter 1328361a2a22SMinchan Kim lru_cache_disable(); 13290aedadf9SChristoph Lameter } 13304bfc4495SKAMEZAWA Hiroyuki { 13314bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 13324bfc4495SKAMEZAWA Hiroyuki if (scratch) { 1333d8ed45c5SMichel Lespinasse mmap_write_lock(mm); 13344bfc4495SKAMEZAWA Hiroyuki err = mpol_set_nodemask(new, nmask, scratch); 13354bfc4495SKAMEZAWA Hiroyuki if (err) 1336d8ed45c5SMichel Lespinasse mmap_write_unlock(mm); 13374bfc4495SKAMEZAWA Hiroyuki } else 13384bfc4495SKAMEZAWA Hiroyuki err = -ENOMEM; 13394bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 13404bfc4495SKAMEZAWA Hiroyuki } 1341b05ca738SKOSAKI Motohiro if (err) 1342b05ca738SKOSAKI Motohiro goto mpol_out; 1343b05ca738SKOSAKI Motohiro 1344d8835445SYang Shi ret = queue_pages_range(mm, start, end, nmask, 13456ce3c4c0SChristoph Lameter flags | MPOL_MF_INVERT, &pagelist); 1346d8835445SYang Shi 1347d8835445SYang Shi if (ret < 0) { 1348a85dfc30SYang Shi err = ret; 1349d8835445SYang Shi goto up_out; 1350d8835445SYang Shi } 1351d8835445SYang Shi 13529d8cebd4SKOSAKI Motohiro err = mbind_range(mm, start, end, new); 13537e2ab150SChristoph Lameter 1354b24f53a0SLee Schermerhorn if (!err) { 1355b24f53a0SLee Schermerhorn int nr_failed = 0; 1356b24f53a0SLee Schermerhorn 1357cf608ac1SMinchan Kim if (!list_empty(&pagelist)) { 1358b24f53a0SLee Schermerhorn WARN_ON_ONCE(flags & MPOL_MF_LAZY); 1359d05f0cdcSHugh Dickins nr_failed = migrate_pages(&pagelist, new_page, NULL, 1360d05f0cdcSHugh Dickins start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND); 1361cf608ac1SMinchan Kim if (nr_failed) 136274060e4dSNaoya Horiguchi putback_movable_pages(&pagelist); 1363cf608ac1SMinchan Kim } 13646ce3c4c0SChristoph Lameter 1365d8835445SYang Shi if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT))) 13666ce3c4c0SChristoph Lameter err = -EIO; 1367a85dfc30SYang Shi } else { 1368d8835445SYang Shi up_out: 1369a85dfc30SYang Shi if (!list_empty(&pagelist)) 1370a85dfc30SYang Shi 
putback_movable_pages(&pagelist); 1371a85dfc30SYang Shi } 1372a85dfc30SYang Shi 1373d8ed45c5SMichel Lespinasse mmap_write_unlock(mm); 1374b05ca738SKOSAKI Motohiro mpol_out: 1375f0be3d32SLee Schermerhorn mpol_put(new); 1376d479960eSMinchan Kim if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) 1377361a2a22SMinchan Kim lru_cache_enable(); 13786ce3c4c0SChristoph Lameter return err; 13796ce3c4c0SChristoph Lameter } 13806ce3c4c0SChristoph Lameter 138139743889SChristoph Lameter /* 13828bccd85fSChristoph Lameter * User space interface with variable sized bitmaps for nodelists. 13838bccd85fSChristoph Lameter */ 13848bccd85fSChristoph Lameter 13858bccd85fSChristoph Lameter /* Copy a node mask from user space. */ 138639743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask, 13878bccd85fSChristoph Lameter unsigned long maxnode) 13888bccd85fSChristoph Lameter { 13898bccd85fSChristoph Lameter unsigned long k; 139056521e7aSYisheng Xie unsigned long t; 13918bccd85fSChristoph Lameter unsigned long nlongs; 13928bccd85fSChristoph Lameter unsigned long endmask; 13938bccd85fSChristoph Lameter 13948bccd85fSChristoph Lameter --maxnode; 13958bccd85fSChristoph Lameter nodes_clear(*nodes); 13968bccd85fSChristoph Lameter if (maxnode == 0 || !nmask) 13978bccd85fSChristoph Lameter return 0; 1398a9c930baSAndi Kleen if (maxnode > PAGE_SIZE*BITS_PER_BYTE) 1399636f13c1SChris Wright return -EINVAL; 14008bccd85fSChristoph Lameter 14018bccd85fSChristoph Lameter nlongs = BITS_TO_LONGS(maxnode); 14028bccd85fSChristoph Lameter if ((maxnode % BITS_PER_LONG) == 0) 14038bccd85fSChristoph Lameter endmask = ~0UL; 14048bccd85fSChristoph Lameter else 14058bccd85fSChristoph Lameter endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1; 14068bccd85fSChristoph Lameter 140756521e7aSYisheng Xie /* 140856521e7aSYisheng Xie * When the user specified more nodes than supported just check 140956521e7aSYisheng Xie * if the non supported part is all zero. 141056521e7aSYisheng Xie * 141156521e7aSYisheng Xie * If maxnode have more longs than MAX_NUMNODES, check 141256521e7aSYisheng Xie * the bits in that area first. And then go through to 141356521e7aSYisheng Xie * check the rest bits which equal or bigger than MAX_NUMNODES. 141456521e7aSYisheng Xie * Otherwise, just check bits [MAX_NUMNODES, maxnode). 
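 *
 * Worked example (editorial): assume 64-bit longs and MAX_NUMNODES == 64,
 * and a caller passing maxnode = 129. After the decrement maxnode is 128,
 * so nlongs = BITS_TO_LONGS(128) = 2 and endmask = ~0UL (128 is a multiple
 * of BITS_PER_LONG). Since nlongs exceeds BITS_TO_LONGS(MAX_NUMNODES) = 1,
 * the second user word (bits 64..127) must be all zero or we return
 * -EINVAL; nlongs is then clamped to 1 and only bits 0..63 are copied in.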
141556521e7aSYisheng Xie */ 14168bccd85fSChristoph Lameter if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) { 14178bccd85fSChristoph Lameter for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) { 14188bccd85fSChristoph Lameter if (get_user(t, nmask + k)) 14198bccd85fSChristoph Lameter return -EFAULT; 14208bccd85fSChristoph Lameter if (k == nlongs - 1) { 14218bccd85fSChristoph Lameter if (t & endmask) 14228bccd85fSChristoph Lameter return -EINVAL; 14238bccd85fSChristoph Lameter } else if (t) 14248bccd85fSChristoph Lameter return -EINVAL; 14258bccd85fSChristoph Lameter } 14268bccd85fSChristoph Lameter nlongs = BITS_TO_LONGS(MAX_NUMNODES); 14278bccd85fSChristoph Lameter endmask = ~0UL; 14288bccd85fSChristoph Lameter } 14298bccd85fSChristoph Lameter 143056521e7aSYisheng Xie if (maxnode > MAX_NUMNODES && MAX_NUMNODES % BITS_PER_LONG != 0) { 143156521e7aSYisheng Xie unsigned long valid_mask = endmask; 143256521e7aSYisheng Xie 143356521e7aSYisheng Xie valid_mask &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1); 143456521e7aSYisheng Xie if (get_user(t, nmask + nlongs - 1)) 143556521e7aSYisheng Xie return -EFAULT; 143656521e7aSYisheng Xie if (t & valid_mask) 143756521e7aSYisheng Xie return -EINVAL; 143856521e7aSYisheng Xie } 143956521e7aSYisheng Xie 14408bccd85fSChristoph Lameter if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long))) 14418bccd85fSChristoph Lameter return -EFAULT; 14428bccd85fSChristoph Lameter nodes_addr(*nodes)[nlongs-1] &= endmask; 14438bccd85fSChristoph Lameter return 0; 14448bccd85fSChristoph Lameter } 14458bccd85fSChristoph Lameter 14468bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */ 14478bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode, 14488bccd85fSChristoph Lameter nodemask_t *nodes) 14498bccd85fSChristoph Lameter { 14508bccd85fSChristoph Lameter unsigned long copy = ALIGN(maxnode-1, 64) / 8; 1451050c17f2SRalph Campbell unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long); 14528bccd85fSChristoph Lameter 14538bccd85fSChristoph Lameter if (copy > nbytes) { 14548bccd85fSChristoph Lameter if (copy > PAGE_SIZE) 14558bccd85fSChristoph Lameter return -EINVAL; 14568bccd85fSChristoph Lameter if (clear_user((char __user *)mask + nbytes, copy - nbytes)) 14578bccd85fSChristoph Lameter return -EFAULT; 14588bccd85fSChristoph Lameter copy = nbytes; 14598bccd85fSChristoph Lameter } 14608bccd85fSChristoph Lameter return copy_to_user(mask, nodes_addr(*nodes), copy) ? 
-EFAULT : 0; 14618bccd85fSChristoph Lameter } 14628bccd85fSChristoph Lameter 1463e7dc9ad6SDominik Brodowski static long kernel_mbind(unsigned long start, unsigned long len, 1464e7dc9ad6SDominik Brodowski unsigned long mode, const unsigned long __user *nmask, 1465e7dc9ad6SDominik Brodowski unsigned long maxnode, unsigned int flags) 14668bccd85fSChristoph Lameter { 14678bccd85fSChristoph Lameter nodemask_t nodes; 14688bccd85fSChristoph Lameter int err; 1469028fec41SDavid Rientjes unsigned short mode_flags; 14708bccd85fSChristoph Lameter 1471057d3389SAndrey Konovalov start = untagged_addr(start); 1472028fec41SDavid Rientjes mode_flags = mode & MPOL_MODE_FLAGS; 1473028fec41SDavid Rientjes mode &= ~MPOL_MODE_FLAGS; 1474a3b51e01SDavid Rientjes if (mode >= MPOL_MAX) 1475a3b51e01SDavid Rientjes return -EINVAL; 14764c50bc01SDavid Rientjes if ((mode_flags & MPOL_F_STATIC_NODES) && 14774c50bc01SDavid Rientjes (mode_flags & MPOL_F_RELATIVE_NODES)) 14784c50bc01SDavid Rientjes return -EINVAL; 14798bccd85fSChristoph Lameter err = get_nodes(&nodes, nmask, maxnode); 14808bccd85fSChristoph Lameter if (err) 14818bccd85fSChristoph Lameter return err; 1482028fec41SDavid Rientjes return do_mbind(start, len, mode, mode_flags, &nodes, flags); 14838bccd85fSChristoph Lameter } 14848bccd85fSChristoph Lameter 1485e7dc9ad6SDominik Brodowski SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len, 1486e7dc9ad6SDominik Brodowski unsigned long, mode, const unsigned long __user *, nmask, 1487e7dc9ad6SDominik Brodowski unsigned long, maxnode, unsigned int, flags) 1488e7dc9ad6SDominik Brodowski { 1489e7dc9ad6SDominik Brodowski return kernel_mbind(start, len, mode, nmask, maxnode, flags); 1490e7dc9ad6SDominik Brodowski } 1491e7dc9ad6SDominik Brodowski 14928bccd85fSChristoph Lameter /* Set the process memory policy */ 1493af03c4acSDominik Brodowski static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask, 1494af03c4acSDominik Brodowski unsigned long maxnode) 14958bccd85fSChristoph Lameter { 14968bccd85fSChristoph Lameter int err; 14978bccd85fSChristoph Lameter nodemask_t nodes; 1498028fec41SDavid Rientjes unsigned short flags; 14998bccd85fSChristoph Lameter 1500028fec41SDavid Rientjes flags = mode & MPOL_MODE_FLAGS; 1501028fec41SDavid Rientjes mode &= ~MPOL_MODE_FLAGS; 1502028fec41SDavid Rientjes if ((unsigned int)mode >= MPOL_MAX) 15038bccd85fSChristoph Lameter return -EINVAL; 15044c50bc01SDavid Rientjes if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES)) 15054c50bc01SDavid Rientjes return -EINVAL; 15068bccd85fSChristoph Lameter err = get_nodes(&nodes, nmask, maxnode); 15078bccd85fSChristoph Lameter if (err) 15088bccd85fSChristoph Lameter return err; 1509028fec41SDavid Rientjes return do_set_mempolicy(mode, flags, &nodes); 15108bccd85fSChristoph Lameter } 15118bccd85fSChristoph Lameter 1512af03c4acSDominik Brodowski SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask, 1513af03c4acSDominik Brodowski unsigned long, maxnode) 1514af03c4acSDominik Brodowski { 1515af03c4acSDominik Brodowski return kernel_set_mempolicy(mode, nmask, maxnode); 1516af03c4acSDominik Brodowski } 1517af03c4acSDominik Brodowski 1518b6e9b0baSDominik Brodowski static int kernel_migrate_pages(pid_t pid, unsigned long maxnode, 1519b6e9b0baSDominik Brodowski const unsigned long __user *old_nodes, 1520b6e9b0baSDominik Brodowski const unsigned long __user *new_nodes) 152139743889SChristoph Lameter { 1522596d7cfaSKOSAKI Motohiro struct mm_struct *mm = NULL; 152339743889SChristoph Lameter 
struct task_struct *task; 152439743889SChristoph Lameter nodemask_t task_nodes; 152539743889SChristoph Lameter int err; 1526596d7cfaSKOSAKI Motohiro nodemask_t *old; 1527596d7cfaSKOSAKI Motohiro nodemask_t *new; 1528596d7cfaSKOSAKI Motohiro NODEMASK_SCRATCH(scratch); 152939743889SChristoph Lameter 1530596d7cfaSKOSAKI Motohiro if (!scratch) 1531596d7cfaSKOSAKI Motohiro return -ENOMEM; 153239743889SChristoph Lameter 1533596d7cfaSKOSAKI Motohiro old = &scratch->mask1; 1534596d7cfaSKOSAKI Motohiro new = &scratch->mask2; 1535596d7cfaSKOSAKI Motohiro 1536596d7cfaSKOSAKI Motohiro err = get_nodes(old, old_nodes, maxnode); 153739743889SChristoph Lameter if (err) 1538596d7cfaSKOSAKI Motohiro goto out; 1539596d7cfaSKOSAKI Motohiro 1540596d7cfaSKOSAKI Motohiro err = get_nodes(new, new_nodes, maxnode); 1541596d7cfaSKOSAKI Motohiro if (err) 1542596d7cfaSKOSAKI Motohiro goto out; 154339743889SChristoph Lameter 154439743889SChristoph Lameter /* Find the mm_struct */ 154555cfaa3cSZeng Zhaoming rcu_read_lock(); 1546228ebcbeSPavel Emelyanov task = pid ? find_task_by_vpid(pid) : current; 154739743889SChristoph Lameter if (!task) { 154855cfaa3cSZeng Zhaoming rcu_read_unlock(); 1549596d7cfaSKOSAKI Motohiro err = -ESRCH; 1550596d7cfaSKOSAKI Motohiro goto out; 155139743889SChristoph Lameter } 15523268c63eSChristoph Lameter get_task_struct(task); 155339743889SChristoph Lameter 1554596d7cfaSKOSAKI Motohiro err = -EINVAL; 155539743889SChristoph Lameter 155639743889SChristoph Lameter /* 155731367466SOtto Ebeling * Check if this process has the right to modify the specified process. 155831367466SOtto Ebeling * Use the regular "ptrace_may_access()" checks. 155939743889SChristoph Lameter */ 156031367466SOtto Ebeling if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) { 1561c69e8d9cSDavid Howells rcu_read_unlock(); 156239743889SChristoph Lameter err = -EPERM; 15633268c63eSChristoph Lameter goto out_put; 156439743889SChristoph Lameter } 1565c69e8d9cSDavid Howells rcu_read_unlock(); 156639743889SChristoph Lameter 156739743889SChristoph Lameter task_nodes = cpuset_mems_allowed(task); 156839743889SChristoph Lameter /* Is the user allowed to access the target nodes? */ 1569596d7cfaSKOSAKI Motohiro if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) { 157039743889SChristoph Lameter err = -EPERM; 15713268c63eSChristoph Lameter goto out_put; 157239743889SChristoph Lameter } 157339743889SChristoph Lameter 15740486a38bSYisheng Xie task_nodes = cpuset_mems_allowed(current); 15750486a38bSYisheng Xie nodes_and(*new, *new, task_nodes); 15760486a38bSYisheng Xie if (nodes_empty(*new)) 15773268c63eSChristoph Lameter goto out_put; 15780486a38bSYisheng Xie 157986c3a764SDavid Quigley err = security_task_movememory(task); 158086c3a764SDavid Quigley if (err) 15813268c63eSChristoph Lameter goto out_put; 158286c3a764SDavid Quigley 15833268c63eSChristoph Lameter mm = get_task_mm(task); 15843268c63eSChristoph Lameter put_task_struct(task); 1585f2a9ef88SSasha Levin 1586f2a9ef88SSasha Levin if (!mm) { 1587f2a9ef88SSasha Levin err = -EINVAL; 1588f2a9ef88SSasha Levin goto out; 1589f2a9ef88SSasha Levin } 1590f2a9ef88SSasha Levin 1591596d7cfaSKOSAKI Motohiro err = do_migrate_pages(mm, old, new, 159274c00241SChristoph Lameter capable(CAP_SYS_NICE) ? 
MPOL_MF_MOVE_ALL : MPOL_MF_MOVE); 15933268c63eSChristoph Lameter 159439743889SChristoph Lameter mmput(mm); 15953268c63eSChristoph Lameter out: 1596596d7cfaSKOSAKI Motohiro NODEMASK_SCRATCH_FREE(scratch); 1597596d7cfaSKOSAKI Motohiro 159839743889SChristoph Lameter return err; 15993268c63eSChristoph Lameter 16003268c63eSChristoph Lameter out_put: 16013268c63eSChristoph Lameter put_task_struct(task); 16023268c63eSChristoph Lameter goto out; 16033268c63eSChristoph Lameter 160439743889SChristoph Lameter } 160539743889SChristoph Lameter 1606b6e9b0baSDominik Brodowski SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode, 1607b6e9b0baSDominik Brodowski const unsigned long __user *, old_nodes, 1608b6e9b0baSDominik Brodowski const unsigned long __user *, new_nodes) 1609b6e9b0baSDominik Brodowski { 1610b6e9b0baSDominik Brodowski return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes); 1611b6e9b0baSDominik Brodowski } 1612b6e9b0baSDominik Brodowski 161339743889SChristoph Lameter 16148bccd85fSChristoph Lameter /* Retrieve NUMA policy */ 1615af03c4acSDominik Brodowski static int kernel_get_mempolicy(int __user *policy, 1616af03c4acSDominik Brodowski unsigned long __user *nmask, 1617af03c4acSDominik Brodowski unsigned long maxnode, 1618af03c4acSDominik Brodowski unsigned long addr, 1619af03c4acSDominik Brodowski unsigned long flags) 16208bccd85fSChristoph Lameter { 1621dbcb0f19SAdrian Bunk int err; 16223f649ab7SKees Cook int pval; 16238bccd85fSChristoph Lameter nodemask_t nodes; 16248bccd85fSChristoph Lameter 1625050c17f2SRalph Campbell if (nmask != NULL && maxnode < nr_node_ids) 16268bccd85fSChristoph Lameter return -EINVAL; 16278bccd85fSChristoph Lameter 16284605f057SWenchao Hao addr = untagged_addr(addr); 16294605f057SWenchao Hao 16308bccd85fSChristoph Lameter err = do_get_mempolicy(&pval, &nodes, addr, flags); 16318bccd85fSChristoph Lameter 16328bccd85fSChristoph Lameter if (err) 16338bccd85fSChristoph Lameter return err; 16348bccd85fSChristoph Lameter 16358bccd85fSChristoph Lameter if (policy && put_user(pval, policy)) 16368bccd85fSChristoph Lameter return -EFAULT; 16378bccd85fSChristoph Lameter 16388bccd85fSChristoph Lameter if (nmask) 16398bccd85fSChristoph Lameter err = copy_nodes_to_user(nmask, maxnode, &nodes); 16408bccd85fSChristoph Lameter 16418bccd85fSChristoph Lameter return err; 16428bccd85fSChristoph Lameter } 16438bccd85fSChristoph Lameter 1644af03c4acSDominik Brodowski SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, 1645af03c4acSDominik Brodowski unsigned long __user *, nmask, unsigned long, maxnode, 1646af03c4acSDominik Brodowski unsigned long, addr, unsigned long, flags) 1647af03c4acSDominik Brodowski { 1648af03c4acSDominik Brodowski return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags); 1649af03c4acSDominik Brodowski } 1650af03c4acSDominik Brodowski 16511da177e4SLinus Torvalds #ifdef CONFIG_COMPAT 16521da177e4SLinus Torvalds 1653c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, 1654c93e0f6cSHeiko Carstens compat_ulong_t __user *, nmask, 1655c93e0f6cSHeiko Carstens compat_ulong_t, maxnode, 1656c93e0f6cSHeiko Carstens compat_ulong_t, addr, compat_ulong_t, flags) 16571da177e4SLinus Torvalds { 16581da177e4SLinus Torvalds long err; 16591da177e4SLinus Torvalds unsigned long __user *nm = NULL; 16601da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 16611da177e4SLinus Torvalds DECLARE_BITMAP(bm, MAX_NUMNODES); 16621da177e4SLinus Torvalds 1663050c17f2SRalph Campbell nr_bits = min_t(unsigned long, maxnode-1, 
nr_node_ids); 16641da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 16651da177e4SLinus Torvalds 16661da177e4SLinus Torvalds if (nmask) 16671da177e4SLinus Torvalds nm = compat_alloc_user_space(alloc_size); 16681da177e4SLinus Torvalds 1669af03c4acSDominik Brodowski err = kernel_get_mempolicy(policy, nm, nr_bits+1, addr, flags); 16701da177e4SLinus Torvalds 16711da177e4SLinus Torvalds if (!err && nmask) { 16722bbff6c7SKAMEZAWA Hiroyuki unsigned long copy_size; 16732bbff6c7SKAMEZAWA Hiroyuki copy_size = min_t(unsigned long, sizeof(bm), alloc_size); 16742bbff6c7SKAMEZAWA Hiroyuki err = copy_from_user(bm, nm, copy_size); 16751da177e4SLinus Torvalds /* ensure entire bitmap is zeroed */ 16761da177e4SLinus Torvalds err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8); 16771da177e4SLinus Torvalds err |= compat_put_bitmap(nmask, bm, nr_bits); 16781da177e4SLinus Torvalds } 16791da177e4SLinus Torvalds 16801da177e4SLinus Torvalds return err; 16811da177e4SLinus Torvalds } 16821da177e4SLinus Torvalds 1683c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask, 1684c93e0f6cSHeiko Carstens compat_ulong_t, maxnode) 16851da177e4SLinus Torvalds { 16861da177e4SLinus Torvalds unsigned long __user *nm = NULL; 16871da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 16881da177e4SLinus Torvalds DECLARE_BITMAP(bm, MAX_NUMNODES); 16891da177e4SLinus Torvalds 16901da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 16911da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 16921da177e4SLinus Torvalds 16931da177e4SLinus Torvalds if (nmask) { 1694cf01fb99SChris Salls if (compat_get_bitmap(bm, nmask, nr_bits)) 16951da177e4SLinus Torvalds return -EFAULT; 1696cf01fb99SChris Salls nm = compat_alloc_user_space(alloc_size); 1697cf01fb99SChris Salls if (copy_to_user(nm, bm, alloc_size)) 1698cf01fb99SChris Salls return -EFAULT; 1699cf01fb99SChris Salls } 17001da177e4SLinus Torvalds 1701af03c4acSDominik Brodowski return kernel_set_mempolicy(mode, nm, nr_bits+1); 17021da177e4SLinus Torvalds } 17031da177e4SLinus Torvalds 1704c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len, 1705c93e0f6cSHeiko Carstens compat_ulong_t, mode, compat_ulong_t __user *, nmask, 1706c93e0f6cSHeiko Carstens compat_ulong_t, maxnode, compat_ulong_t, flags) 17071da177e4SLinus Torvalds { 17081da177e4SLinus Torvalds unsigned long __user *nm = NULL; 17091da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 1710dfcd3c0dSAndi Kleen nodemask_t bm; 17111da177e4SLinus Torvalds 17121da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 17131da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 17141da177e4SLinus Torvalds 17151da177e4SLinus Torvalds if (nmask) { 1716cf01fb99SChris Salls if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits)) 17171da177e4SLinus Torvalds return -EFAULT; 1718cf01fb99SChris Salls nm = compat_alloc_user_space(alloc_size); 1719cf01fb99SChris Salls if (copy_to_user(nm, nodes_addr(bm), alloc_size)) 1720cf01fb99SChris Salls return -EFAULT; 1721cf01fb99SChris Salls } 17221da177e4SLinus Torvalds 1723e7dc9ad6SDominik Brodowski return kernel_mbind(start, len, mode, nm, nr_bits+1, flags); 17241da177e4SLinus Torvalds } 17251da177e4SLinus Torvalds 1726b6e9b0baSDominik Brodowski COMPAT_SYSCALL_DEFINE4(migrate_pages, compat_pid_t, pid, 1727b6e9b0baSDominik Brodowski compat_ulong_t, maxnode, 1728b6e9b0baSDominik Brodowski const compat_ulong_t 
__user *, old_nodes, 1729b6e9b0baSDominik Brodowski const compat_ulong_t __user *, new_nodes) 1730b6e9b0baSDominik Brodowski { 1731b6e9b0baSDominik Brodowski unsigned long __user *old = NULL; 1732b6e9b0baSDominik Brodowski unsigned long __user *new = NULL; 1733b6e9b0baSDominik Brodowski nodemask_t tmp_mask; 1734b6e9b0baSDominik Brodowski unsigned long nr_bits; 1735b6e9b0baSDominik Brodowski unsigned long size; 1736b6e9b0baSDominik Brodowski 1737b6e9b0baSDominik Brodowski nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES); 1738b6e9b0baSDominik Brodowski size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 1739b6e9b0baSDominik Brodowski if (old_nodes) { 1740b6e9b0baSDominik Brodowski if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits)) 1741b6e9b0baSDominik Brodowski return -EFAULT; 1742b6e9b0baSDominik Brodowski old = compat_alloc_user_space(new_nodes ? size * 2 : size); 1743b6e9b0baSDominik Brodowski if (new_nodes) 1744b6e9b0baSDominik Brodowski new = old + size / sizeof(unsigned long); 1745b6e9b0baSDominik Brodowski if (copy_to_user(old, nodes_addr(tmp_mask), size)) 1746b6e9b0baSDominik Brodowski return -EFAULT; 1747b6e9b0baSDominik Brodowski } 1748b6e9b0baSDominik Brodowski if (new_nodes) { 1749b6e9b0baSDominik Brodowski if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits)) 1750b6e9b0baSDominik Brodowski return -EFAULT; 1751b6e9b0baSDominik Brodowski if (new == NULL) 1752b6e9b0baSDominik Brodowski new = compat_alloc_user_space(size); 1753b6e9b0baSDominik Brodowski if (copy_to_user(new, nodes_addr(tmp_mask), size)) 1754b6e9b0baSDominik Brodowski return -EFAULT; 1755b6e9b0baSDominik Brodowski } 1756b6e9b0baSDominik Brodowski return kernel_migrate_pages(pid, nr_bits + 1, old, new); 1757b6e9b0baSDominik Brodowski } 1758b6e9b0baSDominik Brodowski 1759b6e9b0baSDominik Brodowski #endif /* CONFIG_COMPAT */ 17601da177e4SLinus Torvalds 176120ca87f2SLi Xinhai bool vma_migratable(struct vm_area_struct *vma) 176220ca87f2SLi Xinhai { 176320ca87f2SLi Xinhai if (vma->vm_flags & (VM_IO | VM_PFNMAP)) 176420ca87f2SLi Xinhai return false; 176520ca87f2SLi Xinhai 176620ca87f2SLi Xinhai /* 176720ca87f2SLi Xinhai * DAX device mappings require predictable access latency, so avoid 176820ca87f2SLi Xinhai * incurring periodic faults. 176920ca87f2SLi Xinhai */ 177020ca87f2SLi Xinhai if (vma_is_dax(vma)) 177120ca87f2SLi Xinhai return false; 177220ca87f2SLi Xinhai 177320ca87f2SLi Xinhai if (is_vm_hugetlb_page(vma) && 177420ca87f2SLi Xinhai !hugepage_migration_supported(hstate_vma(vma))) 177520ca87f2SLi Xinhai return false; 177620ca87f2SLi Xinhai 177720ca87f2SLi Xinhai /* 177820ca87f2SLi Xinhai * Migration allocates pages in the highest zone. If we cannot 177920ca87f2SLi Xinhai * do so then migration (at least from node to node) is not 178020ca87f2SLi Xinhai * possible. 
178120ca87f2SLi Xinhai */ 178220ca87f2SLi Xinhai if (vma->vm_file && 178320ca87f2SLi Xinhai gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping)) 178420ca87f2SLi Xinhai < policy_zone) 178520ca87f2SLi Xinhai return false; 178620ca87f2SLi Xinhai return true; 178720ca87f2SLi Xinhai } 178820ca87f2SLi Xinhai 178974d2c3a0SOleg Nesterov struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, 179074d2c3a0SOleg Nesterov unsigned long addr) 17911da177e4SLinus Torvalds { 17928d90274bSOleg Nesterov struct mempolicy *pol = NULL; 17931da177e4SLinus Torvalds 17941da177e4SLinus Torvalds if (vma) { 1795480eccf9SLee Schermerhorn if (vma->vm_ops && vma->vm_ops->get_policy) { 17968d90274bSOleg Nesterov pol = vma->vm_ops->get_policy(vma, addr); 179700442ad0SMel Gorman } else if (vma->vm_policy) { 17981da177e4SLinus Torvalds pol = vma->vm_policy; 179900442ad0SMel Gorman 180000442ad0SMel Gorman /* 180100442ad0SMel Gorman * shmem_alloc_page() passes MPOL_F_SHARED policy with 180200442ad0SMel Gorman * a pseudo vma whose vma->vm_ops=NULL. Take a reference 180300442ad0SMel Gorman * count on these policies which will be dropped by 180400442ad0SMel Gorman * mpol_cond_put() later 180500442ad0SMel Gorman */ 180600442ad0SMel Gorman if (mpol_needs_cond_ref(pol)) 180700442ad0SMel Gorman mpol_get(pol); 180800442ad0SMel Gorman } 18091da177e4SLinus Torvalds } 1810f15ca78eSOleg Nesterov 181174d2c3a0SOleg Nesterov return pol; 181274d2c3a0SOleg Nesterov } 181374d2c3a0SOleg Nesterov 181474d2c3a0SOleg Nesterov /* 1815dd6eecb9SOleg Nesterov * get_vma_policy(@vma, @addr) 181674d2c3a0SOleg Nesterov * @vma: virtual memory area whose policy is sought 181774d2c3a0SOleg Nesterov * @addr: address in @vma for shared policy lookup 181874d2c3a0SOleg Nesterov * 181974d2c3a0SOleg Nesterov * Returns effective policy for a VMA at specified address. 1820dd6eecb9SOleg Nesterov * Falls back to current->mempolicy or system default policy, as necessary. 182174d2c3a0SOleg Nesterov * Shared policies [those marked as MPOL_F_SHARED] require an extra reference 182274d2c3a0SOleg Nesterov * count--added by the get_policy() vm_op, as appropriate--to protect against 182374d2c3a0SOleg Nesterov * freeing by another task. It is the caller's responsibility to free the 182474d2c3a0SOleg Nesterov * extra reference for shared policies. 
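 *
 * Sketch of the caller contract (editorial); this is the shape used by
 * callers later in this file, e.g. alloc_pages_vma():
 *
 *   struct mempolicy *pol;
 *
 *   pol = get_vma_policy(vma, addr);  // may take a ref on a shared policy
 *   ... pick a node or nodemask from pol ...
 *   mpol_cond_put(pol);               // drops the ref only for MPOL_F_SHARED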
182574d2c3a0SOleg Nesterov */ 1826ac79f78dSDavid Rientjes static struct mempolicy *get_vma_policy(struct vm_area_struct *vma, 1827dd6eecb9SOleg Nesterov unsigned long addr) 182874d2c3a0SOleg Nesterov { 182974d2c3a0SOleg Nesterov struct mempolicy *pol = __get_vma_policy(vma, addr); 183074d2c3a0SOleg Nesterov 18318d90274bSOleg Nesterov if (!pol) 1832dd6eecb9SOleg Nesterov pol = get_task_policy(current); 18338d90274bSOleg Nesterov 18341da177e4SLinus Torvalds return pol; 18351da177e4SLinus Torvalds } 18361da177e4SLinus Torvalds 18376b6482bbSOleg Nesterov bool vma_policy_mof(struct vm_area_struct *vma) 1838fc314724SMel Gorman { 18396b6482bbSOleg Nesterov struct mempolicy *pol; 1840f15ca78eSOleg Nesterov 1841fc314724SMel Gorman if (vma->vm_ops && vma->vm_ops->get_policy) { 1842fc314724SMel Gorman bool ret = false; 1843fc314724SMel Gorman 1844fc314724SMel Gorman pol = vma->vm_ops->get_policy(vma, vma->vm_start); 1845fc314724SMel Gorman if (pol && (pol->flags & MPOL_F_MOF)) 1846fc314724SMel Gorman ret = true; 1847fc314724SMel Gorman mpol_cond_put(pol); 1848fc314724SMel Gorman 1849fc314724SMel Gorman return ret; 18508d90274bSOleg Nesterov } 18518d90274bSOleg Nesterov 1852fc314724SMel Gorman pol = vma->vm_policy; 18538d90274bSOleg Nesterov if (!pol) 18546b6482bbSOleg Nesterov pol = get_task_policy(current); 1855fc314724SMel Gorman 1856fc314724SMel Gorman return pol->flags & MPOL_F_MOF; 1857fc314724SMel Gorman } 1858fc314724SMel Gorman 1859d3eb1570SLai Jiangshan static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone) 1860d3eb1570SLai Jiangshan { 1861d3eb1570SLai Jiangshan enum zone_type dynamic_policy_zone = policy_zone; 1862d3eb1570SLai Jiangshan 1863d3eb1570SLai Jiangshan BUG_ON(dynamic_policy_zone == ZONE_MOVABLE); 1864d3eb1570SLai Jiangshan 1865d3eb1570SLai Jiangshan /* 1866d3eb1570SLai Jiangshan * if policy->v.nodes has movable memory only, 1867d3eb1570SLai Jiangshan * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only. 1868d3eb1570SLai Jiangshan * 1869d3eb1570SLai Jiangshan * policy->v.nodes is intersect with node_states[N_MEMORY]. 1870f0953a1bSIngo Molnar * so if the following test fails, it implies 1871d3eb1570SLai Jiangshan * policy->v.nodes has movable memory only. 
1872d3eb1570SLai Jiangshan */ 1873d3eb1570SLai Jiangshan if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY])) 1874d3eb1570SLai Jiangshan dynamic_policy_zone = ZONE_MOVABLE; 1875d3eb1570SLai Jiangshan 1876d3eb1570SLai Jiangshan return zone >= dynamic_policy_zone; 1877d3eb1570SLai Jiangshan } 1878d3eb1570SLai Jiangshan 187952cd3b07SLee Schermerhorn /* 188052cd3b07SLee Schermerhorn * Return a nodemask representing a mempolicy for filtering nodes for 188152cd3b07SLee Schermerhorn * page allocation 188252cd3b07SLee Schermerhorn */ 18838ca39e68SMuchun Song nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy) 188419770b32SMel Gorman { 188519770b32SMel Gorman /* Lower zones don't get a nodemask applied for MPOL_BIND */ 188645c4745aSLee Schermerhorn if (unlikely(policy->mode == MPOL_BIND) && 1887d3eb1570SLai Jiangshan apply_policy_zone(policy, gfp_zone(gfp)) && 188819770b32SMel Gorman cpuset_nodemask_valid_mems_allowed(&policy->v.nodes)) 188919770b32SMel Gorman return &policy->v.nodes; 189019770b32SMel Gorman 189119770b32SMel Gorman return NULL; 189219770b32SMel Gorman } 189319770b32SMel Gorman 189404ec6264SVlastimil Babka /* Return the node id preferred by the given mempolicy, or the given id */ 1895f8fd5253SWei Yang static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd) 18961da177e4SLinus Torvalds { 18976d840958SMichal Hocko if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL)) 18981da177e4SLinus Torvalds nd = policy->v.preferred_node; 18996d840958SMichal Hocko else { 190019770b32SMel Gorman /* 19016d840958SMichal Hocko * __GFP_THISNODE shouldn't even be used with the bind policy 19026d840958SMichal Hocko * because we might easily break the expectation to stay on the 19036d840958SMichal Hocko * requested node and not break the policy. 190419770b32SMel Gorman */ 19056d840958SMichal Hocko WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE)); 19061da177e4SLinus Torvalds } 19076d840958SMichal Hocko 190804ec6264SVlastimil Babka return nd; 19091da177e4SLinus Torvalds } 19101da177e4SLinus Torvalds 19111da177e4SLinus Torvalds /* Do dynamic interleaving for a process */ 19121da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy) 19131da177e4SLinus Torvalds { 191445816682SVlastimil Babka unsigned next; 19151da177e4SLinus Torvalds struct task_struct *me = current; 19161da177e4SLinus Torvalds 191745816682SVlastimil Babka next = next_node_in(me->il_prev, policy->v.nodes); 1918f5b087b5SDavid Rientjes if (next < MAX_NUMNODES) 191945816682SVlastimil Babka me->il_prev = next; 192045816682SVlastimil Babka return next; 19211da177e4SLinus Torvalds } 19221da177e4SLinus Torvalds 1923dc85da15SChristoph Lameter /* 1924dc85da15SChristoph Lameter * Depending on the memory policy provide a node from which to allocate the 1925dc85da15SChristoph Lameter * next slab entry. 
1926dc85da15SChristoph Lameter */ 19272a389610SDavid Rientjes unsigned int mempolicy_slab_node(void) 1928dc85da15SChristoph Lameter { 1929e7b691b0SAndi Kleen struct mempolicy *policy; 19302a389610SDavid Rientjes int node = numa_mem_id(); 1931e7b691b0SAndi Kleen 1932e7b691b0SAndi Kleen if (in_interrupt()) 19332a389610SDavid Rientjes return node; 1934e7b691b0SAndi Kleen 1935e7b691b0SAndi Kleen policy = current->mempolicy; 1936fc36b8d3SLee Schermerhorn if (!policy || policy->flags & MPOL_F_LOCAL) 19372a389610SDavid Rientjes return node; 1938765c4507SChristoph Lameter 1939bea904d5SLee Schermerhorn switch (policy->mode) { 1940bea904d5SLee Schermerhorn case MPOL_PREFERRED: 1941fc36b8d3SLee Schermerhorn /* 1942fc36b8d3SLee Schermerhorn * handled MPOL_F_LOCAL above 1943fc36b8d3SLee Schermerhorn */ 1944bea904d5SLee Schermerhorn return policy->v.preferred_node; 1945bea904d5SLee Schermerhorn 1946dc85da15SChristoph Lameter case MPOL_INTERLEAVE: 1947dc85da15SChristoph Lameter return interleave_nodes(policy); 1948dc85da15SChristoph Lameter 1949dd1a239fSMel Gorman case MPOL_BIND: { 1950c33d6c06SMel Gorman struct zoneref *z; 1951c33d6c06SMel Gorman 1952dc85da15SChristoph Lameter /* 1953dc85da15SChristoph Lameter * Follow bind policy behavior and start allocation at the 1954dc85da15SChristoph Lameter * first node. 1955dc85da15SChristoph Lameter */ 195619770b32SMel Gorman struct zonelist *zonelist; 195719770b32SMel Gorman enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL); 1958c9634cf0SAneesh Kumar K.V zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK]; 1959c33d6c06SMel Gorman z = first_zones_zonelist(zonelist, highest_zoneidx, 1960c33d6c06SMel Gorman &policy->v.nodes); 1961c1093b74SPavel Tatashin return z->zone ? zone_to_nid(z->zone) : node; 1962dd1a239fSMel Gorman } 1963dc85da15SChristoph Lameter 1964dc85da15SChristoph Lameter default: 1965bea904d5SLee Schermerhorn BUG(); 1966dc85da15SChristoph Lameter } 1967dc85da15SChristoph Lameter } 1968dc85da15SChristoph Lameter 1969fee83b3aSAndrew Morton /* 1970fee83b3aSAndrew Morton * Do static interleaving for a VMA with known offset @n. Returns the n'th 1971fee83b3aSAndrew Morton * node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the 1972fee83b3aSAndrew Morton * number of present nodes. 
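 *
 * Worked example (editorial): with pol->v.nodes = {0,2,5} we have nnodes = 3,
 * so a request for n = 7 gives target = 7 % 3 = 1; starting from
 * first_node() = 0 and advancing once returns node 2.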
1973fee83b3aSAndrew Morton */ 197498c70baaSLaurent Dufour static unsigned offset_il_node(struct mempolicy *pol, unsigned long n) 19751da177e4SLinus Torvalds { 1976dfcd3c0dSAndi Kleen unsigned nnodes = nodes_weight(pol->v.nodes); 1977f5b087b5SDavid Rientjes unsigned target; 1978fee83b3aSAndrew Morton int i; 1979fee83b3aSAndrew Morton int nid; 19801da177e4SLinus Torvalds 1981f5b087b5SDavid Rientjes if (!nnodes) 1982f5b087b5SDavid Rientjes return numa_node_id(); 1983fee83b3aSAndrew Morton target = (unsigned int)n % nnodes; 1984fee83b3aSAndrew Morton nid = first_node(pol->v.nodes); 1985fee83b3aSAndrew Morton for (i = 0; i < target; i++) 1986dfcd3c0dSAndi Kleen nid = next_node(nid, pol->v.nodes); 19871da177e4SLinus Torvalds return nid; 19881da177e4SLinus Torvalds } 19891da177e4SLinus Torvalds 19905da7ca86SChristoph Lameter /* Determine a node number for interleave */ 19915da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol, 19925da7ca86SChristoph Lameter struct vm_area_struct *vma, unsigned long addr, int shift) 19935da7ca86SChristoph Lameter { 19945da7ca86SChristoph Lameter if (vma) { 19955da7ca86SChristoph Lameter unsigned long off; 19965da7ca86SChristoph Lameter 19973b98b087SNishanth Aravamudan /* 19983b98b087SNishanth Aravamudan * for small pages, there is no difference between 19993b98b087SNishanth Aravamudan * shift and PAGE_SHIFT, so the bit-shift is safe. 20003b98b087SNishanth Aravamudan * for huge pages, since vm_pgoff is in units of small 20013b98b087SNishanth Aravamudan * pages, we need to shift off the always 0 bits to get 20023b98b087SNishanth Aravamudan * a useful offset. 20033b98b087SNishanth Aravamudan */ 20043b98b087SNishanth Aravamudan BUG_ON(shift < PAGE_SHIFT); 20053b98b087SNishanth Aravamudan off = vma->vm_pgoff >> (shift - PAGE_SHIFT); 20065da7ca86SChristoph Lameter off += (addr - vma->vm_start) >> shift; 200798c70baaSLaurent Dufour return offset_il_node(pol, off); 20085da7ca86SChristoph Lameter } else 20095da7ca86SChristoph Lameter return interleave_nodes(pol); 20105da7ca86SChristoph Lameter } 20115da7ca86SChristoph Lameter 201200ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS 2013480eccf9SLee Schermerhorn /* 201404ec6264SVlastimil Babka * huge_node(@vma, @addr, @gfp_flags, @mpol) 2015b46e14acSFabian Frederick * @vma: virtual memory area whose policy is sought 2016b46e14acSFabian Frederick * @addr: address in @vma for shared policy lookup and interleave policy 2017b46e14acSFabian Frederick * @gfp_flags: for requested zone 2018b46e14acSFabian Frederick * @mpol: pointer to mempolicy pointer for reference counted mempolicy 2019b46e14acSFabian Frederick * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask 2020480eccf9SLee Schermerhorn * 202104ec6264SVlastimil Babka * Returns a nid suitable for a huge page allocation and a pointer 202252cd3b07SLee Schermerhorn * to the struct mempolicy for conditional unref after allocation. 202352cd3b07SLee Schermerhorn * If the effective policy is 'BIND, returns a pointer to the mempolicy's 202452cd3b07SLee Schermerhorn * @nodemask for filtering the zonelist. 
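 *
 * Hedged sketch of a caller (editorial; the hugetlb fault path follows
 * roughly this shape, details elided):
 *
 *   struct mempolicy *mpol;
 *   nodemask_t *nodemask;
 *   int nid;
 *
 *   nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
 *   ... allocate a huge page preferring nid, filtered by nodemask ...
 *   mpol_cond_put(mpol);              // conditional unref, as described above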
2025c0ff7453SMiao Xie * 2026d26914d1SMel Gorman * Must be protected by read_mems_allowed_begin() 2027480eccf9SLee Schermerhorn */ 202804ec6264SVlastimil Babka int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags, 202904ec6264SVlastimil Babka struct mempolicy **mpol, nodemask_t **nodemask) 20305da7ca86SChristoph Lameter { 203104ec6264SVlastimil Babka int nid; 20325da7ca86SChristoph Lameter 2033dd6eecb9SOleg Nesterov *mpol = get_vma_policy(vma, addr); 203419770b32SMel Gorman *nodemask = NULL; /* assume !MPOL_BIND */ 20355da7ca86SChristoph Lameter 203652cd3b07SLee Schermerhorn if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) { 203704ec6264SVlastimil Babka nid = interleave_nid(*mpol, vma, addr, 203804ec6264SVlastimil Babka huge_page_shift(hstate_vma(vma))); 203952cd3b07SLee Schermerhorn } else { 204004ec6264SVlastimil Babka nid = policy_node(gfp_flags, *mpol, numa_node_id()); 204152cd3b07SLee Schermerhorn if ((*mpol)->mode == MPOL_BIND) 204252cd3b07SLee Schermerhorn *nodemask = &(*mpol)->v.nodes; 2043480eccf9SLee Schermerhorn } 204404ec6264SVlastimil Babka return nid; 20455da7ca86SChristoph Lameter } 204606808b08SLee Schermerhorn 204706808b08SLee Schermerhorn /* 204806808b08SLee Schermerhorn * init_nodemask_of_mempolicy 204906808b08SLee Schermerhorn * 205006808b08SLee Schermerhorn * If the current task's mempolicy is "default" [NULL], return 'false' 205106808b08SLee Schermerhorn * to indicate default policy. Otherwise, extract the policy nodemask 205206808b08SLee Schermerhorn * for 'bind' or 'interleave' policy into the argument nodemask, or 205306808b08SLee Schermerhorn * initialize the argument nodemask to contain the single node for 205406808b08SLee Schermerhorn * 'preferred' or 'local' policy and return 'true' to indicate presence 205506808b08SLee Schermerhorn * of non-default mempolicy. 205606808b08SLee Schermerhorn * 205706808b08SLee Schermerhorn * We don't bother with reference counting the mempolicy [mpol_get/put] 205806808b08SLee Schermerhorn * because the current task is examining its own mempolicy and a task's 205906808b08SLee Schermerhorn * mempolicy is only ever changed by the task itself. 206006808b08SLee Schermerhorn * 206106808b08SLee Schermerhorn * N.B., it is the caller's responsibility to free a returned nodemask.
206206808b08SLee Schermerhorn */ 206306808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask) 206406808b08SLee Schermerhorn { 206506808b08SLee Schermerhorn struct mempolicy *mempolicy; 206606808b08SLee Schermerhorn int nid; 206706808b08SLee Schermerhorn 206806808b08SLee Schermerhorn if (!(mask && current->mempolicy)) 206906808b08SLee Schermerhorn return false; 207006808b08SLee Schermerhorn 2071c0ff7453SMiao Xie task_lock(current); 207206808b08SLee Schermerhorn mempolicy = current->mempolicy; 207306808b08SLee Schermerhorn switch (mempolicy->mode) { 207406808b08SLee Schermerhorn case MPOL_PREFERRED: 207506808b08SLee Schermerhorn if (mempolicy->flags & MPOL_F_LOCAL) 207606808b08SLee Schermerhorn nid = numa_node_id(); 207706808b08SLee Schermerhorn else 207806808b08SLee Schermerhorn nid = mempolicy->v.preferred_node; 207906808b08SLee Schermerhorn init_nodemask_of_node(mask, nid); 208006808b08SLee Schermerhorn break; 208106808b08SLee Schermerhorn 208206808b08SLee Schermerhorn case MPOL_BIND: 208306808b08SLee Schermerhorn case MPOL_INTERLEAVE: 208406808b08SLee Schermerhorn *mask = mempolicy->v.nodes; 208506808b08SLee Schermerhorn break; 208606808b08SLee Schermerhorn 208706808b08SLee Schermerhorn default: 208806808b08SLee Schermerhorn BUG(); 208906808b08SLee Schermerhorn } 2090c0ff7453SMiao Xie task_unlock(current); 209106808b08SLee Schermerhorn 209206808b08SLee Schermerhorn return true; 209306808b08SLee Schermerhorn } 209400ac59adSChen, Kenneth W #endif 20955da7ca86SChristoph Lameter 20966f48d0ebSDavid Rientjes /* 20976f48d0ebSDavid Rientjes * mempolicy_nodemask_intersects 20986f48d0ebSDavid Rientjes * 20996f48d0ebSDavid Rientjes * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default 21006f48d0ebSDavid Rientjes * policy. Otherwise, check for intersection between mask and the policy 2101f0953a1bSIngo Molnar * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local' 21026f48d0ebSDavid Rientjes * policy, always return true since it may allocate elsewhere on fallback. 21036f48d0ebSDavid Rientjes * 21046f48d0ebSDavid Rientjes * Takes task_lock(tsk) to prevent freeing of its mempolicy. 21056f48d0ebSDavid Rientjes */ 21066f48d0ebSDavid Rientjes bool mempolicy_nodemask_intersects(struct task_struct *tsk, 21076f48d0ebSDavid Rientjes const nodemask_t *mask) 21086f48d0ebSDavid Rientjes { 21096f48d0ebSDavid Rientjes struct mempolicy *mempolicy; 21106f48d0ebSDavid Rientjes bool ret = true; 21116f48d0ebSDavid Rientjes 21126f48d0ebSDavid Rientjes if (!mask) 21136f48d0ebSDavid Rientjes return ret; 21146f48d0ebSDavid Rientjes task_lock(tsk); 21156f48d0ebSDavid Rientjes mempolicy = tsk->mempolicy; 21166f48d0ebSDavid Rientjes if (!mempolicy) 21176f48d0ebSDavid Rientjes goto out; 21186f48d0ebSDavid Rientjes 21196f48d0ebSDavid Rientjes switch (mempolicy->mode) { 21206f48d0ebSDavid Rientjes case MPOL_PREFERRED: 21216f48d0ebSDavid Rientjes /* 21226f48d0ebSDavid Rientjes * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to 21236f48d0ebSDavid Rientjes * allocate from, they may fallback to other nodes when oom. 21246f48d0ebSDavid Rientjes * Thus, it's possible for tsk to have allocated memory from 21256f48d0ebSDavid Rientjes * nodes in mask. 
21266f48d0ebSDavid Rientjes */ 21276f48d0ebSDavid Rientjes break; 21286f48d0ebSDavid Rientjes case MPOL_BIND: 21296f48d0ebSDavid Rientjes case MPOL_INTERLEAVE: 21306f48d0ebSDavid Rientjes ret = nodes_intersects(mempolicy->v.nodes, *mask); 21316f48d0ebSDavid Rientjes break; 21326f48d0ebSDavid Rientjes default: 21336f48d0ebSDavid Rientjes BUG(); 21346f48d0ebSDavid Rientjes } 21356f48d0ebSDavid Rientjes out: 21366f48d0ebSDavid Rientjes task_unlock(tsk); 21376f48d0ebSDavid Rientjes return ret; 21386f48d0ebSDavid Rientjes } 21396f48d0ebSDavid Rientjes 21401da177e4SLinus Torvalds /* Allocate a page in interleaved policy. 21411da177e4SLinus Torvalds Own path because it needs to do special accounting. */ 2142662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, 2143662f3a0bSAndi Kleen unsigned nid) 21441da177e4SLinus Torvalds { 21451da177e4SLinus Torvalds struct page *page; 21461da177e4SLinus Torvalds 214784172f4bSMatthew Wilcox (Oracle) page = __alloc_pages(gfp, order, nid, NULL); 21484518085eSKemi Wang /* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */ 21494518085eSKemi Wang if (!static_branch_likely(&vm_numa_stat_key)) 21504518085eSKemi Wang return page; 2151de55c8b2SAndrey Ryabinin if (page && page_to_nid(page) == nid) { 2152de55c8b2SAndrey Ryabinin preempt_disable(); 2153*f19298b9SMel Gorman __count_numa_event(page_zone(page), NUMA_INTERLEAVE_HIT); 2154de55c8b2SAndrey Ryabinin preempt_enable(); 2155de55c8b2SAndrey Ryabinin } 21561da177e4SLinus Torvalds return page; 21571da177e4SLinus Torvalds } 21581da177e4SLinus Torvalds 21591da177e4SLinus Torvalds /** 21600bbbc0b3SAndrea Arcangeli * alloc_pages_vma - Allocate a page for a VMA. 2161eb350739SMatthew Wilcox (Oracle) * @gfp: GFP flags. 21620bbbc0b3SAndrea Arcangeli * @order: Order of the GFP allocation. 21631da177e4SLinus Torvalds * @vma: Pointer to VMA or NULL if not available. 2164eb350739SMatthew Wilcox (Oracle) * @addr: Virtual address of the allocation. Must be inside @vma. 2165be97a41bSVlastimil Babka * @node: Which node to prefer for allocation (modulo policy). 2166eb350739SMatthew Wilcox (Oracle) * @hugepage: For hugepages try only the preferred node if possible. 21671da177e4SLinus Torvalds * 2168eb350739SMatthew Wilcox (Oracle) * Allocate a page for a specific address in @vma, using the appropriate 2169eb350739SMatthew Wilcox (Oracle) * NUMA policy. When @vma is not NULL the caller must hold the mmap_lock 2170eb350739SMatthew Wilcox (Oracle) * of the mm_struct of the VMA to prevent it from going away. Should be 2171eb350739SMatthew Wilcox (Oracle) * used for all allocations for pages that will be mapped into user space. 2172eb350739SMatthew Wilcox (Oracle) * 2173eb350739SMatthew Wilcox (Oracle) * Return: The page on success or NULL if allocation fails. 
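 *
 * Illustrative call (editorial; the GFP flags and the fault-handler context,
 * vmf and VM_FAULT_OOM, are assumptions for the example):
 *
 *   struct page *page;
 *
 *   page = alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, vmf->address,
 *                          numa_node_id(), false);
 *   if (!page)
 *           return VM_FAULT_OOM;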
21741da177e4SLinus Torvalds */ 2175eb350739SMatthew Wilcox (Oracle) struct page *alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, 217619deb769SDavid Rientjes unsigned long addr, int node, bool hugepage) 21771da177e4SLinus Torvalds { 2178cc9a6c87SMel Gorman struct mempolicy *pol; 2179c0ff7453SMiao Xie struct page *page; 218004ec6264SVlastimil Babka int preferred_nid; 2181be97a41bSVlastimil Babka nodemask_t *nmask; 21821da177e4SLinus Torvalds 2183dd6eecb9SOleg Nesterov pol = get_vma_policy(vma, addr); 2184cc9a6c87SMel Gorman 2185be97a41bSVlastimil Babka if (pol->mode == MPOL_INTERLEAVE) { 21861da177e4SLinus Torvalds unsigned nid; 21875da7ca86SChristoph Lameter 21888eac563cSAndi Kleen nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order); 218952cd3b07SLee Schermerhorn mpol_cond_put(pol); 21900bbbc0b3SAndrea Arcangeli page = alloc_page_interleave(gfp, order, nid); 2191be97a41bSVlastimil Babka goto out; 21921da177e4SLinus Torvalds } 21931da177e4SLinus Torvalds 219419deb769SDavid Rientjes if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) { 219519deb769SDavid Rientjes int hpage_node = node; 219619deb769SDavid Rientjes 219719deb769SDavid Rientjes /* 219819deb769SDavid Rientjes * For hugepage allocation and non-interleave policy which 219919deb769SDavid Rientjes * allows the current node (or other explicitly preferred 220019deb769SDavid Rientjes * node) we only try to allocate from the current/preferred 220119deb769SDavid Rientjes * node and don't fall back to other nodes, as the cost of 220219deb769SDavid Rientjes * remote accesses would likely offset THP benefits. 220319deb769SDavid Rientjes * 220419deb769SDavid Rientjes * If the policy is interleave, or does not allow the current 220519deb769SDavid Rientjes * node in its nodemask, we allocate the standard way. 220619deb769SDavid Rientjes */ 220719deb769SDavid Rientjes if (pol->mode == MPOL_PREFERRED && !(pol->flags & MPOL_F_LOCAL)) 220819deb769SDavid Rientjes hpage_node = pol->v.preferred_node; 220919deb769SDavid Rientjes 221019deb769SDavid Rientjes nmask = policy_nodemask(gfp, pol); 221119deb769SDavid Rientjes if (!nmask || node_isset(hpage_node, *nmask)) { 221219deb769SDavid Rientjes mpol_cond_put(pol); 2213cc638f32SVlastimil Babka /* 2214cc638f32SVlastimil Babka * First, try to allocate THP only on local node, but 2215cc638f32SVlastimil Babka * don't reclaim unnecessarily, just compact. 2216cc638f32SVlastimil Babka */ 221719deb769SDavid Rientjes page = __alloc_pages_node(hpage_node, 2218cc638f32SVlastimil Babka gfp | __GFP_THISNODE | __GFP_NORETRY, order); 221976e654ccSDavid Rientjes 222076e654ccSDavid Rientjes /* 222176e654ccSDavid Rientjes * If hugepage allocations are configured to always 222276e654ccSDavid Rientjes * synchronous compact or the vma has been madvised 222376e654ccSDavid Rientjes * to prefer hugepage backing, retry allowing remote 2224cc638f32SVlastimil Babka * memory with both reclaim and compact as well. 
222576e654ccSDavid Rientjes */ 222676e654ccSDavid Rientjes if (!page && (gfp & __GFP_DIRECT_RECLAIM)) 222776e654ccSDavid Rientjes page = __alloc_pages_node(hpage_node, 2228cc638f32SVlastimil Babka gfp, order); 222976e654ccSDavid Rientjes 223019deb769SDavid Rientjes goto out; 223119deb769SDavid Rientjes } 223219deb769SDavid Rientjes } 223319deb769SDavid Rientjes 2234077fcf11SAneesh Kumar K.V nmask = policy_nodemask(gfp, pol); 223504ec6264SVlastimil Babka preferred_nid = policy_node(gfp, pol, node); 223684172f4bSMatthew Wilcox (Oracle) page = __alloc_pages(gfp, order, preferred_nid, nmask); 2237d51e9894SVlastimil Babka mpol_cond_put(pol); 2238be97a41bSVlastimil Babka out: 2239077fcf11SAneesh Kumar K.V return page; 2240077fcf11SAneesh Kumar K.V } 224169262215SChristoph Hellwig EXPORT_SYMBOL(alloc_pages_vma); 2242077fcf11SAneesh Kumar K.V 22431da177e4SLinus Torvalds /** 2244d7f946d0SMatthew Wilcox (Oracle) * alloc_pages - Allocate pages. 22456421ec76SMatthew Wilcox (Oracle) * @gfp: GFP flags. 22466421ec76SMatthew Wilcox (Oracle) * @order: Power of two of number of pages to allocate. 22471da177e4SLinus Torvalds * 22486421ec76SMatthew Wilcox (Oracle) * Allocate 1 << @order contiguous pages. The physical address of the 22496421ec76SMatthew Wilcox (Oracle) * first page is naturally aligned (eg an order-3 allocation will be aligned 22506421ec76SMatthew Wilcox (Oracle) * to a multiple of 8 * PAGE_SIZE bytes). The NUMA policy of the current 22516421ec76SMatthew Wilcox (Oracle) * process is honoured when in process context. 22521da177e4SLinus Torvalds * 22536421ec76SMatthew Wilcox (Oracle) * Context: Can be called from any context, providing the appropriate GFP 22546421ec76SMatthew Wilcox (Oracle) * flags are used. 22556421ec76SMatthew Wilcox (Oracle) * Return: The page on success or NULL if allocation fails. 
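 *
 * Illustrative example (an assumption, not part of the original kernel-doc)
 * allocating and then freeing four contiguous pages:
 *
 *	struct page *p = alloc_pages(GFP_KERNEL, 2);
 *
 *	if (p)
 *		__free_pages(p, 2);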
22561da177e4SLinus Torvalds */ 2257d7f946d0SMatthew Wilcox (Oracle) struct page *alloc_pages(gfp_t gfp, unsigned order) 22581da177e4SLinus Torvalds { 22598d90274bSOleg Nesterov struct mempolicy *pol = &default_policy; 2260c0ff7453SMiao Xie struct page *page; 22611da177e4SLinus Torvalds 22628d90274bSOleg Nesterov if (!in_interrupt() && !(gfp & __GFP_THISNODE)) 22638d90274bSOleg Nesterov pol = get_task_policy(current); 226452cd3b07SLee Schermerhorn 226552cd3b07SLee Schermerhorn /* 226652cd3b07SLee Schermerhorn * No reference counting needed for current->mempolicy 226752cd3b07SLee Schermerhorn * nor system default_policy 226852cd3b07SLee Schermerhorn */ 226945c4745aSLee Schermerhorn if (pol->mode == MPOL_INTERLEAVE) 2270c0ff7453SMiao Xie page = alloc_page_interleave(gfp, order, interleave_nodes(pol)); 2271c0ff7453SMiao Xie else 227284172f4bSMatthew Wilcox (Oracle) page = __alloc_pages(gfp, order, 227304ec6264SVlastimil Babka policy_node(gfp, pol, numa_node_id()), 22745c4b4be3SAndi Kleen policy_nodemask(gfp, pol)); 2275cc9a6c87SMel Gorman 2276c0ff7453SMiao Xie return page; 22771da177e4SLinus Torvalds } 2278d7f946d0SMatthew Wilcox (Oracle) EXPORT_SYMBOL(alloc_pages); 22791da177e4SLinus Torvalds 2280ef0855d3SOleg Nesterov int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst) 2281ef0855d3SOleg Nesterov { 2282ef0855d3SOleg Nesterov struct mempolicy *pol = mpol_dup(vma_policy(src)); 2283ef0855d3SOleg Nesterov 2284ef0855d3SOleg Nesterov if (IS_ERR(pol)) 2285ef0855d3SOleg Nesterov return PTR_ERR(pol); 2286ef0855d3SOleg Nesterov dst->vm_policy = pol; 2287ef0855d3SOleg Nesterov return 0; 2288ef0855d3SOleg Nesterov } 2289ef0855d3SOleg Nesterov 22904225399aSPaul Jackson /* 2291846a16bfSLee Schermerhorn * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it 22924225399aSPaul Jackson * rebinds the mempolicy it is copying by calling mpol_rebind_policy() 22934225399aSPaul Jackson * with the mems_allowed returned by cpuset_mems_allowed(). This 22944225399aSPaul Jackson * keeps mempolicies cpuset relative after its cpuset moves. See 22954225399aSPaul Jackson * further kernel/cpuset.c update_nodemask(). 2296708c1bbcSMiao Xie * 2297708c1bbcSMiao Xie * current's mempolicy may be rebound by another task (the task that changes 2298708c1bbcSMiao Xie * cpuset's mems), so we needn't do rebind work for current task.
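 *
 * (Illustrative scenario, not in the original comment: if current's cpuset
 * is being moved from mems 0-1 to 2-3 while current duplicates a mempolicy,
 * __mpol_dup() below rebinds the copy to 2-3 via mpol_rebind_policy().)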
22994225399aSPaul Jackson */ 23004225399aSPaul Jackson 2301846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */ 2302846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old) 23031da177e4SLinus Torvalds { 23041da177e4SLinus Torvalds struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL); 23051da177e4SLinus Torvalds 23061da177e4SLinus Torvalds if (!new) 23071da177e4SLinus Torvalds return ERR_PTR(-ENOMEM); 2308708c1bbcSMiao Xie 2309708c1bbcSMiao Xie /* task's mempolicy is protected by alloc_lock */ 2310708c1bbcSMiao Xie if (old == current->mempolicy) { 2311708c1bbcSMiao Xie task_lock(current); 2312708c1bbcSMiao Xie *new = *old; 2313708c1bbcSMiao Xie task_unlock(current); 2314708c1bbcSMiao Xie } else 2315708c1bbcSMiao Xie *new = *old; 2316708c1bbcSMiao Xie 23174225399aSPaul Jackson if (current_cpuset_is_being_rebound()) { 23184225399aSPaul Jackson nodemask_t mems = cpuset_mems_allowed(current); 2319213980c0SVlastimil Babka mpol_rebind_policy(new, &mems); 23204225399aSPaul Jackson } 23211da177e4SLinus Torvalds atomic_set(&new->refcnt, 1); 23221da177e4SLinus Torvalds return new; 23231da177e4SLinus Torvalds } 23241da177e4SLinus Torvalds 23251da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */ 2326fcfb4dccSKOSAKI Motohiro bool __mpol_equal(struct mempolicy *a, struct mempolicy *b) 23271da177e4SLinus Torvalds { 23281da177e4SLinus Torvalds if (!a || !b) 2329fcfb4dccSKOSAKI Motohiro return false; 233045c4745aSLee Schermerhorn if (a->mode != b->mode) 2331fcfb4dccSKOSAKI Motohiro return false; 233219800502SBob Liu if (a->flags != b->flags) 2333fcfb4dccSKOSAKI Motohiro return false; 233419800502SBob Liu if (mpol_store_user_nodemask(a)) 233519800502SBob Liu if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask)) 2336fcfb4dccSKOSAKI Motohiro return false; 233719800502SBob Liu 233845c4745aSLee Schermerhorn switch (a->mode) { 233919770b32SMel Gorman case MPOL_BIND: 23401da177e4SLinus Torvalds case MPOL_INTERLEAVE: 2341fcfb4dccSKOSAKI Motohiro return !!nodes_equal(a->v.nodes, b->v.nodes); 23421da177e4SLinus Torvalds case MPOL_PREFERRED: 23438970a63eSYisheng Xie /* a's ->flags is the same as b's */ 23448970a63eSYisheng Xie if (a->flags & MPOL_F_LOCAL) 23458970a63eSYisheng Xie return true; 234675719661SNamhyung Kim return a->v.preferred_node == b->v.preferred_node; 23471da177e4SLinus Torvalds default: 23481da177e4SLinus Torvalds BUG(); 2349fcfb4dccSKOSAKI Motohiro return false; 23501da177e4SLinus Torvalds } 23511da177e4SLinus Torvalds } 23521da177e4SLinus Torvalds 23531da177e4SLinus Torvalds /* 23541da177e4SLinus Torvalds * Shared memory backing store policy support. 23551da177e4SLinus Torvalds * 23561da177e4SLinus Torvalds * Remember policies even when nobody has shared memory mapped. 23571da177e4SLinus Torvalds * The policies are kept in Red-Black tree linked from the inode. 23584a8c7bb5SNathan Zimmer * They are protected by the sp->lock rwlock, which should be held 23591da177e4SLinus Torvalds * for any accesses to the tree. 23601da177e4SLinus Torvalds */ 23611da177e4SLinus Torvalds 23624a8c7bb5SNathan Zimmer /* 23634a8c7bb5SNathan Zimmer * lookup first element intersecting start-end. 
Caller holds sp->lock for 23644a8c7bb5SNathan Zimmer * reading or for writing 23654a8c7bb5SNathan Zimmer */ 23661da177e4SLinus Torvalds static struct sp_node * 23671da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end) 23681da177e4SLinus Torvalds { 23691da177e4SLinus Torvalds struct rb_node *n = sp->root.rb_node; 23701da177e4SLinus Torvalds 23711da177e4SLinus Torvalds while (n) { 23721da177e4SLinus Torvalds struct sp_node *p = rb_entry(n, struct sp_node, nd); 23731da177e4SLinus Torvalds 23741da177e4SLinus Torvalds if (start >= p->end) 23751da177e4SLinus Torvalds n = n->rb_right; 23761da177e4SLinus Torvalds else if (end <= p->start) 23771da177e4SLinus Torvalds n = n->rb_left; 23781da177e4SLinus Torvalds else 23791da177e4SLinus Torvalds break; 23801da177e4SLinus Torvalds } 23811da177e4SLinus Torvalds if (!n) 23821da177e4SLinus Torvalds return NULL; 23831da177e4SLinus Torvalds for (;;) { 23841da177e4SLinus Torvalds struct sp_node *w = NULL; 23851da177e4SLinus Torvalds struct rb_node *prev = rb_prev(n); 23861da177e4SLinus Torvalds if (!prev) 23871da177e4SLinus Torvalds break; 23881da177e4SLinus Torvalds w = rb_entry(prev, struct sp_node, nd); 23891da177e4SLinus Torvalds if (w->end <= start) 23901da177e4SLinus Torvalds break; 23911da177e4SLinus Torvalds n = prev; 23921da177e4SLinus Torvalds } 23931da177e4SLinus Torvalds return rb_entry(n, struct sp_node, nd); 23941da177e4SLinus Torvalds } 23951da177e4SLinus Torvalds 23964a8c7bb5SNathan Zimmer /* 23974a8c7bb5SNathan Zimmer * Insert a new shared policy into the list. Caller holds sp->lock for 23984a8c7bb5SNathan Zimmer * writing. 23994a8c7bb5SNathan Zimmer */ 24001da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new) 24011da177e4SLinus Torvalds { 24021da177e4SLinus Torvalds struct rb_node **p = &sp->root.rb_node; 24031da177e4SLinus Torvalds struct rb_node *parent = NULL; 24041da177e4SLinus Torvalds struct sp_node *nd; 24051da177e4SLinus Torvalds 24061da177e4SLinus Torvalds while (*p) { 24071da177e4SLinus Torvalds parent = *p; 24081da177e4SLinus Torvalds nd = rb_entry(parent, struct sp_node, nd); 24091da177e4SLinus Torvalds if (new->start < nd->start) 24101da177e4SLinus Torvalds p = &(*p)->rb_left; 24111da177e4SLinus Torvalds else if (new->end > nd->end) 24121da177e4SLinus Torvalds p = &(*p)->rb_right; 24131da177e4SLinus Torvalds else 24141da177e4SLinus Torvalds BUG(); 24151da177e4SLinus Torvalds } 24161da177e4SLinus Torvalds rb_link_node(&new->nd, parent, p); 24171da177e4SLinus Torvalds rb_insert_color(&new->nd, &sp->root); 2418140d5a49SPaul Mundt pr_debug("inserting %lx-%lx: %d\n", new->start, new->end, 241945c4745aSLee Schermerhorn new->policy ? 
new->policy->mode : 0); 24201da177e4SLinus Torvalds } 24211da177e4SLinus Torvalds 24221da177e4SLinus Torvalds /* Find shared policy intersecting idx */ 24231da177e4SLinus Torvalds struct mempolicy * 24241da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx) 24251da177e4SLinus Torvalds { 24261da177e4SLinus Torvalds struct mempolicy *pol = NULL; 24271da177e4SLinus Torvalds struct sp_node *sn; 24281da177e4SLinus Torvalds 24291da177e4SLinus Torvalds if (!sp->root.rb_node) 24301da177e4SLinus Torvalds return NULL; 24314a8c7bb5SNathan Zimmer read_lock(&sp->lock); 24321da177e4SLinus Torvalds sn = sp_lookup(sp, idx, idx+1); 24331da177e4SLinus Torvalds if (sn) { 24341da177e4SLinus Torvalds mpol_get(sn->policy); 24351da177e4SLinus Torvalds pol = sn->policy; 24361da177e4SLinus Torvalds } 24374a8c7bb5SNathan Zimmer read_unlock(&sp->lock); 24381da177e4SLinus Torvalds return pol; 24391da177e4SLinus Torvalds } 24401da177e4SLinus Torvalds 244163f74ca2SKOSAKI Motohiro static void sp_free(struct sp_node *n) 244263f74ca2SKOSAKI Motohiro { 244363f74ca2SKOSAKI Motohiro mpol_put(n->policy); 244463f74ca2SKOSAKI Motohiro kmem_cache_free(sn_cache, n); 244563f74ca2SKOSAKI Motohiro } 244663f74ca2SKOSAKI Motohiro 2447771fb4d8SLee Schermerhorn /** 2448771fb4d8SLee Schermerhorn * mpol_misplaced - check whether current page node is valid in policy 2449771fb4d8SLee Schermerhorn * 2450b46e14acSFabian Frederick * @page: page to be checked 2451b46e14acSFabian Frederick * @vma: vm area where page mapped 2452b46e14acSFabian Frederick * @addr: virtual address where page mapped 2453771fb4d8SLee Schermerhorn * 2454771fb4d8SLee Schermerhorn * Lookup current policy node id for vma,addr and "compare to" page's 24555f076944SMatthew Wilcox (Oracle) * node id. Policy determination "mimics" alloc_page_vma(). 2456771fb4d8SLee Schermerhorn * Called from fault path where we know the vma and faulting address. 24575f076944SMatthew Wilcox (Oracle) * 24585f076944SMatthew Wilcox (Oracle) * Return: -1 if the page is in a node that is valid for this policy, or a 24595f076944SMatthew Wilcox (Oracle) * suitable node ID to allocate a replacement page from. 
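 *
 * Illustrative caller pattern (a sketch; the call site is outside this file
 * and the variable name is an assumption):
 *
 *	int target_nid = mpol_misplaced(page, vma, addr);
 *
 *	if (target_nid != -1)
 *		...migrate the page towards target_nid...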
2460771fb4d8SLee Schermerhorn */ 2461771fb4d8SLee Schermerhorn int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr) 2462771fb4d8SLee Schermerhorn { 2463771fb4d8SLee Schermerhorn struct mempolicy *pol; 2464c33d6c06SMel Gorman struct zoneref *z; 2465771fb4d8SLee Schermerhorn int curnid = page_to_nid(page); 2466771fb4d8SLee Schermerhorn unsigned long pgoff; 246790572890SPeter Zijlstra int thiscpu = raw_smp_processor_id(); 246890572890SPeter Zijlstra int thisnid = cpu_to_node(thiscpu); 246998fa15f3SAnshuman Khandual int polnid = NUMA_NO_NODE; 2470771fb4d8SLee Schermerhorn int ret = -1; 2471771fb4d8SLee Schermerhorn 2472dd6eecb9SOleg Nesterov pol = get_vma_policy(vma, addr); 2473771fb4d8SLee Schermerhorn if (!(pol->flags & MPOL_F_MOF)) 2474771fb4d8SLee Schermerhorn goto out; 2475771fb4d8SLee Schermerhorn 2476771fb4d8SLee Schermerhorn switch (pol->mode) { 2477771fb4d8SLee Schermerhorn case MPOL_INTERLEAVE: 2478771fb4d8SLee Schermerhorn pgoff = vma->vm_pgoff; 2479771fb4d8SLee Schermerhorn pgoff += (addr - vma->vm_start) >> PAGE_SHIFT; 248098c70baaSLaurent Dufour polnid = offset_il_node(pol, pgoff); 2481771fb4d8SLee Schermerhorn break; 2482771fb4d8SLee Schermerhorn 2483771fb4d8SLee Schermerhorn case MPOL_PREFERRED: 2484771fb4d8SLee Schermerhorn if (pol->flags & MPOL_F_LOCAL) 2485771fb4d8SLee Schermerhorn polnid = numa_node_id(); 2486771fb4d8SLee Schermerhorn else 2487771fb4d8SLee Schermerhorn polnid = pol->v.preferred_node; 2488771fb4d8SLee Schermerhorn break; 2489771fb4d8SLee Schermerhorn 2490771fb4d8SLee Schermerhorn case MPOL_BIND: 2491bda420b9SHuang Ying /* Optimize placement among multiple nodes via NUMA balancing */ 2492bda420b9SHuang Ying if (pol->flags & MPOL_F_MORON) { 2493bda420b9SHuang Ying if (node_isset(thisnid, pol->v.nodes)) 2494bda420b9SHuang Ying break; 2495bda420b9SHuang Ying goto out; 2496bda420b9SHuang Ying } 2497c33d6c06SMel Gorman 2498771fb4d8SLee Schermerhorn /* 2499771fb4d8SLee Schermerhorn * allows binding to multiple nodes. 2500771fb4d8SLee Schermerhorn * use current page if in policy nodemask, 2501771fb4d8SLee Schermerhorn * else select nearest allowed node, if any. 2502771fb4d8SLee Schermerhorn * If no allowed nodes, use current [!misplaced]. 
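 * (Descriptive note: first_zones_zonelist() below walks the local node's
 * zonelist restricted to pol->v.nodes, so the first hit is the nearest
 * node this policy allows.)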
2503771fb4d8SLee Schermerhorn */ 2504771fb4d8SLee Schermerhorn if (node_isset(curnid, pol->v.nodes)) 2505771fb4d8SLee Schermerhorn goto out; 2506c33d6c06SMel Gorman z = first_zones_zonelist( 2507771fb4d8SLee Schermerhorn node_zonelist(numa_node_id(), GFP_HIGHUSER), 2508771fb4d8SLee Schermerhorn gfp_zone(GFP_HIGHUSER), 2509c33d6c06SMel Gorman &pol->v.nodes); 2510c1093b74SPavel Tatashin polnid = zone_to_nid(z->zone); 2511771fb4d8SLee Schermerhorn break; 2512771fb4d8SLee Schermerhorn 2513771fb4d8SLee Schermerhorn default: 2514771fb4d8SLee Schermerhorn BUG(); 2515771fb4d8SLee Schermerhorn } 25165606e387SMel Gorman 25175606e387SMel Gorman /* Migrate the page towards the node whose CPU is referencing it */ 2518e42c8ff2SMel Gorman if (pol->flags & MPOL_F_MORON) { 251990572890SPeter Zijlstra polnid = thisnid; 25205606e387SMel Gorman 252110f39042SRik van Riel if (!should_numa_migrate_memory(current, page, curnid, thiscpu)) 2522de1c9ce6SRik van Riel goto out; 2523de1c9ce6SRik van Riel } 2524e42c8ff2SMel Gorman 2525771fb4d8SLee Schermerhorn if (curnid != polnid) 2526771fb4d8SLee Schermerhorn ret = polnid; 2527771fb4d8SLee Schermerhorn out: 2528771fb4d8SLee Schermerhorn mpol_cond_put(pol); 2529771fb4d8SLee Schermerhorn 2530771fb4d8SLee Schermerhorn return ret; 2531771fb4d8SLee Schermerhorn } 2532771fb4d8SLee Schermerhorn 2533c11600e4SDavid Rientjes /* 2534c11600e4SDavid Rientjes * Drop the (possibly final) reference to task->mempolicy. It needs to be 2535c11600e4SDavid Rientjes * dropped after task->mempolicy is set to NULL so that any allocation done as 2536c11600e4SDavid Rientjes * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed 2537c11600e4SDavid Rientjes * policy. 2538c11600e4SDavid Rientjes */ 2539c11600e4SDavid Rientjes void mpol_put_task_policy(struct task_struct *task) 2540c11600e4SDavid Rientjes { 2541c11600e4SDavid Rientjes struct mempolicy *pol; 2542c11600e4SDavid Rientjes 2543c11600e4SDavid Rientjes task_lock(task); 2544c11600e4SDavid Rientjes pol = task->mempolicy; 2545c11600e4SDavid Rientjes task->mempolicy = NULL; 2546c11600e4SDavid Rientjes task_unlock(task); 2547c11600e4SDavid Rientjes mpol_put(pol); 2548c11600e4SDavid Rientjes } 2549c11600e4SDavid Rientjes 25501da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n) 25511da177e4SLinus Torvalds { 2552140d5a49SPaul Mundt pr_debug("deleting %lx-l%lx\n", n->start, n->end); 25531da177e4SLinus Torvalds rb_erase(&n->nd, &sp->root); 255463f74ca2SKOSAKI Motohiro sp_free(n); 25551da177e4SLinus Torvalds } 25561da177e4SLinus Torvalds 255742288fe3SMel Gorman static void sp_node_init(struct sp_node *node, unsigned long start, 255842288fe3SMel Gorman unsigned long end, struct mempolicy *pol) 255942288fe3SMel Gorman { 256042288fe3SMel Gorman node->start = start; 256142288fe3SMel Gorman node->end = end; 256242288fe3SMel Gorman node->policy = pol; 256342288fe3SMel Gorman } 256442288fe3SMel Gorman 2565dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end, 2566dbcb0f19SAdrian Bunk struct mempolicy *pol) 25671da177e4SLinus Torvalds { 2568869833f2SKOSAKI Motohiro struct sp_node *n; 2569869833f2SKOSAKI Motohiro struct mempolicy *newpol; 25701da177e4SLinus Torvalds 2571869833f2SKOSAKI Motohiro n = kmem_cache_alloc(sn_cache, GFP_KERNEL); 25721da177e4SLinus Torvalds if (!n) 25731da177e4SLinus Torvalds return NULL; 2574869833f2SKOSAKI Motohiro 2575869833f2SKOSAKI Motohiro newpol = mpol_dup(pol); 2576869833f2SKOSAKI Motohiro if (IS_ERR(newpol)) { 
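		/* mpol_dup() failed: undo the sn_cache allocation above */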
2577869833f2SKOSAKI Motohiro kmem_cache_free(sn_cache, n); 2578869833f2SKOSAKI Motohiro return NULL; 2579869833f2SKOSAKI Motohiro } 2580869833f2SKOSAKI Motohiro newpol->flags |= MPOL_F_SHARED; 258142288fe3SMel Gorman sp_node_init(n, start, end, newpol); 2582869833f2SKOSAKI Motohiro 25831da177e4SLinus Torvalds return n; 25841da177e4SLinus Torvalds } 25851da177e4SLinus Torvalds 25861da177e4SLinus Torvalds /* Replace a policy range. */ 25871da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start, 25881da177e4SLinus Torvalds unsigned long end, struct sp_node *new) 25891da177e4SLinus Torvalds { 2590b22d127aSMel Gorman struct sp_node *n; 259142288fe3SMel Gorman struct sp_node *n_new = NULL; 259242288fe3SMel Gorman struct mempolicy *mpol_new = NULL; 2593b22d127aSMel Gorman int ret = 0; 25941da177e4SLinus Torvalds 259542288fe3SMel Gorman restart: 25964a8c7bb5SNathan Zimmer write_lock(&sp->lock); 25971da177e4SLinus Torvalds n = sp_lookup(sp, start, end); 25981da177e4SLinus Torvalds /* Take care of old policies in the same range. */ 25991da177e4SLinus Torvalds while (n && n->start < end) { 26001da177e4SLinus Torvalds struct rb_node *next = rb_next(&n->nd); 26011da177e4SLinus Torvalds if (n->start >= start) { 26021da177e4SLinus Torvalds if (n->end <= end) 26031da177e4SLinus Torvalds sp_delete(sp, n); 26041da177e4SLinus Torvalds else 26051da177e4SLinus Torvalds n->start = end; 26061da177e4SLinus Torvalds } else { 26071da177e4SLinus Torvalds /* Old policy spanning whole new range. */ 26081da177e4SLinus Torvalds if (n->end > end) { 260942288fe3SMel Gorman if (!n_new) 261042288fe3SMel Gorman goto alloc_new; 261142288fe3SMel Gorman 261242288fe3SMel Gorman *mpol_new = *n->policy; 261342288fe3SMel Gorman atomic_set(&mpol_new->refcnt, 1); 26147880639cSKOSAKI Motohiro sp_node_init(n_new, end, n->end, mpol_new); 26151da177e4SLinus Torvalds n->end = start; 26165ca39575SHillf Danton sp_insert(sp, n_new); 261742288fe3SMel Gorman n_new = NULL; 261842288fe3SMel Gorman mpol_new = NULL; 26191da177e4SLinus Torvalds break; 26201da177e4SLinus Torvalds } else 26211da177e4SLinus Torvalds n->end = start; 26221da177e4SLinus Torvalds } 26231da177e4SLinus Torvalds if (!next) 26241da177e4SLinus Torvalds break; 26251da177e4SLinus Torvalds n = rb_entry(next, struct sp_node, nd); 26261da177e4SLinus Torvalds } 26271da177e4SLinus Torvalds if (new) 26281da177e4SLinus Torvalds sp_insert(sp, new); 26294a8c7bb5SNathan Zimmer write_unlock(&sp->lock); 263042288fe3SMel Gorman ret = 0; 263142288fe3SMel Gorman 263242288fe3SMel Gorman err_out: 263342288fe3SMel Gorman if (mpol_new) 263442288fe3SMel Gorman mpol_put(mpol_new); 263542288fe3SMel Gorman if (n_new) 263642288fe3SMel Gorman kmem_cache_free(sn_cache, n_new); 263742288fe3SMel Gorman 2638b22d127aSMel Gorman return ret; 263942288fe3SMel Gorman 264042288fe3SMel Gorman alloc_new: 26414a8c7bb5SNathan Zimmer write_unlock(&sp->lock); 264242288fe3SMel Gorman ret = -ENOMEM; 264342288fe3SMel Gorman n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL); 264442288fe3SMel Gorman if (!n_new) 264542288fe3SMel Gorman goto err_out; 264642288fe3SMel Gorman mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL); 264742288fe3SMel Gorman if (!mpol_new) 264842288fe3SMel Gorman goto err_out; 264942288fe3SMel Gorman goto restart; 26501da177e4SLinus Torvalds } 26511da177e4SLinus Torvalds 265271fe804bSLee Schermerhorn /** 265371fe804bSLee Schermerhorn * mpol_shared_policy_init - initialize shared policy for inode 265471fe804bSLee Schermerhorn * @sp: pointer to inode shared 
policy 265571fe804bSLee Schermerhorn * @mpol: struct mempolicy to install 265671fe804bSLee Schermerhorn * 265771fe804bSLee Schermerhorn * Install non-NULL @mpol in inode's shared policy rb-tree. 265871fe804bSLee Schermerhorn * On entry, the current task has a reference on a non-NULL @mpol. 265971fe804bSLee Schermerhorn * This must be released on exit. 26604bfc4495SKAMEZAWA Hiroyuki * This is called at get_inode() calls and we can use GFP_KERNEL. 266171fe804bSLee Schermerhorn */ 266271fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) 26637339ff83SRobin Holt { 266458568d2aSMiao Xie int ret; 266558568d2aSMiao Xie 266671fe804bSLee Schermerhorn sp->root = RB_ROOT; /* empty tree == default mempolicy */ 26674a8c7bb5SNathan Zimmer rwlock_init(&sp->lock); 26687339ff83SRobin Holt 266971fe804bSLee Schermerhorn if (mpol) { 26707339ff83SRobin Holt struct vm_area_struct pvma; 267171fe804bSLee Schermerhorn struct mempolicy *new; 26724bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 26737339ff83SRobin Holt 26744bfc4495SKAMEZAWA Hiroyuki if (!scratch) 26755c0c1654SLee Schermerhorn goto put_mpol; 267671fe804bSLee Schermerhorn /* contextualize the tmpfs mount point mempolicy */ 267771fe804bSLee Schermerhorn new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask); 267815d77835SLee Schermerhorn if (IS_ERR(new)) 26790cae3457SDan Carpenter goto free_scratch; /* no valid nodemask intersection */ 268058568d2aSMiao Xie 268158568d2aSMiao Xie task_lock(current); 26824bfc4495SKAMEZAWA Hiroyuki ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch); 268358568d2aSMiao Xie task_unlock(current); 268415d77835SLee Schermerhorn if (ret) 26855c0c1654SLee Schermerhorn goto put_new; 268671fe804bSLee Schermerhorn 268771fe804bSLee Schermerhorn /* Create pseudo-vma that contains just the policy */ 26882c4541e2SKirill A. Shutemov vma_init(&pvma, NULL); 268971fe804bSLee Schermerhorn pvma.vm_end = TASK_SIZE; /* policy covers entire file */ 269071fe804bSLee Schermerhorn mpol_set_shared_policy(sp, &pvma, new); /* adds ref */ 269115d77835SLee Schermerhorn 26925c0c1654SLee Schermerhorn put_new: 269371fe804bSLee Schermerhorn mpol_put(new); /* drop initial ref */ 26940cae3457SDan Carpenter free_scratch: 26954bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 26965c0c1654SLee Schermerhorn put_mpol: 26975c0c1654SLee Schermerhorn mpol_put(mpol); /* drop our incoming ref on sb mpol */ 26987339ff83SRobin Holt } 26997339ff83SRobin Holt } 27007339ff83SRobin Holt 27011da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info, 27021da177e4SLinus Torvalds struct vm_area_struct *vma, struct mempolicy *npol) 27031da177e4SLinus Torvalds { 27041da177e4SLinus Torvalds int err; 27051da177e4SLinus Torvalds struct sp_node *new = NULL; 27061da177e4SLinus Torvalds unsigned long sz = vma_pages(vma); 27071da177e4SLinus Torvalds 2708028fec41SDavid Rientjes pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n", 27091da177e4SLinus Torvalds vma->vm_pgoff, 271045c4745aSLee Schermerhorn sz, npol ? npol->mode : -1, 2711028fec41SDavid Rientjes npol ? npol->flags : -1, 271200ef2d2fSDavid Rientjes npol ? 
nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE); 27131da177e4SLinus Torvalds 27141da177e4SLinus Torvalds if (npol) { 27151da177e4SLinus Torvalds new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol); 27161da177e4SLinus Torvalds if (!new) 27171da177e4SLinus Torvalds return -ENOMEM; 27181da177e4SLinus Torvalds } 27191da177e4SLinus Torvalds err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new); 27201da177e4SLinus Torvalds if (err && new) 272163f74ca2SKOSAKI Motohiro sp_free(new); 27221da177e4SLinus Torvalds return err; 27231da177e4SLinus Torvalds } 27241da177e4SLinus Torvalds 27251da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */ 27261da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p) 27271da177e4SLinus Torvalds { 27281da177e4SLinus Torvalds struct sp_node *n; 27291da177e4SLinus Torvalds struct rb_node *next; 27301da177e4SLinus Torvalds 27311da177e4SLinus Torvalds if (!p->root.rb_node) 27321da177e4SLinus Torvalds return; 27334a8c7bb5SNathan Zimmer write_lock(&p->lock); 27341da177e4SLinus Torvalds next = rb_first(&p->root); 27351da177e4SLinus Torvalds while (next) { 27361da177e4SLinus Torvalds n = rb_entry(next, struct sp_node, nd); 27371da177e4SLinus Torvalds next = rb_next(&n->nd); 273863f74ca2SKOSAKI Motohiro sp_delete(p, n); 27391da177e4SLinus Torvalds } 27404a8c7bb5SNathan Zimmer write_unlock(&p->lock); 27411da177e4SLinus Torvalds } 27421da177e4SLinus Torvalds 27431a687c2eSMel Gorman #ifdef CONFIG_NUMA_BALANCING 2744c297663cSMel Gorman static int __initdata numabalancing_override; 27451a687c2eSMel Gorman 27461a687c2eSMel Gorman static void __init check_numabalancing_enable(void) 27471a687c2eSMel Gorman { 27481a687c2eSMel Gorman bool numabalancing_default = false; 27491a687c2eSMel Gorman 27501a687c2eSMel Gorman if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED)) 27511a687c2eSMel Gorman numabalancing_default = true; 27521a687c2eSMel Gorman 2753c297663cSMel Gorman /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */ 2754c297663cSMel Gorman if (numabalancing_override) 2755c297663cSMel Gorman set_numabalancing_state(numabalancing_override == 1); 2756c297663cSMel Gorman 2757b0dc2b9bSMel Gorman if (num_online_nodes() > 1 && !numabalancing_override) { 2758756a025fSJoe Perches pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n", 2759c297663cSMel Gorman numabalancing_default ? 
"Enabling" : "Disabling"); 27601a687c2eSMel Gorman set_numabalancing_state(numabalancing_default); 27611a687c2eSMel Gorman } 27621a687c2eSMel Gorman } 27631a687c2eSMel Gorman 27641a687c2eSMel Gorman static int __init setup_numabalancing(char *str) 27651a687c2eSMel Gorman { 27661a687c2eSMel Gorman int ret = 0; 27671a687c2eSMel Gorman if (!str) 27681a687c2eSMel Gorman goto out; 27691a687c2eSMel Gorman 27701a687c2eSMel Gorman if (!strcmp(str, "enable")) { 2771c297663cSMel Gorman numabalancing_override = 1; 27721a687c2eSMel Gorman ret = 1; 27731a687c2eSMel Gorman } else if (!strcmp(str, "disable")) { 2774c297663cSMel Gorman numabalancing_override = -1; 27751a687c2eSMel Gorman ret = 1; 27761a687c2eSMel Gorman } 27771a687c2eSMel Gorman out: 27781a687c2eSMel Gorman if (!ret) 27794a404beaSAndrew Morton pr_warn("Unable to parse numa_balancing=\n"); 27801a687c2eSMel Gorman 27811a687c2eSMel Gorman return ret; 27821a687c2eSMel Gorman } 27831a687c2eSMel Gorman __setup("numa_balancing=", setup_numabalancing); 27841a687c2eSMel Gorman #else 27851a687c2eSMel Gorman static inline void __init check_numabalancing_enable(void) 27861a687c2eSMel Gorman { 27871a687c2eSMel Gorman } 27881a687c2eSMel Gorman #endif /* CONFIG_NUMA_BALANCING */ 27891a687c2eSMel Gorman 27901da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */ 27911da177e4SLinus Torvalds void __init numa_policy_init(void) 27921da177e4SLinus Torvalds { 2793b71636e2SPaul Mundt nodemask_t interleave_nodes; 2794b71636e2SPaul Mundt unsigned long largest = 0; 2795b71636e2SPaul Mundt int nid, prefer = 0; 2796b71636e2SPaul Mundt 27971da177e4SLinus Torvalds policy_cache = kmem_cache_create("numa_policy", 27981da177e4SLinus Torvalds sizeof(struct mempolicy), 279920c2df83SPaul Mundt 0, SLAB_PANIC, NULL); 28001da177e4SLinus Torvalds 28011da177e4SLinus Torvalds sn_cache = kmem_cache_create("shared_policy_node", 28021da177e4SLinus Torvalds sizeof(struct sp_node), 280320c2df83SPaul Mundt 0, SLAB_PANIC, NULL); 28041da177e4SLinus Torvalds 28055606e387SMel Gorman for_each_node(nid) { 28065606e387SMel Gorman preferred_node_policy[nid] = (struct mempolicy) { 28075606e387SMel Gorman .refcnt = ATOMIC_INIT(1), 28085606e387SMel Gorman .mode = MPOL_PREFERRED, 28095606e387SMel Gorman .flags = MPOL_F_MOF | MPOL_F_MORON, 28105606e387SMel Gorman .v = { .preferred_node = nid, }, 28115606e387SMel Gorman }; 28125606e387SMel Gorman } 28135606e387SMel Gorman 2814b71636e2SPaul Mundt /* 2815b71636e2SPaul Mundt * Set interleaving policy for system init. Interleaving is only 2816b71636e2SPaul Mundt * enabled across suitably sized nodes (default is >= 16MB), or 2817b71636e2SPaul Mundt * fall back to the largest node if they're all smaller. 2818b71636e2SPaul Mundt */ 2819b71636e2SPaul Mundt nodes_clear(interleave_nodes); 282001f13bd6SLai Jiangshan for_each_node_state(nid, N_MEMORY) { 2821b71636e2SPaul Mundt unsigned long total_pages = node_present_pages(nid); 28221da177e4SLinus Torvalds 2823b71636e2SPaul Mundt /* Preserve the largest node */ 2824b71636e2SPaul Mundt if (largest < total_pages) { 2825b71636e2SPaul Mundt largest = total_pages; 2826b71636e2SPaul Mundt prefer = nid; 2827b71636e2SPaul Mundt } 2828b71636e2SPaul Mundt 2829b71636e2SPaul Mundt /* Interleave this node? 
*/ 2830b71636e2SPaul Mundt if ((total_pages << PAGE_SHIFT) >= (16 << 20)) 2831b71636e2SPaul Mundt node_set(nid, interleave_nodes); 2832b71636e2SPaul Mundt } 2833b71636e2SPaul Mundt 2834b71636e2SPaul Mundt /* All too small, use the largest */ 2835b71636e2SPaul Mundt if (unlikely(nodes_empty(interleave_nodes))) 2836b71636e2SPaul Mundt node_set(prefer, interleave_nodes); 2837b71636e2SPaul Mundt 2838028fec41SDavid Rientjes if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes)) 2839b1de0d13SMitchel Humpherys pr_err("%s: interleaving failed\n", __func__); 28401a687c2eSMel Gorman 28411a687c2eSMel Gorman check_numabalancing_enable(); 28421da177e4SLinus Torvalds } 28431da177e4SLinus Torvalds 28448bccd85fSChristoph Lameter /* Reset policy of current process to default */ 28451da177e4SLinus Torvalds void numa_default_policy(void) 28461da177e4SLinus Torvalds { 2847028fec41SDavid Rientjes do_set_mempolicy(MPOL_DEFAULT, 0, NULL); 28481da177e4SLinus Torvalds } 284968860ec1SPaul Jackson 28504225399aSPaul Jackson /* 2851095f1fc4SLee Schermerhorn * Parse and format mempolicy from/to strings 2852095f1fc4SLee Schermerhorn */ 2853095f1fc4SLee Schermerhorn 2854095f1fc4SLee Schermerhorn /* 2855f2a07f40SHugh Dickins * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag. 28561a75a6c8SChristoph Lameter */ 2857345ace9cSLee Schermerhorn static const char * const policy_modes[] = 2858345ace9cSLee Schermerhorn { 2859345ace9cSLee Schermerhorn [MPOL_DEFAULT] = "default", 2860345ace9cSLee Schermerhorn [MPOL_PREFERRED] = "prefer", 2861345ace9cSLee Schermerhorn [MPOL_BIND] = "bind", 2862345ace9cSLee Schermerhorn [MPOL_INTERLEAVE] = "interleave", 2863d3a71033SLee Schermerhorn [MPOL_LOCAL] = "local", 2864345ace9cSLee Schermerhorn }; 28651a75a6c8SChristoph Lameter 2866095f1fc4SLee Schermerhorn 2867095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS 2868095f1fc4SLee Schermerhorn /** 2869f2a07f40SHugh Dickins * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option. 2870095f1fc4SLee Schermerhorn * @str: string containing mempolicy to parse 287171fe804bSLee Schermerhorn * @mpol: pointer to struct mempolicy pointer, returned on success. 
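 *
 * Illustrative example strings (assumptions, matching the format described
 * below): "bind=static:0-3", "interleave:0,2", "prefer=relative:1", "local".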
2872095f1fc4SLee Schermerhorn * 2873095f1fc4SLee Schermerhorn * Format of input: 2874095f1fc4SLee Schermerhorn * <mode>[=<flags>][:<nodelist>] 2875095f1fc4SLee Schermerhorn * 287671fe804bSLee Schermerhorn * On success, returns 0, else 1 2877095f1fc4SLee Schermerhorn */ 2878a7a88b23SHugh Dickins int mpol_parse_str(char *str, struct mempolicy **mpol) 2879095f1fc4SLee Schermerhorn { 288071fe804bSLee Schermerhorn struct mempolicy *new = NULL; 2881f2a07f40SHugh Dickins unsigned short mode_flags; 288271fe804bSLee Schermerhorn nodemask_t nodes; 2883095f1fc4SLee Schermerhorn char *nodelist = strchr(str, ':'); 2884095f1fc4SLee Schermerhorn char *flags = strchr(str, '='); 2885dedf2c73Szhong jiang int err = 1, mode; 2886095f1fc4SLee Schermerhorn 2887c7a91bc7SDan Carpenter if (flags) 2888c7a91bc7SDan Carpenter *flags++ = '\0'; /* terminate mode string */ 2889c7a91bc7SDan Carpenter 2890095f1fc4SLee Schermerhorn if (nodelist) { 2891095f1fc4SLee Schermerhorn /* NUL-terminate mode or flags string */ 2892095f1fc4SLee Schermerhorn *nodelist++ = '\0'; 289371fe804bSLee Schermerhorn if (nodelist_parse(nodelist, nodes)) 2894095f1fc4SLee Schermerhorn goto out; 289501f13bd6SLai Jiangshan if (!nodes_subset(nodes, node_states[N_MEMORY])) 2896095f1fc4SLee Schermerhorn goto out; 289771fe804bSLee Schermerhorn } else 289871fe804bSLee Schermerhorn nodes_clear(nodes); 289971fe804bSLee Schermerhorn 2900dedf2c73Szhong jiang mode = match_string(policy_modes, MPOL_MAX, str); 2901dedf2c73Szhong jiang if (mode < 0) 2902095f1fc4SLee Schermerhorn goto out; 2903095f1fc4SLee Schermerhorn 290471fe804bSLee Schermerhorn switch (mode) { 2905095f1fc4SLee Schermerhorn case MPOL_PREFERRED: 290671fe804bSLee Schermerhorn /* 2907aa9f7d51SRandy Dunlap * Insist on a nodelist of one node only, although later 2908aa9f7d51SRandy Dunlap * we use first_node(nodes) to grab a single node, so here 2909aa9f7d51SRandy Dunlap * nodelist (or nodes) cannot be empty. 
291071fe804bSLee Schermerhorn */ 2911095f1fc4SLee Schermerhorn if (nodelist) { 2912095f1fc4SLee Schermerhorn char *rest = nodelist; 2913095f1fc4SLee Schermerhorn while (isdigit(*rest)) 2914095f1fc4SLee Schermerhorn rest++; 2915926f2ae0SKOSAKI Motohiro if (*rest) 2916926f2ae0SKOSAKI Motohiro goto out; 2917aa9f7d51SRandy Dunlap if (nodes_empty(nodes)) 2918aa9f7d51SRandy Dunlap goto out; 2919095f1fc4SLee Schermerhorn } 2920095f1fc4SLee Schermerhorn break; 2921095f1fc4SLee Schermerhorn case MPOL_INTERLEAVE: 2922095f1fc4SLee Schermerhorn /* 2923095f1fc4SLee Schermerhorn * Default to online nodes with memory if no nodelist 2924095f1fc4SLee Schermerhorn */ 2925095f1fc4SLee Schermerhorn if (!nodelist) 292601f13bd6SLai Jiangshan nodes = node_states[N_MEMORY]; 29273f226aa1SLee Schermerhorn break; 292871fe804bSLee Schermerhorn case MPOL_LOCAL: 29293f226aa1SLee Schermerhorn /* 293071fe804bSLee Schermerhorn * Don't allow a nodelist; mpol_new() checks flags 29313f226aa1SLee Schermerhorn */ 293271fe804bSLee Schermerhorn if (nodelist) 29333f226aa1SLee Schermerhorn goto out; 293471fe804bSLee Schermerhorn mode = MPOL_PREFERRED; 29353f226aa1SLee Schermerhorn break; 2936413b43deSRavikiran G Thirumalai case MPOL_DEFAULT: 2937413b43deSRavikiran G Thirumalai /* 2938413b43deSRavikiran G Thirumalai * Insist on a empty nodelist 2939413b43deSRavikiran G Thirumalai */ 2940413b43deSRavikiran G Thirumalai if (!nodelist) 2941413b43deSRavikiran G Thirumalai err = 0; 2942413b43deSRavikiran G Thirumalai goto out; 2943d69b2e63SKOSAKI Motohiro case MPOL_BIND: 294471fe804bSLee Schermerhorn /* 2945d69b2e63SKOSAKI Motohiro * Insist on a nodelist 294671fe804bSLee Schermerhorn */ 2947d69b2e63SKOSAKI Motohiro if (!nodelist) 2948d69b2e63SKOSAKI Motohiro goto out; 2949095f1fc4SLee Schermerhorn } 2950095f1fc4SLee Schermerhorn 295171fe804bSLee Schermerhorn mode_flags = 0; 2952095f1fc4SLee Schermerhorn if (flags) { 2953095f1fc4SLee Schermerhorn /* 2954095f1fc4SLee Schermerhorn * Currently, we only support two mutually exclusive 2955095f1fc4SLee Schermerhorn * mode flags. 2956095f1fc4SLee Schermerhorn */ 2957095f1fc4SLee Schermerhorn if (!strcmp(flags, "static")) 295871fe804bSLee Schermerhorn mode_flags |= MPOL_F_STATIC_NODES; 2959095f1fc4SLee Schermerhorn else if (!strcmp(flags, "relative")) 296071fe804bSLee Schermerhorn mode_flags |= MPOL_F_RELATIVE_NODES; 2961095f1fc4SLee Schermerhorn else 2962926f2ae0SKOSAKI Motohiro goto out; 2963095f1fc4SLee Schermerhorn } 296471fe804bSLee Schermerhorn 296571fe804bSLee Schermerhorn new = mpol_new(mode, mode_flags, &nodes); 296671fe804bSLee Schermerhorn if (IS_ERR(new)) 2967926f2ae0SKOSAKI Motohiro goto out; 2968926f2ae0SKOSAKI Motohiro 2969f2a07f40SHugh Dickins /* 2970f2a07f40SHugh Dickins * Save nodes for mpol_to_str() to show the tmpfs mount options 2971f2a07f40SHugh Dickins * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo. 2972f2a07f40SHugh Dickins */ 2973f2a07f40SHugh Dickins if (mode != MPOL_PREFERRED) 2974f2a07f40SHugh Dickins new->v.nodes = nodes; 2975f2a07f40SHugh Dickins else if (nodelist) 2976f2a07f40SHugh Dickins new->v.preferred_node = first_node(nodes); 2977f2a07f40SHugh Dickins else 2978f2a07f40SHugh Dickins new->flags |= MPOL_F_LOCAL; 2979f2a07f40SHugh Dickins 2980f2a07f40SHugh Dickins /* 2981f2a07f40SHugh Dickins * Save nodes for contextualization: this will be used to "clone" 2982f2a07f40SHugh Dickins * the mempolicy in a specific context [cpuset] at a later time. 
2983f2a07f40SHugh Dickins */ 2984e17f74afSLee Schermerhorn new->w.user_nodemask = nodes; 2985f2a07f40SHugh Dickins 2986926f2ae0SKOSAKI Motohiro err = 0; 298771fe804bSLee Schermerhorn 2988095f1fc4SLee Schermerhorn out: 2989095f1fc4SLee Schermerhorn /* Restore string for error message */ 2990095f1fc4SLee Schermerhorn if (nodelist) 2991095f1fc4SLee Schermerhorn *--nodelist = ':'; 2992095f1fc4SLee Schermerhorn if (flags) 2993095f1fc4SLee Schermerhorn *--flags = '='; 299471fe804bSLee Schermerhorn if (!err) 299571fe804bSLee Schermerhorn *mpol = new; 2996095f1fc4SLee Schermerhorn return err; 2997095f1fc4SLee Schermerhorn } 2998095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */ 2999095f1fc4SLee Schermerhorn 300071fe804bSLee Schermerhorn /** 300171fe804bSLee Schermerhorn * mpol_to_str - format a mempolicy structure for printing 300271fe804bSLee Schermerhorn * @buffer: to contain formatted mempolicy string 300371fe804bSLee Schermerhorn * @maxlen: length of @buffer 300471fe804bSLee Schermerhorn * @pol: pointer to mempolicy to be formatted 300571fe804bSLee Schermerhorn * 3006948927eeSDavid Rientjes * Convert @pol into a string. If @buffer is too short, truncate the string. 3007948927eeSDavid Rientjes * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the 3008948927eeSDavid Rientjes * longest flag, "relative", and to display at least a few node ids. 30091a75a6c8SChristoph Lameter */ 3010948927eeSDavid Rientjes void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol) 30111a75a6c8SChristoph Lameter { 30121a75a6c8SChristoph Lameter char *p = buffer; 3013948927eeSDavid Rientjes nodemask_t nodes = NODE_MASK_NONE; 3014948927eeSDavid Rientjes unsigned short mode = MPOL_DEFAULT; 3015948927eeSDavid Rientjes unsigned short flags = 0; 30161a75a6c8SChristoph Lameter 30178790c71aSDavid Rientjes if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) { 3018bea904d5SLee Schermerhorn mode = pol->mode; 3019948927eeSDavid Rientjes flags = pol->flags; 3020948927eeSDavid Rientjes } 3021bea904d5SLee Schermerhorn 30221a75a6c8SChristoph Lameter switch (mode) { 30231a75a6c8SChristoph Lameter case MPOL_DEFAULT: 30241a75a6c8SChristoph Lameter break; 30251a75a6c8SChristoph Lameter case MPOL_PREFERRED: 3026fc36b8d3SLee Schermerhorn if (flags & MPOL_F_LOCAL) 3027f2a07f40SHugh Dickins mode = MPOL_LOCAL; 302853f2556bSLee Schermerhorn else 3029fc36b8d3SLee Schermerhorn node_set(pol->v.preferred_node, nodes); 30301a75a6c8SChristoph Lameter break; 30311a75a6c8SChristoph Lameter case MPOL_BIND: 30321a75a6c8SChristoph Lameter case MPOL_INTERLEAVE: 30331a75a6c8SChristoph Lameter nodes = pol->v.nodes; 30341a75a6c8SChristoph Lameter break; 30351a75a6c8SChristoph Lameter default: 3036948927eeSDavid Rientjes WARN_ON_ONCE(1); 3037948927eeSDavid Rientjes snprintf(p, maxlen, "unknown"); 3038948927eeSDavid Rientjes return; 30391a75a6c8SChristoph Lameter } 30401a75a6c8SChristoph Lameter 3041b7a9f420SDavid Rientjes p += snprintf(p, maxlen, "%s", policy_modes[mode]); 30421a75a6c8SChristoph Lameter 3043fc36b8d3SLee Schermerhorn if (flags & MPOL_MODE_FLAGS) { 3044948927eeSDavid Rientjes p += snprintf(p, buffer + maxlen - p, "="); 3045f5b087b5SDavid Rientjes 30462291990aSLee Schermerhorn /* 30472291990aSLee Schermerhorn * Currently, the only defined flags are mutually exclusive 30482291990aSLee Schermerhorn */ 3049f5b087b5SDavid Rientjes if (flags & MPOL_F_STATIC_NODES) 30502291990aSLee Schermerhorn p += snprintf(p, buffer + maxlen - p, "static"); 30512291990aSLee Schermerhorn else if (flags & 
MPOL_F_RELATIVE_NODES) 30522291990aSLee Schermerhorn p += snprintf(p, buffer + maxlen - p, "relative"); 3053f5b087b5SDavid Rientjes } 3054f5b087b5SDavid Rientjes 30559e763e0fSTejun Heo if (!nodes_empty(nodes)) 30569e763e0fSTejun Heo p += scnprintf(p, buffer + maxlen - p, ":%*pbl", 30579e763e0fSTejun Heo nodemask_pr_args(&nodes)); 30581a75a6c8SChristoph Lameter } 3059
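
/*
 * Illustrative use of mpol_to_str() (a sketch, not part of this file;
 * vma_policy() is assumed as the usual accessor for a VMA's mempolicy):
 *
 *	char buf[64];
 *
 *	mpol_to_str(buf, sizeof(buf), vma_policy(vma));
 *	buf now contains e.g. "interleave:0-3" or "default"
 */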