11da177e4SLinus Torvalds /* 21da177e4SLinus Torvalds * Simple NUMA memory policy for the Linux kernel. 31da177e4SLinus Torvalds * 41da177e4SLinus Torvalds * Copyright 2003,2004 Andi Kleen, SuSE Labs. 58bccd85fSChristoph Lameter * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc. 61da177e4SLinus Torvalds * Subject to the GNU Public License, version 2. 71da177e4SLinus Torvalds * 81da177e4SLinus Torvalds * NUMA policy allows the user to give hints in which node(s) memory should 91da177e4SLinus Torvalds * be allocated. 101da177e4SLinus Torvalds * 111da177e4SLinus Torvalds * Support four policies per VMA and per process: 121da177e4SLinus Torvalds * 131da177e4SLinus Torvalds * The VMA policy has priority over the process policy for a page fault. 141da177e4SLinus Torvalds * 151da177e4SLinus Torvalds * interleave Allocate memory interleaved over a set of nodes, 161da177e4SLinus Torvalds * with normal fallback if it fails. 171da177e4SLinus Torvalds * For VMA based allocations this interleaves based on the 181da177e4SLinus Torvalds * offset into the backing object or offset into the mapping 191da177e4SLinus Torvalds * for anonymous memory. For process policy a process counter 201da177e4SLinus Torvalds * is used. 218bccd85fSChristoph Lameter * 221da177e4SLinus Torvalds * bind Only allocate memory on a specific set of nodes, 231da177e4SLinus Torvalds * no fallback. 248bccd85fSChristoph Lameter * FIXME: memory is allocated starting with the first node 258bccd85fSChristoph Lameter * to the last. It would be better if bind would truly restrict 268bccd85fSChristoph Lameter * the allocation to memory nodes instead 278bccd85fSChristoph Lameter * 281da177e4SLinus Torvalds * preferred Try a specific node first before normal fallback. 2900ef2d2fSDavid Rientjes * As a special case NUMA_NO_NODE here means do the allocation 301da177e4SLinus Torvalds * on the local CPU. This is normally identical to default, 311da177e4SLinus Torvalds * but useful to set in a VMA when you have a non-default 321da177e4SLinus Torvalds * process policy. 338bccd85fSChristoph Lameter * 341da177e4SLinus Torvalds * default Allocate on the local node first, or when on a VMA 351da177e4SLinus Torvalds * use the process policy. This is what Linux always did 361da177e4SLinus Torvalds * in a NUMA aware kernel and still does by, ahem, default. 371da177e4SLinus Torvalds * 381da177e4SLinus Torvalds * The process policy is applied for most non-interrupt memory allocations 391da177e4SLinus Torvalds * in that process' context. Interrupts ignore the policies and always 401da177e4SLinus Torvalds * try to allocate on the local CPU. The VMA policy is only applied for memory 411da177e4SLinus Torvalds * allocations for a VMA in the VM. 421da177e4SLinus Torvalds * 431da177e4SLinus Torvalds * Currently there are a few corner cases in swapping where the policy 441da177e4SLinus Torvalds * is not applied, but the majority should be handled. When process policy 451da177e4SLinus Torvalds * is used it is not remembered over swap outs/swap ins. 461da177e4SLinus Torvalds * 471da177e4SLinus Torvalds * Only the highest zone in the zone hierarchy gets policied. Allocations 481da177e4SLinus Torvalds * requesting a lower zone just use default policy. This implies that 491da177e4SLinus Torvalds * on systems with highmem kernel lowmem allocations don't get policied. 501da177e4SLinus Torvalds * Same with GFP_DMA allocations.
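 *
 * As a rough userspace illustration (a sketch, assuming the set_mempolicy(2)
 * wrapper and the MPOL_* constants from libnuma's <numaif.h>), a process can
 * ask for interleaving over nodes 0 and 1 like this:
 *
 *	unsigned long nodes = 0x3;	// bitmask naming nodes 0 and 1
 *	if (set_mempolicy(MPOL_INTERLEAVE, &nodes, 8 * sizeof(nodes)))
 *		perror("set_mempolicy");
 *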
511da177e4SLinus Torvalds * 521da177e4SLinus Torvalds * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between 531da177e4SLinus Torvalds * all users and remembered even when nobody has memory mapped. 541da177e4SLinus Torvalds */ 551da177e4SLinus Torvalds 561da177e4SLinus Torvalds /* Notebook: 571da177e4SLinus Torvalds fix mmap readahead to honour policy and enable policy for any page cache 581da177e4SLinus Torvalds object 591da177e4SLinus Torvalds statistics for bigpages 601da177e4SLinus Torvalds global policy for page cache? currently it uses process policy. Requires 611da177e4SLinus Torvalds first item above. 621da177e4SLinus Torvalds handle mremap for shared memory (currently ignored for the policy) 631da177e4SLinus Torvalds grows down? 641da177e4SLinus Torvalds make bind policy root only? It can trigger oom much faster and the 651da177e4SLinus Torvalds kernel is not always grateful with that. 661da177e4SLinus Torvalds */ 671da177e4SLinus Torvalds 68b1de0d13SMitchel Humpherys #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 69b1de0d13SMitchel Humpherys 701da177e4SLinus Torvalds #include <linux/mempolicy.h> 711da177e4SLinus Torvalds #include <linux/mm.h> 721da177e4SLinus Torvalds #include <linux/highmem.h> 731da177e4SLinus Torvalds #include <linux/hugetlb.h> 741da177e4SLinus Torvalds #include <linux/kernel.h> 751da177e4SLinus Torvalds #include <linux/sched.h> 766e84f315SIngo Molnar #include <linux/sched/mm.h> 776a3827d7SIngo Molnar #include <linux/sched/numa_balancing.h> 78f719ff9bSIngo Molnar #include <linux/sched/task.h> 791da177e4SLinus Torvalds #include <linux/nodemask.h> 801da177e4SLinus Torvalds #include <linux/cpuset.h> 811da177e4SLinus Torvalds #include <linux/slab.h> 821da177e4SLinus Torvalds #include <linux/string.h> 83b95f1b31SPaul Gortmaker #include <linux/export.h> 84b488893aSPavel Emelyanov #include <linux/nsproxy.h> 851da177e4SLinus Torvalds #include <linux/interrupt.h> 861da177e4SLinus Torvalds #include <linux/init.h> 871da177e4SLinus Torvalds #include <linux/compat.h> 88dc9aa5b9SChristoph Lameter #include <linux/swap.h> 891a75a6c8SChristoph Lameter #include <linux/seq_file.h> 901a75a6c8SChristoph Lameter #include <linux/proc_fs.h> 91b20a3503SChristoph Lameter #include <linux/migrate.h> 9262b61f61SHugh Dickins #include <linux/ksm.h> 9395a402c3SChristoph Lameter #include <linux/rmap.h> 9486c3a764SDavid Quigley #include <linux/security.h> 95dbcb0f19SAdrian Bunk #include <linux/syscalls.h> 96095f1fc4SLee Schermerhorn #include <linux/ctype.h> 976d9c285aSKOSAKI Motohiro #include <linux/mm_inline.h> 98b24f53a0SLee Schermerhorn #include <linux/mmu_notifier.h> 99b1de0d13SMitchel Humpherys #include <linux/printk.h> 100c8633798SNaoya Horiguchi #include <linux/swapops.h> 101dc9aa5b9SChristoph Lameter 1021da177e4SLinus Torvalds #include <asm/tlbflush.h> 1037c0f6ba6SLinus Torvalds #include <linux/uaccess.h> 1041da177e4SLinus Torvalds 10562695a84SNick Piggin #include "internal.h" 10662695a84SNick Piggin 10738e35860SChristoph Lameter /* Internal flags */ 108dc9aa5b9SChristoph Lameter #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for continuous vmas */ 10938e35860SChristoph Lameter #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */ 110dc9aa5b9SChristoph Lameter 111fcc234f8SPekka Enberg static struct kmem_cache *policy_cache; 112fcc234f8SPekka Enberg static struct kmem_cache *sn_cache; 1131da177e4SLinus Torvalds 1141da177e4SLinus Torvalds /* Highest zone. 
A specific allocation for a zone below that is not 1151da177e4SLinus Torvalds policied. */ 1166267276fSChristoph Lameter enum zone_type policy_zone = 0; 1171da177e4SLinus Torvalds 118bea904d5SLee Schermerhorn /* 119bea904d5SLee Schermerhorn * run-time system-wide default policy => local allocation 120bea904d5SLee Schermerhorn */ 121e754d79dSH Hartley Sweeten static struct mempolicy default_policy = { 1221da177e4SLinus Torvalds .refcnt = ATOMIC_INIT(1), /* never free it */ 123bea904d5SLee Schermerhorn .mode = MPOL_PREFERRED, 124fc36b8d3SLee Schermerhorn .flags = MPOL_F_LOCAL, 1251da177e4SLinus Torvalds }; 1261da177e4SLinus Torvalds 1275606e387SMel Gorman static struct mempolicy preferred_node_policy[MAX_NUMNODES]; 1285606e387SMel Gorman 12974d2c3a0SOleg Nesterov struct mempolicy *get_task_policy(struct task_struct *p) 1305606e387SMel Gorman { 1315606e387SMel Gorman struct mempolicy *pol = p->mempolicy; 132f15ca78eSOleg Nesterov int node; 1335606e387SMel Gorman 134f15ca78eSOleg Nesterov if (pol) 135f15ca78eSOleg Nesterov return pol; 1365606e387SMel Gorman 137f15ca78eSOleg Nesterov node = numa_node_id(); 1381da6f0e1SJianguo Wu if (node != NUMA_NO_NODE) { 1391da6f0e1SJianguo Wu pol = &preferred_node_policy[node]; 140f15ca78eSOleg Nesterov /* preferred_node_policy is not initialised early in boot */ 141f15ca78eSOleg Nesterov if (pol->mode) 142f15ca78eSOleg Nesterov return pol; 1431da6f0e1SJianguo Wu } 1445606e387SMel Gorman 145f15ca78eSOleg Nesterov return &default_policy; 1465606e387SMel Gorman } 1475606e387SMel Gorman 14837012946SDavid Rientjes static const struct mempolicy_operations { 14937012946SDavid Rientjes int (*create)(struct mempolicy *pol, const nodemask_t *nodes); 150213980c0SVlastimil Babka void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes); 15137012946SDavid Rientjes } mpol_ops[MPOL_MAX]; 15237012946SDavid Rientjes 153f5b087b5SDavid Rientjes static inline int mpol_store_user_nodemask(const struct mempolicy *pol) 154f5b087b5SDavid Rientjes { 1556d556294SBob Liu return pol->flags & MPOL_MODE_FLAGS; 1564c50bc01SDavid Rientjes } 1574c50bc01SDavid Rientjes 1584c50bc01SDavid Rientjes static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig, 1594c50bc01SDavid Rientjes const nodemask_t *rel) 1604c50bc01SDavid Rientjes { 1614c50bc01SDavid Rientjes nodemask_t tmp; 1624c50bc01SDavid Rientjes nodes_fold(tmp, *orig, nodes_weight(*rel)); 1634c50bc01SDavid Rientjes nodes_onto(*ret, tmp, *rel); 164f5b087b5SDavid Rientjes } 165f5b087b5SDavid Rientjes 16637012946SDavid Rientjes static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes) 16737012946SDavid Rientjes { 16837012946SDavid Rientjes if (nodes_empty(*nodes)) 16937012946SDavid Rientjes return -EINVAL; 17037012946SDavid Rientjes pol->v.nodes = *nodes; 17137012946SDavid Rientjes return 0; 17237012946SDavid Rientjes } 17337012946SDavid Rientjes 17437012946SDavid Rientjes static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes) 17537012946SDavid Rientjes { 17637012946SDavid Rientjes if (!nodes) 177fc36b8d3SLee Schermerhorn pol->flags |= MPOL_F_LOCAL; /* local allocation */ 17837012946SDavid Rientjes else if (nodes_empty(*nodes)) 17937012946SDavid Rientjes return -EINVAL; /* no allowed nodes */ 18037012946SDavid Rientjes else 18137012946SDavid Rientjes pol->v.preferred_node = first_node(*nodes); 18237012946SDavid Rientjes return 0; 18337012946SDavid Rientjes } 18437012946SDavid Rientjes 18537012946SDavid Rientjes static int mpol_new_bind(struct mempolicy *pol, const
nodemask_t *nodes) 18637012946SDavid Rientjes { 187859f7ef1SZhihui Zhang if (nodes_empty(*nodes)) 18837012946SDavid Rientjes return -EINVAL; 18937012946SDavid Rientjes pol->v.nodes = *nodes; 19037012946SDavid Rientjes return 0; 19137012946SDavid Rientjes } 19237012946SDavid Rientjes 19358568d2aSMiao Xie /* 19458568d2aSMiao Xie * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if 19558568d2aSMiao Xie * any, for the new policy. mpol_new() has already validated the nodes 19658568d2aSMiao Xie * parameter with respect to the policy mode and flags. But, we need to 19758568d2aSMiao Xie * handle an empty nodemask with MPOL_PREFERRED here. 19858568d2aSMiao Xie * 19958568d2aSMiao Xie * Must be called holding task's alloc_lock to protect task's mems_allowed 20058568d2aSMiao Xie * and mempolicy. May also be called holding the mmap_semaphore for write. 20158568d2aSMiao Xie */ 2024bfc4495SKAMEZAWA Hiroyuki static int mpol_set_nodemask(struct mempolicy *pol, 2034bfc4495SKAMEZAWA Hiroyuki const nodemask_t *nodes, struct nodemask_scratch *nsc) 20458568d2aSMiao Xie { 20558568d2aSMiao Xie int ret; 20658568d2aSMiao Xie 20758568d2aSMiao Xie /* if mode is MPOL_DEFAULT, pol is NULL. This is right. */ 20858568d2aSMiao Xie if (pol == NULL) 20958568d2aSMiao Xie return 0; 21001f13bd6SLai Jiangshan /* Check N_MEMORY */ 2114bfc4495SKAMEZAWA Hiroyuki nodes_and(nsc->mask1, 21201f13bd6SLai Jiangshan cpuset_current_mems_allowed, node_states[N_MEMORY]); 21358568d2aSMiao Xie 21458568d2aSMiao Xie VM_BUG_ON(!nodes); 21558568d2aSMiao Xie if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes)) 21658568d2aSMiao Xie nodes = NULL; /* explicit local allocation */ 21758568d2aSMiao Xie else { 21858568d2aSMiao Xie if (pol->flags & MPOL_F_RELATIVE_NODES) 2194bfc4495SKAMEZAWA Hiroyuki mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1); 22058568d2aSMiao Xie else 2214bfc4495SKAMEZAWA Hiroyuki nodes_and(nsc->mask2, *nodes, nsc->mask1); 2224bfc4495SKAMEZAWA Hiroyuki 22358568d2aSMiao Xie if (mpol_store_user_nodemask(pol)) 22458568d2aSMiao Xie pol->w.user_nodemask = *nodes; 22558568d2aSMiao Xie else 22658568d2aSMiao Xie pol->w.cpuset_mems_allowed = 22758568d2aSMiao Xie cpuset_current_mems_allowed; 22858568d2aSMiao Xie } 22958568d2aSMiao Xie 2304bfc4495SKAMEZAWA Hiroyuki if (nodes) 2314bfc4495SKAMEZAWA Hiroyuki ret = mpol_ops[pol->mode].create(pol, &nsc->mask2); 2324bfc4495SKAMEZAWA Hiroyuki else 2334bfc4495SKAMEZAWA Hiroyuki ret = mpol_ops[pol->mode].create(pol, NULL); 23458568d2aSMiao Xie return ret; 23558568d2aSMiao Xie } 23658568d2aSMiao Xie 23758568d2aSMiao Xie /* 23858568d2aSMiao Xie * This function just creates a new policy, does some check and simple 23958568d2aSMiao Xie * initialization. You must invoke mpol_set_nodemask() to set nodes. 24058568d2aSMiao Xie */ 241028fec41SDavid Rientjes static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags, 242028fec41SDavid Rientjes nodemask_t *nodes) 2431da177e4SLinus Torvalds { 2441da177e4SLinus Torvalds struct mempolicy *policy; 2451da177e4SLinus Torvalds 246028fec41SDavid Rientjes pr_debug("setting mode %d flags %d nodes[0] %lx\n", 24700ef2d2fSDavid Rientjes mode, flags, nodes ? 
nodes_addr(*nodes)[0] : NUMA_NO_NODE); 248140d5a49SPaul Mundt 2493e1f0645SDavid Rientjes if (mode == MPOL_DEFAULT) { 2503e1f0645SDavid Rientjes if (nodes && !nodes_empty(*nodes)) 25137012946SDavid Rientjes return ERR_PTR(-EINVAL); 252d3a71033SLee Schermerhorn return NULL; 25337012946SDavid Rientjes } 2543e1f0645SDavid Rientjes VM_BUG_ON(!nodes); 2553e1f0645SDavid Rientjes 2563e1f0645SDavid Rientjes /* 2573e1f0645SDavid Rientjes * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or 2583e1f0645SDavid Rientjes * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation). 2593e1f0645SDavid Rientjes * All other modes require a valid pointer to a non-empty nodemask. 2603e1f0645SDavid Rientjes */ 2613e1f0645SDavid Rientjes if (mode == MPOL_PREFERRED) { 2623e1f0645SDavid Rientjes if (nodes_empty(*nodes)) { 2633e1f0645SDavid Rientjes if (((flags & MPOL_F_STATIC_NODES) || 2643e1f0645SDavid Rientjes (flags & MPOL_F_RELATIVE_NODES))) 2653e1f0645SDavid Rientjes return ERR_PTR(-EINVAL); 2663e1f0645SDavid Rientjes } 267479e2802SPeter Zijlstra } else if (mode == MPOL_LOCAL) { 2688d303e44SPiotr Kwapulinski if (!nodes_empty(*nodes) || 2698d303e44SPiotr Kwapulinski (flags & MPOL_F_STATIC_NODES) || 2708d303e44SPiotr Kwapulinski (flags & MPOL_F_RELATIVE_NODES)) 271479e2802SPeter Zijlstra return ERR_PTR(-EINVAL); 272479e2802SPeter Zijlstra mode = MPOL_PREFERRED; 2733e1f0645SDavid Rientjes } else if (nodes_empty(*nodes)) 2743e1f0645SDavid Rientjes return ERR_PTR(-EINVAL); 2751da177e4SLinus Torvalds policy = kmem_cache_alloc(policy_cache, GFP_KERNEL); 2761da177e4SLinus Torvalds if (!policy) 2771da177e4SLinus Torvalds return ERR_PTR(-ENOMEM); 2781da177e4SLinus Torvalds atomic_set(&policy->refcnt, 1); 27945c4745aSLee Schermerhorn policy->mode = mode; 28037012946SDavid Rientjes policy->flags = flags; 2813e1f0645SDavid Rientjes 28237012946SDavid Rientjes return policy; 28337012946SDavid Rientjes } 28437012946SDavid Rientjes 28552cd3b07SLee Schermerhorn /* Slow path of a mpol destructor. 
*/ 28652cd3b07SLee Schermerhorn void __mpol_put(struct mempolicy *p) 28752cd3b07SLee Schermerhorn { 28852cd3b07SLee Schermerhorn if (!atomic_dec_and_test(&p->refcnt)) 28952cd3b07SLee Schermerhorn return; 29052cd3b07SLee Schermerhorn kmem_cache_free(policy_cache, p); 29152cd3b07SLee Schermerhorn } 29252cd3b07SLee Schermerhorn 293213980c0SVlastimil Babka static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes) 29437012946SDavid Rientjes { 29537012946SDavid Rientjes } 29637012946SDavid Rientjes 297213980c0SVlastimil Babka static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes) 2981d0d2680SDavid Rientjes { 2991d0d2680SDavid Rientjes nodemask_t tmp; 3001d0d2680SDavid Rientjes 30137012946SDavid Rientjes if (pol->flags & MPOL_F_STATIC_NODES) 30237012946SDavid Rientjes nodes_and(tmp, pol->w.user_nodemask, *nodes); 30337012946SDavid Rientjes else if (pol->flags & MPOL_F_RELATIVE_NODES) 30437012946SDavid Rientjes mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes); 3051d0d2680SDavid Rientjes else { 306213980c0SVlastimil Babka nodes_remap(tmp, pol->v.nodes,pol->w.cpuset_mems_allowed, 307213980c0SVlastimil Babka *nodes); 308213980c0SVlastimil Babka pol->w.cpuset_mems_allowed = tmp; 3091d0d2680SDavid Rientjes } 31037012946SDavid Rientjes 311708c1bbcSMiao Xie if (nodes_empty(tmp)) 312708c1bbcSMiao Xie tmp = *nodes; 313708c1bbcSMiao Xie 3141d0d2680SDavid Rientjes pol->v.nodes = tmp; 31537012946SDavid Rientjes } 31637012946SDavid Rientjes 31737012946SDavid Rientjes static void mpol_rebind_preferred(struct mempolicy *pol, 318213980c0SVlastimil Babka const nodemask_t *nodes) 31937012946SDavid Rientjes { 32037012946SDavid Rientjes nodemask_t tmp; 32137012946SDavid Rientjes 32237012946SDavid Rientjes if (pol->flags & MPOL_F_STATIC_NODES) { 3231d0d2680SDavid Rientjes int node = first_node(pol->w.user_nodemask); 3241d0d2680SDavid Rientjes 325fc36b8d3SLee Schermerhorn if (node_isset(node, *nodes)) { 3261d0d2680SDavid Rientjes pol->v.preferred_node = node; 327fc36b8d3SLee Schermerhorn pol->flags &= ~MPOL_F_LOCAL; 328fc36b8d3SLee Schermerhorn } else 329fc36b8d3SLee Schermerhorn pol->flags |= MPOL_F_LOCAL; 33037012946SDavid Rientjes } else if (pol->flags & MPOL_F_RELATIVE_NODES) { 33137012946SDavid Rientjes mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes); 3321d0d2680SDavid Rientjes pol->v.preferred_node = first_node(tmp); 333fc36b8d3SLee Schermerhorn } else if (!(pol->flags & MPOL_F_LOCAL)) { 3341d0d2680SDavid Rientjes pol->v.preferred_node = node_remap(pol->v.preferred_node, 33537012946SDavid Rientjes pol->w.cpuset_mems_allowed, 33637012946SDavid Rientjes *nodes); 33737012946SDavid Rientjes pol->w.cpuset_mems_allowed = *nodes; 3381d0d2680SDavid Rientjes } 3391d0d2680SDavid Rientjes } 34037012946SDavid Rientjes 341708c1bbcSMiao Xie /* 342708c1bbcSMiao Xie * mpol_rebind_policy - Migrate a policy to a different set of nodes 343708c1bbcSMiao Xie * 344213980c0SVlastimil Babka * Per-vma policies are protected by mmap_sem. Allocations using per-task 345213980c0SVlastimil Babka * policies are protected by task->mems_allowed_seq to prevent a premature 346213980c0SVlastimil Babka * OOM/allocation failure due to parallel nodemask modification. 
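 *
 * Roughly, and per the mpol_rebind_* helpers above: MPOL_F_STATIC_NODES
 * policies keep only the originally requested nodes that are still allowed,
 * MPOL_F_RELATIVE_NODES policies are folded onto the new allowed set, and
 * plain policies are remapped node-for-node from the old cpuset mask onto
 * the new one.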
347708c1bbcSMiao Xie */ 348213980c0SVlastimil Babka static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask) 34937012946SDavid Rientjes { 35037012946SDavid Rientjes if (!pol) 35137012946SDavid Rientjes return; 352213980c0SVlastimil Babka if (!mpol_store_user_nodemask(pol) && 35337012946SDavid Rientjes nodes_equal(pol->w.cpuset_mems_allowed, *newmask)) 35437012946SDavid Rientjes return; 355708c1bbcSMiao Xie 356213980c0SVlastimil Babka mpol_ops[pol->mode].rebind(pol, newmask); 3571d0d2680SDavid Rientjes } 3581d0d2680SDavid Rientjes 3591d0d2680SDavid Rientjes /* 3601d0d2680SDavid Rientjes * Wrapper for mpol_rebind_policy() that just requires task 3611d0d2680SDavid Rientjes * pointer, and updates task mempolicy. 36258568d2aSMiao Xie * 36358568d2aSMiao Xie * Called with task's alloc_lock held. 3641d0d2680SDavid Rientjes */ 3651d0d2680SDavid Rientjes 366213980c0SVlastimil Babka void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new) 3671d0d2680SDavid Rientjes { 368213980c0SVlastimil Babka mpol_rebind_policy(tsk->mempolicy, new); 3691d0d2680SDavid Rientjes } 3701d0d2680SDavid Rientjes 3711d0d2680SDavid Rientjes /* 3721d0d2680SDavid Rientjes * Rebind each vma in mm to new nodemask. 3731d0d2680SDavid Rientjes * 3741d0d2680SDavid Rientjes * Call holding a reference to mm. Takes mm->mmap_sem during call. 3751d0d2680SDavid Rientjes */ 3761d0d2680SDavid Rientjes 3771d0d2680SDavid Rientjes void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new) 3781d0d2680SDavid Rientjes { 3791d0d2680SDavid Rientjes struct vm_area_struct *vma; 3801d0d2680SDavid Rientjes 3811d0d2680SDavid Rientjes down_write(&mm->mmap_sem); 3821d0d2680SDavid Rientjes for (vma = mm->mmap; vma; vma = vma->vm_next) 383213980c0SVlastimil Babka mpol_rebind_policy(vma->vm_policy, new); 3841d0d2680SDavid Rientjes up_write(&mm->mmap_sem); 3851d0d2680SDavid Rientjes } 3861d0d2680SDavid Rientjes 38737012946SDavid Rientjes static const struct mempolicy_operations mpol_ops[MPOL_MAX] = { 38837012946SDavid Rientjes [MPOL_DEFAULT] = { 38937012946SDavid Rientjes .rebind = mpol_rebind_default, 39037012946SDavid Rientjes }, 39137012946SDavid Rientjes [MPOL_INTERLEAVE] = { 39237012946SDavid Rientjes .create = mpol_new_interleave, 39337012946SDavid Rientjes .rebind = mpol_rebind_nodemask, 39437012946SDavid Rientjes }, 39537012946SDavid Rientjes [MPOL_PREFERRED] = { 39637012946SDavid Rientjes .create = mpol_new_preferred, 39737012946SDavid Rientjes .rebind = mpol_rebind_preferred, 39837012946SDavid Rientjes }, 39937012946SDavid Rientjes [MPOL_BIND] = { 40037012946SDavid Rientjes .create = mpol_new_bind, 40137012946SDavid Rientjes .rebind = mpol_rebind_nodemask, 40237012946SDavid Rientjes }, 40337012946SDavid Rientjes }; 40437012946SDavid Rientjes 405fc301289SChristoph Lameter static void migrate_page_add(struct page *page, struct list_head *pagelist, 406fc301289SChristoph Lameter unsigned long flags); 4071a75a6c8SChristoph Lameter 4086f4576e3SNaoya Horiguchi struct queue_pages { 4096f4576e3SNaoya Horiguchi struct list_head *pagelist; 4106f4576e3SNaoya Horiguchi unsigned long flags; 4116f4576e3SNaoya Horiguchi nodemask_t *nmask; 4126f4576e3SNaoya Horiguchi struct vm_area_struct *prev; 4136f4576e3SNaoya Horiguchi }; 4146f4576e3SNaoya Horiguchi 41598094945SNaoya Horiguchi /* 41688aaa2a1SNaoya Horiguchi * Check if the page's nid is in qp->nmask. 41788aaa2a1SNaoya Horiguchi * 41888aaa2a1SNaoya Horiguchi * If MPOL_MF_INVERT is set in qp->flags, check if the nid is 41988aaa2a1SNaoya Horiguchi * in the invert of qp->nmask. 
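 *
 * For example, with qp->nmask = {0,1} a page on node 0 satisfies the check,
 * while with MPOL_MF_INVERT set only pages on nodes outside {0,1} do.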
42088aaa2a1SNaoya Horiguchi */ 42188aaa2a1SNaoya Horiguchi static inline bool queue_pages_required(struct page *page, 42288aaa2a1SNaoya Horiguchi struct queue_pages *qp) 42388aaa2a1SNaoya Horiguchi { 42488aaa2a1SNaoya Horiguchi int nid = page_to_nid(page); 42588aaa2a1SNaoya Horiguchi unsigned long flags = qp->flags; 42688aaa2a1SNaoya Horiguchi 42788aaa2a1SNaoya Horiguchi return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT); 42888aaa2a1SNaoya Horiguchi } 42988aaa2a1SNaoya Horiguchi 430c8633798SNaoya Horiguchi static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr, 431c8633798SNaoya Horiguchi unsigned long end, struct mm_walk *walk) 432c8633798SNaoya Horiguchi { 433c8633798SNaoya Horiguchi int ret = 0; 434c8633798SNaoya Horiguchi struct page *page; 435c8633798SNaoya Horiguchi struct queue_pages *qp = walk->private; 436c8633798SNaoya Horiguchi unsigned long flags; 437c8633798SNaoya Horiguchi 438c8633798SNaoya Horiguchi if (unlikely(is_pmd_migration_entry(*pmd))) { 439c8633798SNaoya Horiguchi ret = 1; 440c8633798SNaoya Horiguchi goto unlock; 441c8633798SNaoya Horiguchi } 442c8633798SNaoya Horiguchi page = pmd_page(*pmd); 443c8633798SNaoya Horiguchi if (is_huge_zero_page(page)) { 444c8633798SNaoya Horiguchi spin_unlock(ptl); 445c8633798SNaoya Horiguchi __split_huge_pmd(walk->vma, pmd, addr, false, NULL); 446c8633798SNaoya Horiguchi goto out; 447c8633798SNaoya Horiguchi } 448c8633798SNaoya Horiguchi if (!thp_migration_supported()) { 449c8633798SNaoya Horiguchi get_page(page); 450c8633798SNaoya Horiguchi spin_unlock(ptl); 451c8633798SNaoya Horiguchi lock_page(page); 452c8633798SNaoya Horiguchi ret = split_huge_page(page); 453c8633798SNaoya Horiguchi unlock_page(page); 454c8633798SNaoya Horiguchi put_page(page); 455c8633798SNaoya Horiguchi goto out; 456c8633798SNaoya Horiguchi } 457c8633798SNaoya Horiguchi if (!queue_pages_required(page, qp)) { 458c8633798SNaoya Horiguchi ret = 1; 459c8633798SNaoya Horiguchi goto unlock; 460c8633798SNaoya Horiguchi } 461c8633798SNaoya Horiguchi 462c8633798SNaoya Horiguchi ret = 1; 463c8633798SNaoya Horiguchi flags = qp->flags; 464c8633798SNaoya Horiguchi /* go to thp migration */ 465c8633798SNaoya Horiguchi if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) 466c8633798SNaoya Horiguchi migrate_page_add(page, qp->pagelist, flags); 467c8633798SNaoya Horiguchi unlock: 468c8633798SNaoya Horiguchi spin_unlock(ptl); 469c8633798SNaoya Horiguchi out: 470c8633798SNaoya Horiguchi return ret; 471c8633798SNaoya Horiguchi } 472c8633798SNaoya Horiguchi 47388aaa2a1SNaoya Horiguchi /* 47498094945SNaoya Horiguchi * Scan through pages checking if pages follow certain conditions, 47598094945SNaoya Horiguchi * and move them to the pagelist if they do. 47698094945SNaoya Horiguchi */ 4776f4576e3SNaoya Horiguchi static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr, 4786f4576e3SNaoya Horiguchi unsigned long end, struct mm_walk *walk) 4791da177e4SLinus Torvalds { 4806f4576e3SNaoya Horiguchi struct vm_area_struct *vma = walk->vma; 4816f4576e3SNaoya Horiguchi struct page *page; 4826f4576e3SNaoya Horiguchi struct queue_pages *qp = walk->private; 4836f4576e3SNaoya Horiguchi unsigned long flags = qp->flags; 484c8633798SNaoya Horiguchi int ret; 48591612e0dSHugh Dickins pte_t *pte; 486705e87c0SHugh Dickins spinlock_t *ptl; 487941150a3SHugh Dickins 488c8633798SNaoya Horiguchi ptl = pmd_trans_huge_lock(pmd, vma); 489c8633798SNaoya Horiguchi if (ptl) { 490c8633798SNaoya Horiguchi ret = queue_pages_pmd(pmd, ptl, addr, end, walk); 491248db92dSKirill A. 
Shutemov if (ret) 4926f4576e3SNaoya Horiguchi return 0; 493248db92dSKirill A. Shutemov } 49491612e0dSHugh Dickins 495337d9abfSNaoya Horiguchi if (pmd_trans_unstable(pmd)) 496337d9abfSNaoya Horiguchi return 0; 497248db92dSKirill A. Shutemov retry: 4986f4576e3SNaoya Horiguchi pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); 4996f4576e3SNaoya Horiguchi for (; addr != end; pte++, addr += PAGE_SIZE) { 50091612e0dSHugh Dickins if (!pte_present(*pte)) 50191612e0dSHugh Dickins continue; 5026aab341eSLinus Torvalds page = vm_normal_page(vma, addr, *pte); 5036aab341eSLinus Torvalds if (!page) 50491612e0dSHugh Dickins continue; 505053837fcSNick Piggin /* 50662b61f61SHugh Dickins * vm_normal_page() filters out zero pages, but there might 50762b61f61SHugh Dickins * still be PageReserved pages to skip, perhaps in a VDSO. 508053837fcSNick Piggin */ 509b79bc0a0SHugh Dickins if (PageReserved(page)) 510f4598c8bSChristoph Lameter continue; 51188aaa2a1SNaoya Horiguchi if (!queue_pages_required(page, qp)) 51238e35860SChristoph Lameter continue; 513c8633798SNaoya Horiguchi if (PageTransCompound(page) && !thp_migration_supported()) { 514248db92dSKirill A. Shutemov get_page(page); 515248db92dSKirill A. Shutemov pte_unmap_unlock(pte, ptl); 516248db92dSKirill A. Shutemov lock_page(page); 517248db92dSKirill A. Shutemov ret = split_huge_page(page); 518248db92dSKirill A. Shutemov unlock_page(page); 519248db92dSKirill A. Shutemov put_page(page); 520248db92dSKirill A. Shutemov /* Failed to split -- skip. */ 521248db92dSKirill A. Shutemov if (ret) { 522248db92dSKirill A. Shutemov pte = pte_offset_map_lock(walk->mm, pmd, 523248db92dSKirill A. Shutemov addr, &ptl); 524248db92dSKirill A. Shutemov continue; 525248db92dSKirill A. Shutemov } 526248db92dSKirill A. Shutemov goto retry; 527248db92dSKirill A. Shutemov } 52838e35860SChristoph Lameter 5296f4576e3SNaoya Horiguchi migrate_page_add(page, qp->pagelist, flags); 5306f4576e3SNaoya Horiguchi } 5316f4576e3SNaoya Horiguchi pte_unmap_unlock(pte - 1, ptl); 5326f4576e3SNaoya Horiguchi cond_resched(); 5336f4576e3SNaoya Horiguchi return 0; 53491612e0dSHugh Dickins } 53591612e0dSHugh Dickins 5366f4576e3SNaoya Horiguchi static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask, 5376f4576e3SNaoya Horiguchi unsigned long addr, unsigned long end, 5386f4576e3SNaoya Horiguchi struct mm_walk *walk) 539e2d8cf40SNaoya Horiguchi { 540e2d8cf40SNaoya Horiguchi #ifdef CONFIG_HUGETLB_PAGE 5416f4576e3SNaoya Horiguchi struct queue_pages *qp = walk->private; 5426f4576e3SNaoya Horiguchi unsigned long flags = qp->flags; 543e2d8cf40SNaoya Horiguchi struct page *page; 544cb900f41SKirill A. Shutemov spinlock_t *ptl; 545d4c54919SNaoya Horiguchi pte_t entry; 546e2d8cf40SNaoya Horiguchi 5476f4576e3SNaoya Horiguchi ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte); 5486f4576e3SNaoya Horiguchi entry = huge_ptep_get(pte); 549d4c54919SNaoya Horiguchi if (!pte_present(entry)) 550d4c54919SNaoya Horiguchi goto unlock; 551d4c54919SNaoya Horiguchi page = pte_page(entry); 55288aaa2a1SNaoya Horiguchi if (!queue_pages_required(page, qp)) 553e2d8cf40SNaoya Horiguchi goto unlock; 554e2d8cf40SNaoya Horiguchi /* With MPOL_MF_MOVE, we migrate only unshared hugepage. */ 555e2d8cf40SNaoya Horiguchi if (flags & (MPOL_MF_MOVE_ALL) || 556e2d8cf40SNaoya Horiguchi (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) 5576f4576e3SNaoya Horiguchi isolate_huge_page(page, qp->pagelist); 558e2d8cf40SNaoya Horiguchi unlock: 559cb900f41SKirill A. 
Shutemov spin_unlock(ptl); 560e2d8cf40SNaoya Horiguchi #else 561e2d8cf40SNaoya Horiguchi BUG(); 562e2d8cf40SNaoya Horiguchi #endif 56391612e0dSHugh Dickins return 0; 5641da177e4SLinus Torvalds } 5651da177e4SLinus Torvalds 5665877231fSAneesh Kumar K.V #ifdef CONFIG_NUMA_BALANCING 567b24f53a0SLee Schermerhorn /* 5684b10e7d5SMel Gorman * This is used to mark a range of virtual addresses to be inaccessible. 5694b10e7d5SMel Gorman * These are later cleared by a NUMA hinting fault. Depending on these 5704b10e7d5SMel Gorman * faults, pages may be migrated for better NUMA placement. 5714b10e7d5SMel Gorman * 5724b10e7d5SMel Gorman * This is assuming that NUMA faults are handled using PROT_NONE. If 5734b10e7d5SMel Gorman * an architecture makes a different choice, it will need further 5744b10e7d5SMel Gorman * changes to the core. 575b24f53a0SLee Schermerhorn */ 5764b10e7d5SMel Gorman unsigned long change_prot_numa(struct vm_area_struct *vma, 5774b10e7d5SMel Gorman unsigned long addr, unsigned long end) 578b24f53a0SLee Schermerhorn { 5794b10e7d5SMel Gorman int nr_updated; 580b24f53a0SLee Schermerhorn 5814d942466SMel Gorman nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1); 58203c5a6e1SMel Gorman if (nr_updated) 58303c5a6e1SMel Gorman count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated); 584b24f53a0SLee Schermerhorn 5854b10e7d5SMel Gorman return nr_updated; 586b24f53a0SLee Schermerhorn } 587b24f53a0SLee Schermerhorn #else 588b24f53a0SLee Schermerhorn static unsigned long change_prot_numa(struct vm_area_struct *vma, 589b24f53a0SLee Schermerhorn unsigned long addr, unsigned long end) 590b24f53a0SLee Schermerhorn { 591b24f53a0SLee Schermerhorn return 0; 592b24f53a0SLee Schermerhorn } 5935877231fSAneesh Kumar K.V #endif /* CONFIG_NUMA_BALANCING */ 594b24f53a0SLee Schermerhorn 5956f4576e3SNaoya Horiguchi static int queue_pages_test_walk(unsigned long start, unsigned long end, 5966f4576e3SNaoya Horiguchi struct mm_walk *walk) 5971da177e4SLinus Torvalds { 5986f4576e3SNaoya Horiguchi struct vm_area_struct *vma = walk->vma; 5996f4576e3SNaoya Horiguchi struct queue_pages *qp = walk->private; 6005b952b3cSAndi Kleen unsigned long endvma = vma->vm_end; 6016f4576e3SNaoya Horiguchi unsigned long flags = qp->flags; 602dc9aa5b9SChristoph Lameter 60377bf45e7SKirill A. 
Shutemov if (!vma_migratable(vma)) 60448684a65SNaoya Horiguchi return 1; 60548684a65SNaoya Horiguchi 6065b952b3cSAndi Kleen if (endvma > end) 6075b952b3cSAndi Kleen endvma = end; 6085b952b3cSAndi Kleen if (vma->vm_start > start) 6095b952b3cSAndi Kleen start = vma->vm_start; 610b24f53a0SLee Schermerhorn 611b24f53a0SLee Schermerhorn if (!(flags & MPOL_MF_DISCONTIG_OK)) { 612b24f53a0SLee Schermerhorn if (!vma->vm_next && vma->vm_end < end) 613d05f0cdcSHugh Dickins return -EFAULT; 6146f4576e3SNaoya Horiguchi if (qp->prev && qp->prev->vm_end < vma->vm_start) 615d05f0cdcSHugh Dickins return -EFAULT; 616b24f53a0SLee Schermerhorn } 617b24f53a0SLee Schermerhorn 6186f4576e3SNaoya Horiguchi qp->prev = vma; 6196f4576e3SNaoya Horiguchi 620b24f53a0SLee Schermerhorn if (flags & MPOL_MF_LAZY) { 6212c0346a3SMel Gorman /* Similar to task_numa_work, skip inaccessible VMAs */ 6224355c018SLiang Chen if (!is_vm_hugetlb_page(vma) && 6234355c018SLiang Chen (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) && 6244355c018SLiang Chen !(vma->vm_flags & VM_MIXEDMAP)) 625b24f53a0SLee Schermerhorn change_prot_numa(vma, start, endvma); 6266f4576e3SNaoya Horiguchi return 1; 627b24f53a0SLee Schermerhorn } 628b24f53a0SLee Schermerhorn 6296f4576e3SNaoya Horiguchi /* queue pages from current vma */ 63077bf45e7SKirill A. Shutemov if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) 6316f4576e3SNaoya Horiguchi return 0; 6326f4576e3SNaoya Horiguchi return 1; 6336f4576e3SNaoya Horiguchi } 634b24f53a0SLee Schermerhorn 6356f4576e3SNaoya Horiguchi /* 6366f4576e3SNaoya Horiguchi * Walk through page tables and collect pages to be migrated. 6376f4576e3SNaoya Horiguchi * 6386f4576e3SNaoya Horiguchi * If pages found in a given range are on a set of nodes (determined by 6396f4576e3SNaoya Horiguchi * @nodes and @flags,) it's isolated and queued to the pagelist which is 6406f4576e3SNaoya Horiguchi * passed via @private.) 6416f4576e3SNaoya Horiguchi */ 6426f4576e3SNaoya Horiguchi static int 6436f4576e3SNaoya Horiguchi queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end, 6446f4576e3SNaoya Horiguchi nodemask_t *nodes, unsigned long flags, 6456f4576e3SNaoya Horiguchi struct list_head *pagelist) 6466f4576e3SNaoya Horiguchi { 6476f4576e3SNaoya Horiguchi struct queue_pages qp = { 6486f4576e3SNaoya Horiguchi .pagelist = pagelist, 6496f4576e3SNaoya Horiguchi .flags = flags, 6506f4576e3SNaoya Horiguchi .nmask = nodes, 6516f4576e3SNaoya Horiguchi .prev = NULL, 6526f4576e3SNaoya Horiguchi }; 6536f4576e3SNaoya Horiguchi struct mm_walk queue_pages_walk = { 6546f4576e3SNaoya Horiguchi .hugetlb_entry = queue_pages_hugetlb, 6556f4576e3SNaoya Horiguchi .pmd_entry = queue_pages_pte_range, 6566f4576e3SNaoya Horiguchi .test_walk = queue_pages_test_walk, 6576f4576e3SNaoya Horiguchi .mm = mm, 6586f4576e3SNaoya Horiguchi .private = &qp, 6596f4576e3SNaoya Horiguchi }; 6606f4576e3SNaoya Horiguchi 6616f4576e3SNaoya Horiguchi return walk_page_range(start, end, &queue_pages_walk); 6621da177e4SLinus Torvalds } 6631da177e4SLinus Torvalds 664869833f2SKOSAKI Motohiro /* 665869833f2SKOSAKI Motohiro * Apply policy to a single VMA 666869833f2SKOSAKI Motohiro * This must be called with the mmap_sem held for writing. 
667869833f2SKOSAKI Motohiro */ 668869833f2SKOSAKI Motohiro static int vma_replace_policy(struct vm_area_struct *vma, 669869833f2SKOSAKI Motohiro struct mempolicy *pol) 6708d34694cSKOSAKI Motohiro { 671869833f2SKOSAKI Motohiro int err; 672869833f2SKOSAKI Motohiro struct mempolicy *old; 673869833f2SKOSAKI Motohiro struct mempolicy *new; 6748d34694cSKOSAKI Motohiro 6758d34694cSKOSAKI Motohiro pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n", 6768d34694cSKOSAKI Motohiro vma->vm_start, vma->vm_end, vma->vm_pgoff, 6778d34694cSKOSAKI Motohiro vma->vm_ops, vma->vm_file, 6788d34694cSKOSAKI Motohiro vma->vm_ops ? vma->vm_ops->set_policy : NULL); 6798d34694cSKOSAKI Motohiro 680869833f2SKOSAKI Motohiro new = mpol_dup(pol); 681869833f2SKOSAKI Motohiro if (IS_ERR(new)) 682869833f2SKOSAKI Motohiro return PTR_ERR(new); 683869833f2SKOSAKI Motohiro 684869833f2SKOSAKI Motohiro if (vma->vm_ops && vma->vm_ops->set_policy) { 6858d34694cSKOSAKI Motohiro err = vma->vm_ops->set_policy(vma, new); 686869833f2SKOSAKI Motohiro if (err) 687869833f2SKOSAKI Motohiro goto err_out; 6888d34694cSKOSAKI Motohiro } 689869833f2SKOSAKI Motohiro 690869833f2SKOSAKI Motohiro old = vma->vm_policy; 691869833f2SKOSAKI Motohiro vma->vm_policy = new; /* protected by mmap_sem */ 692869833f2SKOSAKI Motohiro mpol_put(old); 693869833f2SKOSAKI Motohiro 694869833f2SKOSAKI Motohiro return 0; 695869833f2SKOSAKI Motohiro err_out: 696869833f2SKOSAKI Motohiro mpol_put(new); 6978d34694cSKOSAKI Motohiro return err; 6988d34694cSKOSAKI Motohiro } 6998d34694cSKOSAKI Motohiro 7001da177e4SLinus Torvalds /* Step 2: apply policy to a range and do splits. */ 7019d8cebd4SKOSAKI Motohiro static int mbind_range(struct mm_struct *mm, unsigned long start, 7029d8cebd4SKOSAKI Motohiro unsigned long end, struct mempolicy *new_pol) 7031da177e4SLinus Torvalds { 7041da177e4SLinus Torvalds struct vm_area_struct *next; 7059d8cebd4SKOSAKI Motohiro struct vm_area_struct *prev; 7069d8cebd4SKOSAKI Motohiro struct vm_area_struct *vma; 7079d8cebd4SKOSAKI Motohiro int err = 0; 708e26a5114SKOSAKI Motohiro pgoff_t pgoff; 7099d8cebd4SKOSAKI Motohiro unsigned long vmstart; 7109d8cebd4SKOSAKI Motohiro unsigned long vmend; 7111da177e4SLinus Torvalds 712097d5910SLinus Torvalds vma = find_vma(mm, start); 7139d8cebd4SKOSAKI Motohiro if (!vma || vma->vm_start > start) 7149d8cebd4SKOSAKI Motohiro return -EFAULT; 7159d8cebd4SKOSAKI Motohiro 716097d5910SLinus Torvalds prev = vma->vm_prev; 717e26a5114SKOSAKI Motohiro if (start > vma->vm_start) 718e26a5114SKOSAKI Motohiro prev = vma; 719e26a5114SKOSAKI Motohiro 7209d8cebd4SKOSAKI Motohiro for (; vma && vma->vm_start < end; prev = vma, vma = next) { 7211da177e4SLinus Torvalds next = vma->vm_next; 7229d8cebd4SKOSAKI Motohiro vmstart = max(start, vma->vm_start); 7239d8cebd4SKOSAKI Motohiro vmend = min(end, vma->vm_end); 7249d8cebd4SKOSAKI Motohiro 725e26a5114SKOSAKI Motohiro if (mpol_equal(vma_policy(vma), new_pol)) 726e26a5114SKOSAKI Motohiro continue; 727e26a5114SKOSAKI Motohiro 728e26a5114SKOSAKI Motohiro pgoff = vma->vm_pgoff + 729e26a5114SKOSAKI Motohiro ((vmstart - vma->vm_start) >> PAGE_SHIFT); 7309d8cebd4SKOSAKI Motohiro prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags, 731e26a5114SKOSAKI Motohiro vma->anon_vma, vma->vm_file, pgoff, 73219a809afSAndrea Arcangeli new_pol, vma->vm_userfaultfd_ctx); 7339d8cebd4SKOSAKI Motohiro if (prev) { 7349d8cebd4SKOSAKI Motohiro vma = prev; 7359d8cebd4SKOSAKI Motohiro next = vma->vm_next; 7363964acd0SOleg Nesterov if (mpol_equal(vma_policy(vma), new_pol)) 7379d8cebd4SKOSAKI 
Motohiro continue; 7383964acd0SOleg Nesterov /* vma_merge() joined vma && vma->next, case 8 */ 7393964acd0SOleg Nesterov goto replace; 7401da177e4SLinus Torvalds } 7419d8cebd4SKOSAKI Motohiro if (vma->vm_start != vmstart) { 7429d8cebd4SKOSAKI Motohiro err = split_vma(vma->vm_mm, vma, vmstart, 1); 7439d8cebd4SKOSAKI Motohiro if (err) 7449d8cebd4SKOSAKI Motohiro goto out; 7459d8cebd4SKOSAKI Motohiro } 7469d8cebd4SKOSAKI Motohiro if (vma->vm_end != vmend) { 7479d8cebd4SKOSAKI Motohiro err = split_vma(vma->vm_mm, vma, vmend, 0); 7489d8cebd4SKOSAKI Motohiro if (err) 7499d8cebd4SKOSAKI Motohiro goto out; 7509d8cebd4SKOSAKI Motohiro } 7513964acd0SOleg Nesterov replace: 752869833f2SKOSAKI Motohiro err = vma_replace_policy(vma, new_pol); 7539d8cebd4SKOSAKI Motohiro if (err) 7549d8cebd4SKOSAKI Motohiro goto out; 7559d8cebd4SKOSAKI Motohiro } 7569d8cebd4SKOSAKI Motohiro 7579d8cebd4SKOSAKI Motohiro out: 7581da177e4SLinus Torvalds return err; 7591da177e4SLinus Torvalds } 7601da177e4SLinus Torvalds 7611da177e4SLinus Torvalds /* Set the process memory policy */ 762028fec41SDavid Rientjes static long do_set_mempolicy(unsigned short mode, unsigned short flags, 763028fec41SDavid Rientjes nodemask_t *nodes) 7641da177e4SLinus Torvalds { 76558568d2aSMiao Xie struct mempolicy *new, *old; 7664bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 76758568d2aSMiao Xie int ret; 7681da177e4SLinus Torvalds 7694bfc4495SKAMEZAWA Hiroyuki if (!scratch) 7704bfc4495SKAMEZAWA Hiroyuki return -ENOMEM; 771f4e53d91SLee Schermerhorn 7724bfc4495SKAMEZAWA Hiroyuki new = mpol_new(mode, flags, nodes); 7734bfc4495SKAMEZAWA Hiroyuki if (IS_ERR(new)) { 7744bfc4495SKAMEZAWA Hiroyuki ret = PTR_ERR(new); 7754bfc4495SKAMEZAWA Hiroyuki goto out; 7764bfc4495SKAMEZAWA Hiroyuki } 7772c7c3a7dSOleg Nesterov 77858568d2aSMiao Xie task_lock(current); 7794bfc4495SKAMEZAWA Hiroyuki ret = mpol_set_nodemask(new, nodes, scratch); 78058568d2aSMiao Xie if (ret) { 78158568d2aSMiao Xie task_unlock(current); 78258568d2aSMiao Xie mpol_put(new); 7834bfc4495SKAMEZAWA Hiroyuki goto out; 78458568d2aSMiao Xie } 78558568d2aSMiao Xie old = current->mempolicy; 7861da177e4SLinus Torvalds current->mempolicy = new; 78745816682SVlastimil Babka if (new && new->mode == MPOL_INTERLEAVE) 78845816682SVlastimil Babka current->il_prev = MAX_NUMNODES-1; 78958568d2aSMiao Xie task_unlock(current); 79058568d2aSMiao Xie mpol_put(old); 7914bfc4495SKAMEZAWA Hiroyuki ret = 0; 7924bfc4495SKAMEZAWA Hiroyuki out: 7934bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 7944bfc4495SKAMEZAWA Hiroyuki return ret; 7951da177e4SLinus Torvalds } 7961da177e4SLinus Torvalds 797bea904d5SLee Schermerhorn /* 798bea904d5SLee Schermerhorn * Return nodemask for policy for get_mempolicy() query 79958568d2aSMiao Xie * 80058568d2aSMiao Xie * Called with task's alloc_lock held 801bea904d5SLee Schermerhorn */ 802bea904d5SLee Schermerhorn static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes) 8031da177e4SLinus Torvalds { 804dfcd3c0dSAndi Kleen nodes_clear(*nodes); 805bea904d5SLee Schermerhorn if (p == &default_policy) 806bea904d5SLee Schermerhorn return; 807bea904d5SLee Schermerhorn 80845c4745aSLee Schermerhorn switch (p->mode) { 80919770b32SMel Gorman case MPOL_BIND: 81019770b32SMel Gorman /* Fall through */ 8111da177e4SLinus Torvalds case MPOL_INTERLEAVE: 812dfcd3c0dSAndi Kleen *nodes = p->v.nodes; 8131da177e4SLinus Torvalds break; 8141da177e4SLinus Torvalds case MPOL_PREFERRED: 815fc36b8d3SLee Schermerhorn if (!(p->flags & MPOL_F_LOCAL)) 816dfcd3c0dSAndi Kleen 
node_set(p->v.preferred_node, *nodes); 81753f2556bSLee Schermerhorn /* else return empty node mask for local allocation */ 8181da177e4SLinus Torvalds break; 8191da177e4SLinus Torvalds default: 8201da177e4SLinus Torvalds BUG(); 8211da177e4SLinus Torvalds } 8221da177e4SLinus Torvalds } 8231da177e4SLinus Torvalds 824d4edcf0dSDave Hansen static int lookup_node(unsigned long addr) 8251da177e4SLinus Torvalds { 8261da177e4SLinus Torvalds struct page *p; 8271da177e4SLinus Torvalds int err; 8281da177e4SLinus Torvalds 829768ae309SLorenzo Stoakes err = get_user_pages(addr & PAGE_MASK, 1, 0, &p, NULL); 8301da177e4SLinus Torvalds if (err >= 0) { 8311da177e4SLinus Torvalds err = page_to_nid(p); 8321da177e4SLinus Torvalds put_page(p); 8331da177e4SLinus Torvalds } 8341da177e4SLinus Torvalds return err; 8351da177e4SLinus Torvalds } 8361da177e4SLinus Torvalds 8371da177e4SLinus Torvalds /* Retrieve NUMA policy */ 838dbcb0f19SAdrian Bunk static long do_get_mempolicy(int *policy, nodemask_t *nmask, 8391da177e4SLinus Torvalds unsigned long addr, unsigned long flags) 8401da177e4SLinus Torvalds { 8418bccd85fSChristoph Lameter int err; 8421da177e4SLinus Torvalds struct mm_struct *mm = current->mm; 8431da177e4SLinus Torvalds struct vm_area_struct *vma = NULL; 8441da177e4SLinus Torvalds struct mempolicy *pol = current->mempolicy; 8451da177e4SLinus Torvalds 846754af6f5SLee Schermerhorn if (flags & 847754af6f5SLee Schermerhorn ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED)) 8481da177e4SLinus Torvalds return -EINVAL; 849754af6f5SLee Schermerhorn 850754af6f5SLee Schermerhorn if (flags & MPOL_F_MEMS_ALLOWED) { 851754af6f5SLee Schermerhorn if (flags & (MPOL_F_NODE|MPOL_F_ADDR)) 852754af6f5SLee Schermerhorn return -EINVAL; 853754af6f5SLee Schermerhorn *policy = 0; /* just so it's initialized */ 85458568d2aSMiao Xie task_lock(current); 855754af6f5SLee Schermerhorn *nmask = cpuset_current_mems_allowed; 85658568d2aSMiao Xie task_unlock(current); 857754af6f5SLee Schermerhorn return 0; 858754af6f5SLee Schermerhorn } 859754af6f5SLee Schermerhorn 8601da177e4SLinus Torvalds if (flags & MPOL_F_ADDR) { 861bea904d5SLee Schermerhorn /* 862bea904d5SLee Schermerhorn * Do NOT fall back to task policy if the 863bea904d5SLee Schermerhorn * vma/shared policy at addr is NULL. We 864bea904d5SLee Schermerhorn * want to return MPOL_DEFAULT in this case. 
865bea904d5SLee Schermerhorn */ 8661da177e4SLinus Torvalds down_read(&mm->mmap_sem); 8671da177e4SLinus Torvalds vma = find_vma_intersection(mm, addr, addr+1); 8681da177e4SLinus Torvalds if (!vma) { 8691da177e4SLinus Torvalds up_read(&mm->mmap_sem); 8701da177e4SLinus Torvalds return -EFAULT; 8711da177e4SLinus Torvalds } 8721da177e4SLinus Torvalds if (vma->vm_ops && vma->vm_ops->get_policy) 8731da177e4SLinus Torvalds pol = vma->vm_ops->get_policy(vma, addr); 8741da177e4SLinus Torvalds else 8751da177e4SLinus Torvalds pol = vma->vm_policy; 8761da177e4SLinus Torvalds } else if (addr) 8771da177e4SLinus Torvalds return -EINVAL; 8781da177e4SLinus Torvalds 8791da177e4SLinus Torvalds if (!pol) 880bea904d5SLee Schermerhorn pol = &default_policy; /* indicates default behavior */ 8811da177e4SLinus Torvalds 8821da177e4SLinus Torvalds if (flags & MPOL_F_NODE) { 8831da177e4SLinus Torvalds if (flags & MPOL_F_ADDR) { 884d4edcf0dSDave Hansen err = lookup_node(addr); 8851da177e4SLinus Torvalds if (err < 0) 8861da177e4SLinus Torvalds goto out; 8878bccd85fSChristoph Lameter *policy = err; 8881da177e4SLinus Torvalds } else if (pol == current->mempolicy && 88945c4745aSLee Schermerhorn pol->mode == MPOL_INTERLEAVE) { 89045816682SVlastimil Babka *policy = next_node_in(current->il_prev, pol->v.nodes); 8911da177e4SLinus Torvalds } else { 8921da177e4SLinus Torvalds err = -EINVAL; 8931da177e4SLinus Torvalds goto out; 8941da177e4SLinus Torvalds } 895bea904d5SLee Schermerhorn } else { 896bea904d5SLee Schermerhorn *policy = pol == &default_policy ? MPOL_DEFAULT : 897bea904d5SLee Schermerhorn pol->mode; 898d79df630SDavid Rientjes /* 899d79df630SDavid Rientjes * Internal mempolicy flags must be masked off before exposing 900d79df630SDavid Rientjes * the policy to userspace. 901d79df630SDavid Rientjes */ 902d79df630SDavid Rientjes *policy |= (pol->flags & MPOL_MODE_FLAGS); 903bea904d5SLee Schermerhorn } 9041da177e4SLinus Torvalds 9051da177e4SLinus Torvalds err = 0; 90658568d2aSMiao Xie if (nmask) { 907c6b6ef8bSLee Schermerhorn if (mpol_store_user_nodemask(pol)) { 908c6b6ef8bSLee Schermerhorn *nmask = pol->w.user_nodemask; 909c6b6ef8bSLee Schermerhorn } else { 91058568d2aSMiao Xie task_lock(current); 911bea904d5SLee Schermerhorn get_policy_nodemask(pol, nmask); 91258568d2aSMiao Xie task_unlock(current); 91358568d2aSMiao Xie } 914c6b6ef8bSLee Schermerhorn } 9151da177e4SLinus Torvalds 9161da177e4SLinus Torvalds out: 91752cd3b07SLee Schermerhorn mpol_cond_put(pol); 9181da177e4SLinus Torvalds if (vma) 9191da177e4SLinus Torvalds up_read(&current->mm->mmap_sem); 9201da177e4SLinus Torvalds return err; 9211da177e4SLinus Torvalds } 9221da177e4SLinus Torvalds 923b20a3503SChristoph Lameter #ifdef CONFIG_MIGRATION 9248bccd85fSChristoph Lameter /* 925c8633798SNaoya Horiguchi * page migration, thp tail pages can be passed. 9266ce3c4c0SChristoph Lameter */ 927fc301289SChristoph Lameter static void migrate_page_add(struct page *page, struct list_head *pagelist, 928fc301289SChristoph Lameter unsigned long flags) 9296ce3c4c0SChristoph Lameter { 930c8633798SNaoya Horiguchi struct page *head = compound_head(page); 9316ce3c4c0SChristoph Lameter /* 932fc301289SChristoph Lameter * Avoid migrating a page that is shared with others.
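 * Here "shared" means mapped more than once: unless MPOL_MF_MOVE_ALL is set,
 * only pages with page_mapcount() == 1 are queued for migration.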
9336ce3c4c0SChristoph Lameter */ 934c8633798SNaoya Horiguchi if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) { 935c8633798SNaoya Horiguchi if (!isolate_lru_page(head)) { 936c8633798SNaoya Horiguchi list_add_tail(&head->lru, pagelist); 937c8633798SNaoya Horiguchi mod_node_page_state(page_pgdat(head), 938c8633798SNaoya Horiguchi NR_ISOLATED_ANON + page_is_file_cache(head), 939c8633798SNaoya Horiguchi hpage_nr_pages(head)); 94062695a84SNick Piggin } 94162695a84SNick Piggin } 9426ce3c4c0SChristoph Lameter } 9436ce3c4c0SChristoph Lameter 944742755a1SChristoph Lameter static struct page *new_node_page(struct page *page, unsigned long node, int **x) 94595a402c3SChristoph Lameter { 946e2d8cf40SNaoya Horiguchi if (PageHuge(page)) 947e2d8cf40SNaoya Horiguchi return alloc_huge_page_node(page_hstate(compound_head(page)), 948e2d8cf40SNaoya Horiguchi node); 949c8633798SNaoya Horiguchi else if (thp_migration_supported() && PageTransHuge(page)) { 950c8633798SNaoya Horiguchi struct page *thp; 951c8633798SNaoya Horiguchi 952c8633798SNaoya Horiguchi thp = alloc_pages_node(node, 953c8633798SNaoya Horiguchi (GFP_TRANSHUGE | __GFP_THISNODE), 954c8633798SNaoya Horiguchi HPAGE_PMD_ORDER); 955c8633798SNaoya Horiguchi if (!thp) 956c8633798SNaoya Horiguchi return NULL; 957c8633798SNaoya Horiguchi prep_transhuge_page(thp); 958c8633798SNaoya Horiguchi return thp; 959c8633798SNaoya Horiguchi } else 96096db800fSVlastimil Babka return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE | 961b360edb4SDavid Rientjes __GFP_THISNODE, 0); 96295a402c3SChristoph Lameter } 96395a402c3SChristoph Lameter 9646ce3c4c0SChristoph Lameter /* 9657e2ab150SChristoph Lameter * Migrate pages from one node to a target node. 9667e2ab150SChristoph Lameter * Returns error or the number of pages not migrated. 9677e2ab150SChristoph Lameter */ 968dbcb0f19SAdrian Bunk static int migrate_to_node(struct mm_struct *mm, int source, int dest, 969dbcb0f19SAdrian Bunk int flags) 9707e2ab150SChristoph Lameter { 9717e2ab150SChristoph Lameter nodemask_t nmask; 9727e2ab150SChristoph Lameter LIST_HEAD(pagelist); 9737e2ab150SChristoph Lameter int err = 0; 9747e2ab150SChristoph Lameter 9757e2ab150SChristoph Lameter nodes_clear(nmask); 9767e2ab150SChristoph Lameter node_set(source, nmask); 9777e2ab150SChristoph Lameter 97808270807SMinchan Kim /* 97908270807SMinchan Kim * This does not "check" the range but isolates all pages that 98008270807SMinchan Kim * need migration. Between passing in the full user address 98108270807SMinchan Kim * space range and MPOL_MF_DISCONTIG_OK, this call can not fail. 98208270807SMinchan Kim */ 98308270807SMinchan Kim VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))); 98498094945SNaoya Horiguchi queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask, 9857e2ab150SChristoph Lameter flags | MPOL_MF_DISCONTIG_OK, &pagelist); 9867e2ab150SChristoph Lameter 987cf608ac1SMinchan Kim if (!list_empty(&pagelist)) { 98868711a74SDavid Rientjes err = migrate_pages(&pagelist, new_node_page, NULL, dest, 9899c620e2bSHugh Dickins MIGRATE_SYNC, MR_SYSCALL); 990cf608ac1SMinchan Kim if (err) 991e2d8cf40SNaoya Horiguchi putback_movable_pages(&pagelist); 992cf608ac1SMinchan Kim } 99395a402c3SChristoph Lameter 9947e2ab150SChristoph Lameter return err; 9957e2ab150SChristoph Lameter } 9967e2ab150SChristoph Lameter 9977e2ab150SChristoph Lameter /* 9987e2ab150SChristoph Lameter * Move pages between the two nodesets so as to preserve the physical 9997e2ab150SChristoph Lameter * layout as much as possible. 
100039743889SChristoph Lameter * 100139743889SChristoph Lameter * Returns the number of page that could not be moved. 100239743889SChristoph Lameter */ 10030ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, 10040ce72d4fSAndrew Morton const nodemask_t *to, int flags) 100539743889SChristoph Lameter { 10067e2ab150SChristoph Lameter int busy = 0; 10070aedadf9SChristoph Lameter int err; 10087e2ab150SChristoph Lameter nodemask_t tmp; 100939743889SChristoph Lameter 10100aedadf9SChristoph Lameter err = migrate_prep(); 10110aedadf9SChristoph Lameter if (err) 10120aedadf9SChristoph Lameter return err; 10130aedadf9SChristoph Lameter 101439743889SChristoph Lameter down_read(&mm->mmap_sem); 1015d4984711SChristoph Lameter 10167e2ab150SChristoph Lameter /* 10177e2ab150SChristoph Lameter * Find a 'source' bit set in 'tmp' whose corresponding 'dest' 10187e2ab150SChristoph Lameter * bit in 'to' is not also set in 'tmp'. Clear the found 'source' 10197e2ab150SChristoph Lameter * bit in 'tmp', and return that <source, dest> pair for migration. 10207e2ab150SChristoph Lameter * The pair of nodemasks 'to' and 'from' define the map. 10217e2ab150SChristoph Lameter * 10227e2ab150SChristoph Lameter * If no pair of bits is found that way, fallback to picking some 10237e2ab150SChristoph Lameter * pair of 'source' and 'dest' bits that are not the same. If the 10247e2ab150SChristoph Lameter * 'source' and 'dest' bits are the same, this represents a node 10257e2ab150SChristoph Lameter * that will be migrating to itself, so no pages need move. 10267e2ab150SChristoph Lameter * 10277e2ab150SChristoph Lameter * If no bits are left in 'tmp', or if all remaining bits left 10287e2ab150SChristoph Lameter * in 'tmp' correspond to the same bit in 'to', return false 10297e2ab150SChristoph Lameter * (nothing left to migrate). 10307e2ab150SChristoph Lameter * 10317e2ab150SChristoph Lameter * This lets us pick a pair of nodes to migrate between, such that 10327e2ab150SChristoph Lameter * if possible the dest node is not already occupied by some other 10337e2ab150SChristoph Lameter * source node, minimizing the risk of overloading the memory on a 10347e2ab150SChristoph Lameter * node that would happen if we migrated incoming memory to a node 10357e2ab150SChristoph Lameter * before migrating outgoing memory source that same node. 10367e2ab150SChristoph Lameter * 10377e2ab150SChristoph Lameter * A single scan of tmp is sufficient. As we go, we remember the 10387e2ab150SChristoph Lameter * most recent <s, d> pair that moved (s != d). If we find a pair 10397e2ab150SChristoph Lameter * that not only moved, but what's better, moved to an empty slot 10407e2ab150SChristoph Lameter * (d is not set in tmp), then we break out then, with that pair. 1041ae0e47f0SJustin P. Mattock * Otherwise when we finish scanning from_tmp, we at least have the 10427e2ab150SChristoph Lameter * most recent <s, d> pair that moved. If we get all the way through 10437e2ab150SChristoph Lameter * the scan of tmp without finding any node that moved, much less 10447e2ab150SChristoph Lameter * moved to an empty node, then there is nothing left worth migrating. 
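 *
 * For example, moving from nodes {0,1} to {1,2} picks the pair 1 -> 2 first
 * (node 2 is not a remaining source) and only then 0 -> 1, so node 1 is
 * drained before pages from node 0 land on it.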
10457e2ab150SChristoph Lameter */ 10467e2ab150SChristoph Lameter 10470ce72d4fSAndrew Morton tmp = *from; 10487e2ab150SChristoph Lameter while (!nodes_empty(tmp)) { 10497e2ab150SChristoph Lameter int s,d; 1050b76ac7e7SJianguo Wu int source = NUMA_NO_NODE; 10517e2ab150SChristoph Lameter int dest = 0; 10527e2ab150SChristoph Lameter 10537e2ab150SChristoph Lameter for_each_node_mask(s, tmp) { 10544a5b18ccSLarry Woodman 10554a5b18ccSLarry Woodman /* 10564a5b18ccSLarry Woodman * do_migrate_pages() tries to maintain the relative 10574a5b18ccSLarry Woodman * node relationship of the pages established between 10584a5b18ccSLarry Woodman * threads and memory areas. 10594a5b18ccSLarry Woodman * 10604a5b18ccSLarry Woodman * However if the number of source nodes is not equal to 10614a5b18ccSLarry Woodman * the number of destination nodes we can not preserve 10624a5b18ccSLarry Woodman * this node relative relationship. In that case, skip 10634a5b18ccSLarry Woodman * copying memory from a node that is in the destination 10644a5b18ccSLarry Woodman * mask. 10654a5b18ccSLarry Woodman * 10664a5b18ccSLarry Woodman * Example: [2,3,4] -> [3,4,5] moves everything. 10674a5b18ccSLarry Woodman * [0-7] - > [3,4,5] moves only 0,1,2,6,7. 10684a5b18ccSLarry Woodman */ 10694a5b18ccSLarry Woodman 10700ce72d4fSAndrew Morton if ((nodes_weight(*from) != nodes_weight(*to)) && 10710ce72d4fSAndrew Morton (node_isset(s, *to))) 10724a5b18ccSLarry Woodman continue; 10734a5b18ccSLarry Woodman 10740ce72d4fSAndrew Morton d = node_remap(s, *from, *to); 10757e2ab150SChristoph Lameter if (s == d) 10767e2ab150SChristoph Lameter continue; 10777e2ab150SChristoph Lameter 10787e2ab150SChristoph Lameter source = s; /* Node moved. Memorize */ 10797e2ab150SChristoph Lameter dest = d; 10807e2ab150SChristoph Lameter 10817e2ab150SChristoph Lameter /* dest not in remaining from nodes? */ 10827e2ab150SChristoph Lameter if (!node_isset(dest, tmp)) 10837e2ab150SChristoph Lameter break; 10847e2ab150SChristoph Lameter } 1085b76ac7e7SJianguo Wu if (source == NUMA_NO_NODE) 10867e2ab150SChristoph Lameter break; 10877e2ab150SChristoph Lameter 10887e2ab150SChristoph Lameter node_clear(source, tmp); 10897e2ab150SChristoph Lameter err = migrate_to_node(mm, source, dest, flags); 10907e2ab150SChristoph Lameter if (err > 0) 10917e2ab150SChristoph Lameter busy += err; 10927e2ab150SChristoph Lameter if (err < 0) 10937e2ab150SChristoph Lameter break; 109439743889SChristoph Lameter } 109539743889SChristoph Lameter up_read(&mm->mmap_sem); 10967e2ab150SChristoph Lameter if (err < 0) 10977e2ab150SChristoph Lameter return err; 10987e2ab150SChristoph Lameter return busy; 1099b20a3503SChristoph Lameter 110039743889SChristoph Lameter } 110139743889SChristoph Lameter 11023ad33b24SLee Schermerhorn /* 11033ad33b24SLee Schermerhorn * Allocate a new page for page migration based on vma policy. 1104d05f0cdcSHugh Dickins * Start by assuming the page is mapped by the same vma as contains @start. 11053ad33b24SLee Schermerhorn * Search forward from there, if not. N.B., this assumes that the 11063ad33b24SLee Schermerhorn * list of pages handed to migrate_pages()--which is how we get here-- 11073ad33b24SLee Schermerhorn * is in virtual address order. 
11083ad33b24SLee Schermerhorn */ 1109d05f0cdcSHugh Dickins static struct page *new_page(struct page *page, unsigned long start, int **x) 111095a402c3SChristoph Lameter { 1111d05f0cdcSHugh Dickins struct vm_area_struct *vma; 11123ad33b24SLee Schermerhorn unsigned long uninitialized_var(address); 111395a402c3SChristoph Lameter 1114d05f0cdcSHugh Dickins vma = find_vma(current->mm, start); 11153ad33b24SLee Schermerhorn while (vma) { 11163ad33b24SLee Schermerhorn address = page_address_in_vma(page, vma); 11173ad33b24SLee Schermerhorn if (address != -EFAULT) 11183ad33b24SLee Schermerhorn break; 11193ad33b24SLee Schermerhorn vma = vma->vm_next; 11203ad33b24SLee Schermerhorn } 11213ad33b24SLee Schermerhorn 112211c731e8SWanpeng Li if (PageHuge(page)) { 1123cc81717eSMichal Hocko BUG_ON(!vma); 112474060e4dSNaoya Horiguchi return alloc_huge_page_noerr(vma, address, 1); 1125c8633798SNaoya Horiguchi } else if (thp_migration_supported() && PageTransHuge(page)) { 1126c8633798SNaoya Horiguchi struct page *thp; 1127c8633798SNaoya Horiguchi 1128c8633798SNaoya Horiguchi thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address, 1129c8633798SNaoya Horiguchi HPAGE_PMD_ORDER); 1130c8633798SNaoya Horiguchi if (!thp) 1131c8633798SNaoya Horiguchi return NULL; 1132c8633798SNaoya Horiguchi prep_transhuge_page(thp); 1133c8633798SNaoya Horiguchi return thp; 113411c731e8SWanpeng Li } 113511c731e8SWanpeng Li /* 113611c731e8SWanpeng Li * if !vma, alloc_page_vma() will use task or system default policy 113711c731e8SWanpeng Li */ 11380f556856SMichal Hocko return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL, 11390f556856SMichal Hocko vma, address); 114095a402c3SChristoph Lameter } 1141b20a3503SChristoph Lameter #else 1142b20a3503SChristoph Lameter 1143b20a3503SChristoph Lameter static void migrate_page_add(struct page *page, struct list_head *pagelist, 1144b20a3503SChristoph Lameter unsigned long flags) 1145b20a3503SChristoph Lameter { 1146b20a3503SChristoph Lameter } 1147b20a3503SChristoph Lameter 11480ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, 11490ce72d4fSAndrew Morton const nodemask_t *to, int flags) 1150b20a3503SChristoph Lameter { 1151b20a3503SChristoph Lameter return -ENOSYS; 1152b20a3503SChristoph Lameter } 115395a402c3SChristoph Lameter 1154d05f0cdcSHugh Dickins static struct page *new_page(struct page *page, unsigned long start, int **x) 115595a402c3SChristoph Lameter { 115695a402c3SChristoph Lameter return NULL; 115795a402c3SChristoph Lameter } 1158b20a3503SChristoph Lameter #endif 1159b20a3503SChristoph Lameter 1160dbcb0f19SAdrian Bunk static long do_mbind(unsigned long start, unsigned long len, 1161028fec41SDavid Rientjes unsigned short mode, unsigned short mode_flags, 1162028fec41SDavid Rientjes nodemask_t *nmask, unsigned long flags) 11636ce3c4c0SChristoph Lameter { 11646ce3c4c0SChristoph Lameter struct mm_struct *mm = current->mm; 11656ce3c4c0SChristoph Lameter struct mempolicy *new; 11666ce3c4c0SChristoph Lameter unsigned long end; 11676ce3c4c0SChristoph Lameter int err; 11686ce3c4c0SChristoph Lameter LIST_HEAD(pagelist); 11696ce3c4c0SChristoph Lameter 1170b24f53a0SLee Schermerhorn if (flags & ~(unsigned long)MPOL_MF_VALID) 11716ce3c4c0SChristoph Lameter return -EINVAL; 117274c00241SChristoph Lameter if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE)) 11736ce3c4c0SChristoph Lameter return -EPERM; 11746ce3c4c0SChristoph Lameter 11756ce3c4c0SChristoph Lameter if (start & ~PAGE_MASK) 11766ce3c4c0SChristoph Lameter return -EINVAL; 11776ce3c4c0SChristoph 
Lameter 11786ce3c4c0SChristoph Lameter if (mode == MPOL_DEFAULT) 11796ce3c4c0SChristoph Lameter flags &= ~MPOL_MF_STRICT; 11806ce3c4c0SChristoph Lameter 11816ce3c4c0SChristoph Lameter len = (len + PAGE_SIZE - 1) & PAGE_MASK; 11826ce3c4c0SChristoph Lameter end = start + len; 11836ce3c4c0SChristoph Lameter 11846ce3c4c0SChristoph Lameter if (end < start) 11856ce3c4c0SChristoph Lameter return -EINVAL; 11866ce3c4c0SChristoph Lameter if (end == start) 11876ce3c4c0SChristoph Lameter return 0; 11886ce3c4c0SChristoph Lameter 1189028fec41SDavid Rientjes new = mpol_new(mode, mode_flags, nmask); 11906ce3c4c0SChristoph Lameter if (IS_ERR(new)) 11916ce3c4c0SChristoph Lameter return PTR_ERR(new); 11926ce3c4c0SChristoph Lameter 1193b24f53a0SLee Schermerhorn if (flags & MPOL_MF_LAZY) 1194b24f53a0SLee Schermerhorn new->flags |= MPOL_F_MOF; 1195b24f53a0SLee Schermerhorn 11966ce3c4c0SChristoph Lameter /* 11976ce3c4c0SChristoph Lameter * If we are using the default policy then operation 11986ce3c4c0SChristoph Lameter * on discontinuous address spaces is okay after all 11996ce3c4c0SChristoph Lameter */ 12006ce3c4c0SChristoph Lameter if (!new) 12016ce3c4c0SChristoph Lameter flags |= MPOL_MF_DISCONTIG_OK; 12026ce3c4c0SChristoph Lameter 1203028fec41SDavid Rientjes pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n", 1204028fec41SDavid Rientjes start, start + len, mode, mode_flags, 120500ef2d2fSDavid Rientjes nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE); 12066ce3c4c0SChristoph Lameter 12070aedadf9SChristoph Lameter if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 12080aedadf9SChristoph Lameter 12090aedadf9SChristoph Lameter err = migrate_prep(); 12100aedadf9SChristoph Lameter if (err) 1211b05ca738SKOSAKI Motohiro goto mpol_out; 12120aedadf9SChristoph Lameter } 12134bfc4495SKAMEZAWA Hiroyuki { 12144bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 12154bfc4495SKAMEZAWA Hiroyuki if (scratch) { 12166ce3c4c0SChristoph Lameter down_write(&mm->mmap_sem); 121758568d2aSMiao Xie task_lock(current); 12184bfc4495SKAMEZAWA Hiroyuki err = mpol_set_nodemask(new, nmask, scratch); 121958568d2aSMiao Xie task_unlock(current); 12204bfc4495SKAMEZAWA Hiroyuki if (err) 122158568d2aSMiao Xie up_write(&mm->mmap_sem); 12224bfc4495SKAMEZAWA Hiroyuki } else 12234bfc4495SKAMEZAWA Hiroyuki err = -ENOMEM; 12244bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 12254bfc4495SKAMEZAWA Hiroyuki } 1226b05ca738SKOSAKI Motohiro if (err) 1227b05ca738SKOSAKI Motohiro goto mpol_out; 1228b05ca738SKOSAKI Motohiro 1229d05f0cdcSHugh Dickins err = queue_pages_range(mm, start, end, nmask, 12306ce3c4c0SChristoph Lameter flags | MPOL_MF_INVERT, &pagelist); 1231d05f0cdcSHugh Dickins if (!err) 12329d8cebd4SKOSAKI Motohiro err = mbind_range(mm, start, end, new); 12337e2ab150SChristoph Lameter 1234b24f53a0SLee Schermerhorn if (!err) { 1235b24f53a0SLee Schermerhorn int nr_failed = 0; 1236b24f53a0SLee Schermerhorn 1237cf608ac1SMinchan Kim if (!list_empty(&pagelist)) { 1238b24f53a0SLee Schermerhorn WARN_ON_ONCE(flags & MPOL_MF_LAZY); 1239d05f0cdcSHugh Dickins nr_failed = migrate_pages(&pagelist, new_page, NULL, 1240d05f0cdcSHugh Dickins start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND); 1241cf608ac1SMinchan Kim if (nr_failed) 124274060e4dSNaoya Horiguchi putback_movable_pages(&pagelist); 1243cf608ac1SMinchan Kim } 12446ce3c4c0SChristoph Lameter 1245b24f53a0SLee Schermerhorn if (nr_failed && (flags & MPOL_MF_STRICT)) 12466ce3c4c0SChristoph Lameter err = -EIO; 1247ab8a3e14SKOSAKI Motohiro } else 1248b0e5fd73SJoonsoo Kim putback_movable_pages(&pagelist); 
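	/*
	 * From the caller's point of view (an illustrative note): when
	 * MPOL_MF_MOVE was requested and some queued pages could not be
	 * migrated, the MPOL_MF_STRICT check above turns that partial
	 * failure into -EIO.  A call requesting both looks like:
	 *
	 *	mbind(addr, len, MPOL_BIND, &nodemask, maxnode,
	 *	      MPOL_MF_MOVE | MPOL_MF_STRICT);
	 */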
1249b20a3503SChristoph Lameter 12506ce3c4c0SChristoph Lameter up_write(&mm->mmap_sem); 1251b05ca738SKOSAKI Motohiro mpol_out: 1252f0be3d32SLee Schermerhorn mpol_put(new); 12536ce3c4c0SChristoph Lameter return err; 12546ce3c4c0SChristoph Lameter } 12556ce3c4c0SChristoph Lameter 125639743889SChristoph Lameter /* 12578bccd85fSChristoph Lameter * User space interface with variable sized bitmaps for nodelists. 12588bccd85fSChristoph Lameter */ 12598bccd85fSChristoph Lameter 12608bccd85fSChristoph Lameter /* Copy a node mask from user space. */ 126139743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask, 12628bccd85fSChristoph Lameter unsigned long maxnode) 12638bccd85fSChristoph Lameter { 12648bccd85fSChristoph Lameter unsigned long k; 12658bccd85fSChristoph Lameter unsigned long nlongs; 12668bccd85fSChristoph Lameter unsigned long endmask; 12678bccd85fSChristoph Lameter 12688bccd85fSChristoph Lameter --maxnode; 12698bccd85fSChristoph Lameter nodes_clear(*nodes); 12708bccd85fSChristoph Lameter if (maxnode == 0 || !nmask) 12718bccd85fSChristoph Lameter return 0; 1272a9c930baSAndi Kleen if (maxnode > PAGE_SIZE*BITS_PER_BYTE) 1273636f13c1SChris Wright return -EINVAL; 12748bccd85fSChristoph Lameter 12758bccd85fSChristoph Lameter nlongs = BITS_TO_LONGS(maxnode); 12768bccd85fSChristoph Lameter if ((maxnode % BITS_PER_LONG) == 0) 12778bccd85fSChristoph Lameter endmask = ~0UL; 12788bccd85fSChristoph Lameter else 12798bccd85fSChristoph Lameter endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1; 12808bccd85fSChristoph Lameter 12818bccd85fSChristoph Lameter /* When the user specified more nodes than supported just check 12828bccd85fSChristoph Lameter if the non supported part is all zero. */ 12838bccd85fSChristoph Lameter if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) { 12848bccd85fSChristoph Lameter if (nlongs > PAGE_SIZE/sizeof(long)) 12858bccd85fSChristoph Lameter return -EINVAL; 12868bccd85fSChristoph Lameter for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) { 12878bccd85fSChristoph Lameter unsigned long t; 12888bccd85fSChristoph Lameter if (get_user(t, nmask + k)) 12898bccd85fSChristoph Lameter return -EFAULT; 12908bccd85fSChristoph Lameter if (k == nlongs - 1) { 12918bccd85fSChristoph Lameter if (t & endmask) 12928bccd85fSChristoph Lameter return -EINVAL; 12938bccd85fSChristoph Lameter } else if (t) 12948bccd85fSChristoph Lameter return -EINVAL; 12958bccd85fSChristoph Lameter } 12968bccd85fSChristoph Lameter nlongs = BITS_TO_LONGS(MAX_NUMNODES); 12978bccd85fSChristoph Lameter endmask = ~0UL; 12988bccd85fSChristoph Lameter } 12998bccd85fSChristoph Lameter 13008bccd85fSChristoph Lameter if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long))) 13018bccd85fSChristoph Lameter return -EFAULT; 13028bccd85fSChristoph Lameter nodes_addr(*nodes)[nlongs-1] &= endmask; 13038bccd85fSChristoph Lameter return 0; 13048bccd85fSChristoph Lameter } 13058bccd85fSChristoph Lameter 13068bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */ 13078bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode, 13088bccd85fSChristoph Lameter nodemask_t *nodes) 13098bccd85fSChristoph Lameter { 13108bccd85fSChristoph Lameter unsigned long copy = ALIGN(maxnode-1, 64) / 8; 13118bccd85fSChristoph Lameter const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long); 13128bccd85fSChristoph Lameter 13138bccd85fSChristoph Lameter if (copy > nbytes) { 13148bccd85fSChristoph Lameter if (copy > PAGE_SIZE) 
13158bccd85fSChristoph Lameter return -EINVAL; 13168bccd85fSChristoph Lameter if (clear_user((char __user *)mask + nbytes, copy - nbytes)) 13178bccd85fSChristoph Lameter return -EFAULT; 13188bccd85fSChristoph Lameter copy = nbytes; 13198bccd85fSChristoph Lameter } 13208bccd85fSChristoph Lameter return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0; 13218bccd85fSChristoph Lameter } 13228bccd85fSChristoph Lameter 1323938bb9f5SHeiko Carstens SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len, 1324f7f28ca9SRasmus Villemoes unsigned long, mode, const unsigned long __user *, nmask, 1325938bb9f5SHeiko Carstens unsigned long, maxnode, unsigned, flags) 13268bccd85fSChristoph Lameter { 13278bccd85fSChristoph Lameter nodemask_t nodes; 13288bccd85fSChristoph Lameter int err; 1329028fec41SDavid Rientjes unsigned short mode_flags; 13308bccd85fSChristoph Lameter 1331028fec41SDavid Rientjes mode_flags = mode & MPOL_MODE_FLAGS; 1332028fec41SDavid Rientjes mode &= ~MPOL_MODE_FLAGS; 1333a3b51e01SDavid Rientjes if (mode >= MPOL_MAX) 1334a3b51e01SDavid Rientjes return -EINVAL; 13354c50bc01SDavid Rientjes if ((mode_flags & MPOL_F_STATIC_NODES) && 13364c50bc01SDavid Rientjes (mode_flags & MPOL_F_RELATIVE_NODES)) 13374c50bc01SDavid Rientjes return -EINVAL; 13388bccd85fSChristoph Lameter err = get_nodes(&nodes, nmask, maxnode); 13398bccd85fSChristoph Lameter if (err) 13408bccd85fSChristoph Lameter return err; 1341028fec41SDavid Rientjes return do_mbind(start, len, mode, mode_flags, &nodes, flags); 13428bccd85fSChristoph Lameter } 13438bccd85fSChristoph Lameter 13448bccd85fSChristoph Lameter /* Set the process memory policy */ 134523c8902dSRasmus Villemoes SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask, 1346938bb9f5SHeiko Carstens unsigned long, maxnode) 13478bccd85fSChristoph Lameter { 13488bccd85fSChristoph Lameter int err; 13498bccd85fSChristoph Lameter nodemask_t nodes; 1350028fec41SDavid Rientjes unsigned short flags; 13518bccd85fSChristoph Lameter 1352028fec41SDavid Rientjes flags = mode & MPOL_MODE_FLAGS; 1353028fec41SDavid Rientjes mode &= ~MPOL_MODE_FLAGS; 1354028fec41SDavid Rientjes if ((unsigned int)mode >= MPOL_MAX) 13558bccd85fSChristoph Lameter return -EINVAL; 13564c50bc01SDavid Rientjes if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES)) 13574c50bc01SDavid Rientjes return -EINVAL; 13588bccd85fSChristoph Lameter err = get_nodes(&nodes, nmask, maxnode); 13598bccd85fSChristoph Lameter if (err) 13608bccd85fSChristoph Lameter return err; 1361028fec41SDavid Rientjes return do_set_mempolicy(mode, flags, &nodes); 13628bccd85fSChristoph Lameter } 13638bccd85fSChristoph Lameter 1364938bb9f5SHeiko Carstens SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode, 1365938bb9f5SHeiko Carstens const unsigned long __user *, old_nodes, 1366938bb9f5SHeiko Carstens const unsigned long __user *, new_nodes) 136739743889SChristoph Lameter { 1368c69e8d9cSDavid Howells const struct cred *cred = current_cred(), *tcred; 1369596d7cfaSKOSAKI Motohiro struct mm_struct *mm = NULL; 137039743889SChristoph Lameter struct task_struct *task; 137139743889SChristoph Lameter nodemask_t task_nodes; 137239743889SChristoph Lameter int err; 1373596d7cfaSKOSAKI Motohiro nodemask_t *old; 1374596d7cfaSKOSAKI Motohiro nodemask_t *new; 1375596d7cfaSKOSAKI Motohiro NODEMASK_SCRATCH(scratch); 137639743889SChristoph Lameter 1377596d7cfaSKOSAKI Motohiro if (!scratch) 1378596d7cfaSKOSAKI Motohiro return -ENOMEM; 137939743889SChristoph Lameter 1380596d7cfaSKOSAKI 
Motohiro old = &scratch->mask1; 1381596d7cfaSKOSAKI Motohiro new = &scratch->mask2; 1382596d7cfaSKOSAKI Motohiro 1383596d7cfaSKOSAKI Motohiro err = get_nodes(old, old_nodes, maxnode); 138439743889SChristoph Lameter if (err) 1385596d7cfaSKOSAKI Motohiro goto out; 1386596d7cfaSKOSAKI Motohiro 1387596d7cfaSKOSAKI Motohiro err = get_nodes(new, new_nodes, maxnode); 1388596d7cfaSKOSAKI Motohiro if (err) 1389596d7cfaSKOSAKI Motohiro goto out; 139039743889SChristoph Lameter 139139743889SChristoph Lameter /* Find the mm_struct */ 139255cfaa3cSZeng Zhaoming rcu_read_lock(); 1393228ebcbeSPavel Emelyanov task = pid ? find_task_by_vpid(pid) : current; 139439743889SChristoph Lameter if (!task) { 139555cfaa3cSZeng Zhaoming rcu_read_unlock(); 1396596d7cfaSKOSAKI Motohiro err = -ESRCH; 1397596d7cfaSKOSAKI Motohiro goto out; 139839743889SChristoph Lameter } 13993268c63eSChristoph Lameter get_task_struct(task); 140039743889SChristoph Lameter 1401596d7cfaSKOSAKI Motohiro err = -EINVAL; 140239743889SChristoph Lameter 140339743889SChristoph Lameter /* 140439743889SChristoph Lameter * Check if this process has the right to modify the specified 140539743889SChristoph Lameter * process. The right exists if the process has administrative 14067f927fccSAlexey Dobriyan * capabilities, superuser privileges or the same 140739743889SChristoph Lameter * userid as the target process. 140839743889SChristoph Lameter */ 1409c69e8d9cSDavid Howells tcred = __task_cred(task); 1410b38a86ebSEric W. Biederman if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) && 1411b38a86ebSEric W. Biederman !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) && 141274c00241SChristoph Lameter !capable(CAP_SYS_NICE)) { 1413c69e8d9cSDavid Howells rcu_read_unlock(); 141439743889SChristoph Lameter err = -EPERM; 14153268c63eSChristoph Lameter goto out_put; 141639743889SChristoph Lameter } 1417c69e8d9cSDavid Howells rcu_read_unlock(); 141839743889SChristoph Lameter 141939743889SChristoph Lameter task_nodes = cpuset_mems_allowed(task); 142039743889SChristoph Lameter /* Is the user allowed to access the target nodes? */ 1421596d7cfaSKOSAKI Motohiro if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) { 142239743889SChristoph Lameter err = -EPERM; 14233268c63eSChristoph Lameter goto out_put; 142439743889SChristoph Lameter } 142539743889SChristoph Lameter 142601f13bd6SLai Jiangshan if (!nodes_subset(*new, node_states[N_MEMORY])) { 14273b42d28bSChristoph Lameter err = -EINVAL; 14283268c63eSChristoph Lameter goto out_put; 14293b42d28bSChristoph Lameter } 14303b42d28bSChristoph Lameter 143186c3a764SDavid Quigley err = security_task_movememory(task); 143286c3a764SDavid Quigley if (err) 14333268c63eSChristoph Lameter goto out_put; 143486c3a764SDavid Quigley 14353268c63eSChristoph Lameter mm = get_task_mm(task); 14363268c63eSChristoph Lameter put_task_struct(task); 1437f2a9ef88SSasha Levin 1438f2a9ef88SSasha Levin if (!mm) { 1439f2a9ef88SSasha Levin err = -EINVAL; 1440f2a9ef88SSasha Levin goto out; 1441f2a9ef88SSasha Levin } 1442f2a9ef88SSasha Levin 1443596d7cfaSKOSAKI Motohiro err = do_migrate_pages(mm, old, new, 144474c00241SChristoph Lameter capable(CAP_SYS_NICE) ? 
MPOL_MF_MOVE_ALL : MPOL_MF_MOVE); 14453268c63eSChristoph Lameter 144639743889SChristoph Lameter mmput(mm); 14473268c63eSChristoph Lameter out: 1448596d7cfaSKOSAKI Motohiro NODEMASK_SCRATCH_FREE(scratch); 1449596d7cfaSKOSAKI Motohiro 145039743889SChristoph Lameter return err; 14513268c63eSChristoph Lameter 14523268c63eSChristoph Lameter out_put: 14533268c63eSChristoph Lameter put_task_struct(task); 14543268c63eSChristoph Lameter goto out; 14553268c63eSChristoph Lameter 145639743889SChristoph Lameter } 145739743889SChristoph Lameter 145839743889SChristoph Lameter 14598bccd85fSChristoph Lameter /* Retrieve NUMA policy */ 1460938bb9f5SHeiko Carstens SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, 1461938bb9f5SHeiko Carstens unsigned long __user *, nmask, unsigned long, maxnode, 1462938bb9f5SHeiko Carstens unsigned long, addr, unsigned long, flags) 14638bccd85fSChristoph Lameter { 1464dbcb0f19SAdrian Bunk int err; 1465dbcb0f19SAdrian Bunk int uninitialized_var(pval); 14668bccd85fSChristoph Lameter nodemask_t nodes; 14678bccd85fSChristoph Lameter 14688bccd85fSChristoph Lameter if (nmask != NULL && maxnode < MAX_NUMNODES) 14698bccd85fSChristoph Lameter return -EINVAL; 14708bccd85fSChristoph Lameter 14718bccd85fSChristoph Lameter err = do_get_mempolicy(&pval, &nodes, addr, flags); 14728bccd85fSChristoph Lameter 14738bccd85fSChristoph Lameter if (err) 14748bccd85fSChristoph Lameter return err; 14758bccd85fSChristoph Lameter 14768bccd85fSChristoph Lameter if (policy && put_user(pval, policy)) 14778bccd85fSChristoph Lameter return -EFAULT; 14788bccd85fSChristoph Lameter 14798bccd85fSChristoph Lameter if (nmask) 14808bccd85fSChristoph Lameter err = copy_nodes_to_user(nmask, maxnode, &nodes); 14818bccd85fSChristoph Lameter 14828bccd85fSChristoph Lameter return err; 14838bccd85fSChristoph Lameter } 14848bccd85fSChristoph Lameter 14851da177e4SLinus Torvalds #ifdef CONFIG_COMPAT 14861da177e4SLinus Torvalds 1487c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, 1488c93e0f6cSHeiko Carstens compat_ulong_t __user *, nmask, 1489c93e0f6cSHeiko Carstens compat_ulong_t, maxnode, 1490c93e0f6cSHeiko Carstens compat_ulong_t, addr, compat_ulong_t, flags) 14911da177e4SLinus Torvalds { 14921da177e4SLinus Torvalds long err; 14931da177e4SLinus Torvalds unsigned long __user *nm = NULL; 14941da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 14951da177e4SLinus Torvalds DECLARE_BITMAP(bm, MAX_NUMNODES); 14961da177e4SLinus Torvalds 14971da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 14981da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 14991da177e4SLinus Torvalds 15001da177e4SLinus Torvalds if (nmask) 15011da177e4SLinus Torvalds nm = compat_alloc_user_space(alloc_size); 15021da177e4SLinus Torvalds 15031da177e4SLinus Torvalds err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags); 15041da177e4SLinus Torvalds 15051da177e4SLinus Torvalds if (!err && nmask) { 15062bbff6c7SKAMEZAWA Hiroyuki unsigned long copy_size; 15072bbff6c7SKAMEZAWA Hiroyuki copy_size = min_t(unsigned long, sizeof(bm), alloc_size); 15082bbff6c7SKAMEZAWA Hiroyuki err = copy_from_user(bm, nm, copy_size); 15091da177e4SLinus Torvalds /* ensure entire bitmap is zeroed */ 15101da177e4SLinus Torvalds err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8); 15111da177e4SLinus Torvalds err |= compat_put_bitmap(nmask, bm, nr_bits); 15121da177e4SLinus Torvalds } 15131da177e4SLinus Torvalds 15141da177e4SLinus Torvalds return err; 15151da177e4SLinus Torvalds } 
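/*
 * A user-space sketch of the native syscalls defined above (assumed
 * usage, not part of this file): the node mask is passed as an array
 * of unsigned longs and maxnode counts bits, of which get_nodes()
 * uses maxnode - 1, so a generous value such as 8 * sizeof(mask) is
 * fine for low node numbers.  Raw syscall(2) is used here to avoid a
 * libnuma dependency; the MPOL_* values mirror the uapi header and
 * error handling is omitted.
 *
 *	#include <sys/mman.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	#define MPOL_BIND		2	// from include/uapi/linux/mempolicy.h
 *	#define MPOL_INTERLEAVE		3
 *	#define MPOL_MF_STRICT		(1 << 0)
 *	#define MPOL_MF_MOVE		(1 << 1)
 *
 *	int main(void)
 *	{
 *		unsigned long mask = (1UL << 0) | (1UL << 1);	// nodes 0,1
 *		unsigned long maxnode = 8 * sizeof(mask);
 *		size_t len = 1UL << 20;
 *		void *p;
 *
 *		// interleave all future allocations of this task
 *		syscall(SYS_set_mempolicy, MPOL_INTERLEAVE, &mask, maxnode);
 *
 *		// bind one mapping instead, migrating any existing pages
 *		p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *		syscall(SYS_mbind, p, len, MPOL_BIND, &mask, maxnode,
 *			MPOL_MF_MOVE | MPOL_MF_STRICT);
 *		return 0;
 *	}
 */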
15161da177e4SLinus Torvalds 1517c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask, 1518c93e0f6cSHeiko Carstens compat_ulong_t, maxnode) 15191da177e4SLinus Torvalds { 15201da177e4SLinus Torvalds unsigned long __user *nm = NULL; 15211da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 15221da177e4SLinus Torvalds DECLARE_BITMAP(bm, MAX_NUMNODES); 15231da177e4SLinus Torvalds 15241da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 15251da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 15261da177e4SLinus Torvalds 15271da177e4SLinus Torvalds if (nmask) { 1528cf01fb99SChris Salls if (compat_get_bitmap(bm, nmask, nr_bits)) 15291da177e4SLinus Torvalds return -EFAULT; 1530cf01fb99SChris Salls nm = compat_alloc_user_space(alloc_size); 1531cf01fb99SChris Salls if (copy_to_user(nm, bm, alloc_size)) 1532cf01fb99SChris Salls return -EFAULT; 1533cf01fb99SChris Salls } 15341da177e4SLinus Torvalds 15351da177e4SLinus Torvalds return sys_set_mempolicy(mode, nm, nr_bits+1); 15361da177e4SLinus Torvalds } 15371da177e4SLinus Torvalds 1538c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len, 1539c93e0f6cSHeiko Carstens compat_ulong_t, mode, compat_ulong_t __user *, nmask, 1540c93e0f6cSHeiko Carstens compat_ulong_t, maxnode, compat_ulong_t, flags) 15411da177e4SLinus Torvalds { 15421da177e4SLinus Torvalds unsigned long __user *nm = NULL; 15431da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 1544dfcd3c0dSAndi Kleen nodemask_t bm; 15451da177e4SLinus Torvalds 15461da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 15471da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 15481da177e4SLinus Torvalds 15491da177e4SLinus Torvalds if (nmask) { 1550cf01fb99SChris Salls if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits)) 15511da177e4SLinus Torvalds return -EFAULT; 1552cf01fb99SChris Salls nm = compat_alloc_user_space(alloc_size); 1553cf01fb99SChris Salls if (copy_to_user(nm, nodes_addr(bm), alloc_size)) 1554cf01fb99SChris Salls return -EFAULT; 1555cf01fb99SChris Salls } 15561da177e4SLinus Torvalds 15571da177e4SLinus Torvalds return sys_mbind(start, len, mode, nm, nr_bits+1, flags); 15581da177e4SLinus Torvalds } 15591da177e4SLinus Torvalds 15601da177e4SLinus Torvalds #endif 15611da177e4SLinus Torvalds 156274d2c3a0SOleg Nesterov struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, 156374d2c3a0SOleg Nesterov unsigned long addr) 15641da177e4SLinus Torvalds { 15658d90274bSOleg Nesterov struct mempolicy *pol = NULL; 15661da177e4SLinus Torvalds 15671da177e4SLinus Torvalds if (vma) { 1568480eccf9SLee Schermerhorn if (vma->vm_ops && vma->vm_ops->get_policy) { 15698d90274bSOleg Nesterov pol = vma->vm_ops->get_policy(vma, addr); 157000442ad0SMel Gorman } else if (vma->vm_policy) { 15711da177e4SLinus Torvalds pol = vma->vm_policy; 157200442ad0SMel Gorman 157300442ad0SMel Gorman /* 157400442ad0SMel Gorman * shmem_alloc_page() passes MPOL_F_SHARED policy with 157500442ad0SMel Gorman * a pseudo vma whose vma->vm_ops=NULL. 
Take a reference 157600442ad0SMel Gorman * count on these policies which will be dropped by 157700442ad0SMel Gorman * mpol_cond_put() later 157800442ad0SMel Gorman */ 157900442ad0SMel Gorman if (mpol_needs_cond_ref(pol)) 158000442ad0SMel Gorman mpol_get(pol); 158100442ad0SMel Gorman } 15821da177e4SLinus Torvalds } 1583f15ca78eSOleg Nesterov 158474d2c3a0SOleg Nesterov return pol; 158574d2c3a0SOleg Nesterov } 158674d2c3a0SOleg Nesterov 158774d2c3a0SOleg Nesterov /* 1588dd6eecb9SOleg Nesterov * get_vma_policy(@vma, @addr) 158974d2c3a0SOleg Nesterov * @vma: virtual memory area whose policy is sought 159074d2c3a0SOleg Nesterov * @addr: address in @vma for shared policy lookup 159174d2c3a0SOleg Nesterov * 159274d2c3a0SOleg Nesterov * Returns effective policy for a VMA at specified address. 1593dd6eecb9SOleg Nesterov * Falls back to current->mempolicy or system default policy, as necessary. 159474d2c3a0SOleg Nesterov * Shared policies [those marked as MPOL_F_SHARED] require an extra reference 159574d2c3a0SOleg Nesterov * count--added by the get_policy() vm_op, as appropriate--to protect against 159674d2c3a0SOleg Nesterov * freeing by another task. It is the caller's responsibility to free the 159774d2c3a0SOleg Nesterov * extra reference for shared policies. 159874d2c3a0SOleg Nesterov */ 1599dd6eecb9SOleg Nesterov static struct mempolicy *get_vma_policy(struct vm_area_struct *vma, 1600dd6eecb9SOleg Nesterov unsigned long addr) 160174d2c3a0SOleg Nesterov { 160274d2c3a0SOleg Nesterov struct mempolicy *pol = __get_vma_policy(vma, addr); 160374d2c3a0SOleg Nesterov 16048d90274bSOleg Nesterov if (!pol) 1605dd6eecb9SOleg Nesterov pol = get_task_policy(current); 16068d90274bSOleg Nesterov 16071da177e4SLinus Torvalds return pol; 16081da177e4SLinus Torvalds } 16091da177e4SLinus Torvalds 16106b6482bbSOleg Nesterov bool vma_policy_mof(struct vm_area_struct *vma) 1611fc314724SMel Gorman { 16126b6482bbSOleg Nesterov struct mempolicy *pol; 1613f15ca78eSOleg Nesterov 1614fc314724SMel Gorman if (vma->vm_ops && vma->vm_ops->get_policy) { 1615fc314724SMel Gorman bool ret = false; 1616fc314724SMel Gorman 1617fc314724SMel Gorman pol = vma->vm_ops->get_policy(vma, vma->vm_start); 1618fc314724SMel Gorman if (pol && (pol->flags & MPOL_F_MOF)) 1619fc314724SMel Gorman ret = true; 1620fc314724SMel Gorman mpol_cond_put(pol); 1621fc314724SMel Gorman 1622fc314724SMel Gorman return ret; 16238d90274bSOleg Nesterov } 16248d90274bSOleg Nesterov 1625fc314724SMel Gorman pol = vma->vm_policy; 16268d90274bSOleg Nesterov if (!pol) 16276b6482bbSOleg Nesterov pol = get_task_policy(current); 1628fc314724SMel Gorman 1629fc314724SMel Gorman return pol->flags & MPOL_F_MOF; 1630fc314724SMel Gorman } 1631fc314724SMel Gorman 1632d3eb1570SLai Jiangshan static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone) 1633d3eb1570SLai Jiangshan { 1634d3eb1570SLai Jiangshan enum zone_type dynamic_policy_zone = policy_zone; 1635d3eb1570SLai Jiangshan 1636d3eb1570SLai Jiangshan BUG_ON(dynamic_policy_zone == ZONE_MOVABLE); 1637d3eb1570SLai Jiangshan 1638d3eb1570SLai Jiangshan /* 1639d3eb1570SLai Jiangshan * if policy->v.nodes has movable memory only, 1640d3eb1570SLai Jiangshan * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only. 1641d3eb1570SLai Jiangshan * 1642d3eb1570SLai Jiangshan * policy->v.nodes is intersect with node_states[N_MEMORY]. 1643d3eb1570SLai Jiangshan * so if the following test faile, it implies 1644d3eb1570SLai Jiangshan * policy->v.nodes has movable memory only. 
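 *
 * For instance (an illustrative reading of the test below): when a
 * task is bound to a node whose memory is all ZONE_MOVABLE, a
 * GFP_KERNEL allocation (gfp_zone() == ZONE_NORMAL) is not filtered
 * by the bind nodemask and falls back to the default behaviour,
 * while a GFP_HIGHUSER_MOVABLE allocation still is.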
1645d3eb1570SLai Jiangshan */ 1646d3eb1570SLai Jiangshan if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY])) 1647d3eb1570SLai Jiangshan dynamic_policy_zone = ZONE_MOVABLE; 1648d3eb1570SLai Jiangshan 1649d3eb1570SLai Jiangshan return zone >= dynamic_policy_zone; 1650d3eb1570SLai Jiangshan } 1651d3eb1570SLai Jiangshan 165252cd3b07SLee Schermerhorn /* 165352cd3b07SLee Schermerhorn * Return a nodemask representing a mempolicy for filtering nodes for 165452cd3b07SLee Schermerhorn * page allocation 165552cd3b07SLee Schermerhorn */ 165652cd3b07SLee Schermerhorn static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy) 165719770b32SMel Gorman { 165819770b32SMel Gorman /* Lower zones don't get a nodemask applied for MPOL_BIND */ 165945c4745aSLee Schermerhorn if (unlikely(policy->mode == MPOL_BIND) && 1660d3eb1570SLai Jiangshan apply_policy_zone(policy, gfp_zone(gfp)) && 166119770b32SMel Gorman cpuset_nodemask_valid_mems_allowed(&policy->v.nodes)) 166219770b32SMel Gorman return &policy->v.nodes; 166319770b32SMel Gorman 166419770b32SMel Gorman return NULL; 166519770b32SMel Gorman } 166619770b32SMel Gorman 166704ec6264SVlastimil Babka /* Return the node id preferred by the given mempolicy, or the given id */ 166804ec6264SVlastimil Babka static int policy_node(gfp_t gfp, struct mempolicy *policy, 16692f5f9486SAndi Kleen int nd) 16701da177e4SLinus Torvalds { 16716d840958SMichal Hocko if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL)) 16721da177e4SLinus Torvalds nd = policy->v.preferred_node; 16736d840958SMichal Hocko else { 167419770b32SMel Gorman /* 16756d840958SMichal Hocko * __GFP_THISNODE shouldn't even be used with the bind policy 16766d840958SMichal Hocko * because we might easily break the expectation to stay on the 16776d840958SMichal Hocko * requested node and not break the policy. 167819770b32SMel Gorman */ 16796d840958SMichal Hocko WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE)); 16801da177e4SLinus Torvalds } 16816d840958SMichal Hocko 168204ec6264SVlastimil Babka return nd; 16831da177e4SLinus Torvalds } 16841da177e4SLinus Torvalds 16851da177e4SLinus Torvalds /* Do dynamic interleaving for a process */ 16861da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy) 16871da177e4SLinus Torvalds { 168845816682SVlastimil Babka unsigned next; 16891da177e4SLinus Torvalds struct task_struct *me = current; 16901da177e4SLinus Torvalds 169145816682SVlastimil Babka next = next_node_in(me->il_prev, policy->v.nodes); 1692f5b087b5SDavid Rientjes if (next < MAX_NUMNODES) 169345816682SVlastimil Babka me->il_prev = next; 169445816682SVlastimil Babka return next; 16951da177e4SLinus Torvalds } 16961da177e4SLinus Torvalds 1697dc85da15SChristoph Lameter /* 1698dc85da15SChristoph Lameter * Depending on the memory policy provide a node from which to allocate the 1699dc85da15SChristoph Lameter * next slab entry. 
1700dc85da15SChristoph Lameter */ 17012a389610SDavid Rientjes unsigned int mempolicy_slab_node(void) 1702dc85da15SChristoph Lameter { 1703e7b691b0SAndi Kleen struct mempolicy *policy; 17042a389610SDavid Rientjes int node = numa_mem_id(); 1705e7b691b0SAndi Kleen 1706e7b691b0SAndi Kleen if (in_interrupt()) 17072a389610SDavid Rientjes return node; 1708e7b691b0SAndi Kleen 1709e7b691b0SAndi Kleen policy = current->mempolicy; 1710fc36b8d3SLee Schermerhorn if (!policy || policy->flags & MPOL_F_LOCAL) 17112a389610SDavid Rientjes return node; 1712765c4507SChristoph Lameter 1713bea904d5SLee Schermerhorn switch (policy->mode) { 1714bea904d5SLee Schermerhorn case MPOL_PREFERRED: 1715fc36b8d3SLee Schermerhorn /* 1716fc36b8d3SLee Schermerhorn * handled MPOL_F_LOCAL above 1717fc36b8d3SLee Schermerhorn */ 1718bea904d5SLee Schermerhorn return policy->v.preferred_node; 1719bea904d5SLee Schermerhorn 1720dc85da15SChristoph Lameter case MPOL_INTERLEAVE: 1721dc85da15SChristoph Lameter return interleave_nodes(policy); 1722dc85da15SChristoph Lameter 1723dd1a239fSMel Gorman case MPOL_BIND: { 1724c33d6c06SMel Gorman struct zoneref *z; 1725c33d6c06SMel Gorman 1726dc85da15SChristoph Lameter /* 1727dc85da15SChristoph Lameter * Follow bind policy behavior and start allocation at the 1728dc85da15SChristoph Lameter * first node. 1729dc85da15SChristoph Lameter */ 173019770b32SMel Gorman struct zonelist *zonelist; 173119770b32SMel Gorman enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL); 1732c9634cf0SAneesh Kumar K.V zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK]; 1733c33d6c06SMel Gorman z = first_zones_zonelist(zonelist, highest_zoneidx, 1734c33d6c06SMel Gorman &policy->v.nodes); 1735c33d6c06SMel Gorman return z->zone ? z->zone->node : node; 1736dd1a239fSMel Gorman } 1737dc85da15SChristoph Lameter 1738dc85da15SChristoph Lameter default: 1739bea904d5SLee Schermerhorn BUG(); 1740dc85da15SChristoph Lameter } 1741dc85da15SChristoph Lameter } 1742dc85da15SChristoph Lameter 1743fee83b3aSAndrew Morton /* 1744fee83b3aSAndrew Morton * Do static interleaving for a VMA with known offset @n. Returns the n'th 1745fee83b3aSAndrew Morton * node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the 1746fee83b3aSAndrew Morton * number of present nodes. 
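 *
 * For example, with pol->v.nodes = {1,3,6} and n = 7: nnodes = 3,
 * target = 7 % 3 = 1, so the walk starts at node 1, takes one
 * next_node() step and returns node 3.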
1747fee83b3aSAndrew Morton */ 1748*98c70baaSLaurent Dufour static unsigned offset_il_node(struct mempolicy *pol, unsigned long n) 17491da177e4SLinus Torvalds { 1750dfcd3c0dSAndi Kleen unsigned nnodes = nodes_weight(pol->v.nodes); 1751f5b087b5SDavid Rientjes unsigned target; 1752fee83b3aSAndrew Morton int i; 1753fee83b3aSAndrew Morton int nid; 17541da177e4SLinus Torvalds 1755f5b087b5SDavid Rientjes if (!nnodes) 1756f5b087b5SDavid Rientjes return numa_node_id(); 1757fee83b3aSAndrew Morton target = (unsigned int)n % nnodes; 1758fee83b3aSAndrew Morton nid = first_node(pol->v.nodes); 1759fee83b3aSAndrew Morton for (i = 0; i < target; i++) 1760dfcd3c0dSAndi Kleen nid = next_node(nid, pol->v.nodes); 17611da177e4SLinus Torvalds return nid; 17621da177e4SLinus Torvalds } 17631da177e4SLinus Torvalds 17645da7ca86SChristoph Lameter /* Determine a node number for interleave */ 17655da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol, 17665da7ca86SChristoph Lameter struct vm_area_struct *vma, unsigned long addr, int shift) 17675da7ca86SChristoph Lameter { 17685da7ca86SChristoph Lameter if (vma) { 17695da7ca86SChristoph Lameter unsigned long off; 17705da7ca86SChristoph Lameter 17713b98b087SNishanth Aravamudan /* 17723b98b087SNishanth Aravamudan * for small pages, there is no difference between 17733b98b087SNishanth Aravamudan * shift and PAGE_SHIFT, so the bit-shift is safe. 17743b98b087SNishanth Aravamudan * for huge pages, since vm_pgoff is in units of small 17753b98b087SNishanth Aravamudan * pages, we need to shift off the always 0 bits to get 17763b98b087SNishanth Aravamudan * a useful offset. 17773b98b087SNishanth Aravamudan */ 17783b98b087SNishanth Aravamudan BUG_ON(shift < PAGE_SHIFT); 17793b98b087SNishanth Aravamudan off = vma->vm_pgoff >> (shift - PAGE_SHIFT); 17805da7ca86SChristoph Lameter off += (addr - vma->vm_start) >> shift; 1781*98c70baaSLaurent Dufour return offset_il_node(pol, off); 17825da7ca86SChristoph Lameter } else 17835da7ca86SChristoph Lameter return interleave_nodes(pol); 17845da7ca86SChristoph Lameter } 17855da7ca86SChristoph Lameter 178600ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS 1787480eccf9SLee Schermerhorn /* 178804ec6264SVlastimil Babka * huge_node(@vma, @addr, @gfp_flags, @mpol) 1789b46e14acSFabian Frederick * @vma: virtual memory area whose policy is sought 1790b46e14acSFabian Frederick * @addr: address in @vma for shared policy lookup and interleave policy 1791b46e14acSFabian Frederick * @gfp_flags: for requested zone 1792b46e14acSFabian Frederick * @mpol: pointer to mempolicy pointer for reference counted mempolicy 1793b46e14acSFabian Frederick * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask 1794480eccf9SLee Schermerhorn * 179504ec6264SVlastimil Babka * Returns a nid suitable for a huge page allocation and a pointer 179652cd3b07SLee Schermerhorn * to the struct mempolicy for conditional unref after allocation. 179752cd3b07SLee Schermerhorn * If the effective policy is 'BIND, returns a pointer to the mempolicy's 179852cd3b07SLee Schermerhorn * @nodemask for filtering the zonelist. 
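 *
 * For example (illustrative): a hugetlbfs fault in a VMA holding
 * MPOL_INTERLEAVE over {0,1} derives the nid from the fault's offset
 * into the mapping in huge-page units, while MPOL_BIND over {2,3}
 * keeps the local node as the starting nid but sets *nodemask to
 * {2,3} so the huge page is allocated only from those nodes.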
1799c0ff7453SMiao Xie * 1800d26914d1SMel Gorman * Must be protected by read_mems_allowed_begin() 1801480eccf9SLee Schermerhorn */ 180204ec6264SVlastimil Babka int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags, 180304ec6264SVlastimil Babka struct mempolicy **mpol, nodemask_t **nodemask) 18045da7ca86SChristoph Lameter { 180504ec6264SVlastimil Babka int nid; 18065da7ca86SChristoph Lameter 1807dd6eecb9SOleg Nesterov *mpol = get_vma_policy(vma, addr); 180819770b32SMel Gorman *nodemask = NULL; /* assume !MPOL_BIND */ 18095da7ca86SChristoph Lameter 181052cd3b07SLee Schermerhorn if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) { 181104ec6264SVlastimil Babka nid = interleave_nid(*mpol, vma, addr, 181204ec6264SVlastimil Babka huge_page_shift(hstate_vma(vma))); 181352cd3b07SLee Schermerhorn } else { 181404ec6264SVlastimil Babka nid = policy_node(gfp_flags, *mpol, numa_node_id()); 181552cd3b07SLee Schermerhorn if ((*mpol)->mode == MPOL_BIND) 181652cd3b07SLee Schermerhorn *nodemask = &(*mpol)->v.nodes; 1817480eccf9SLee Schermerhorn } 181804ec6264SVlastimil Babka return nid; 18195da7ca86SChristoph Lameter } 182006808b08SLee Schermerhorn 182106808b08SLee Schermerhorn /* 182206808b08SLee Schermerhorn * init_nodemask_of_mempolicy 182306808b08SLee Schermerhorn * 182406808b08SLee Schermerhorn * If the current task's mempolicy is "default" [NULL], return 'false' 182506808b08SLee Schermerhorn * to indicate default policy. Otherwise, extract the policy nodemask 182606808b08SLee Schermerhorn * for 'bind' or 'interleave' policy into the argument nodemask, or 182706808b08SLee Schermerhorn * initialize the argument nodemask to contain the single node for 182806808b08SLee Schermerhorn * 'preferred' or 'local' policy and return 'true' to indicate presence 182906808b08SLee Schermerhorn * of non-default mempolicy. 183006808b08SLee Schermerhorn * 183106808b08SLee Schermerhorn * We don't bother with reference counting the mempolicy [mpol_get/put] 183206808b08SLee Schermerhorn * because the current task is examining its own mempolicy and a task's 183306808b08SLee Schermerhorn * mempolicy is only ever changed by the task itself. 183406808b08SLee Schermerhorn * 183506808b08SLee Schermerhorn * N.B., it is the caller's responsibility to free a returned nodemask.
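 *
 * For example, a task running with MPOL_INTERLEAVE over nodes {0,2}
 * gets *mask = {0,2} and a 'true' return, whereas a task with no
 * mempolicy (or a NULL @mask) gets 'false' with the mask untouched.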
183606808b08SLee Schermerhorn */ 183706808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask) 183806808b08SLee Schermerhorn { 183906808b08SLee Schermerhorn struct mempolicy *mempolicy; 184006808b08SLee Schermerhorn int nid; 184106808b08SLee Schermerhorn 184206808b08SLee Schermerhorn if (!(mask && current->mempolicy)) 184306808b08SLee Schermerhorn return false; 184406808b08SLee Schermerhorn 1845c0ff7453SMiao Xie task_lock(current); 184606808b08SLee Schermerhorn mempolicy = current->mempolicy; 184706808b08SLee Schermerhorn switch (mempolicy->mode) { 184806808b08SLee Schermerhorn case MPOL_PREFERRED: 184906808b08SLee Schermerhorn if (mempolicy->flags & MPOL_F_LOCAL) 185006808b08SLee Schermerhorn nid = numa_node_id(); 185106808b08SLee Schermerhorn else 185206808b08SLee Schermerhorn nid = mempolicy->v.preferred_node; 185306808b08SLee Schermerhorn init_nodemask_of_node(mask, nid); 185406808b08SLee Schermerhorn break; 185506808b08SLee Schermerhorn 185606808b08SLee Schermerhorn case MPOL_BIND: 185706808b08SLee Schermerhorn /* Fall through */ 185806808b08SLee Schermerhorn case MPOL_INTERLEAVE: 185906808b08SLee Schermerhorn *mask = mempolicy->v.nodes; 186006808b08SLee Schermerhorn break; 186106808b08SLee Schermerhorn 186206808b08SLee Schermerhorn default: 186306808b08SLee Schermerhorn BUG(); 186406808b08SLee Schermerhorn } 1865c0ff7453SMiao Xie task_unlock(current); 186606808b08SLee Schermerhorn 186706808b08SLee Schermerhorn return true; 186806808b08SLee Schermerhorn } 186900ac59adSChen, Kenneth W #endif 18705da7ca86SChristoph Lameter 18716f48d0ebSDavid Rientjes /* 18726f48d0ebSDavid Rientjes * mempolicy_nodemask_intersects 18736f48d0ebSDavid Rientjes * 18746f48d0ebSDavid Rientjes * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default 18756f48d0ebSDavid Rientjes * policy. Otherwise, check for intersection between mask and the policy 18766f48d0ebSDavid Rientjes * nodemask for 'bind' or 'interleave' policy. For 'perferred' or 'local' 18776f48d0ebSDavid Rientjes * policy, always return true since it may allocate elsewhere on fallback. 18786f48d0ebSDavid Rientjes * 18796f48d0ebSDavid Rientjes * Takes task_lock(tsk) to prevent freeing of its mempolicy. 18806f48d0ebSDavid Rientjes */ 18816f48d0ebSDavid Rientjes bool mempolicy_nodemask_intersects(struct task_struct *tsk, 18826f48d0ebSDavid Rientjes const nodemask_t *mask) 18836f48d0ebSDavid Rientjes { 18846f48d0ebSDavid Rientjes struct mempolicy *mempolicy; 18856f48d0ebSDavid Rientjes bool ret = true; 18866f48d0ebSDavid Rientjes 18876f48d0ebSDavid Rientjes if (!mask) 18886f48d0ebSDavid Rientjes return ret; 18896f48d0ebSDavid Rientjes task_lock(tsk); 18906f48d0ebSDavid Rientjes mempolicy = tsk->mempolicy; 18916f48d0ebSDavid Rientjes if (!mempolicy) 18926f48d0ebSDavid Rientjes goto out; 18936f48d0ebSDavid Rientjes 18946f48d0ebSDavid Rientjes switch (mempolicy->mode) { 18956f48d0ebSDavid Rientjes case MPOL_PREFERRED: 18966f48d0ebSDavid Rientjes /* 18976f48d0ebSDavid Rientjes * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to 18986f48d0ebSDavid Rientjes * allocate from, they may fallback to other nodes when oom. 18996f48d0ebSDavid Rientjes * Thus, it's possible for tsk to have allocated memory from 19006f48d0ebSDavid Rientjes * nodes in mask. 
19016f48d0ebSDavid Rientjes */ 19026f48d0ebSDavid Rientjes break; 19036f48d0ebSDavid Rientjes case MPOL_BIND: 19046f48d0ebSDavid Rientjes case MPOL_INTERLEAVE: 19056f48d0ebSDavid Rientjes ret = nodes_intersects(mempolicy->v.nodes, *mask); 19066f48d0ebSDavid Rientjes break; 19076f48d0ebSDavid Rientjes default: 19086f48d0ebSDavid Rientjes BUG(); 19096f48d0ebSDavid Rientjes } 19106f48d0ebSDavid Rientjes out: 19116f48d0ebSDavid Rientjes task_unlock(tsk); 19126f48d0ebSDavid Rientjes return ret; 19136f48d0ebSDavid Rientjes } 19146f48d0ebSDavid Rientjes 19151da177e4SLinus Torvalds /* Allocate a page in interleaved policy. 19161da177e4SLinus Torvalds Own path because it needs to do special accounting. */ 1917662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, 1918662f3a0bSAndi Kleen unsigned nid) 19191da177e4SLinus Torvalds { 19201da177e4SLinus Torvalds struct page *page; 19211da177e4SLinus Torvalds 192204ec6264SVlastimil Babka page = __alloc_pages(gfp, order, nid); 192304ec6264SVlastimil Babka if (page && page_to_nid(page) == nid) 1924ca889e6cSChristoph Lameter inc_zone_page_state(page, NUMA_INTERLEAVE_HIT); 19251da177e4SLinus Torvalds return page; 19261da177e4SLinus Torvalds } 19271da177e4SLinus Torvalds 19281da177e4SLinus Torvalds /** 19290bbbc0b3SAndrea Arcangeli * alloc_pages_vma - Allocate a page for a VMA. 19301da177e4SLinus Torvalds * 19311da177e4SLinus Torvalds * @gfp: 19321da177e4SLinus Torvalds * %GFP_USER user allocation. 19331da177e4SLinus Torvalds * %GFP_KERNEL kernel allocations, 19341da177e4SLinus Torvalds * %GFP_HIGHMEM highmem/user allocations, 19351da177e4SLinus Torvalds * %GFP_FS allocation should not call back into a file system. 19361da177e4SLinus Torvalds * %GFP_ATOMIC don't sleep. 19371da177e4SLinus Torvalds * 19380bbbc0b3SAndrea Arcangeli * @order:Order of the GFP allocation. 19391da177e4SLinus Torvalds * @vma: Pointer to VMA or NULL if not available. 19401da177e4SLinus Torvalds * @addr: Virtual Address of the allocation. Must be inside the VMA. 1941be97a41bSVlastimil Babka * @node: Which node to prefer for allocation (modulo policy). 1942be97a41bSVlastimil Babka * @hugepage: for hugepages try only the preferred node if possible 19431da177e4SLinus Torvalds * 19441da177e4SLinus Torvalds * This function allocates a page from the kernel page pool and applies 19451da177e4SLinus Torvalds * a NUMA policy associated with the VMA or the current process. 19461da177e4SLinus Torvalds * When VMA is not NULL caller must hold down_read on the mmap_sem of the 19471da177e4SLinus Torvalds * mm_struct of the VMA to prevent it from going away. Should be used for 1948be97a41bSVlastimil Babka * all allocations for pages that will be mapped into user space. Returns 1949be97a41bSVlastimil Babka * NULL when no page can be allocated. 
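 *
 * A typical call from the anonymous-fault path looks roughly like
 * the following (illustrative only; real callers pick their own gfp
 * flags):
 *
 *	page = alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, addr,
 *			       numa_node_id(), false);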
19501da177e4SLinus Torvalds */ 19511da177e4SLinus Torvalds struct page * 19520bbbc0b3SAndrea Arcangeli alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, 1953be97a41bSVlastimil Babka unsigned long addr, int node, bool hugepage) 19541da177e4SLinus Torvalds { 1955cc9a6c87SMel Gorman struct mempolicy *pol; 1956c0ff7453SMiao Xie struct page *page; 195704ec6264SVlastimil Babka int preferred_nid; 1958be97a41bSVlastimil Babka nodemask_t *nmask; 19591da177e4SLinus Torvalds 1960dd6eecb9SOleg Nesterov pol = get_vma_policy(vma, addr); 1961cc9a6c87SMel Gorman 1962be97a41bSVlastimil Babka if (pol->mode == MPOL_INTERLEAVE) { 19631da177e4SLinus Torvalds unsigned nid; 19645da7ca86SChristoph Lameter 19658eac563cSAndi Kleen nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order); 196652cd3b07SLee Schermerhorn mpol_cond_put(pol); 19670bbbc0b3SAndrea Arcangeli page = alloc_page_interleave(gfp, order, nid); 1968be97a41bSVlastimil Babka goto out; 19691da177e4SLinus Torvalds } 19701da177e4SLinus Torvalds 19710867a57cSVlastimil Babka if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) { 19720867a57cSVlastimil Babka int hpage_node = node; 19730867a57cSVlastimil Babka 19740867a57cSVlastimil Babka /* 19750867a57cSVlastimil Babka * For hugepage allocation and non-interleave policy which 19760867a57cSVlastimil Babka * allows the current node (or other explicitly preferred 19770867a57cSVlastimil Babka * node) we only try to allocate from the current/preferred 19780867a57cSVlastimil Babka * node and don't fall back to other nodes, as the cost of 19790867a57cSVlastimil Babka * remote accesses would likely offset THP benefits. 19800867a57cSVlastimil Babka * 19810867a57cSVlastimil Babka * If the policy is interleave, or does not allow the current 19820867a57cSVlastimil Babka * node in its nodemask, we allocate the standard way. 19830867a57cSVlastimil Babka */ 19840867a57cSVlastimil Babka if (pol->mode == MPOL_PREFERRED && 19850867a57cSVlastimil Babka !(pol->flags & MPOL_F_LOCAL)) 19860867a57cSVlastimil Babka hpage_node = pol->v.preferred_node; 19870867a57cSVlastimil Babka 19880867a57cSVlastimil Babka nmask = policy_nodemask(gfp, pol); 19890867a57cSVlastimil Babka if (!nmask || node_isset(hpage_node, *nmask)) { 19900867a57cSVlastimil Babka mpol_cond_put(pol); 199196db800fSVlastimil Babka page = __alloc_pages_node(hpage_node, 19920867a57cSVlastimil Babka gfp | __GFP_THISNODE, order); 19930867a57cSVlastimil Babka goto out; 19940867a57cSVlastimil Babka } 19950867a57cSVlastimil Babka } 19960867a57cSVlastimil Babka 1997077fcf11SAneesh Kumar K.V nmask = policy_nodemask(gfp, pol); 199804ec6264SVlastimil Babka preferred_nid = policy_node(gfp, pol, node); 199904ec6264SVlastimil Babka page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask); 2000d51e9894SVlastimil Babka mpol_cond_put(pol); 2001be97a41bSVlastimil Babka out: 2002077fcf11SAneesh Kumar K.V return page; 2003077fcf11SAneesh Kumar K.V } 2004077fcf11SAneesh Kumar K.V 20051da177e4SLinus Torvalds /** 20061da177e4SLinus Torvalds * alloc_pages_current - Allocate pages. 20071da177e4SLinus Torvalds * 20081da177e4SLinus Torvalds * @gfp: 20091da177e4SLinus Torvalds * %GFP_USER user allocation, 20101da177e4SLinus Torvalds * %GFP_KERNEL kernel allocation, 20111da177e4SLinus Torvalds * %GFP_HIGHMEM highmem allocation, 20121da177e4SLinus Torvalds * %GFP_FS don't call back into a file system. 20131da177e4SLinus Torvalds * %GFP_ATOMIC don't sleep. 20141da177e4SLinus Torvalds * @order: Power of two of allocation size in pages. 0 is a single page. 
20151da177e4SLinus Torvalds * 20161da177e4SLinus Torvalds * Allocate a page from the kernel page pool. When not in 20171da177e4SLinus Torvalds * interrupt context and apply the current process NUMA policy. 20181da177e4SLinus Torvalds * Returns NULL when no page can be allocated. 20191da177e4SLinus Torvalds */ 2020dd0fc66fSAl Viro struct page *alloc_pages_current(gfp_t gfp, unsigned order) 20211da177e4SLinus Torvalds { 20228d90274bSOleg Nesterov struct mempolicy *pol = &default_policy; 2023c0ff7453SMiao Xie struct page *page; 20241da177e4SLinus Torvalds 20258d90274bSOleg Nesterov if (!in_interrupt() && !(gfp & __GFP_THISNODE)) 20268d90274bSOleg Nesterov pol = get_task_policy(current); 202752cd3b07SLee Schermerhorn 202852cd3b07SLee Schermerhorn /* 202952cd3b07SLee Schermerhorn * No reference counting needed for current->mempolicy 203052cd3b07SLee Schermerhorn * nor system default_policy 203152cd3b07SLee Schermerhorn */ 203245c4745aSLee Schermerhorn if (pol->mode == MPOL_INTERLEAVE) 2033c0ff7453SMiao Xie page = alloc_page_interleave(gfp, order, interleave_nodes(pol)); 2034c0ff7453SMiao Xie else 2035c0ff7453SMiao Xie page = __alloc_pages_nodemask(gfp, order, 203604ec6264SVlastimil Babka policy_node(gfp, pol, numa_node_id()), 20375c4b4be3SAndi Kleen policy_nodemask(gfp, pol)); 2038cc9a6c87SMel Gorman 2039c0ff7453SMiao Xie return page; 20401da177e4SLinus Torvalds } 20411da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_pages_current); 20421da177e4SLinus Torvalds 2043ef0855d3SOleg Nesterov int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst) 2044ef0855d3SOleg Nesterov { 2045ef0855d3SOleg Nesterov struct mempolicy *pol = mpol_dup(vma_policy(src)); 2046ef0855d3SOleg Nesterov 2047ef0855d3SOleg Nesterov if (IS_ERR(pol)) 2048ef0855d3SOleg Nesterov return PTR_ERR(pol); 2049ef0855d3SOleg Nesterov dst->vm_policy = pol; 2050ef0855d3SOleg Nesterov return 0; 2051ef0855d3SOleg Nesterov } 2052ef0855d3SOleg Nesterov 20534225399aSPaul Jackson /* 2054846a16bfSLee Schermerhorn * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it 20554225399aSPaul Jackson * rebinds the mempolicy its copying by calling mpol_rebind_policy() 20564225399aSPaul Jackson * with the mems_allowed returned by cpuset_mems_allowed(). This 20574225399aSPaul Jackson * keeps mempolicies cpuset relative after its cpuset moves. See 20584225399aSPaul Jackson * further kernel/cpuset.c update_nodemask(). 2059708c1bbcSMiao Xie * 2060708c1bbcSMiao Xie * current's mempolicy may be rebinded by the other task(the task that changes 2061708c1bbcSMiao Xie * cpuset's mems), so we needn't do rebind work for current task. 
20624225399aSPaul Jackson */ 20634225399aSPaul Jackson 2064846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */ 2065846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old) 20661da177e4SLinus Torvalds { 20671da177e4SLinus Torvalds struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL); 20681da177e4SLinus Torvalds 20691da177e4SLinus Torvalds if (!new) 20701da177e4SLinus Torvalds return ERR_PTR(-ENOMEM); 2071708c1bbcSMiao Xie 2072708c1bbcSMiao Xie /* task's mempolicy is protected by alloc_lock */ 2073708c1bbcSMiao Xie if (old == current->mempolicy) { 2074708c1bbcSMiao Xie task_lock(current); 2075708c1bbcSMiao Xie *new = *old; 2076708c1bbcSMiao Xie task_unlock(current); 2077708c1bbcSMiao Xie } else 2078708c1bbcSMiao Xie *new = *old; 2079708c1bbcSMiao Xie 20804225399aSPaul Jackson if (current_cpuset_is_being_rebound()) { 20814225399aSPaul Jackson nodemask_t mems = cpuset_mems_allowed(current); 2082213980c0SVlastimil Babka mpol_rebind_policy(new, &mems); 20834225399aSPaul Jackson } 20841da177e4SLinus Torvalds atomic_set(&new->refcnt, 1); 20851da177e4SLinus Torvalds return new; 20861da177e4SLinus Torvalds } 20871da177e4SLinus Torvalds 20881da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */ 2089fcfb4dccSKOSAKI Motohiro bool __mpol_equal(struct mempolicy *a, struct mempolicy *b) 20901da177e4SLinus Torvalds { 20911da177e4SLinus Torvalds if (!a || !b) 2092fcfb4dccSKOSAKI Motohiro return false; 209345c4745aSLee Schermerhorn if (a->mode != b->mode) 2094fcfb4dccSKOSAKI Motohiro return false; 209519800502SBob Liu if (a->flags != b->flags) 2096fcfb4dccSKOSAKI Motohiro return false; 209719800502SBob Liu if (mpol_store_user_nodemask(a)) 209819800502SBob Liu if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask)) 2099fcfb4dccSKOSAKI Motohiro return false; 210019800502SBob Liu 210145c4745aSLee Schermerhorn switch (a->mode) { 210219770b32SMel Gorman case MPOL_BIND: 210319770b32SMel Gorman /* Fall through */ 21041da177e4SLinus Torvalds case MPOL_INTERLEAVE: 2105fcfb4dccSKOSAKI Motohiro return !!nodes_equal(a->v.nodes, b->v.nodes); 21061da177e4SLinus Torvalds case MPOL_PREFERRED: 210775719661SNamhyung Kim return a->v.preferred_node == b->v.preferred_node; 21081da177e4SLinus Torvalds default: 21091da177e4SLinus Torvalds BUG(); 2110fcfb4dccSKOSAKI Motohiro return false; 21111da177e4SLinus Torvalds } 21121da177e4SLinus Torvalds } 21131da177e4SLinus Torvalds 21141da177e4SLinus Torvalds /* 21151da177e4SLinus Torvalds * Shared memory backing store policy support. 21161da177e4SLinus Torvalds * 21171da177e4SLinus Torvalds * Remember policies even when nobody has shared memory mapped. 21181da177e4SLinus Torvalds * The policies are kept in Red-Black tree linked from the inode. 21194a8c7bb5SNathan Zimmer * They are protected by the sp->lock rwlock, which should be held 21201da177e4SLinus Torvalds * for any accesses to the tree. 21211da177e4SLinus Torvalds */ 21221da177e4SLinus Torvalds 21234a8c7bb5SNathan Zimmer /* 21244a8c7bb5SNathan Zimmer * lookup first element intersecting start-end. 
Caller holds sp->lock for 21254a8c7bb5SNathan Zimmer * reading or for writing 21264a8c7bb5SNathan Zimmer */ 21271da177e4SLinus Torvalds static struct sp_node * 21281da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end) 21291da177e4SLinus Torvalds { 21301da177e4SLinus Torvalds struct rb_node *n = sp->root.rb_node; 21311da177e4SLinus Torvalds 21321da177e4SLinus Torvalds while (n) { 21331da177e4SLinus Torvalds struct sp_node *p = rb_entry(n, struct sp_node, nd); 21341da177e4SLinus Torvalds 21351da177e4SLinus Torvalds if (start >= p->end) 21361da177e4SLinus Torvalds n = n->rb_right; 21371da177e4SLinus Torvalds else if (end <= p->start) 21381da177e4SLinus Torvalds n = n->rb_left; 21391da177e4SLinus Torvalds else 21401da177e4SLinus Torvalds break; 21411da177e4SLinus Torvalds } 21421da177e4SLinus Torvalds if (!n) 21431da177e4SLinus Torvalds return NULL; 21441da177e4SLinus Torvalds for (;;) { 21451da177e4SLinus Torvalds struct sp_node *w = NULL; 21461da177e4SLinus Torvalds struct rb_node *prev = rb_prev(n); 21471da177e4SLinus Torvalds if (!prev) 21481da177e4SLinus Torvalds break; 21491da177e4SLinus Torvalds w = rb_entry(prev, struct sp_node, nd); 21501da177e4SLinus Torvalds if (w->end <= start) 21511da177e4SLinus Torvalds break; 21521da177e4SLinus Torvalds n = prev; 21531da177e4SLinus Torvalds } 21541da177e4SLinus Torvalds return rb_entry(n, struct sp_node, nd); 21551da177e4SLinus Torvalds } 21561da177e4SLinus Torvalds 21574a8c7bb5SNathan Zimmer /* 21584a8c7bb5SNathan Zimmer * Insert a new shared policy into the list. Caller holds sp->lock for 21594a8c7bb5SNathan Zimmer * writing. 21604a8c7bb5SNathan Zimmer */ 21611da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new) 21621da177e4SLinus Torvalds { 21631da177e4SLinus Torvalds struct rb_node **p = &sp->root.rb_node; 21641da177e4SLinus Torvalds struct rb_node *parent = NULL; 21651da177e4SLinus Torvalds struct sp_node *nd; 21661da177e4SLinus Torvalds 21671da177e4SLinus Torvalds while (*p) { 21681da177e4SLinus Torvalds parent = *p; 21691da177e4SLinus Torvalds nd = rb_entry(parent, struct sp_node, nd); 21701da177e4SLinus Torvalds if (new->start < nd->start) 21711da177e4SLinus Torvalds p = &(*p)->rb_left; 21721da177e4SLinus Torvalds else if (new->end > nd->end) 21731da177e4SLinus Torvalds p = &(*p)->rb_right; 21741da177e4SLinus Torvalds else 21751da177e4SLinus Torvalds BUG(); 21761da177e4SLinus Torvalds } 21771da177e4SLinus Torvalds rb_link_node(&new->nd, parent, p); 21781da177e4SLinus Torvalds rb_insert_color(&new->nd, &sp->root); 2179140d5a49SPaul Mundt pr_debug("inserting %lx-%lx: %d\n", new->start, new->end, 218045c4745aSLee Schermerhorn new->policy ? 
new->policy->mode : 0); 21811da177e4SLinus Torvalds } 21821da177e4SLinus Torvalds 21831da177e4SLinus Torvalds /* Find shared policy intersecting idx */ 21841da177e4SLinus Torvalds struct mempolicy * 21851da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx) 21861da177e4SLinus Torvalds { 21871da177e4SLinus Torvalds struct mempolicy *pol = NULL; 21881da177e4SLinus Torvalds struct sp_node *sn; 21891da177e4SLinus Torvalds 21901da177e4SLinus Torvalds if (!sp->root.rb_node) 21911da177e4SLinus Torvalds return NULL; 21924a8c7bb5SNathan Zimmer read_lock(&sp->lock); 21931da177e4SLinus Torvalds sn = sp_lookup(sp, idx, idx+1); 21941da177e4SLinus Torvalds if (sn) { 21951da177e4SLinus Torvalds mpol_get(sn->policy); 21961da177e4SLinus Torvalds pol = sn->policy; 21971da177e4SLinus Torvalds } 21984a8c7bb5SNathan Zimmer read_unlock(&sp->lock); 21991da177e4SLinus Torvalds return pol; 22001da177e4SLinus Torvalds } 22011da177e4SLinus Torvalds 220263f74ca2SKOSAKI Motohiro static void sp_free(struct sp_node *n) 220363f74ca2SKOSAKI Motohiro { 220463f74ca2SKOSAKI Motohiro mpol_put(n->policy); 220563f74ca2SKOSAKI Motohiro kmem_cache_free(sn_cache, n); 220663f74ca2SKOSAKI Motohiro } 220763f74ca2SKOSAKI Motohiro 2208771fb4d8SLee Schermerhorn /** 2209771fb4d8SLee Schermerhorn * mpol_misplaced - check whether current page node is valid in policy 2210771fb4d8SLee Schermerhorn * 2211b46e14acSFabian Frederick * @page: page to be checked 2212b46e14acSFabian Frederick * @vma: vm area where page mapped 2213b46e14acSFabian Frederick * @addr: virtual address where page mapped 2214771fb4d8SLee Schermerhorn * 2215771fb4d8SLee Schermerhorn * Lookup current policy node id for vma,addr and "compare to" page's 2216771fb4d8SLee Schermerhorn * node id. 2217771fb4d8SLee Schermerhorn * 2218771fb4d8SLee Schermerhorn * Returns: 2219771fb4d8SLee Schermerhorn * -1 - not misplaced, page is in the right node 2220771fb4d8SLee Schermerhorn * node - node id where the page should be 2221771fb4d8SLee Schermerhorn * 2222771fb4d8SLee Schermerhorn * Policy determination "mimics" alloc_page_vma(). 2223771fb4d8SLee Schermerhorn * Called from fault path where we know the vma and faulting address. 
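 *
 * For instance (illustrative, and assuming the policy has MPOL_F_MOF
 * set, since the function returns -1 otherwise): under MPOL_BIND over
 * {1,2}, a page resident on node 0 is reported misplaced and the
 * nearest allowed node is returned, while the same page already on
 * node 2 yields -1.  With MPOL_F_MORON (NUMA balancing) the suggested
 * target is instead the node of the faulting CPU, subject to
 * should_numa_migrate_memory().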
2224771fb4d8SLee Schermerhorn */ 2225771fb4d8SLee Schermerhorn int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr) 2226771fb4d8SLee Schermerhorn { 2227771fb4d8SLee Schermerhorn struct mempolicy *pol; 2228c33d6c06SMel Gorman struct zoneref *z; 2229771fb4d8SLee Schermerhorn int curnid = page_to_nid(page); 2230771fb4d8SLee Schermerhorn unsigned long pgoff; 223190572890SPeter Zijlstra int thiscpu = raw_smp_processor_id(); 223290572890SPeter Zijlstra int thisnid = cpu_to_node(thiscpu); 2233771fb4d8SLee Schermerhorn int polnid = -1; 2234771fb4d8SLee Schermerhorn int ret = -1; 2235771fb4d8SLee Schermerhorn 2236771fb4d8SLee Schermerhorn BUG_ON(!vma); 2237771fb4d8SLee Schermerhorn 2238dd6eecb9SOleg Nesterov pol = get_vma_policy(vma, addr); 2239771fb4d8SLee Schermerhorn if (!(pol->flags & MPOL_F_MOF)) 2240771fb4d8SLee Schermerhorn goto out; 2241771fb4d8SLee Schermerhorn 2242771fb4d8SLee Schermerhorn switch (pol->mode) { 2243771fb4d8SLee Schermerhorn case MPOL_INTERLEAVE: 2244771fb4d8SLee Schermerhorn BUG_ON(addr >= vma->vm_end); 2245771fb4d8SLee Schermerhorn BUG_ON(addr < vma->vm_start); 2246771fb4d8SLee Schermerhorn 2247771fb4d8SLee Schermerhorn pgoff = vma->vm_pgoff; 2248771fb4d8SLee Schermerhorn pgoff += (addr - vma->vm_start) >> PAGE_SHIFT; 2249*98c70baaSLaurent Dufour polnid = offset_il_node(pol, pgoff); 2250771fb4d8SLee Schermerhorn break; 2251771fb4d8SLee Schermerhorn 2252771fb4d8SLee Schermerhorn case MPOL_PREFERRED: 2253771fb4d8SLee Schermerhorn if (pol->flags & MPOL_F_LOCAL) 2254771fb4d8SLee Schermerhorn polnid = numa_node_id(); 2255771fb4d8SLee Schermerhorn else 2256771fb4d8SLee Schermerhorn polnid = pol->v.preferred_node; 2257771fb4d8SLee Schermerhorn break; 2258771fb4d8SLee Schermerhorn 2259771fb4d8SLee Schermerhorn case MPOL_BIND: 2260c33d6c06SMel Gorman 2261771fb4d8SLee Schermerhorn /* 2262771fb4d8SLee Schermerhorn * allows binding to multiple nodes. 2263771fb4d8SLee Schermerhorn * use current page if in policy nodemask, 2264771fb4d8SLee Schermerhorn * else select nearest allowed node, if any. 2265771fb4d8SLee Schermerhorn * If no allowed nodes, use current [!misplaced]. 
2266771fb4d8SLee Schermerhorn */
2267771fb4d8SLee Schermerhorn if (node_isset(curnid, pol->v.nodes))
2268771fb4d8SLee Schermerhorn goto out;
2269c33d6c06SMel Gorman z = first_zones_zonelist(
2270771fb4d8SLee Schermerhorn node_zonelist(numa_node_id(), GFP_HIGHUSER),
2271771fb4d8SLee Schermerhorn gfp_zone(GFP_HIGHUSER),
2272c33d6c06SMel Gorman &pol->v.nodes);
2273c33d6c06SMel Gorman polnid = z->zone->node;
2274771fb4d8SLee Schermerhorn break;
2275771fb4d8SLee Schermerhorn 
2276771fb4d8SLee Schermerhorn default:
2277771fb4d8SLee Schermerhorn BUG();
2278771fb4d8SLee Schermerhorn }
22795606e387SMel Gorman 
22805606e387SMel Gorman /* Migrate the page towards the node whose CPU is referencing it */
2281e42c8ff2SMel Gorman if (pol->flags & MPOL_F_MORON) {
228290572890SPeter Zijlstra polnid = thisnid;
22835606e387SMel Gorman 
228410f39042SRik van Riel if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2285de1c9ce6SRik van Riel goto out;
2286de1c9ce6SRik van Riel }
2287e42c8ff2SMel Gorman 
2288771fb4d8SLee Schermerhorn if (curnid != polnid)
2289771fb4d8SLee Schermerhorn ret = polnid;
2290771fb4d8SLee Schermerhorn out:
2291771fb4d8SLee Schermerhorn mpol_cond_put(pol);
2292771fb4d8SLee Schermerhorn 
2293771fb4d8SLee Schermerhorn return ret;
2294771fb4d8SLee Schermerhorn }
2295771fb4d8SLee Schermerhorn 
2296c11600e4SDavid Rientjes /*
2297c11600e4SDavid Rientjes * Drop the (possibly final) reference to task->mempolicy. It needs to be
2298c11600e4SDavid Rientjes * dropped after task->mempolicy is set to NULL so that any allocation done as
2299c11600e4SDavid Rientjes * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2300c11600e4SDavid Rientjes * policy.
2301c11600e4SDavid Rientjes */
2302c11600e4SDavid Rientjes void mpol_put_task_policy(struct task_struct *task)
2303c11600e4SDavid Rientjes {
2304c11600e4SDavid Rientjes struct mempolicy *pol;
2305c11600e4SDavid Rientjes 
2306c11600e4SDavid Rientjes task_lock(task);
2307c11600e4SDavid Rientjes pol = task->mempolicy;
2308c11600e4SDavid Rientjes task->mempolicy = NULL;
2309c11600e4SDavid Rientjes task_unlock(task);
2310c11600e4SDavid Rientjes mpol_put(pol);
2311c11600e4SDavid Rientjes }
2312c11600e4SDavid Rientjes 
23131da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n)
23141da177e4SLinus Torvalds {
2315140d5a49SPaul Mundt pr_debug("deleting %lx-%lx\n", n->start, n->end);
23161da177e4SLinus Torvalds rb_erase(&n->nd, &sp->root);
231763f74ca2SKOSAKI Motohiro sp_free(n);
23181da177e4SLinus Torvalds }
23191da177e4SLinus Torvalds 
232042288fe3SMel Gorman static void sp_node_init(struct sp_node *node, unsigned long start,
232142288fe3SMel Gorman unsigned long end, struct mempolicy *pol)
232242288fe3SMel Gorman {
232342288fe3SMel Gorman node->start = start;
232442288fe3SMel Gorman node->end = end;
232542288fe3SMel Gorman node->policy = pol;
232642288fe3SMel Gorman }
232742288fe3SMel Gorman 
2328dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2329dbcb0f19SAdrian Bunk struct mempolicy *pol)
23301da177e4SLinus Torvalds {
2331869833f2SKOSAKI Motohiro struct sp_node *n;
2332869833f2SKOSAKI Motohiro struct mempolicy *newpol;
23331da177e4SLinus Torvalds 
2334869833f2SKOSAKI Motohiro n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
23351da177e4SLinus Torvalds if (!n)
23361da177e4SLinus Torvalds return NULL;
2337869833f2SKOSAKI Motohiro 
2338869833f2SKOSAKI Motohiro newpol = mpol_dup(pol);
2339869833f2SKOSAKI Motohiro if (IS_ERR(newpol)) {
2340869833f2SKOSAKI Motohiro 
kmem_cache_free(sn_cache, n); 2341869833f2SKOSAKI Motohiro return NULL; 2342869833f2SKOSAKI Motohiro } 2343869833f2SKOSAKI Motohiro newpol->flags |= MPOL_F_SHARED; 234442288fe3SMel Gorman sp_node_init(n, start, end, newpol); 2345869833f2SKOSAKI Motohiro 23461da177e4SLinus Torvalds return n; 23471da177e4SLinus Torvalds } 23481da177e4SLinus Torvalds 23491da177e4SLinus Torvalds /* Replace a policy range. */ 23501da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start, 23511da177e4SLinus Torvalds unsigned long end, struct sp_node *new) 23521da177e4SLinus Torvalds { 2353b22d127aSMel Gorman struct sp_node *n; 235442288fe3SMel Gorman struct sp_node *n_new = NULL; 235542288fe3SMel Gorman struct mempolicy *mpol_new = NULL; 2356b22d127aSMel Gorman int ret = 0; 23571da177e4SLinus Torvalds 235842288fe3SMel Gorman restart: 23594a8c7bb5SNathan Zimmer write_lock(&sp->lock); 23601da177e4SLinus Torvalds n = sp_lookup(sp, start, end); 23611da177e4SLinus Torvalds /* Take care of old policies in the same range. */ 23621da177e4SLinus Torvalds while (n && n->start < end) { 23631da177e4SLinus Torvalds struct rb_node *next = rb_next(&n->nd); 23641da177e4SLinus Torvalds if (n->start >= start) { 23651da177e4SLinus Torvalds if (n->end <= end) 23661da177e4SLinus Torvalds sp_delete(sp, n); 23671da177e4SLinus Torvalds else 23681da177e4SLinus Torvalds n->start = end; 23691da177e4SLinus Torvalds } else { 23701da177e4SLinus Torvalds /* Old policy spanning whole new range. */ 23711da177e4SLinus Torvalds if (n->end > end) { 237242288fe3SMel Gorman if (!n_new) 237342288fe3SMel Gorman goto alloc_new; 237442288fe3SMel Gorman 237542288fe3SMel Gorman *mpol_new = *n->policy; 237642288fe3SMel Gorman atomic_set(&mpol_new->refcnt, 1); 23777880639cSKOSAKI Motohiro sp_node_init(n_new, end, n->end, mpol_new); 23781da177e4SLinus Torvalds n->end = start; 23795ca39575SHillf Danton sp_insert(sp, n_new); 238042288fe3SMel Gorman n_new = NULL; 238142288fe3SMel Gorman mpol_new = NULL; 23821da177e4SLinus Torvalds break; 23831da177e4SLinus Torvalds } else 23841da177e4SLinus Torvalds n->end = start; 23851da177e4SLinus Torvalds } 23861da177e4SLinus Torvalds if (!next) 23871da177e4SLinus Torvalds break; 23881da177e4SLinus Torvalds n = rb_entry(next, struct sp_node, nd); 23891da177e4SLinus Torvalds } 23901da177e4SLinus Torvalds if (new) 23911da177e4SLinus Torvalds sp_insert(sp, new); 23924a8c7bb5SNathan Zimmer write_unlock(&sp->lock); 239342288fe3SMel Gorman ret = 0; 239442288fe3SMel Gorman 239542288fe3SMel Gorman err_out: 239642288fe3SMel Gorman if (mpol_new) 239742288fe3SMel Gorman mpol_put(mpol_new); 239842288fe3SMel Gorman if (n_new) 239942288fe3SMel Gorman kmem_cache_free(sn_cache, n_new); 240042288fe3SMel Gorman 2401b22d127aSMel Gorman return ret; 240242288fe3SMel Gorman 240342288fe3SMel Gorman alloc_new: 24044a8c7bb5SNathan Zimmer write_unlock(&sp->lock); 240542288fe3SMel Gorman ret = -ENOMEM; 240642288fe3SMel Gorman n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL); 240742288fe3SMel Gorman if (!n_new) 240842288fe3SMel Gorman goto err_out; 240942288fe3SMel Gorman mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL); 241042288fe3SMel Gorman if (!mpol_new) 241142288fe3SMel Gorman goto err_out; 241242288fe3SMel Gorman goto restart; 24131da177e4SLinus Torvalds } 24141da177e4SLinus Torvalds 241571fe804bSLee Schermerhorn /** 241671fe804bSLee Schermerhorn * mpol_shared_policy_init - initialize shared policy for inode 241771fe804bSLee Schermerhorn * @sp: pointer to inode shared policy 241871fe804bSLee 
Schermerhorn * @mpol: struct mempolicy to install 241971fe804bSLee Schermerhorn * 242071fe804bSLee Schermerhorn * Install non-NULL @mpol in inode's shared policy rb-tree. 242171fe804bSLee Schermerhorn * On entry, the current task has a reference on a non-NULL @mpol. 242271fe804bSLee Schermerhorn * This must be released on exit. 24234bfc4495SKAMEZAWA Hiroyuki * This is called at get_inode() calls and we can use GFP_KERNEL. 242471fe804bSLee Schermerhorn */ 242571fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) 24267339ff83SRobin Holt { 242758568d2aSMiao Xie int ret; 242858568d2aSMiao Xie 242971fe804bSLee Schermerhorn sp->root = RB_ROOT; /* empty tree == default mempolicy */ 24304a8c7bb5SNathan Zimmer rwlock_init(&sp->lock); 24317339ff83SRobin Holt 243271fe804bSLee Schermerhorn if (mpol) { 24337339ff83SRobin Holt struct vm_area_struct pvma; 243471fe804bSLee Schermerhorn struct mempolicy *new; 24354bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 24367339ff83SRobin Holt 24374bfc4495SKAMEZAWA Hiroyuki if (!scratch) 24385c0c1654SLee Schermerhorn goto put_mpol; 243971fe804bSLee Schermerhorn /* contextualize the tmpfs mount point mempolicy */ 244071fe804bSLee Schermerhorn new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask); 244115d77835SLee Schermerhorn if (IS_ERR(new)) 24420cae3457SDan Carpenter goto free_scratch; /* no valid nodemask intersection */ 244358568d2aSMiao Xie 244458568d2aSMiao Xie task_lock(current); 24454bfc4495SKAMEZAWA Hiroyuki ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch); 244658568d2aSMiao Xie task_unlock(current); 244715d77835SLee Schermerhorn if (ret) 24485c0c1654SLee Schermerhorn goto put_new; 244971fe804bSLee Schermerhorn 245071fe804bSLee Schermerhorn /* Create pseudo-vma that contains just the policy */ 24517339ff83SRobin Holt memset(&pvma, 0, sizeof(struct vm_area_struct)); 245271fe804bSLee Schermerhorn pvma.vm_end = TASK_SIZE; /* policy covers entire file */ 245371fe804bSLee Schermerhorn mpol_set_shared_policy(sp, &pvma, new); /* adds ref */ 245415d77835SLee Schermerhorn 24555c0c1654SLee Schermerhorn put_new: 245671fe804bSLee Schermerhorn mpol_put(new); /* drop initial ref */ 24570cae3457SDan Carpenter free_scratch: 24584bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 24595c0c1654SLee Schermerhorn put_mpol: 24605c0c1654SLee Schermerhorn mpol_put(mpol); /* drop our incoming ref on sb mpol */ 24617339ff83SRobin Holt } 24627339ff83SRobin Holt } 24637339ff83SRobin Holt 24641da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info, 24651da177e4SLinus Torvalds struct vm_area_struct *vma, struct mempolicy *npol) 24661da177e4SLinus Torvalds { 24671da177e4SLinus Torvalds int err; 24681da177e4SLinus Torvalds struct sp_node *new = NULL; 24691da177e4SLinus Torvalds unsigned long sz = vma_pages(vma); 24701da177e4SLinus Torvalds 2471028fec41SDavid Rientjes pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n", 24721da177e4SLinus Torvalds vma->vm_pgoff, 247345c4745aSLee Schermerhorn sz, npol ? npol->mode : -1, 2474028fec41SDavid Rientjes npol ? npol->flags : -1, 247500ef2d2fSDavid Rientjes npol ? 
nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE); 24761da177e4SLinus Torvalds 24771da177e4SLinus Torvalds if (npol) { 24781da177e4SLinus Torvalds new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol); 24791da177e4SLinus Torvalds if (!new) 24801da177e4SLinus Torvalds return -ENOMEM; 24811da177e4SLinus Torvalds } 24821da177e4SLinus Torvalds err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new); 24831da177e4SLinus Torvalds if (err && new) 248463f74ca2SKOSAKI Motohiro sp_free(new); 24851da177e4SLinus Torvalds return err; 24861da177e4SLinus Torvalds } 24871da177e4SLinus Torvalds 24881da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */ 24891da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p) 24901da177e4SLinus Torvalds { 24911da177e4SLinus Torvalds struct sp_node *n; 24921da177e4SLinus Torvalds struct rb_node *next; 24931da177e4SLinus Torvalds 24941da177e4SLinus Torvalds if (!p->root.rb_node) 24951da177e4SLinus Torvalds return; 24964a8c7bb5SNathan Zimmer write_lock(&p->lock); 24971da177e4SLinus Torvalds next = rb_first(&p->root); 24981da177e4SLinus Torvalds while (next) { 24991da177e4SLinus Torvalds n = rb_entry(next, struct sp_node, nd); 25001da177e4SLinus Torvalds next = rb_next(&n->nd); 250163f74ca2SKOSAKI Motohiro sp_delete(p, n); 25021da177e4SLinus Torvalds } 25034a8c7bb5SNathan Zimmer write_unlock(&p->lock); 25041da177e4SLinus Torvalds } 25051da177e4SLinus Torvalds 25061a687c2eSMel Gorman #ifdef CONFIG_NUMA_BALANCING 2507c297663cSMel Gorman static int __initdata numabalancing_override; 25081a687c2eSMel Gorman 25091a687c2eSMel Gorman static void __init check_numabalancing_enable(void) 25101a687c2eSMel Gorman { 25111a687c2eSMel Gorman bool numabalancing_default = false; 25121a687c2eSMel Gorman 25131a687c2eSMel Gorman if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED)) 25141a687c2eSMel Gorman numabalancing_default = true; 25151a687c2eSMel Gorman 2516c297663cSMel Gorman /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */ 2517c297663cSMel Gorman if (numabalancing_override) 2518c297663cSMel Gorman set_numabalancing_state(numabalancing_override == 1); 2519c297663cSMel Gorman 2520b0dc2b9bSMel Gorman if (num_online_nodes() > 1 && !numabalancing_override) { 2521756a025fSJoe Perches pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n", 2522c297663cSMel Gorman numabalancing_default ? 
"Enabling" : "Disabling"); 25231a687c2eSMel Gorman set_numabalancing_state(numabalancing_default); 25241a687c2eSMel Gorman } 25251a687c2eSMel Gorman } 25261a687c2eSMel Gorman 25271a687c2eSMel Gorman static int __init setup_numabalancing(char *str) 25281a687c2eSMel Gorman { 25291a687c2eSMel Gorman int ret = 0; 25301a687c2eSMel Gorman if (!str) 25311a687c2eSMel Gorman goto out; 25321a687c2eSMel Gorman 25331a687c2eSMel Gorman if (!strcmp(str, "enable")) { 2534c297663cSMel Gorman numabalancing_override = 1; 25351a687c2eSMel Gorman ret = 1; 25361a687c2eSMel Gorman } else if (!strcmp(str, "disable")) { 2537c297663cSMel Gorman numabalancing_override = -1; 25381a687c2eSMel Gorman ret = 1; 25391a687c2eSMel Gorman } 25401a687c2eSMel Gorman out: 25411a687c2eSMel Gorman if (!ret) 25424a404beaSAndrew Morton pr_warn("Unable to parse numa_balancing=\n"); 25431a687c2eSMel Gorman 25441a687c2eSMel Gorman return ret; 25451a687c2eSMel Gorman } 25461a687c2eSMel Gorman __setup("numa_balancing=", setup_numabalancing); 25471a687c2eSMel Gorman #else 25481a687c2eSMel Gorman static inline void __init check_numabalancing_enable(void) 25491a687c2eSMel Gorman { 25501a687c2eSMel Gorman } 25511a687c2eSMel Gorman #endif /* CONFIG_NUMA_BALANCING */ 25521a687c2eSMel Gorman 25531da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */ 25541da177e4SLinus Torvalds void __init numa_policy_init(void) 25551da177e4SLinus Torvalds { 2556b71636e2SPaul Mundt nodemask_t interleave_nodes; 2557b71636e2SPaul Mundt unsigned long largest = 0; 2558b71636e2SPaul Mundt int nid, prefer = 0; 2559b71636e2SPaul Mundt 25601da177e4SLinus Torvalds policy_cache = kmem_cache_create("numa_policy", 25611da177e4SLinus Torvalds sizeof(struct mempolicy), 256220c2df83SPaul Mundt 0, SLAB_PANIC, NULL); 25631da177e4SLinus Torvalds 25641da177e4SLinus Torvalds sn_cache = kmem_cache_create("shared_policy_node", 25651da177e4SLinus Torvalds sizeof(struct sp_node), 256620c2df83SPaul Mundt 0, SLAB_PANIC, NULL); 25671da177e4SLinus Torvalds 25685606e387SMel Gorman for_each_node(nid) { 25695606e387SMel Gorman preferred_node_policy[nid] = (struct mempolicy) { 25705606e387SMel Gorman .refcnt = ATOMIC_INIT(1), 25715606e387SMel Gorman .mode = MPOL_PREFERRED, 25725606e387SMel Gorman .flags = MPOL_F_MOF | MPOL_F_MORON, 25735606e387SMel Gorman .v = { .preferred_node = nid, }, 25745606e387SMel Gorman }; 25755606e387SMel Gorman } 25765606e387SMel Gorman 2577b71636e2SPaul Mundt /* 2578b71636e2SPaul Mundt * Set interleaving policy for system init. Interleaving is only 2579b71636e2SPaul Mundt * enabled across suitably sized nodes (default is >= 16MB), or 2580b71636e2SPaul Mundt * fall back to the largest node if they're all smaller. 2581b71636e2SPaul Mundt */ 2582b71636e2SPaul Mundt nodes_clear(interleave_nodes); 258301f13bd6SLai Jiangshan for_each_node_state(nid, N_MEMORY) { 2584b71636e2SPaul Mundt unsigned long total_pages = node_present_pages(nid); 25851da177e4SLinus Torvalds 2586b71636e2SPaul Mundt /* Preserve the largest node */ 2587b71636e2SPaul Mundt if (largest < total_pages) { 2588b71636e2SPaul Mundt largest = total_pages; 2589b71636e2SPaul Mundt prefer = nid; 2590b71636e2SPaul Mundt } 2591b71636e2SPaul Mundt 2592b71636e2SPaul Mundt /* Interleave this node? 
*/ 2593b71636e2SPaul Mundt if ((total_pages << PAGE_SHIFT) >= (16 << 20)) 2594b71636e2SPaul Mundt node_set(nid, interleave_nodes); 2595b71636e2SPaul Mundt } 2596b71636e2SPaul Mundt 2597b71636e2SPaul Mundt /* All too small, use the largest */ 2598b71636e2SPaul Mundt if (unlikely(nodes_empty(interleave_nodes))) 2599b71636e2SPaul Mundt node_set(prefer, interleave_nodes); 2600b71636e2SPaul Mundt 2601028fec41SDavid Rientjes if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes)) 2602b1de0d13SMitchel Humpherys pr_err("%s: interleaving failed\n", __func__); 26031a687c2eSMel Gorman 26041a687c2eSMel Gorman check_numabalancing_enable(); 26051da177e4SLinus Torvalds } 26061da177e4SLinus Torvalds 26078bccd85fSChristoph Lameter /* Reset policy of current process to default */ 26081da177e4SLinus Torvalds void numa_default_policy(void) 26091da177e4SLinus Torvalds { 2610028fec41SDavid Rientjes do_set_mempolicy(MPOL_DEFAULT, 0, NULL); 26111da177e4SLinus Torvalds } 261268860ec1SPaul Jackson 26134225399aSPaul Jackson /* 2614095f1fc4SLee Schermerhorn * Parse and format mempolicy from/to strings 2615095f1fc4SLee Schermerhorn */ 2616095f1fc4SLee Schermerhorn 2617095f1fc4SLee Schermerhorn /* 2618f2a07f40SHugh Dickins * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag. 26191a75a6c8SChristoph Lameter */ 2620345ace9cSLee Schermerhorn static const char * const policy_modes[] = 2621345ace9cSLee Schermerhorn { 2622345ace9cSLee Schermerhorn [MPOL_DEFAULT] = "default", 2623345ace9cSLee Schermerhorn [MPOL_PREFERRED] = "prefer", 2624345ace9cSLee Schermerhorn [MPOL_BIND] = "bind", 2625345ace9cSLee Schermerhorn [MPOL_INTERLEAVE] = "interleave", 2626d3a71033SLee Schermerhorn [MPOL_LOCAL] = "local", 2627345ace9cSLee Schermerhorn }; 26281a75a6c8SChristoph Lameter 2629095f1fc4SLee Schermerhorn 2630095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS 2631095f1fc4SLee Schermerhorn /** 2632f2a07f40SHugh Dickins * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option. 2633095f1fc4SLee Schermerhorn * @str: string containing mempolicy to parse 263471fe804bSLee Schermerhorn * @mpol: pointer to struct mempolicy pointer, returned on success. 
2635095f1fc4SLee Schermerhorn *
2636095f1fc4SLee Schermerhorn * Format of input:
2637095f1fc4SLee Schermerhorn * <mode>[=<flags>][:<nodelist>]
2638095f1fc4SLee Schermerhorn *
263971fe804bSLee Schermerhorn * On success, returns 0, else 1
2640095f1fc4SLee Schermerhorn */
2641a7a88b23SHugh Dickins int mpol_parse_str(char *str, struct mempolicy **mpol)
2642095f1fc4SLee Schermerhorn {
264371fe804bSLee Schermerhorn struct mempolicy *new = NULL;
2644b4652e84SLee Schermerhorn unsigned short mode;
2645f2a07f40SHugh Dickins unsigned short mode_flags;
264671fe804bSLee Schermerhorn nodemask_t nodes;
2647095f1fc4SLee Schermerhorn char *nodelist = strchr(str, ':');
2648095f1fc4SLee Schermerhorn char *flags = strchr(str, '=');
2649095f1fc4SLee Schermerhorn int err = 1;
2650095f1fc4SLee Schermerhorn 
2651095f1fc4SLee Schermerhorn if (nodelist) {
2652095f1fc4SLee Schermerhorn /* NUL-terminate mode or flags string */
2653095f1fc4SLee Schermerhorn *nodelist++ = '\0';
265471fe804bSLee Schermerhorn if (nodelist_parse(nodelist, nodes))
2655095f1fc4SLee Schermerhorn goto out;
265601f13bd6SLai Jiangshan if (!nodes_subset(nodes, node_states[N_MEMORY]))
2657095f1fc4SLee Schermerhorn goto out;
265871fe804bSLee Schermerhorn } else
265971fe804bSLee Schermerhorn nodes_clear(nodes);
266071fe804bSLee Schermerhorn 
2661095f1fc4SLee Schermerhorn if (flags)
2662095f1fc4SLee Schermerhorn *flags++ = '\0'; /* terminate mode string */
2663095f1fc4SLee Schermerhorn 
2664479e2802SPeter Zijlstra for (mode = 0; mode < MPOL_MAX; mode++) {
2665345ace9cSLee Schermerhorn if (!strcmp(str, policy_modes[mode])) {
2666095f1fc4SLee Schermerhorn break;
2667095f1fc4SLee Schermerhorn }
2668095f1fc4SLee Schermerhorn }
2669a720094dSMel Gorman if (mode >= MPOL_MAX)
2670095f1fc4SLee Schermerhorn goto out;
2671095f1fc4SLee Schermerhorn 
267271fe804bSLee Schermerhorn switch (mode) {
2673095f1fc4SLee Schermerhorn case MPOL_PREFERRED:
267471fe804bSLee Schermerhorn /*
267571fe804bSLee Schermerhorn * Insist on a nodelist of one node only
267671fe804bSLee Schermerhorn */
2677095f1fc4SLee Schermerhorn if (nodelist) {
2678095f1fc4SLee Schermerhorn char *rest = nodelist;
2679095f1fc4SLee Schermerhorn while (isdigit(*rest))
2680095f1fc4SLee Schermerhorn rest++;
2681926f2ae0SKOSAKI Motohiro if (*rest)
2682926f2ae0SKOSAKI Motohiro goto out;
2683095f1fc4SLee Schermerhorn }
2684095f1fc4SLee Schermerhorn break;
2685095f1fc4SLee Schermerhorn case MPOL_INTERLEAVE:
2686095f1fc4SLee Schermerhorn /*
2687095f1fc4SLee Schermerhorn * Default to online nodes with memory if no nodelist
2688095f1fc4SLee Schermerhorn */
2689095f1fc4SLee Schermerhorn if (!nodelist)
269001f13bd6SLai Jiangshan nodes = node_states[N_MEMORY];
26913f226aa1SLee Schermerhorn break;
269271fe804bSLee Schermerhorn case MPOL_LOCAL:
26933f226aa1SLee Schermerhorn /*
269471fe804bSLee Schermerhorn * Don't allow a nodelist; mpol_new() checks flags
26953f226aa1SLee Schermerhorn */
269671fe804bSLee Schermerhorn if (nodelist)
26973f226aa1SLee Schermerhorn goto out;
269871fe804bSLee Schermerhorn mode = MPOL_PREFERRED;
26993f226aa1SLee Schermerhorn break;
2700413b43deSRavikiran G Thirumalai case MPOL_DEFAULT:
2701413b43deSRavikiran G Thirumalai /*
2702413b43deSRavikiran G Thirumalai * Insist on an empty nodelist
2703413b43deSRavikiran G Thirumalai */
2704413b43deSRavikiran G Thirumalai if (!nodelist)
2705413b43deSRavikiran G Thirumalai err = 0;
2706413b43deSRavikiran G Thirumalai goto out;
2707d69b2e63SKOSAKI Motohiro case MPOL_BIND:
270871fe804bSLee Schermerhorn /*
2709d69b2e63SKOSAKI Motohiro * Insist on a nodelist
271071fe804bSLee Schermerhorn */ 2711d69b2e63SKOSAKI Motohiro if (!nodelist) 2712d69b2e63SKOSAKI Motohiro goto out; 2713095f1fc4SLee Schermerhorn } 2714095f1fc4SLee Schermerhorn 271571fe804bSLee Schermerhorn mode_flags = 0; 2716095f1fc4SLee Schermerhorn if (flags) { 2717095f1fc4SLee Schermerhorn /* 2718095f1fc4SLee Schermerhorn * Currently, we only support two mutually exclusive 2719095f1fc4SLee Schermerhorn * mode flags. 2720095f1fc4SLee Schermerhorn */ 2721095f1fc4SLee Schermerhorn if (!strcmp(flags, "static")) 272271fe804bSLee Schermerhorn mode_flags |= MPOL_F_STATIC_NODES; 2723095f1fc4SLee Schermerhorn else if (!strcmp(flags, "relative")) 272471fe804bSLee Schermerhorn mode_flags |= MPOL_F_RELATIVE_NODES; 2725095f1fc4SLee Schermerhorn else 2726926f2ae0SKOSAKI Motohiro goto out; 2727095f1fc4SLee Schermerhorn } 272871fe804bSLee Schermerhorn 272971fe804bSLee Schermerhorn new = mpol_new(mode, mode_flags, &nodes); 273071fe804bSLee Schermerhorn if (IS_ERR(new)) 2731926f2ae0SKOSAKI Motohiro goto out; 2732926f2ae0SKOSAKI Motohiro 2733f2a07f40SHugh Dickins /* 2734f2a07f40SHugh Dickins * Save nodes for mpol_to_str() to show the tmpfs mount options 2735f2a07f40SHugh Dickins * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo. 2736f2a07f40SHugh Dickins */ 2737f2a07f40SHugh Dickins if (mode != MPOL_PREFERRED) 2738f2a07f40SHugh Dickins new->v.nodes = nodes; 2739f2a07f40SHugh Dickins else if (nodelist) 2740f2a07f40SHugh Dickins new->v.preferred_node = first_node(nodes); 2741f2a07f40SHugh Dickins else 2742f2a07f40SHugh Dickins new->flags |= MPOL_F_LOCAL; 2743f2a07f40SHugh Dickins 2744f2a07f40SHugh Dickins /* 2745f2a07f40SHugh Dickins * Save nodes for contextualization: this will be used to "clone" 2746f2a07f40SHugh Dickins * the mempolicy in a specific context [cpuset] at a later time. 2747f2a07f40SHugh Dickins */ 2748e17f74afSLee Schermerhorn new->w.user_nodemask = nodes; 2749f2a07f40SHugh Dickins 2750926f2ae0SKOSAKI Motohiro err = 0; 275171fe804bSLee Schermerhorn 2752095f1fc4SLee Schermerhorn out: 2753095f1fc4SLee Schermerhorn /* Restore string for error message */ 2754095f1fc4SLee Schermerhorn if (nodelist) 2755095f1fc4SLee Schermerhorn *--nodelist = ':'; 2756095f1fc4SLee Schermerhorn if (flags) 2757095f1fc4SLee Schermerhorn *--flags = '='; 275871fe804bSLee Schermerhorn if (!err) 275971fe804bSLee Schermerhorn *mpol = new; 2760095f1fc4SLee Schermerhorn return err; 2761095f1fc4SLee Schermerhorn } 2762095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */ 2763095f1fc4SLee Schermerhorn 276471fe804bSLee Schermerhorn /** 276571fe804bSLee Schermerhorn * mpol_to_str - format a mempolicy structure for printing 276671fe804bSLee Schermerhorn * @buffer: to contain formatted mempolicy string 276771fe804bSLee Schermerhorn * @maxlen: length of @buffer 276871fe804bSLee Schermerhorn * @pol: pointer to mempolicy to be formatted 276971fe804bSLee Schermerhorn * 2770948927eeSDavid Rientjes * Convert @pol into a string. If @buffer is too short, truncate the string. 2771948927eeSDavid Rientjes * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the 2772948927eeSDavid Rientjes * longest flag, "relative", and to display at least a few node ids. 
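 *
 * For example, an interleave policy with MPOL_F_RELATIVE_NODES over nodes
 * 0-3 is emitted as "interleave=relative:0-3", the same syntax accepted by
 * mpol_parse_str() above.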
27731a75a6c8SChristoph Lameter */ 2774948927eeSDavid Rientjes void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol) 27751a75a6c8SChristoph Lameter { 27761a75a6c8SChristoph Lameter char *p = buffer; 2777948927eeSDavid Rientjes nodemask_t nodes = NODE_MASK_NONE; 2778948927eeSDavid Rientjes unsigned short mode = MPOL_DEFAULT; 2779948927eeSDavid Rientjes unsigned short flags = 0; 27801a75a6c8SChristoph Lameter 27818790c71aSDavid Rientjes if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) { 2782bea904d5SLee Schermerhorn mode = pol->mode; 2783948927eeSDavid Rientjes flags = pol->flags; 2784948927eeSDavid Rientjes } 2785bea904d5SLee Schermerhorn 27861a75a6c8SChristoph Lameter switch (mode) { 27871a75a6c8SChristoph Lameter case MPOL_DEFAULT: 27881a75a6c8SChristoph Lameter break; 27891a75a6c8SChristoph Lameter case MPOL_PREFERRED: 2790fc36b8d3SLee Schermerhorn if (flags & MPOL_F_LOCAL) 2791f2a07f40SHugh Dickins mode = MPOL_LOCAL; 279253f2556bSLee Schermerhorn else 2793fc36b8d3SLee Schermerhorn node_set(pol->v.preferred_node, nodes); 27941a75a6c8SChristoph Lameter break; 27951a75a6c8SChristoph Lameter case MPOL_BIND: 27961a75a6c8SChristoph Lameter case MPOL_INTERLEAVE: 27971a75a6c8SChristoph Lameter nodes = pol->v.nodes; 27981a75a6c8SChristoph Lameter break; 27991a75a6c8SChristoph Lameter default: 2800948927eeSDavid Rientjes WARN_ON_ONCE(1); 2801948927eeSDavid Rientjes snprintf(p, maxlen, "unknown"); 2802948927eeSDavid Rientjes return; 28031a75a6c8SChristoph Lameter } 28041a75a6c8SChristoph Lameter 2805b7a9f420SDavid Rientjes p += snprintf(p, maxlen, "%s", policy_modes[mode]); 28061a75a6c8SChristoph Lameter 2807fc36b8d3SLee Schermerhorn if (flags & MPOL_MODE_FLAGS) { 2808948927eeSDavid Rientjes p += snprintf(p, buffer + maxlen - p, "="); 2809f5b087b5SDavid Rientjes 28102291990aSLee Schermerhorn /* 28112291990aSLee Schermerhorn * Currently, the only defined flags are mutually exclusive 28122291990aSLee Schermerhorn */ 2813f5b087b5SDavid Rientjes if (flags & MPOL_F_STATIC_NODES) 28142291990aSLee Schermerhorn p += snprintf(p, buffer + maxlen - p, "static"); 28152291990aSLee Schermerhorn else if (flags & MPOL_F_RELATIVE_NODES) 28162291990aSLee Schermerhorn p += snprintf(p, buffer + maxlen - p, "relative"); 2817f5b087b5SDavid Rientjes } 2818f5b087b5SDavid Rientjes 28199e763e0fSTejun Heo if (!nodes_empty(nodes)) 28209e763e0fSTejun Heo p += scnprintf(p, buffer + maxlen - p, ":%*pbl", 28219e763e0fSTejun Heo nodemask_pr_args(&nodes)); 28221a75a6c8SChristoph Lameter } 2823
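/*
 * Illustrative sketch (not part of the file above): how a tmpfs-like
 * filesystem could drive the shared policy interface in this file.
 * "example_inode_info" and the example_* helpers below are hypothetical
 * stand-ins, loosely modelled on what shmem does with shmem_inode_info;
 * only mpol_shared_policy_init(), mpol_shared_policy_lookup(),
 * mpol_set_shared_policy() and mpol_free_shared_policy() are real APIs.
 */
struct example_inode_info {
	struct shared_policy	policy;		/* per-inode policy rb-tree */
};

/*
 * At inode creation: install the mount's mempolicy (may be NULL).
 * mpol_shared_policy_init() consumes the caller's reference on @mpol.
 */
static void example_inode_init_policy(struct example_inode_info *info,
				      struct mempolicy *mpol)
{
	mpol_shared_policy_init(&info->policy, mpol);
}

/*
 * From a ->set_policy hook (e.g. mbind() on a mapping of the file):
 * replace the range covered by @vma with @npol; returns 0 or -ENOMEM.
 */
static int example_inode_set_policy(struct example_inode_info *info,
				    struct vm_area_struct *vma,
				    struct mempolicy *npol)
{
	return mpol_set_shared_policy(&info->policy, vma, npol);
}

/*
 * At fault/allocation time: look up the policy covering @index.
 * A non-NULL result carries a reference the caller must drop.
 */
static struct mempolicy *example_inode_get_policy(struct example_inode_info *info,
						  pgoff_t index)
{
	return mpol_shared_policy_lookup(&info->policy, index);
}

/* At inode eviction: free every sp_node and drop its policy. */
static void example_inode_free_policy(struct example_inode_info *info)
{
	mpol_free_shared_policy(&info->policy);
}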