/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave	Allocate memory interleaved over a set of nodes,
 *		with normal fallback if it fails.
 *		For VMA based allocations this interleaves based on the
 *		offset into the backing object or offset into the mapping
 *		for anonymous memory. For process policy a process counter
 *		is used.
 *
 * bind		Only allocate memory on a specific set of nodes,
 *		no fallback.
 *		FIXME: memory is allocated starting with the first node
 *		to the last. It would be better if bind would truly restrict
 *		the allocation to memory nodes instead.
 *
 * preferred	Try a specific node first before normal fallback.
 *		As a special case NUMA_NO_NODE here means do the allocation
 *		on the local CPU. This is normally identical to default,
 *		but useful to set in a VMA when you have a non default
 *		process policy.
 *
 * default	Allocate on the local node first, or when on a VMA
 *		use the process policy. This is what Linux always did
 *		in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/
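/*
 * Illustrative userspace view of the policies described above (a sketch,
 * not part of this file; error handling omitted).  The nodemask argument
 * of the mbind(2)/set_mempolicy(2) syscalls is a plain bitmap of node
 * numbers:
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 1);	// nodes 0 and 1
 *
 *	// interleave all future allocations of this task over nodes 0,1
 *	set_mempolicy(MPOL_INTERLEAVE, &mask, 8 * sizeof(mask) + 1);
 *
 *	// bind an existing mapping to node 0 only
 *	unsigned long node0 = 1UL << 0;
 *	mbind(addr, len, MPOL_BIND, &node0, 8 * sizeof(node0) + 1, 0);
 */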
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/ptrace.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>
#include <linux/swapops.h>

#include <asm/tlbflush.h>
#include <linux/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/*
 * Highest zone. A specific allocation for a zone below that is not
 * policied.
 */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

struct mempolicy *get_task_policy(struct task_struct *p)
{
	struct mempolicy *pol = p->mempolicy;
	int node;

	if (pol)
		return pol;

	node = numa_node_id();
	if (node != NUMA_NO_NODE) {
		pol = &preferred_node_policy[node];
		/* preferred_node_policy is not initialised early in boot */
		if (pol->mode)
			return pol;
	}

	return &default_policy;
}

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
} mpol_ops[MPOL_MAX];

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}
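/*
 * Worked example for mpol_relative_nodemask() (illustrative): a user
 * nodemask of {0,1} with MPOL_F_RELATIVE_NODES is first folded modulo the
 * weight of the allowed set and then mapped onto it, so with an allowed
 * set of {4,5,6} the result is {4,5} -- the first and second allowed
 * nodes, whatever their actual numbers happen to be.
 */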
static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!nodes)
		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
	else if (nodes_empty(*nodes))
		return -EINVAL;			/* no allowed nodes */
	else
		pol->v.preferred_node = first_node(*nodes);
	return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy.  mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.  But, we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy.  May also be called holding the mmap_semaphore for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
	if (pol == NULL)
		return 0;
	/* Check N_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_MEMORY]);

	VM_BUG_ON(!nodes);
	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
		nodes = NULL;	/* explicit local allocation */
	else {
		if (pol->flags & MPOL_F_RELATIVE_NODES)
			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
		else
			nodes_and(nsc->mask2, *nodes, nsc->mask1);

		if (mpol_store_user_nodemask(pol))
			pol->w.user_nodemask = *nodes;
		else
			pol->w.cpuset_mems_allowed =
						cpuset_current_mems_allowed;
	}

	if (nodes)
		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	else
		ret = mpol_ops[pol->mode].create(pol, NULL);
	return ret;
}
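/*
 * Sketch of how the two halves are paired in practice (see
 * do_set_mempolicy() and do_mbind() below for the real callers):
 *
 *	NODEMASK_SCRATCH(scratch);
 *	new = mpol_new(mode, flags, nmask);
 *	task_lock(current);
 *	err = mpol_set_nodemask(new, nmask, scratch);
 *	task_unlock(current);
 *	NODEMASK_SCRATCH_FREE(scratch);
 */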
/*
 * This function just creates a new policy, does some check and simple
 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);
		}
	} else if (mode == MPOL_LOCAL) {
		if (!nodes_empty(*nodes) ||
		    (flags & MPOL_F_STATIC_NODES) ||
		    (flags & MPOL_F_RELATIVE_NODES))
			return ERR_PTR(-EINVAL);
		mode = MPOL_PREFERRED;
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	return policy;
}
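/*
 * Summary of the argument checking above (informational):
 *
 *	MPOL_DEFAULT	nodemask must be NULL or empty; returns NULL policy
 *	MPOL_PREFERRED	an empty nodemask means local allocation, but is
 *			rejected in combination with static/relative flags
 *	MPOL_LOCAL	requires an empty nodemask and no mode flags;
 *			internally converted to MPOL_PREFERRED
 *	MPOL_BIND and	a non-empty nodemask is required
 *	MPOL_INTERLEAVE
 */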
/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
{
}

static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed,
			    *nodes);
		pol->w.cpuset_mems_allowed = tmp;
	}

	if (nodes_empty(tmp))
		tmp = *nodes;

	pol->v.nodes = tmp;
}

static void mpol_rebind_preferred(struct mempolicy *pol,
				  const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES) {
		int node = first_node(pol->w.user_nodemask);

		if (node_isset(node, *nodes)) {
			pol->v.preferred_node = node;
			pol->flags &= ~MPOL_F_LOCAL;
		} else
			pol->flags |= MPOL_F_LOCAL;
	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
		pol->v.preferred_node = first_node(tmp);
	} else if (!(pol->flags & MPOL_F_LOCAL)) {
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						   pol->w.cpuset_mems_allowed,
						   *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}
}
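/*
 * Rebind example (illustrative): an MPOL_INTERLEAVE policy created with
 * MPOL_F_STATIC_NODES over nodes {0-3} that is rebound to a cpuset
 * allowing {2-7} keeps the intersection {2,3}.  Without static/relative
 * flags the old nodes are instead remapped into the new set with
 * nodes_remap(), preserving their relative positions.
 */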
/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * Per-vma policies are protected by mmap_sem.  Allocations using per-task
 * policies are protected by task->mems_allowed_seq to prevent a premature
 * OOM/allocation failure due to parallel nodemask modification.
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
{
	if (!pol)
		return;
	if (!mpol_store_user_nodemask(pol) &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;

	mpol_ops[pol->mode].rebind(pol, newmask);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */
void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
{
	mpol_rebind_policy(tsk->mempolicy, new);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
 */
void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new);
	up_write(&mm->mmap_sem);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_interleave,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,
	},
};

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

struct queue_pages {
	struct list_head *pagelist;
	unsigned long flags;
	nodemask_t *nmask;
	struct vm_area_struct *prev;
};
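/*
 * Note (informational): do_mbind() below passes MPOL_MF_INVERT so that
 * the page table walk collects the pages that are *not* on the requested
 * nodes -- i.e. exactly the pages that need to be migrated.
 */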
/*
 * Check if the page's nid is in qp->nmask.
 *
 * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
 * in the invert of qp->nmask.
 */
static inline bool queue_pages_required(struct page *page,
					struct queue_pages *qp)
{
	int nid = page_to_nid(page);
	unsigned long flags = qp->flags;

	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
}

static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	int ret = 0;
	struct page *page;
	struct queue_pages *qp = walk->private;
	unsigned long flags;

	if (unlikely(is_pmd_migration_entry(*pmd))) {
		ret = 1;
		goto unlock;
	}
	page = pmd_page(*pmd);
	if (is_huge_zero_page(page)) {
		spin_unlock(ptl);
		__split_huge_pmd(walk->vma, pmd, addr, false, NULL);
		goto out;
	}
	if (!thp_migration_supported()) {
		get_page(page);
		spin_unlock(ptl);
		lock_page(page);
		ret = split_huge_page(page);
		unlock_page(page);
		put_page(page);
		goto out;
	}
	if (!queue_pages_required(page, qp)) {
		ret = 1;
		goto unlock;
	}

	ret = 1;
	flags = qp->flags;
	/* go to thp migration */
	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
		migrate_page_add(page, qp->pagelist, flags);
unlock:
	spin_unlock(ptl);
out:
	return ret;
}
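/*
 * Return convention of queue_pages_pmd() (informational): nonzero means
 * the huge pmd was fully handled here (migration entry, page queued, or
 * split failed) and the caller skips the pte-level scan; 0 means the pmd
 * was split successfully and the caller should fall through and scan the
 * individual ptes.
 */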
/*
 * Scan through pages checking if pages follow certain conditions,
 * and move them to the pagelist if they do.
 */
static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct page *page;
	struct queue_pages *qp = walk->private;
	unsigned long flags = qp->flags;
	int ret;
	pte_t *pte;
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
		if (ret)
			return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
retry:
	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * vm_normal_page() filters out zero pages, but there might
		 * still be PageReserved pages to skip, perhaps in a VDSO.
		 */
		if (PageReserved(page))
			continue;
		if (!queue_pages_required(page, qp))
			continue;
		if (PageTransCompound(page) && !thp_migration_supported()) {
			get_page(page);
			pte_unmap_unlock(pte, ptl);
			lock_page(page);
			ret = split_huge_page(page);
			unlock_page(page);
			put_page(page);
			/* Failed to split -- skip. */
			if (ret) {
				pte = pte_offset_map_lock(walk->mm, pmd,
						addr, &ptl);
				continue;
			}
			goto retry;
		}

		migrate_page_add(page, qp->pagelist, flags);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
			       unsigned long addr, unsigned long end,
			       struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
	struct queue_pages *qp = walk->private;
	unsigned long flags = qp->flags;
	struct page *page;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry))
		goto unlock;
	page = pte_page(entry);
	if (!queue_pages_required(page, qp))
		goto unlock;
	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
	if (flags & (MPOL_MF_MOVE_ALL) ||
	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
		isolate_huge_page(page, qp->pagelist);
unlock:
	spin_unlock(ptl);
#else
	BUG();
#endif
	return 0;
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * This is used to mark a range of virtual addresses to be inaccessible.
 * These are later cleared by a NUMA hinting fault. Depending on these
 * faults, pages may be migrated for better NUMA placement.
 *
 * This is assuming that NUMA faults are handled using PROT_NONE. If
 * an architecture makes a different choice, it will need further
 * changes to the core.
 */
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	int nr_updated;

	nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
	if (nr_updated)
		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);

	return nr_updated;
}
#else
static unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */
static int queue_pages_test_walk(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct queue_pages *qp = walk->private;
	unsigned long endvma = vma->vm_end;
	unsigned long flags = qp->flags;

	if (!vma_migratable(vma))
		return 1;

	if (endvma > end)
		endvma = end;
	if (vma->vm_start > start)
		start = vma->vm_start;

	if (!(flags & MPOL_MF_DISCONTIG_OK)) {
		if (!vma->vm_next && vma->vm_end < end)
			return -EFAULT;
		if (qp->prev && qp->prev->vm_end < vma->vm_start)
			return -EFAULT;
	}

	qp->prev = vma;

	if (flags & MPOL_MF_LAZY) {
		/* Similar to task_numa_work, skip inaccessible VMAs */
		if (!is_vm_hugetlb_page(vma) &&
			(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) &&
			!(vma->vm_flags & VM_MIXEDMAP))
			change_prot_numa(vma, start, endvma);
		return 1;
	}

	/* queue pages from current vma */
	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
		return 0;
	return 1;
}

/*
 * Walk through page tables and collect pages to be migrated.
 *
 * If pages found in a given range are on a set of nodes (determined by
 * @nodes and @flags), they are isolated and queued to the pagelist which
 * is passed via @private.
 */
static int
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		nodemask_t *nodes, unsigned long flags,
		struct list_head *pagelist)
{
	struct queue_pages qp = {
		.pagelist = pagelist,
		.flags = flags,
		.nmask = nodes,
		.prev = NULL,
	};
	struct mm_walk queue_pages_walk = {
		.hugetlb_entry = queue_pages_hugetlb,
		.pmd_entry = queue_pages_pte_range,
		.test_walk = queue_pages_test_walk,
		.mm = mm,
		.private = &qp,
	};

	return walk_page_range(start, end, &queue_pages_walk);
}
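/*
 * Return convention of the walk above (informational): test_walk
 * returning 1 skips a VMA, 0 walks it, and a negative value aborts
 * walk_page_range() with that error -- used to return -EFAULT for holes
 * in the range unless MPOL_MF_DISCONTIG_OK is set.
 */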
/*
 * Apply policy to a single VMA
 * This must be called with the mmap_sem held for writing.
 */
static int vma_replace_policy(struct vm_area_struct *vma,
						struct mempolicy *pol)
{
	int err;
	struct mempolicy *old;
	struct mempolicy *new;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	new = mpol_dup(pol);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (vma->vm_ops && vma->vm_ops->set_policy) {
		err = vma->vm_ops->set_policy(vma, new);
		if (err)
			goto err_out;
	}

	old = vma->vm_policy;
	vma->vm_policy = new; /* protected by mmap_sem */
	mpol_put(old);

	return 0;
 err_out:
	mpol_put(new);
	return err;
}
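/*
 * Note (informational): for file mappings that implement it (e.g. shmem),
 * vm_ops->set_policy stores the policy in the mapping's shared policy
 * tree so that all users of the file see it, as described in the header
 * comment; anonymous VMAs take the vma->vm_policy path above.
 */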
/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end, struct mempolicy *new_pol)
{
	struct vm_area_struct *next;
	struct vm_area_struct *prev;
	struct vm_area_struct *vma;
	int err = 0;
	pgoff_t pgoff;
	unsigned long vmstart;
	unsigned long vmend;

	vma = find_vma(mm, start);
	if (!vma || vma->vm_start > start)
		return -EFAULT;

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
		next = vma->vm_next;
		vmstart = max(start, vma->vm_start);
		vmend   = min(end, vma->vm_end);

		if (mpol_equal(vma_policy(vma), new_pol))
			continue;

		pgoff = vma->vm_pgoff +
			((vmstart - vma->vm_start) >> PAGE_SHIFT);
		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
				 vma->anon_vma, vma->vm_file, pgoff,
				 new_pol, vma->vm_userfaultfd_ctx);
		if (prev) {
			vma = prev;
			next = vma->vm_next;
			if (mpol_equal(vma_policy(vma), new_pol))
				continue;
			/* vma_merge() joined vma && vma->next, case 8 */
			goto replace;
		}
		if (vma->vm_start != vmstart) {
			err = split_vma(vma->vm_mm, vma, vmstart, 1);
			if (err)
				goto out;
		}
		if (vma->vm_end != vmend) {
			err = split_vma(vma->vm_mm, vma, vmend, 0);
			if (err)
				goto out;
		}
 replace:
		err = vma_replace_policy(vma, new_pol);
		if (err)
			goto out;
	}

 out:
	return err;
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new, *old;
	NODEMASK_SCRATCH(scratch);
	int ret;

	if (!scratch)
		return -ENOMEM;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new)) {
		ret = PTR_ERR(new);
		goto out;
	}

	task_lock(current);
	ret = mpol_set_nodemask(new, nodes, scratch);
	if (ret) {
		task_unlock(current);
		mpol_put(new);
		goto out;
	}
	old = current->mempolicy;
	current->mempolicy = new;
	if (new && new->mode == MPOL_INTERLEAVE)
		current->il_prev = MAX_NUMNODES-1;
	task_unlock(current);
	mpol_put(old);
	ret = 0;
out:
	NODEMASK_SCRATCH_FREE(scratch);
	return ret;
}
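/*
 * Note (informational): il_prev is primed with MAX_NUMNODES-1 so that
 * the first interleaved allocation wraps around to the first node of the
 * mask (see the next_node_in() users below).
 */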
/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (p == &default_policy)
		return;

	switch (p->mode) {
	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		if (!(p->flags & MPOL_F_LOCAL))
			node_set(p->v.preferred_node, *nodes);
		/* else return empty node mask for local allocation */
		break;
	default:
		BUG();
	}
}

static int lookup_node(unsigned long addr)
{
	struct page *p;
	int err;

	err = get_user_pages(addr & PAGE_MASK, 1, 0, &p, NULL);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	return err;
}
/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask  = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL.  We
		 * want to return MPOL_DEFAULT in this case.
		 */
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = next_node_in(current->il_prev, pol->v.nodes);
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {
			*nmask = pol->w.user_nodemask;
		} else {
			task_lock(current);
			get_policy_nodemask(pol, nmask);
			task_unlock(current);
		}
	}

 out:
	mpol_cond_put(pol);
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}
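/*
 * Illustrative userspace queries against the function above (a sketch;
 * declarations and error handling omitted):
 *
 *	// which policy mode governs this address?
 *	get_mempolicy(&mode, NULL, 0, addr, MPOL_F_ADDR);
 *
 *	// on which node does the page at addr currently sit?
 *	get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR);
 */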
#ifdef CONFIG_MIGRATION
/*
 * page migration, thp tail pages can be passed.
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	struct page *head = compound_head(page);
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
		if (!isolate_lru_page(head)) {
			list_add_tail(&head->lru, pagelist);
			mod_node_page_state(page_pgdat(head),
				NR_ISOLATED_ANON + page_is_file_cache(head),
				hpage_nr_pages(head));
		}
	}
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
	if (PageHuge(page))
		return alloc_huge_page_node(page_hstate(compound_head(page)),
					node);
	else if (thp_migration_supported() && PageTransHuge(page)) {
		struct page *thp;

		thp = alloc_pages_node(node,
			(GFP_TRANSHUGE | __GFP_THISNODE),
			HPAGE_PMD_ORDER);
		if (!thp)
			return NULL;
		prep_transhuge_page(thp);
		return thp;
	} else
		return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
						    __GFP_THISNODE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;

	nodes_clear(nmask);
	node_set(source, nmask);

	/*
	 * This does not "check" the range but isolates all pages that
	 * need migration.  Between passing in the full user address
	 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
	 */
	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, new_node_page, NULL, dest,
					MIGRATE_SYNC, MR_SYSCALL);
		if (err)
			putback_movable_pages(&pagelist);
	}

	return err;
}
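/*
 * Note (informational): new_node_page() allocates with __GFP_THISNODE so
 * that a migrated page really lands on the requested destination node
 * rather than falling back to a nearby one.
 */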
/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	int busy = 0;
	int err;
	nodemask_t tmp;

	err = migrate_prep();
	if (err)
		return err;

	down_read(&mm->mmap_sem);

	/*
	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
	 * bit in 'tmp', and return that <source, dest> pair for migration.
	 * The pair of nodemasks 'to' and 'from' define the map.
	 *
	 * If no pair of bits is found that way, fallback to picking some
	 * pair of 'source' and 'dest' bits that are not the same.  If the
	 * 'source' and 'dest' bits are the same, this represents a node
	 * that will be migrating to itself, so no pages need move.
	 *
	 * If no bits are left in 'tmp', or if all remaining bits left
	 * in 'tmp' correspond to the same bit in 'to', return false
	 * (nothing left to migrate).
	 *
	 * This lets us pick a pair of nodes to migrate between, such that
	 * if possible the dest node is not already occupied by some other
	 * source node, minimizing the risk of overloading the memory on a
	 * node that would happen if we migrated incoming memory to a node
	 * before migrating outgoing memory source that same node.
	 *
	 * A single scan of tmp is sufficient.  As we go, we remember the
	 * most recent <s, d> pair that moved (s != d).  If we find a pair
	 * that not only moved, but what's better, moved to an empty slot
	 * (d is not set in tmp), then we break out then, with that pair.
	 * Otherwise when we finish scanning tmp, we at least have the
	 * most recent <s, d> pair that moved.  If we get all the way through
	 * the scan of tmp without finding any node that moved, much less
	 * moved to an empty node, then there is nothing left worth migrating.
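	 *
	 * Worked trace (illustrative): from = {0,1}, to = {1,2}.  First scan
	 * of tmp = {0,1}: s=0 remaps to d=1, but node 1 is still set in tmp,
	 * so only remember <0,1>; s=1 remaps to d=2, which is not in tmp, so
	 * break and migrate 1 -> 2, then clear bit 1.  Second scan of
	 * tmp = {0}: migrate 0 -> 1.  tmp is now empty and the loop ends.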
	 */

	tmp = *from;
	while (!nodes_empty(tmp)) {
		int s, d;
		int source = NUMA_NO_NODE;
		int dest = 0;

		for_each_node_mask(s, tmp) {

			/*
			 * do_migrate_pages() tries to maintain the relative
			 * node relationship of the pages established between
			 * threads and memory areas.
			 *
			 * However if the number of source nodes is not equal to
			 * the number of destination nodes we can not preserve
			 * this node relative relationship.  In that case, skip
			 * copying memory from a node that is in the destination
			 * mask.
			 *
			 * Example: [2,3,4] -> [3,4,5] moves everything.
			 *          [0-7] -> [3,4,5] moves only 0,1,2,6,7.
			 */

			if ((nodes_weight(*from) != nodes_weight(*to)) &&
						(node_isset(s, *to)))
				continue;

			d = node_remap(s, *from, *to);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
				break;
		}
		if (source == NUMA_NO_NODE)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;
		if (err < 0)
			break;
	}
	up_read(&mm->mmap_sem);
	if (err < 0)
		return err;
	return busy;

}
11093ad33b24SLee Schermerhorn */ 1110d05f0cdcSHugh Dickins static struct page *new_page(struct page *page, unsigned long start, int **x) 111195a402c3SChristoph Lameter { 1112d05f0cdcSHugh Dickins struct vm_area_struct *vma; 11133ad33b24SLee Schermerhorn unsigned long uninitialized_var(address); 111495a402c3SChristoph Lameter 1115d05f0cdcSHugh Dickins vma = find_vma(current->mm, start); 11163ad33b24SLee Schermerhorn while (vma) { 11173ad33b24SLee Schermerhorn address = page_address_in_vma(page, vma); 11183ad33b24SLee Schermerhorn if (address != -EFAULT) 11193ad33b24SLee Schermerhorn break; 11203ad33b24SLee Schermerhorn vma = vma->vm_next; 11213ad33b24SLee Schermerhorn } 11223ad33b24SLee Schermerhorn 112311c731e8SWanpeng Li if (PageHuge(page)) { 1124*389c8178SMichal Hocko return alloc_huge_page_vma(page_hstate(compound_head(page)), 1125*389c8178SMichal Hocko vma, address); 1126c8633798SNaoya Horiguchi } else if (thp_migration_supported() && PageTransHuge(page)) { 1127c8633798SNaoya Horiguchi struct page *thp; 1128c8633798SNaoya Horiguchi 1129c8633798SNaoya Horiguchi thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address, 1130c8633798SNaoya Horiguchi HPAGE_PMD_ORDER); 1131c8633798SNaoya Horiguchi if (!thp) 1132c8633798SNaoya Horiguchi return NULL; 1133c8633798SNaoya Horiguchi prep_transhuge_page(thp); 1134c8633798SNaoya Horiguchi return thp; 113511c731e8SWanpeng Li } 113611c731e8SWanpeng Li /* 113711c731e8SWanpeng Li * if !vma, alloc_page_vma() will use task or system default policy 113811c731e8SWanpeng Li */ 11390f556856SMichal Hocko return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL, 11400f556856SMichal Hocko vma, address); 114195a402c3SChristoph Lameter } 1142b20a3503SChristoph Lameter #else 1143b20a3503SChristoph Lameter 1144b20a3503SChristoph Lameter static void migrate_page_add(struct page *page, struct list_head *pagelist, 1145b20a3503SChristoph Lameter unsigned long flags) 1146b20a3503SChristoph Lameter { 1147b20a3503SChristoph Lameter } 1148b20a3503SChristoph Lameter 11490ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, 11500ce72d4fSAndrew Morton const nodemask_t *to, int flags) 1151b20a3503SChristoph Lameter { 1152b20a3503SChristoph Lameter return -ENOSYS; 1153b20a3503SChristoph Lameter } 115495a402c3SChristoph Lameter 1155d05f0cdcSHugh Dickins static struct page *new_page(struct page *page, unsigned long start, int **x) 115695a402c3SChristoph Lameter { 115795a402c3SChristoph Lameter return NULL; 115895a402c3SChristoph Lameter } 1159b20a3503SChristoph Lameter #endif 1160b20a3503SChristoph Lameter 1161dbcb0f19SAdrian Bunk static long do_mbind(unsigned long start, unsigned long len, 1162028fec41SDavid Rientjes unsigned short mode, unsigned short mode_flags, 1163028fec41SDavid Rientjes nodemask_t *nmask, unsigned long flags) 11646ce3c4c0SChristoph Lameter { 11656ce3c4c0SChristoph Lameter struct mm_struct *mm = current->mm; 11666ce3c4c0SChristoph Lameter struct mempolicy *new; 11676ce3c4c0SChristoph Lameter unsigned long end; 11686ce3c4c0SChristoph Lameter int err; 11696ce3c4c0SChristoph Lameter LIST_HEAD(pagelist); 11706ce3c4c0SChristoph Lameter 1171b24f53a0SLee Schermerhorn if (flags & ~(unsigned long)MPOL_MF_VALID) 11726ce3c4c0SChristoph Lameter return -EINVAL; 117374c00241SChristoph Lameter if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE)) 11746ce3c4c0SChristoph Lameter return -EPERM; 11756ce3c4c0SChristoph Lameter 11766ce3c4c0SChristoph Lameter if (start & ~PAGE_MASK) 11776ce3c4c0SChristoph Lameter return -EINVAL; 
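/* start must be page aligned; len is rounded up to whole pages below */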
11786ce3c4c0SChristoph Lameter 11796ce3c4c0SChristoph Lameter if (mode == MPOL_DEFAULT) 11806ce3c4c0SChristoph Lameter flags &= ~MPOL_MF_STRICT; 11816ce3c4c0SChristoph Lameter 11826ce3c4c0SChristoph Lameter len = (len + PAGE_SIZE - 1) & PAGE_MASK; 11836ce3c4c0SChristoph Lameter end = start + len; 11846ce3c4c0SChristoph Lameter 11856ce3c4c0SChristoph Lameter if (end < start) 11866ce3c4c0SChristoph Lameter return -EINVAL; 11876ce3c4c0SChristoph Lameter if (end == start) 11886ce3c4c0SChristoph Lameter return 0; 11896ce3c4c0SChristoph Lameter 1190028fec41SDavid Rientjes new = mpol_new(mode, mode_flags, nmask); 11916ce3c4c0SChristoph Lameter if (IS_ERR(new)) 11926ce3c4c0SChristoph Lameter return PTR_ERR(new); 11936ce3c4c0SChristoph Lameter 1194b24f53a0SLee Schermerhorn if (flags & MPOL_MF_LAZY) 1195b24f53a0SLee Schermerhorn new->flags |= MPOL_F_MOF; 1196b24f53a0SLee Schermerhorn 11976ce3c4c0SChristoph Lameter /* 11986ce3c4c0SChristoph Lameter * If we are using the default policy then operation 11996ce3c4c0SChristoph Lameter * on discontinuous address spaces is okay after all 12006ce3c4c0SChristoph Lameter */ 12016ce3c4c0SChristoph Lameter if (!new) 12026ce3c4c0SChristoph Lameter flags |= MPOL_MF_DISCONTIG_OK; 12036ce3c4c0SChristoph Lameter 1204028fec41SDavid Rientjes pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n", 1205028fec41SDavid Rientjes start, start + len, mode, mode_flags, 120600ef2d2fSDavid Rientjes nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE); 12076ce3c4c0SChristoph Lameter 12080aedadf9SChristoph Lameter if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 12090aedadf9SChristoph Lameter 12100aedadf9SChristoph Lameter err = migrate_prep(); 12110aedadf9SChristoph Lameter if (err) 1212b05ca738SKOSAKI Motohiro goto mpol_out; 12130aedadf9SChristoph Lameter } 12144bfc4495SKAMEZAWA Hiroyuki { 12154bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 12164bfc4495SKAMEZAWA Hiroyuki if (scratch) { 12176ce3c4c0SChristoph Lameter down_write(&mm->mmap_sem); 121858568d2aSMiao Xie task_lock(current); 12194bfc4495SKAMEZAWA Hiroyuki err = mpol_set_nodemask(new, nmask, scratch); 122058568d2aSMiao Xie task_unlock(current); 12214bfc4495SKAMEZAWA Hiroyuki if (err) 122258568d2aSMiao Xie up_write(&mm->mmap_sem); 12234bfc4495SKAMEZAWA Hiroyuki } else 12244bfc4495SKAMEZAWA Hiroyuki err = -ENOMEM; 12254bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 12264bfc4495SKAMEZAWA Hiroyuki } 1227b05ca738SKOSAKI Motohiro if (err) 1228b05ca738SKOSAKI Motohiro goto mpol_out; 1229b05ca738SKOSAKI Motohiro 1230d05f0cdcSHugh Dickins err = queue_pages_range(mm, start, end, nmask, 12316ce3c4c0SChristoph Lameter flags | MPOL_MF_INVERT, &pagelist); 1232d05f0cdcSHugh Dickins if (!err) 12339d8cebd4SKOSAKI Motohiro err = mbind_range(mm, start, end, new); 12347e2ab150SChristoph Lameter 1235b24f53a0SLee Schermerhorn if (!err) { 1236b24f53a0SLee Schermerhorn int nr_failed = 0; 1237b24f53a0SLee Schermerhorn 1238cf608ac1SMinchan Kim if (!list_empty(&pagelist)) { 1239b24f53a0SLee Schermerhorn WARN_ON_ONCE(flags & MPOL_MF_LAZY); 1240d05f0cdcSHugh Dickins nr_failed = migrate_pages(&pagelist, new_page, NULL, 1241d05f0cdcSHugh Dickins start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND); 1242cf608ac1SMinchan Kim if (nr_failed) 124374060e4dSNaoya Horiguchi putback_movable_pages(&pagelist); 1244cf608ac1SMinchan Kim } 12456ce3c4c0SChristoph Lameter 1246b24f53a0SLee Schermerhorn if (nr_failed && (flags & MPOL_MF_STRICT)) 12476ce3c4c0SChristoph Lameter err = -EIO; 1248ab8a3e14SKOSAKI Motohiro } else 1249b0e5fd73SJoonsoo Kim 
putback_movable_pages(&pagelist);
1250b20a3503SChristoph Lameter
12516ce3c4c0SChristoph Lameter up_write(&mm->mmap_sem);
1252b05ca738SKOSAKI Motohiro mpol_out:
1253f0be3d32SLee Schermerhorn mpol_put(new);
12546ce3c4c0SChristoph Lameter return err;
12556ce3c4c0SChristoph Lameter }
12566ce3c4c0SChristoph Lameter
125739743889SChristoph Lameter /*
12588bccd85fSChristoph Lameter * User space interface with variable sized bitmaps for nodelists.
12598bccd85fSChristoph Lameter */
12608bccd85fSChristoph Lameter
12618bccd85fSChristoph Lameter /* Copy a node mask from user space. */
126239743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
12638bccd85fSChristoph Lameter unsigned long maxnode)
12648bccd85fSChristoph Lameter {
12658bccd85fSChristoph Lameter unsigned long k;
126656521e7aSYisheng Xie unsigned long t;
12678bccd85fSChristoph Lameter unsigned long nlongs;
12688bccd85fSChristoph Lameter unsigned long endmask;
12698bccd85fSChristoph Lameter
12708bccd85fSChristoph Lameter --maxnode;
12718bccd85fSChristoph Lameter nodes_clear(*nodes);
12728bccd85fSChristoph Lameter if (maxnode == 0 || !nmask)
12738bccd85fSChristoph Lameter return 0;
1274a9c930baSAndi Kleen if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1275636f13c1SChris Wright return -EINVAL;
12768bccd85fSChristoph Lameter
12778bccd85fSChristoph Lameter nlongs = BITS_TO_LONGS(maxnode);
12788bccd85fSChristoph Lameter if ((maxnode % BITS_PER_LONG) == 0)
12798bccd85fSChristoph Lameter endmask = ~0UL;
12808bccd85fSChristoph Lameter else
12818bccd85fSChristoph Lameter endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
12828bccd85fSChristoph Lameter
128356521e7aSYisheng Xie /*
128456521e7aSYisheng Xie * When the user specifies more nodes than supported, just check
128556521e7aSYisheng Xie * if the non-supported part is all zero.
128656521e7aSYisheng Xie *
128756521e7aSYisheng Xie * If maxnode has more longs than MAX_NUMNODES, check
128856521e7aSYisheng Xie * the bits in that area first, then go through and check
128956521e7aSYisheng Xie * the remaining bits, which are equal to or bigger than MAX_NUMNODES.
129056521e7aSYisheng Xie * Otherwise, just check bits [MAX_NUMNODES, maxnode).
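 *
 * Example (illustrative, 64-bit): with MAX_NUMNODES = 1024, a user
 * maxnode of 1025 decrements to 1024, giving nlongs = 16 and
 * endmask = ~0UL, so exactly the sixteen longs of a full kernel
 * nodemask are copied and no tail bits need special checking.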
129156521e7aSYisheng Xie */ 12928bccd85fSChristoph Lameter if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) { 12938bccd85fSChristoph Lameter for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) { 12948bccd85fSChristoph Lameter if (get_user(t, nmask + k)) 12958bccd85fSChristoph Lameter return -EFAULT; 12968bccd85fSChristoph Lameter if (k == nlongs - 1) { 12978bccd85fSChristoph Lameter if (t & endmask) 12988bccd85fSChristoph Lameter return -EINVAL; 12998bccd85fSChristoph Lameter } else if (t) 13008bccd85fSChristoph Lameter return -EINVAL; 13018bccd85fSChristoph Lameter } 13028bccd85fSChristoph Lameter nlongs = BITS_TO_LONGS(MAX_NUMNODES); 13038bccd85fSChristoph Lameter endmask = ~0UL; 13048bccd85fSChristoph Lameter } 13058bccd85fSChristoph Lameter 130656521e7aSYisheng Xie if (maxnode > MAX_NUMNODES && MAX_NUMNODES % BITS_PER_LONG != 0) { 130756521e7aSYisheng Xie unsigned long valid_mask = endmask; 130856521e7aSYisheng Xie 130956521e7aSYisheng Xie valid_mask &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1); 131056521e7aSYisheng Xie if (get_user(t, nmask + nlongs - 1)) 131156521e7aSYisheng Xie return -EFAULT; 131256521e7aSYisheng Xie if (t & valid_mask) 131356521e7aSYisheng Xie return -EINVAL; 131456521e7aSYisheng Xie } 131556521e7aSYisheng Xie 13168bccd85fSChristoph Lameter if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long))) 13178bccd85fSChristoph Lameter return -EFAULT; 13188bccd85fSChristoph Lameter nodes_addr(*nodes)[nlongs-1] &= endmask; 13198bccd85fSChristoph Lameter return 0; 13208bccd85fSChristoph Lameter } 13218bccd85fSChristoph Lameter 13228bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */ 13238bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode, 13248bccd85fSChristoph Lameter nodemask_t *nodes) 13258bccd85fSChristoph Lameter { 13268bccd85fSChristoph Lameter unsigned long copy = ALIGN(maxnode-1, 64) / 8; 13278bccd85fSChristoph Lameter const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long); 13288bccd85fSChristoph Lameter 13298bccd85fSChristoph Lameter if (copy > nbytes) { 13308bccd85fSChristoph Lameter if (copy > PAGE_SIZE) 13318bccd85fSChristoph Lameter return -EINVAL; 13328bccd85fSChristoph Lameter if (clear_user((char __user *)mask + nbytes, copy - nbytes)) 13338bccd85fSChristoph Lameter return -EFAULT; 13348bccd85fSChristoph Lameter copy = nbytes; 13358bccd85fSChristoph Lameter } 13368bccd85fSChristoph Lameter return copy_to_user(mask, nodes_addr(*nodes), copy) ? 
-EFAULT : 0; 13378bccd85fSChristoph Lameter } 13388bccd85fSChristoph Lameter 1339938bb9f5SHeiko Carstens SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len, 1340f7f28ca9SRasmus Villemoes unsigned long, mode, const unsigned long __user *, nmask, 1341938bb9f5SHeiko Carstens unsigned long, maxnode, unsigned, flags) 13428bccd85fSChristoph Lameter { 13438bccd85fSChristoph Lameter nodemask_t nodes; 13448bccd85fSChristoph Lameter int err; 1345028fec41SDavid Rientjes unsigned short mode_flags; 13468bccd85fSChristoph Lameter 1347028fec41SDavid Rientjes mode_flags = mode & MPOL_MODE_FLAGS; 1348028fec41SDavid Rientjes mode &= ~MPOL_MODE_FLAGS; 1349a3b51e01SDavid Rientjes if (mode >= MPOL_MAX) 1350a3b51e01SDavid Rientjes return -EINVAL; 13514c50bc01SDavid Rientjes if ((mode_flags & MPOL_F_STATIC_NODES) && 13524c50bc01SDavid Rientjes (mode_flags & MPOL_F_RELATIVE_NODES)) 13534c50bc01SDavid Rientjes return -EINVAL; 13548bccd85fSChristoph Lameter err = get_nodes(&nodes, nmask, maxnode); 13558bccd85fSChristoph Lameter if (err) 13568bccd85fSChristoph Lameter return err; 1357028fec41SDavid Rientjes return do_mbind(start, len, mode, mode_flags, &nodes, flags); 13588bccd85fSChristoph Lameter } 13598bccd85fSChristoph Lameter 13608bccd85fSChristoph Lameter /* Set the process memory policy */ 136123c8902dSRasmus Villemoes SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask, 1362938bb9f5SHeiko Carstens unsigned long, maxnode) 13638bccd85fSChristoph Lameter { 13648bccd85fSChristoph Lameter int err; 13658bccd85fSChristoph Lameter nodemask_t nodes; 1366028fec41SDavid Rientjes unsigned short flags; 13678bccd85fSChristoph Lameter 1368028fec41SDavid Rientjes flags = mode & MPOL_MODE_FLAGS; 1369028fec41SDavid Rientjes mode &= ~MPOL_MODE_FLAGS; 1370028fec41SDavid Rientjes if ((unsigned int)mode >= MPOL_MAX) 13718bccd85fSChristoph Lameter return -EINVAL; 13724c50bc01SDavid Rientjes if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES)) 13734c50bc01SDavid Rientjes return -EINVAL; 13748bccd85fSChristoph Lameter err = get_nodes(&nodes, nmask, maxnode); 13758bccd85fSChristoph Lameter if (err) 13768bccd85fSChristoph Lameter return err; 1377028fec41SDavid Rientjes return do_set_mempolicy(mode, flags, &nodes); 13788bccd85fSChristoph Lameter } 13798bccd85fSChristoph Lameter 1380938bb9f5SHeiko Carstens SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode, 1381938bb9f5SHeiko Carstens const unsigned long __user *, old_nodes, 1382938bb9f5SHeiko Carstens const unsigned long __user *, new_nodes) 138339743889SChristoph Lameter { 1384596d7cfaSKOSAKI Motohiro struct mm_struct *mm = NULL; 138539743889SChristoph Lameter struct task_struct *task; 138639743889SChristoph Lameter nodemask_t task_nodes; 138739743889SChristoph Lameter int err; 1388596d7cfaSKOSAKI Motohiro nodemask_t *old; 1389596d7cfaSKOSAKI Motohiro nodemask_t *new; 1390596d7cfaSKOSAKI Motohiro NODEMASK_SCRATCH(scratch); 139139743889SChristoph Lameter 1392596d7cfaSKOSAKI Motohiro if (!scratch) 1393596d7cfaSKOSAKI Motohiro return -ENOMEM; 139439743889SChristoph Lameter 1395596d7cfaSKOSAKI Motohiro old = &scratch->mask1; 1396596d7cfaSKOSAKI Motohiro new = &scratch->mask2; 1397596d7cfaSKOSAKI Motohiro 1398596d7cfaSKOSAKI Motohiro err = get_nodes(old, old_nodes, maxnode); 139939743889SChristoph Lameter if (err) 1400596d7cfaSKOSAKI Motohiro goto out; 1401596d7cfaSKOSAKI Motohiro 1402596d7cfaSKOSAKI Motohiro err = get_nodes(new, new_nodes, maxnode); 1403596d7cfaSKOSAKI Motohiro if (err) 1404596d7cfaSKOSAKI Motohiro 
goto out; 140539743889SChristoph Lameter 140639743889SChristoph Lameter /* Find the mm_struct */ 140755cfaa3cSZeng Zhaoming rcu_read_lock(); 1408228ebcbeSPavel Emelyanov task = pid ? find_task_by_vpid(pid) : current; 140939743889SChristoph Lameter if (!task) { 141055cfaa3cSZeng Zhaoming rcu_read_unlock(); 1411596d7cfaSKOSAKI Motohiro err = -ESRCH; 1412596d7cfaSKOSAKI Motohiro goto out; 141339743889SChristoph Lameter } 14143268c63eSChristoph Lameter get_task_struct(task); 141539743889SChristoph Lameter 1416596d7cfaSKOSAKI Motohiro err = -EINVAL; 141739743889SChristoph Lameter 141839743889SChristoph Lameter /* 141931367466SOtto Ebeling * Check if this process has the right to modify the specified process. 142031367466SOtto Ebeling * Use the regular "ptrace_may_access()" checks. 142139743889SChristoph Lameter */ 142231367466SOtto Ebeling if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) { 1423c69e8d9cSDavid Howells rcu_read_unlock(); 142439743889SChristoph Lameter err = -EPERM; 14253268c63eSChristoph Lameter goto out_put; 142639743889SChristoph Lameter } 1427c69e8d9cSDavid Howells rcu_read_unlock(); 142839743889SChristoph Lameter 142939743889SChristoph Lameter task_nodes = cpuset_mems_allowed(task); 143039743889SChristoph Lameter /* Is the user allowed to access the target nodes? */ 1431596d7cfaSKOSAKI Motohiro if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) { 143239743889SChristoph Lameter err = -EPERM; 14333268c63eSChristoph Lameter goto out_put; 143439743889SChristoph Lameter } 143539743889SChristoph Lameter 14360486a38bSYisheng Xie task_nodes = cpuset_mems_allowed(current); 14370486a38bSYisheng Xie nodes_and(*new, *new, task_nodes); 14380486a38bSYisheng Xie if (nodes_empty(*new)) 14393268c63eSChristoph Lameter goto out_put; 14400486a38bSYisheng Xie 14410486a38bSYisheng Xie nodes_and(*new, *new, node_states[N_MEMORY]); 14420486a38bSYisheng Xie if (nodes_empty(*new)) 14430486a38bSYisheng Xie goto out_put; 14443b42d28bSChristoph Lameter 144586c3a764SDavid Quigley err = security_task_movememory(task); 144686c3a764SDavid Quigley if (err) 14473268c63eSChristoph Lameter goto out_put; 144886c3a764SDavid Quigley 14493268c63eSChristoph Lameter mm = get_task_mm(task); 14503268c63eSChristoph Lameter put_task_struct(task); 1451f2a9ef88SSasha Levin 1452f2a9ef88SSasha Levin if (!mm) { 1453f2a9ef88SSasha Levin err = -EINVAL; 1454f2a9ef88SSasha Levin goto out; 1455f2a9ef88SSasha Levin } 1456f2a9ef88SSasha Levin 1457596d7cfaSKOSAKI Motohiro err = do_migrate_pages(mm, old, new, 145874c00241SChristoph Lameter capable(CAP_SYS_NICE) ? 
MPOL_MF_MOVE_ALL : MPOL_MF_MOVE); 14593268c63eSChristoph Lameter 146039743889SChristoph Lameter mmput(mm); 14613268c63eSChristoph Lameter out: 1462596d7cfaSKOSAKI Motohiro NODEMASK_SCRATCH_FREE(scratch); 1463596d7cfaSKOSAKI Motohiro 146439743889SChristoph Lameter return err; 14653268c63eSChristoph Lameter 14663268c63eSChristoph Lameter out_put: 14673268c63eSChristoph Lameter put_task_struct(task); 14683268c63eSChristoph Lameter goto out; 14693268c63eSChristoph Lameter 147039743889SChristoph Lameter } 147139743889SChristoph Lameter 147239743889SChristoph Lameter 14738bccd85fSChristoph Lameter /* Retrieve NUMA policy */ 1474938bb9f5SHeiko Carstens SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, 1475938bb9f5SHeiko Carstens unsigned long __user *, nmask, unsigned long, maxnode, 1476938bb9f5SHeiko Carstens unsigned long, addr, unsigned long, flags) 14778bccd85fSChristoph Lameter { 1478dbcb0f19SAdrian Bunk int err; 1479dbcb0f19SAdrian Bunk int uninitialized_var(pval); 14808bccd85fSChristoph Lameter nodemask_t nodes; 14818bccd85fSChristoph Lameter 14828bccd85fSChristoph Lameter if (nmask != NULL && maxnode < MAX_NUMNODES) 14838bccd85fSChristoph Lameter return -EINVAL; 14848bccd85fSChristoph Lameter 14858bccd85fSChristoph Lameter err = do_get_mempolicy(&pval, &nodes, addr, flags); 14868bccd85fSChristoph Lameter 14878bccd85fSChristoph Lameter if (err) 14888bccd85fSChristoph Lameter return err; 14898bccd85fSChristoph Lameter 14908bccd85fSChristoph Lameter if (policy && put_user(pval, policy)) 14918bccd85fSChristoph Lameter return -EFAULT; 14928bccd85fSChristoph Lameter 14938bccd85fSChristoph Lameter if (nmask) 14948bccd85fSChristoph Lameter err = copy_nodes_to_user(nmask, maxnode, &nodes); 14958bccd85fSChristoph Lameter 14968bccd85fSChristoph Lameter return err; 14978bccd85fSChristoph Lameter } 14988bccd85fSChristoph Lameter 14991da177e4SLinus Torvalds #ifdef CONFIG_COMPAT 15001da177e4SLinus Torvalds 1501c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, 1502c93e0f6cSHeiko Carstens compat_ulong_t __user *, nmask, 1503c93e0f6cSHeiko Carstens compat_ulong_t, maxnode, 1504c93e0f6cSHeiko Carstens compat_ulong_t, addr, compat_ulong_t, flags) 15051da177e4SLinus Torvalds { 15061da177e4SLinus Torvalds long err; 15071da177e4SLinus Torvalds unsigned long __user *nm = NULL; 15081da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 15091da177e4SLinus Torvalds DECLARE_BITMAP(bm, MAX_NUMNODES); 15101da177e4SLinus Torvalds 15111da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 15121da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 15131da177e4SLinus Torvalds 15141da177e4SLinus Torvalds if (nmask) 15151da177e4SLinus Torvalds nm = compat_alloc_user_space(alloc_size); 15161da177e4SLinus Torvalds 15171da177e4SLinus Torvalds err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags); 15181da177e4SLinus Torvalds 15191da177e4SLinus Torvalds if (!err && nmask) { 15202bbff6c7SKAMEZAWA Hiroyuki unsigned long copy_size; 15212bbff6c7SKAMEZAWA Hiroyuki copy_size = min_t(unsigned long, sizeof(bm), alloc_size); 15222bbff6c7SKAMEZAWA Hiroyuki err = copy_from_user(bm, nm, copy_size); 15231da177e4SLinus Torvalds /* ensure entire bitmap is zeroed */ 15241da177e4SLinus Torvalds err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8); 15251da177e4SLinus Torvalds err |= compat_put_bitmap(nmask, bm, nr_bits); 15261da177e4SLinus Torvalds } 15271da177e4SLinus Torvalds 15281da177e4SLinus Torvalds return err; 15291da177e4SLinus Torvalds } 
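/*
 * Illustrative userspace usage of the syscalls above -- a sketch for
 * this write-up, not kernel code, assuming the mbind()/set_mempolicy()
 * wrappers declared in libnuma's <numaif.h>:
 *
 *	unsigned long mask = 1UL << 0;		// node 0 only
 *
 *	// Bind [addr, addr + len) to node 0, migrating existing pages.
 *	// (addr and len are a page-aligned range mapped by this task.)
 *	if (mbind(addr, len, MPOL_BIND, &mask, sizeof(mask) * 8, MPOL_MF_MOVE))
 *		perror("mbind");
 *
 *	// Interleave this task's future allocations over nodes 0 and 1.
 *	mask = (1UL << 0) | (1UL << 1);
 *	if (set_mempolicy(MPOL_INTERLEAVE, &mask, sizeof(mask) * 8))
 *		perror("set_mempolicy");
 *
 * maxnode bounds how many bits of the mask the kernel may read; see
 * get_nodes() above for how it is decremented, truncated and validated.
 */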
15301da177e4SLinus Torvalds 1531c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask, 1532c93e0f6cSHeiko Carstens compat_ulong_t, maxnode) 15331da177e4SLinus Torvalds { 15341da177e4SLinus Torvalds unsigned long __user *nm = NULL; 15351da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 15361da177e4SLinus Torvalds DECLARE_BITMAP(bm, MAX_NUMNODES); 15371da177e4SLinus Torvalds 15381da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 15391da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 15401da177e4SLinus Torvalds 15411da177e4SLinus Torvalds if (nmask) { 1542cf01fb99SChris Salls if (compat_get_bitmap(bm, nmask, nr_bits)) 15431da177e4SLinus Torvalds return -EFAULT; 1544cf01fb99SChris Salls nm = compat_alloc_user_space(alloc_size); 1545cf01fb99SChris Salls if (copy_to_user(nm, bm, alloc_size)) 1546cf01fb99SChris Salls return -EFAULT; 1547cf01fb99SChris Salls } 15481da177e4SLinus Torvalds 15491da177e4SLinus Torvalds return sys_set_mempolicy(mode, nm, nr_bits+1); 15501da177e4SLinus Torvalds } 15511da177e4SLinus Torvalds 1552c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len, 1553c93e0f6cSHeiko Carstens compat_ulong_t, mode, compat_ulong_t __user *, nmask, 1554c93e0f6cSHeiko Carstens compat_ulong_t, maxnode, compat_ulong_t, flags) 15551da177e4SLinus Torvalds { 15561da177e4SLinus Torvalds unsigned long __user *nm = NULL; 15571da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 1558dfcd3c0dSAndi Kleen nodemask_t bm; 15591da177e4SLinus Torvalds 15601da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 15611da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 15621da177e4SLinus Torvalds 15631da177e4SLinus Torvalds if (nmask) { 1564cf01fb99SChris Salls if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits)) 15651da177e4SLinus Torvalds return -EFAULT; 1566cf01fb99SChris Salls nm = compat_alloc_user_space(alloc_size); 1567cf01fb99SChris Salls if (copy_to_user(nm, nodes_addr(bm), alloc_size)) 1568cf01fb99SChris Salls return -EFAULT; 1569cf01fb99SChris Salls } 15701da177e4SLinus Torvalds 15711da177e4SLinus Torvalds return sys_mbind(start, len, mode, nm, nr_bits+1, flags); 15721da177e4SLinus Torvalds } 15731da177e4SLinus Torvalds 15741da177e4SLinus Torvalds #endif 15751da177e4SLinus Torvalds 157674d2c3a0SOleg Nesterov struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, 157774d2c3a0SOleg Nesterov unsigned long addr) 15781da177e4SLinus Torvalds { 15798d90274bSOleg Nesterov struct mempolicy *pol = NULL; 15801da177e4SLinus Torvalds 15811da177e4SLinus Torvalds if (vma) { 1582480eccf9SLee Schermerhorn if (vma->vm_ops && vma->vm_ops->get_policy) { 15838d90274bSOleg Nesterov pol = vma->vm_ops->get_policy(vma, addr); 158400442ad0SMel Gorman } else if (vma->vm_policy) { 15851da177e4SLinus Torvalds pol = vma->vm_policy; 158600442ad0SMel Gorman 158700442ad0SMel Gorman /* 158800442ad0SMel Gorman * shmem_alloc_page() passes MPOL_F_SHARED policy with 158900442ad0SMel Gorman * a pseudo vma whose vma->vm_ops=NULL. 
Take a reference
159000442ad0SMel Gorman * count on these policies which will be dropped by
159100442ad0SMel Gorman * mpol_cond_put() later
159200442ad0SMel Gorman */
159300442ad0SMel Gorman if (mpol_needs_cond_ref(pol))
159400442ad0SMel Gorman mpol_get(pol);
159500442ad0SMel Gorman }
15961da177e4SLinus Torvalds }
1597f15ca78eSOleg Nesterov
159874d2c3a0SOleg Nesterov return pol;
159974d2c3a0SOleg Nesterov }
160074d2c3a0SOleg Nesterov
160174d2c3a0SOleg Nesterov /*
1602dd6eecb9SOleg Nesterov * get_vma_policy(@vma, @addr)
160374d2c3a0SOleg Nesterov * @vma: virtual memory area whose policy is sought
160474d2c3a0SOleg Nesterov * @addr: address in @vma for shared policy lookup
160574d2c3a0SOleg Nesterov *
160674d2c3a0SOleg Nesterov * Returns effective policy for a VMA at specified address.
1607dd6eecb9SOleg Nesterov * Falls back to current->mempolicy or system default policy, as necessary.
160874d2c3a0SOleg Nesterov * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
160974d2c3a0SOleg Nesterov * count--added by the get_policy() vm_op, as appropriate--to protect against
161074d2c3a0SOleg Nesterov * freeing by another task. It is the caller's responsibility to free the
161174d2c3a0SOleg Nesterov * extra reference for shared policies.
161274d2c3a0SOleg Nesterov */
1613dd6eecb9SOleg Nesterov static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1614dd6eecb9SOleg Nesterov unsigned long addr)
161574d2c3a0SOleg Nesterov {
161674d2c3a0SOleg Nesterov struct mempolicy *pol = __get_vma_policy(vma, addr);
161774d2c3a0SOleg Nesterov
16188d90274bSOleg Nesterov if (!pol)
1619dd6eecb9SOleg Nesterov pol = get_task_policy(current);
16208d90274bSOleg Nesterov
16211da177e4SLinus Torvalds return pol;
16221da177e4SLinus Torvalds }
16231da177e4SLinus Torvalds
16246b6482bbSOleg Nesterov bool vma_policy_mof(struct vm_area_struct *vma)
1625fc314724SMel Gorman {
16266b6482bbSOleg Nesterov struct mempolicy *pol;
1627f15ca78eSOleg Nesterov
1628fc314724SMel Gorman if (vma->vm_ops && vma->vm_ops->get_policy) {
1629fc314724SMel Gorman bool ret = false;
1630fc314724SMel Gorman
1631fc314724SMel Gorman pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1632fc314724SMel Gorman if (pol && (pol->flags & MPOL_F_MOF))
1633fc314724SMel Gorman ret = true;
1634fc314724SMel Gorman mpol_cond_put(pol);
1635fc314724SMel Gorman
1636fc314724SMel Gorman return ret;
16378d90274bSOleg Nesterov }
16388d90274bSOleg Nesterov
1639fc314724SMel Gorman pol = vma->vm_policy;
16408d90274bSOleg Nesterov if (!pol)
16416b6482bbSOleg Nesterov pol = get_task_policy(current);
1642fc314724SMel Gorman
1643fc314724SMel Gorman return pol->flags & MPOL_F_MOF;
1644fc314724SMel Gorman }
1645fc314724SMel Gorman
1646d3eb1570SLai Jiangshan static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1647d3eb1570SLai Jiangshan {
1648d3eb1570SLai Jiangshan enum zone_type dynamic_policy_zone = policy_zone;
1649d3eb1570SLai Jiangshan
1650d3eb1570SLai Jiangshan BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1651d3eb1570SLai Jiangshan
1652d3eb1570SLai Jiangshan /*
1653d3eb1570SLai Jiangshan * if policy->v.nodes has movable memory only,
1654d3eb1570SLai Jiangshan * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
1655d3eb1570SLai Jiangshan *
1656d3eb1570SLai Jiangshan * policy->v.nodes is intersected with node_states[N_MEMORY],
1657d3eb1570SLai Jiangshan * so if the following test fails, it implies
1658d3eb1570SLai Jiangshan * policy->v.nodes has movable memory only.
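 *
 * Example (illustrative): a node whose memory is entirely in
 * ZONE_MOVABLE (e.g. onlined as movable) is in N_MEMORY but not in
 * N_HIGH_MEMORY, so a policy naming only such nodes is applied just
 * to allocations that can use ZONE_MOVABLE.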
1659d3eb1570SLai Jiangshan */ 1660d3eb1570SLai Jiangshan if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY])) 1661d3eb1570SLai Jiangshan dynamic_policy_zone = ZONE_MOVABLE; 1662d3eb1570SLai Jiangshan 1663d3eb1570SLai Jiangshan return zone >= dynamic_policy_zone; 1664d3eb1570SLai Jiangshan } 1665d3eb1570SLai Jiangshan 166652cd3b07SLee Schermerhorn /* 166752cd3b07SLee Schermerhorn * Return a nodemask representing a mempolicy for filtering nodes for 166852cd3b07SLee Schermerhorn * page allocation 166952cd3b07SLee Schermerhorn */ 167052cd3b07SLee Schermerhorn static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy) 167119770b32SMel Gorman { 167219770b32SMel Gorman /* Lower zones don't get a nodemask applied for MPOL_BIND */ 167345c4745aSLee Schermerhorn if (unlikely(policy->mode == MPOL_BIND) && 1674d3eb1570SLai Jiangshan apply_policy_zone(policy, gfp_zone(gfp)) && 167519770b32SMel Gorman cpuset_nodemask_valid_mems_allowed(&policy->v.nodes)) 167619770b32SMel Gorman return &policy->v.nodes; 167719770b32SMel Gorman 167819770b32SMel Gorman return NULL; 167919770b32SMel Gorman } 168019770b32SMel Gorman 168104ec6264SVlastimil Babka /* Return the node id preferred by the given mempolicy, or the given id */ 168204ec6264SVlastimil Babka static int policy_node(gfp_t gfp, struct mempolicy *policy, 16832f5f9486SAndi Kleen int nd) 16841da177e4SLinus Torvalds { 16856d840958SMichal Hocko if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL)) 16861da177e4SLinus Torvalds nd = policy->v.preferred_node; 16876d840958SMichal Hocko else { 168819770b32SMel Gorman /* 16896d840958SMichal Hocko * __GFP_THISNODE shouldn't even be used with the bind policy 16906d840958SMichal Hocko * because we might easily break the expectation to stay on the 16916d840958SMichal Hocko * requested node and not break the policy. 169219770b32SMel Gorman */ 16936d840958SMichal Hocko WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE)); 16941da177e4SLinus Torvalds } 16956d840958SMichal Hocko 169604ec6264SVlastimil Babka return nd; 16971da177e4SLinus Torvalds } 16981da177e4SLinus Torvalds 16991da177e4SLinus Torvalds /* Do dynamic interleaving for a process */ 17001da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy) 17011da177e4SLinus Torvalds { 170245816682SVlastimil Babka unsigned next; 17031da177e4SLinus Torvalds struct task_struct *me = current; 17041da177e4SLinus Torvalds 170545816682SVlastimil Babka next = next_node_in(me->il_prev, policy->v.nodes); 1706f5b087b5SDavid Rientjes if (next < MAX_NUMNODES) 170745816682SVlastimil Babka me->il_prev = next; 170845816682SVlastimil Babka return next; 17091da177e4SLinus Torvalds } 17101da177e4SLinus Torvalds 1711dc85da15SChristoph Lameter /* 1712dc85da15SChristoph Lameter * Depending on the memory policy provide a node from which to allocate the 1713dc85da15SChristoph Lameter * next slab entry. 
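 *
 * E.g. MPOL_INTERLEAVE advances round-robin via interleave_nodes(),
 * while MPOL_BIND picks the first node of the policy's nodemask in
 * the local node's zonelist order, as the switch below shows.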
1714dc85da15SChristoph Lameter */ 17152a389610SDavid Rientjes unsigned int mempolicy_slab_node(void) 1716dc85da15SChristoph Lameter { 1717e7b691b0SAndi Kleen struct mempolicy *policy; 17182a389610SDavid Rientjes int node = numa_mem_id(); 1719e7b691b0SAndi Kleen 1720e7b691b0SAndi Kleen if (in_interrupt()) 17212a389610SDavid Rientjes return node; 1722e7b691b0SAndi Kleen 1723e7b691b0SAndi Kleen policy = current->mempolicy; 1724fc36b8d3SLee Schermerhorn if (!policy || policy->flags & MPOL_F_LOCAL) 17252a389610SDavid Rientjes return node; 1726765c4507SChristoph Lameter 1727bea904d5SLee Schermerhorn switch (policy->mode) { 1728bea904d5SLee Schermerhorn case MPOL_PREFERRED: 1729fc36b8d3SLee Schermerhorn /* 1730fc36b8d3SLee Schermerhorn * handled MPOL_F_LOCAL above 1731fc36b8d3SLee Schermerhorn */ 1732bea904d5SLee Schermerhorn return policy->v.preferred_node; 1733bea904d5SLee Schermerhorn 1734dc85da15SChristoph Lameter case MPOL_INTERLEAVE: 1735dc85da15SChristoph Lameter return interleave_nodes(policy); 1736dc85da15SChristoph Lameter 1737dd1a239fSMel Gorman case MPOL_BIND: { 1738c33d6c06SMel Gorman struct zoneref *z; 1739c33d6c06SMel Gorman 1740dc85da15SChristoph Lameter /* 1741dc85da15SChristoph Lameter * Follow bind policy behavior and start allocation at the 1742dc85da15SChristoph Lameter * first node. 1743dc85da15SChristoph Lameter */ 174419770b32SMel Gorman struct zonelist *zonelist; 174519770b32SMel Gorman enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL); 1746c9634cf0SAneesh Kumar K.V zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK]; 1747c33d6c06SMel Gorman z = first_zones_zonelist(zonelist, highest_zoneidx, 1748c33d6c06SMel Gorman &policy->v.nodes); 1749c33d6c06SMel Gorman return z->zone ? z->zone->node : node; 1750dd1a239fSMel Gorman } 1751dc85da15SChristoph Lameter 1752dc85da15SChristoph Lameter default: 1753bea904d5SLee Schermerhorn BUG(); 1754dc85da15SChristoph Lameter } 1755dc85da15SChristoph Lameter } 1756dc85da15SChristoph Lameter 1757fee83b3aSAndrew Morton /* 1758fee83b3aSAndrew Morton * Do static interleaving for a VMA with known offset @n. Returns the n'th 1759fee83b3aSAndrew Morton * node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the 1760fee83b3aSAndrew Morton * number of present nodes. 
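 *
 * Example (illustrative): with pol->v.nodes = {0,2,5} and n = 7,
 * target = 7 % 3 = 1, so the walk below returns the second set node,
 * i.e. node 2.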
1761fee83b3aSAndrew Morton */
176298c70baaSLaurent Dufour static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
17631da177e4SLinus Torvalds {
1764dfcd3c0dSAndi Kleen unsigned nnodes = nodes_weight(pol->v.nodes);
1765f5b087b5SDavid Rientjes unsigned target;
1766fee83b3aSAndrew Morton int i;
1767fee83b3aSAndrew Morton int nid;
17681da177e4SLinus Torvalds
1769f5b087b5SDavid Rientjes if (!nnodes)
1770f5b087b5SDavid Rientjes return numa_node_id();
1771fee83b3aSAndrew Morton target = (unsigned int)n % nnodes;
1772fee83b3aSAndrew Morton nid = first_node(pol->v.nodes);
1773fee83b3aSAndrew Morton for (i = 0; i < target; i++)
1774dfcd3c0dSAndi Kleen nid = next_node(nid, pol->v.nodes);
17751da177e4SLinus Torvalds return nid;
17761da177e4SLinus Torvalds }
17771da177e4SLinus Torvalds
17785da7ca86SChristoph Lameter /* Determine a node number for interleave */
17795da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol,
17805da7ca86SChristoph Lameter struct vm_area_struct *vma, unsigned long addr, int shift)
17815da7ca86SChristoph Lameter {
17825da7ca86SChristoph Lameter if (vma) {
17835da7ca86SChristoph Lameter unsigned long off;
17845da7ca86SChristoph Lameter
17853b98b087SNishanth Aravamudan /*
17863b98b087SNishanth Aravamudan * for small pages, there is no difference between
17873b98b087SNishanth Aravamudan * shift and PAGE_SHIFT, so the bit-shift is safe.
17883b98b087SNishanth Aravamudan * for huge pages, since vm_pgoff is in units of small
17893b98b087SNishanth Aravamudan * pages, we need to shift off the always 0 bits to get
17903b98b087SNishanth Aravamudan * a useful offset.
17913b98b087SNishanth Aravamudan */
17923b98b087SNishanth Aravamudan BUG_ON(shift < PAGE_SHIFT);
17933b98b087SNishanth Aravamudan off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
17945da7ca86SChristoph Lameter off += (addr - vma->vm_start) >> shift;
179598c70baaSLaurent Dufour return offset_il_node(pol, off);
17965da7ca86SChristoph Lameter } else
17975da7ca86SChristoph Lameter return interleave_nodes(pol);
17985da7ca86SChristoph Lameter }
17995da7ca86SChristoph Lameter
180000ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS
1801480eccf9SLee Schermerhorn /*
180204ec6264SVlastimil Babka * huge_node(@vma, @addr, @gfp_flags, @mpol)
1803b46e14acSFabian Frederick * @vma: virtual memory area whose policy is sought
1804b46e14acSFabian Frederick * @addr: address in @vma for shared policy lookup and interleave policy
1805b46e14acSFabian Frederick * @gfp_flags: for requested zone
1806b46e14acSFabian Frederick * @mpol: pointer to mempolicy pointer for reference counted mempolicy
1807b46e14acSFabian Frederick * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
1808480eccf9SLee Schermerhorn *
180904ec6264SVlastimil Babka * Returns a nid suitable for a huge page allocation and a pointer
181052cd3b07SLee Schermerhorn * to the struct mempolicy for conditional unref after allocation.
181152cd3b07SLee Schermerhorn * If the effective policy is 'BIND', returns a pointer to the mempolicy's
181252cd3b07SLee Schermerhorn * @nodemask for filtering the zonelist.
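 *
 * Example (illustrative): for an MPOL_INTERLEAVE VMA the returned nid
 * cycles with the huge-page-aligned offset and *nodemask stays NULL;
 * for MPOL_BIND the nid is only a preference and *nodemask carries
 * the real restriction.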
1813c0ff7453SMiao Xie *
1814d26914d1SMel Gorman * Must be protected by read_mems_allowed_begin()
1815480eccf9SLee Schermerhorn */
181604ec6264SVlastimil Babka int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
181704ec6264SVlastimil Babka struct mempolicy **mpol, nodemask_t **nodemask)
18185da7ca86SChristoph Lameter {
181904ec6264SVlastimil Babka int nid;
18205da7ca86SChristoph Lameter
1821dd6eecb9SOleg Nesterov *mpol = get_vma_policy(vma, addr);
182219770b32SMel Gorman *nodemask = NULL; /* assume !MPOL_BIND */
18235da7ca86SChristoph Lameter
182452cd3b07SLee Schermerhorn if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
182504ec6264SVlastimil Babka nid = interleave_nid(*mpol, vma, addr,
182604ec6264SVlastimil Babka huge_page_shift(hstate_vma(vma)));
182752cd3b07SLee Schermerhorn } else {
182804ec6264SVlastimil Babka nid = policy_node(gfp_flags, *mpol, numa_node_id());
182952cd3b07SLee Schermerhorn if ((*mpol)->mode == MPOL_BIND)
183052cd3b07SLee Schermerhorn *nodemask = &(*mpol)->v.nodes;
1831480eccf9SLee Schermerhorn }
183204ec6264SVlastimil Babka return nid;
18335da7ca86SChristoph Lameter }
183406808b08SLee Schermerhorn
183506808b08SLee Schermerhorn /*
183606808b08SLee Schermerhorn * init_nodemask_of_mempolicy
183706808b08SLee Schermerhorn *
183806808b08SLee Schermerhorn * If the current task's mempolicy is "default" [NULL], return 'false'
183906808b08SLee Schermerhorn * to indicate default policy. Otherwise, extract the policy nodemask
184006808b08SLee Schermerhorn * for 'bind' or 'interleave' policy into the argument nodemask, or
184106808b08SLee Schermerhorn * initialize the argument nodemask to contain the single node for
184206808b08SLee Schermerhorn * 'preferred' or 'local' policy and return 'true' to indicate presence
184306808b08SLee Schermerhorn * of non-default mempolicy.
184406808b08SLee Schermerhorn *
184506808b08SLee Schermerhorn * We don't bother with reference counting the mempolicy [mpol_get/put]
184606808b08SLee Schermerhorn * because the current task is examining its own mempolicy and a task's
184706808b08SLee Schermerhorn * mempolicy is only ever changed by the task itself.
184806808b08SLee Schermerhorn *
184906808b08SLee Schermerhorn * N.B., it is the caller's responsibility to free a returned nodemask.
185006808b08SLee Schermerhorn */
185106808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask)
185206808b08SLee Schermerhorn {
185306808b08SLee Schermerhorn struct mempolicy *mempolicy;
185406808b08SLee Schermerhorn int nid;
185506808b08SLee Schermerhorn
185606808b08SLee Schermerhorn if (!(mask && current->mempolicy))
185706808b08SLee Schermerhorn return false;
185806808b08SLee Schermerhorn
1859c0ff7453SMiao Xie task_lock(current);
186006808b08SLee Schermerhorn mempolicy = current->mempolicy;
186106808b08SLee Schermerhorn switch (mempolicy->mode) {
186206808b08SLee Schermerhorn case MPOL_PREFERRED:
186306808b08SLee Schermerhorn if (mempolicy->flags & MPOL_F_LOCAL)
186406808b08SLee Schermerhorn nid = numa_node_id();
186506808b08SLee Schermerhorn else
186606808b08SLee Schermerhorn nid = mempolicy->v.preferred_node;
186706808b08SLee Schermerhorn init_nodemask_of_node(mask, nid);
186806808b08SLee Schermerhorn break;
186906808b08SLee Schermerhorn
187006808b08SLee Schermerhorn case MPOL_BIND:
187106808b08SLee Schermerhorn /* Fall through */
187206808b08SLee Schermerhorn case MPOL_INTERLEAVE:
187306808b08SLee Schermerhorn *mask = mempolicy->v.nodes;
187406808b08SLee Schermerhorn break;
187506808b08SLee Schermerhorn
187606808b08SLee Schermerhorn default:
187706808b08SLee Schermerhorn BUG();
187806808b08SLee Schermerhorn }
1879c0ff7453SMiao Xie task_unlock(current);
188006808b08SLee Schermerhorn
188106808b08SLee Schermerhorn return true;
188206808b08SLee Schermerhorn }
188300ac59adSChen, Kenneth W #endif
18845da7ca86SChristoph Lameter
18856f48d0ebSDavid Rientjes /*
18866f48d0ebSDavid Rientjes * mempolicy_nodemask_intersects
18876f48d0ebSDavid Rientjes *
18886f48d0ebSDavid Rientjes * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
18896f48d0ebSDavid Rientjes * policy. Otherwise, check for intersection between mask and the policy
18906f48d0ebSDavid Rientjes * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local'
18916f48d0ebSDavid Rientjes * policy, always return true since it may allocate elsewhere on fallback.
18926f48d0ebSDavid Rientjes *
18936f48d0ebSDavid Rientjes * Takes task_lock(tsk) to prevent freeing of its mempolicy.
18946f48d0ebSDavid Rientjes */
18956f48d0ebSDavid Rientjes bool mempolicy_nodemask_intersects(struct task_struct *tsk,
18966f48d0ebSDavid Rientjes const nodemask_t *mask)
18976f48d0ebSDavid Rientjes {
18986f48d0ebSDavid Rientjes struct mempolicy *mempolicy;
18996f48d0ebSDavid Rientjes bool ret = true;
19006f48d0ebSDavid Rientjes
19016f48d0ebSDavid Rientjes if (!mask)
19026f48d0ebSDavid Rientjes return ret;
19036f48d0ebSDavid Rientjes task_lock(tsk);
19046f48d0ebSDavid Rientjes mempolicy = tsk->mempolicy;
19056f48d0ebSDavid Rientjes if (!mempolicy)
19066f48d0ebSDavid Rientjes goto out;
19076f48d0ebSDavid Rientjes
19086f48d0ebSDavid Rientjes switch (mempolicy->mode) {
19096f48d0ebSDavid Rientjes case MPOL_PREFERRED:
19106f48d0ebSDavid Rientjes /*
19116f48d0ebSDavid Rientjes * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to
19126f48d0ebSDavid Rientjes * allocate from, they may fallback to other nodes when oom.
19136f48d0ebSDavid Rientjes * Thus, it's possible for tsk to have allocated memory from
19146f48d0ebSDavid Rientjes * nodes in mask.
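 *
 * E.g. a task preferring node 0 may have fallen back to node 1, so
 * with mask = {1} we must still report a possible intersection and
 * leave ret == true here.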
19156f48d0ebSDavid Rientjes */
19166f48d0ebSDavid Rientjes break;
19176f48d0ebSDavid Rientjes case MPOL_BIND:
19186f48d0ebSDavid Rientjes case MPOL_INTERLEAVE:
19196f48d0ebSDavid Rientjes ret = nodes_intersects(mempolicy->v.nodes, *mask);
19206f48d0ebSDavid Rientjes break;
19216f48d0ebSDavid Rientjes default:
19226f48d0ebSDavid Rientjes BUG();
19236f48d0ebSDavid Rientjes }
19246f48d0ebSDavid Rientjes out:
19256f48d0ebSDavid Rientjes task_unlock(tsk);
19266f48d0ebSDavid Rientjes return ret;
19276f48d0ebSDavid Rientjes }
19286f48d0ebSDavid Rientjes
19291da177e4SLinus Torvalds /* Allocate a page in interleaved policy.
19301da177e4SLinus Torvalds Own path because it needs to do special accounting. */
1931662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1932662f3a0bSAndi Kleen unsigned nid)
19331da177e4SLinus Torvalds {
19341da177e4SLinus Torvalds struct page *page;
19351da177e4SLinus Torvalds
193604ec6264SVlastimil Babka page = __alloc_pages(gfp, order, nid);
19374518085eSKemi Wang /* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
19384518085eSKemi Wang if (!static_branch_likely(&vm_numa_stat_key))
19394518085eSKemi Wang return page;
1940de55c8b2SAndrey Ryabinin if (page && page_to_nid(page) == nid) {
1941de55c8b2SAndrey Ryabinin preempt_disable();
1942de55c8b2SAndrey Ryabinin __inc_numa_state(page_zone(page), NUMA_INTERLEAVE_HIT);
1943de55c8b2SAndrey Ryabinin preempt_enable();
1944de55c8b2SAndrey Ryabinin }
19451da177e4SLinus Torvalds return page;
19461da177e4SLinus Torvalds }
19471da177e4SLinus Torvalds
19481da177e4SLinus Torvalds /**
19490bbbc0b3SAndrea Arcangeli * alloc_pages_vma - Allocate a page for a VMA.
19501da177e4SLinus Torvalds *
19511da177e4SLinus Torvalds * @gfp:
19521da177e4SLinus Torvalds * %GFP_USER user allocation.
19531da177e4SLinus Torvalds * %GFP_KERNEL kernel allocations,
19541da177e4SLinus Torvalds * %GFP_HIGHMEM highmem/user allocations,
19551da177e4SLinus Torvalds * %GFP_FS allocation should not call back into a file system.
19561da177e4SLinus Torvalds * %GFP_ATOMIC don't sleep.
19571da177e4SLinus Torvalds *
19580bbbc0b3SAndrea Arcangeli * @order: Order of the GFP allocation.
19591da177e4SLinus Torvalds * @vma: Pointer to VMA or NULL if not available.
19601da177e4SLinus Torvalds * @addr: Virtual Address of the allocation. Must be inside the VMA.
1961be97a41bSVlastimil Babka * @node: Which node to prefer for allocation (modulo policy).
1962be97a41bSVlastimil Babka * @hugepage: for hugepages try only the preferred node if possible
19631da177e4SLinus Torvalds *
19641da177e4SLinus Torvalds * This function allocates a page from the kernel page pool and applies
19651da177e4SLinus Torvalds * a NUMA policy associated with the VMA or the current process.
19661da177e4SLinus Torvalds * When VMA is not NULL caller must hold down_read on the mmap_sem of the
19671da177e4SLinus Torvalds * mm_struct of the VMA to prevent it from going away. Should be used for
1968be97a41bSVlastimil Babka * all allocations for pages that will be mapped into user space. Returns
1969be97a41bSVlastimil Babka * NULL when no page can be allocated.
19701da177e4SLinus Torvalds */ 19711da177e4SLinus Torvalds struct page * 19720bbbc0b3SAndrea Arcangeli alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, 1973be97a41bSVlastimil Babka unsigned long addr, int node, bool hugepage) 19741da177e4SLinus Torvalds { 1975cc9a6c87SMel Gorman struct mempolicy *pol; 1976c0ff7453SMiao Xie struct page *page; 197704ec6264SVlastimil Babka int preferred_nid; 1978be97a41bSVlastimil Babka nodemask_t *nmask; 19791da177e4SLinus Torvalds 1980dd6eecb9SOleg Nesterov pol = get_vma_policy(vma, addr); 1981cc9a6c87SMel Gorman 1982be97a41bSVlastimil Babka if (pol->mode == MPOL_INTERLEAVE) { 19831da177e4SLinus Torvalds unsigned nid; 19845da7ca86SChristoph Lameter 19858eac563cSAndi Kleen nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order); 198652cd3b07SLee Schermerhorn mpol_cond_put(pol); 19870bbbc0b3SAndrea Arcangeli page = alloc_page_interleave(gfp, order, nid); 1988be97a41bSVlastimil Babka goto out; 19891da177e4SLinus Torvalds } 19901da177e4SLinus Torvalds 19910867a57cSVlastimil Babka if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) { 19920867a57cSVlastimil Babka int hpage_node = node; 19930867a57cSVlastimil Babka 19940867a57cSVlastimil Babka /* 19950867a57cSVlastimil Babka * For hugepage allocation and non-interleave policy which 19960867a57cSVlastimil Babka * allows the current node (or other explicitly preferred 19970867a57cSVlastimil Babka * node) we only try to allocate from the current/preferred 19980867a57cSVlastimil Babka * node and don't fall back to other nodes, as the cost of 19990867a57cSVlastimil Babka * remote accesses would likely offset THP benefits. 20000867a57cSVlastimil Babka * 20010867a57cSVlastimil Babka * If the policy is interleave, or does not allow the current 20020867a57cSVlastimil Babka * node in its nodemask, we allocate the standard way. 20030867a57cSVlastimil Babka */ 20040867a57cSVlastimil Babka if (pol->mode == MPOL_PREFERRED && 20050867a57cSVlastimil Babka !(pol->flags & MPOL_F_LOCAL)) 20060867a57cSVlastimil Babka hpage_node = pol->v.preferred_node; 20070867a57cSVlastimil Babka 20080867a57cSVlastimil Babka nmask = policy_nodemask(gfp, pol); 20090867a57cSVlastimil Babka if (!nmask || node_isset(hpage_node, *nmask)) { 20100867a57cSVlastimil Babka mpol_cond_put(pol); 201196db800fSVlastimil Babka page = __alloc_pages_node(hpage_node, 20120867a57cSVlastimil Babka gfp | __GFP_THISNODE, order); 20130867a57cSVlastimil Babka goto out; 20140867a57cSVlastimil Babka } 20150867a57cSVlastimil Babka } 20160867a57cSVlastimil Babka 2017077fcf11SAneesh Kumar K.V nmask = policy_nodemask(gfp, pol); 201804ec6264SVlastimil Babka preferred_nid = policy_node(gfp, pol, node); 201904ec6264SVlastimil Babka page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask); 2020d51e9894SVlastimil Babka mpol_cond_put(pol); 2021be97a41bSVlastimil Babka out: 2022077fcf11SAneesh Kumar K.V return page; 2023077fcf11SAneesh Kumar K.V } 2024077fcf11SAneesh Kumar K.V 20251da177e4SLinus Torvalds /** 20261da177e4SLinus Torvalds * alloc_pages_current - Allocate pages. 20271da177e4SLinus Torvalds * 20281da177e4SLinus Torvalds * @gfp: 20291da177e4SLinus Torvalds * %GFP_USER user allocation, 20301da177e4SLinus Torvalds * %GFP_KERNEL kernel allocation, 20311da177e4SLinus Torvalds * %GFP_HIGHMEM highmem allocation, 20321da177e4SLinus Torvalds * %GFP_FS don't call back into a file system. 20331da177e4SLinus Torvalds * %GFP_ATOMIC don't sleep. 20341da177e4SLinus Torvalds * @order: Power of two of allocation size in pages. 0 is a single page. 
20351da177e4SLinus Torvalds *
20361da177e4SLinus Torvalds * Allocate a page from the kernel page pool, applying the current
20371da177e4SLinus Torvalds * process NUMA policy when not in interrupt context.
20381da177e4SLinus Torvalds * Returns NULL when no page can be allocated.
20391da177e4SLinus Torvalds */
2040dd0fc66fSAl Viro struct page *alloc_pages_current(gfp_t gfp, unsigned order)
20411da177e4SLinus Torvalds {
20428d90274bSOleg Nesterov struct mempolicy *pol = &default_policy;
2043c0ff7453SMiao Xie struct page *page;
20441da177e4SLinus Torvalds
20458d90274bSOleg Nesterov if (!in_interrupt() && !(gfp & __GFP_THISNODE))
20468d90274bSOleg Nesterov pol = get_task_policy(current);
204752cd3b07SLee Schermerhorn
204852cd3b07SLee Schermerhorn /*
204952cd3b07SLee Schermerhorn * No reference counting needed for current->mempolicy
205052cd3b07SLee Schermerhorn * nor system default_policy
205152cd3b07SLee Schermerhorn */
205245c4745aSLee Schermerhorn if (pol->mode == MPOL_INTERLEAVE)
2053c0ff7453SMiao Xie page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2054c0ff7453SMiao Xie else
2055c0ff7453SMiao Xie page = __alloc_pages_nodemask(gfp, order,
205604ec6264SVlastimil Babka policy_node(gfp, pol, numa_node_id()),
20575c4b4be3SAndi Kleen policy_nodemask(gfp, pol));
2058cc9a6c87SMel Gorman
2059c0ff7453SMiao Xie return page;
20601da177e4SLinus Torvalds }
20611da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_pages_current);
20621da177e4SLinus Torvalds
2063ef0855d3SOleg Nesterov int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2064ef0855d3SOleg Nesterov {
2065ef0855d3SOleg Nesterov struct mempolicy *pol = mpol_dup(vma_policy(src));
2066ef0855d3SOleg Nesterov
2067ef0855d3SOleg Nesterov if (IS_ERR(pol))
2068ef0855d3SOleg Nesterov return PTR_ERR(pol);
2069ef0855d3SOleg Nesterov dst->vm_policy = pol;
2070ef0855d3SOleg Nesterov return 0;
2071ef0855d3SOleg Nesterov }
2072ef0855d3SOleg Nesterov
20734225399aSPaul Jackson /*
2074846a16bfSLee Schermerhorn * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
20754225399aSPaul Jackson * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
20764225399aSPaul Jackson * with the mems_allowed returned by cpuset_mems_allowed(). This
20774225399aSPaul Jackson * keeps mempolicies cpuset relative after its cpuset moves. See
20784225399aSPaul Jackson * further kernel/cpuset.c update_nodemask().
2079708c1bbcSMiao Xie *
2080708c1bbcSMiao Xie * current's mempolicy may be rebound by another task (the task that changes
2081708c1bbcSMiao Xie * the cpuset's mems), so we needn't do rebind work for the current task.
20824225399aSPaul Jackson */ 20834225399aSPaul Jackson 2084846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */ 2085846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old) 20861da177e4SLinus Torvalds { 20871da177e4SLinus Torvalds struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL); 20881da177e4SLinus Torvalds 20891da177e4SLinus Torvalds if (!new) 20901da177e4SLinus Torvalds return ERR_PTR(-ENOMEM); 2091708c1bbcSMiao Xie 2092708c1bbcSMiao Xie /* task's mempolicy is protected by alloc_lock */ 2093708c1bbcSMiao Xie if (old == current->mempolicy) { 2094708c1bbcSMiao Xie task_lock(current); 2095708c1bbcSMiao Xie *new = *old; 2096708c1bbcSMiao Xie task_unlock(current); 2097708c1bbcSMiao Xie } else 2098708c1bbcSMiao Xie *new = *old; 2099708c1bbcSMiao Xie 21004225399aSPaul Jackson if (current_cpuset_is_being_rebound()) { 21014225399aSPaul Jackson nodemask_t mems = cpuset_mems_allowed(current); 2102213980c0SVlastimil Babka mpol_rebind_policy(new, &mems); 21034225399aSPaul Jackson } 21041da177e4SLinus Torvalds atomic_set(&new->refcnt, 1); 21051da177e4SLinus Torvalds return new; 21061da177e4SLinus Torvalds } 21071da177e4SLinus Torvalds 21081da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */ 2109fcfb4dccSKOSAKI Motohiro bool __mpol_equal(struct mempolicy *a, struct mempolicy *b) 21101da177e4SLinus Torvalds { 21111da177e4SLinus Torvalds if (!a || !b) 2112fcfb4dccSKOSAKI Motohiro return false; 211345c4745aSLee Schermerhorn if (a->mode != b->mode) 2114fcfb4dccSKOSAKI Motohiro return false; 211519800502SBob Liu if (a->flags != b->flags) 2116fcfb4dccSKOSAKI Motohiro return false; 211719800502SBob Liu if (mpol_store_user_nodemask(a)) 211819800502SBob Liu if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask)) 2119fcfb4dccSKOSAKI Motohiro return false; 212019800502SBob Liu 212145c4745aSLee Schermerhorn switch (a->mode) { 212219770b32SMel Gorman case MPOL_BIND: 212319770b32SMel Gorman /* Fall through */ 21241da177e4SLinus Torvalds case MPOL_INTERLEAVE: 2125fcfb4dccSKOSAKI Motohiro return !!nodes_equal(a->v.nodes, b->v.nodes); 21261da177e4SLinus Torvalds case MPOL_PREFERRED: 212775719661SNamhyung Kim return a->v.preferred_node == b->v.preferred_node; 21281da177e4SLinus Torvalds default: 21291da177e4SLinus Torvalds BUG(); 2130fcfb4dccSKOSAKI Motohiro return false; 21311da177e4SLinus Torvalds } 21321da177e4SLinus Torvalds } 21331da177e4SLinus Torvalds 21341da177e4SLinus Torvalds /* 21351da177e4SLinus Torvalds * Shared memory backing store policy support. 21361da177e4SLinus Torvalds * 21371da177e4SLinus Torvalds * Remember policies even when nobody has shared memory mapped. 21381da177e4SLinus Torvalds * The policies are kept in Red-Black tree linked from the inode. 21394a8c7bb5SNathan Zimmer * They are protected by the sp->lock rwlock, which should be held 21401da177e4SLinus Torvalds * for any accesses to the tree. 21411da177e4SLinus Torvalds */ 21421da177e4SLinus Torvalds 21434a8c7bb5SNathan Zimmer /* 21444a8c7bb5SNathan Zimmer * lookup first element intersecting start-end. 
Caller holds sp->lock for 21454a8c7bb5SNathan Zimmer * reading or for writing 21464a8c7bb5SNathan Zimmer */ 21471da177e4SLinus Torvalds static struct sp_node * 21481da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end) 21491da177e4SLinus Torvalds { 21501da177e4SLinus Torvalds struct rb_node *n = sp->root.rb_node; 21511da177e4SLinus Torvalds 21521da177e4SLinus Torvalds while (n) { 21531da177e4SLinus Torvalds struct sp_node *p = rb_entry(n, struct sp_node, nd); 21541da177e4SLinus Torvalds 21551da177e4SLinus Torvalds if (start >= p->end) 21561da177e4SLinus Torvalds n = n->rb_right; 21571da177e4SLinus Torvalds else if (end <= p->start) 21581da177e4SLinus Torvalds n = n->rb_left; 21591da177e4SLinus Torvalds else 21601da177e4SLinus Torvalds break; 21611da177e4SLinus Torvalds } 21621da177e4SLinus Torvalds if (!n) 21631da177e4SLinus Torvalds return NULL; 21641da177e4SLinus Torvalds for (;;) { 21651da177e4SLinus Torvalds struct sp_node *w = NULL; 21661da177e4SLinus Torvalds struct rb_node *prev = rb_prev(n); 21671da177e4SLinus Torvalds if (!prev) 21681da177e4SLinus Torvalds break; 21691da177e4SLinus Torvalds w = rb_entry(prev, struct sp_node, nd); 21701da177e4SLinus Torvalds if (w->end <= start) 21711da177e4SLinus Torvalds break; 21721da177e4SLinus Torvalds n = prev; 21731da177e4SLinus Torvalds } 21741da177e4SLinus Torvalds return rb_entry(n, struct sp_node, nd); 21751da177e4SLinus Torvalds } 21761da177e4SLinus Torvalds 21774a8c7bb5SNathan Zimmer /* 21784a8c7bb5SNathan Zimmer * Insert a new shared policy into the list. Caller holds sp->lock for 21794a8c7bb5SNathan Zimmer * writing. 21804a8c7bb5SNathan Zimmer */ 21811da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new) 21821da177e4SLinus Torvalds { 21831da177e4SLinus Torvalds struct rb_node **p = &sp->root.rb_node; 21841da177e4SLinus Torvalds struct rb_node *parent = NULL; 21851da177e4SLinus Torvalds struct sp_node *nd; 21861da177e4SLinus Torvalds 21871da177e4SLinus Torvalds while (*p) { 21881da177e4SLinus Torvalds parent = *p; 21891da177e4SLinus Torvalds nd = rb_entry(parent, struct sp_node, nd); 21901da177e4SLinus Torvalds if (new->start < nd->start) 21911da177e4SLinus Torvalds p = &(*p)->rb_left; 21921da177e4SLinus Torvalds else if (new->end > nd->end) 21931da177e4SLinus Torvalds p = &(*p)->rb_right; 21941da177e4SLinus Torvalds else 21951da177e4SLinus Torvalds BUG(); 21961da177e4SLinus Torvalds } 21971da177e4SLinus Torvalds rb_link_node(&new->nd, parent, p); 21981da177e4SLinus Torvalds rb_insert_color(&new->nd, &sp->root); 2199140d5a49SPaul Mundt pr_debug("inserting %lx-%lx: %d\n", new->start, new->end, 220045c4745aSLee Schermerhorn new->policy ? 
new->policy->mode : 0);
22011da177e4SLinus Torvalds }
22021da177e4SLinus Torvalds 
22031da177e4SLinus Torvalds /* Find shared policy intersecting idx */
22041da177e4SLinus Torvalds struct mempolicy *
22051da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
22061da177e4SLinus Torvalds {
22071da177e4SLinus Torvalds 	struct mempolicy *pol = NULL;
22081da177e4SLinus Torvalds 	struct sp_node *sn;
22091da177e4SLinus Torvalds 
22101da177e4SLinus Torvalds 	if (!sp->root.rb_node)
22111da177e4SLinus Torvalds 		return NULL;
22124a8c7bb5SNathan Zimmer 	read_lock(&sp->lock);
22131da177e4SLinus Torvalds 	sn = sp_lookup(sp, idx, idx+1);
22141da177e4SLinus Torvalds 	if (sn) {
22151da177e4SLinus Torvalds 		mpol_get(sn->policy);
22161da177e4SLinus Torvalds 		pol = sn->policy;
22171da177e4SLinus Torvalds 	}
22184a8c7bb5SNathan Zimmer 	read_unlock(&sp->lock);
22191da177e4SLinus Torvalds 	return pol;
22201da177e4SLinus Torvalds }
22211da177e4SLinus Torvalds 
222263f74ca2SKOSAKI Motohiro static void sp_free(struct sp_node *n)
222363f74ca2SKOSAKI Motohiro {
222463f74ca2SKOSAKI Motohiro 	mpol_put(n->policy);
222563f74ca2SKOSAKI Motohiro 	kmem_cache_free(sn_cache, n);
222663f74ca2SKOSAKI Motohiro }
222763f74ca2SKOSAKI Motohiro 
2228771fb4d8SLee Schermerhorn /**
2229771fb4d8SLee Schermerhorn  * mpol_misplaced - check whether current page node is valid in policy
2230771fb4d8SLee Schermerhorn  *
2231b46e14acSFabian Frederick  * @page: page to be checked
2232b46e14acSFabian Frederick  * @vma: vm area where the page is mapped
2233b46e14acSFabian Frederick  * @addr: virtual address where the page is mapped
2234771fb4d8SLee Schermerhorn  *
2235771fb4d8SLee Schermerhorn  * Look up the current policy node id for vma,addr and compare it to the
2236771fb4d8SLee Schermerhorn  * page's node id.
2237771fb4d8SLee Schermerhorn  *
2238771fb4d8SLee Schermerhorn  * Returns:
2239771fb4d8SLee Schermerhorn  *	-1	- not misplaced, page is in the right node
2240771fb4d8SLee Schermerhorn  *	node	- node id where the page should be
2241771fb4d8SLee Schermerhorn  *
2242771fb4d8SLee Schermerhorn  * Policy determination "mimics" alloc_page_vma().
2243771fb4d8SLee Schermerhorn  * Called from fault path where we know the vma and faulting address.
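 * A sketch of the caller contract (attempt_migration() is a hypothetical
 * stand-in for the NUMA hinting fault handler's migration step):
 *
 *	int target = mpol_misplaced(page, vma, addr);
 *	if (target != -1)
 *		attempt_migration(page, target);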
2244771fb4d8SLee Schermerhorn */ 2245771fb4d8SLee Schermerhorn int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr) 2246771fb4d8SLee Schermerhorn { 2247771fb4d8SLee Schermerhorn struct mempolicy *pol; 2248c33d6c06SMel Gorman struct zoneref *z; 2249771fb4d8SLee Schermerhorn int curnid = page_to_nid(page); 2250771fb4d8SLee Schermerhorn unsigned long pgoff; 225190572890SPeter Zijlstra int thiscpu = raw_smp_processor_id(); 225290572890SPeter Zijlstra int thisnid = cpu_to_node(thiscpu); 2253771fb4d8SLee Schermerhorn int polnid = -1; 2254771fb4d8SLee Schermerhorn int ret = -1; 2255771fb4d8SLee Schermerhorn 2256dd6eecb9SOleg Nesterov pol = get_vma_policy(vma, addr); 2257771fb4d8SLee Schermerhorn if (!(pol->flags & MPOL_F_MOF)) 2258771fb4d8SLee Schermerhorn goto out; 2259771fb4d8SLee Schermerhorn 2260771fb4d8SLee Schermerhorn switch (pol->mode) { 2261771fb4d8SLee Schermerhorn case MPOL_INTERLEAVE: 2262771fb4d8SLee Schermerhorn pgoff = vma->vm_pgoff; 2263771fb4d8SLee Schermerhorn pgoff += (addr - vma->vm_start) >> PAGE_SHIFT; 226498c70baaSLaurent Dufour polnid = offset_il_node(pol, pgoff); 2265771fb4d8SLee Schermerhorn break; 2266771fb4d8SLee Schermerhorn 2267771fb4d8SLee Schermerhorn case MPOL_PREFERRED: 2268771fb4d8SLee Schermerhorn if (pol->flags & MPOL_F_LOCAL) 2269771fb4d8SLee Schermerhorn polnid = numa_node_id(); 2270771fb4d8SLee Schermerhorn else 2271771fb4d8SLee Schermerhorn polnid = pol->v.preferred_node; 2272771fb4d8SLee Schermerhorn break; 2273771fb4d8SLee Schermerhorn 2274771fb4d8SLee Schermerhorn case MPOL_BIND: 2275c33d6c06SMel Gorman 2276771fb4d8SLee Schermerhorn /* 2277771fb4d8SLee Schermerhorn * allows binding to multiple nodes. 2278771fb4d8SLee Schermerhorn * use current page if in policy nodemask, 2279771fb4d8SLee Schermerhorn * else select nearest allowed node, if any. 2280771fb4d8SLee Schermerhorn * If no allowed nodes, use current [!misplaced]. 2281771fb4d8SLee Schermerhorn */ 2282771fb4d8SLee Schermerhorn if (node_isset(curnid, pol->v.nodes)) 2283771fb4d8SLee Schermerhorn goto out; 2284c33d6c06SMel Gorman z = first_zones_zonelist( 2285771fb4d8SLee Schermerhorn node_zonelist(numa_node_id(), GFP_HIGHUSER), 2286771fb4d8SLee Schermerhorn gfp_zone(GFP_HIGHUSER), 2287c33d6c06SMel Gorman &pol->v.nodes); 2288c33d6c06SMel Gorman polnid = z->zone->node; 2289771fb4d8SLee Schermerhorn break; 2290771fb4d8SLee Schermerhorn 2291771fb4d8SLee Schermerhorn default: 2292771fb4d8SLee Schermerhorn BUG(); 2293771fb4d8SLee Schermerhorn } 22945606e387SMel Gorman 22955606e387SMel Gorman /* Migrate the page towards the node whose CPU is referencing it */ 2296e42c8ff2SMel Gorman if (pol->flags & MPOL_F_MORON) { 229790572890SPeter Zijlstra polnid = thisnid; 22985606e387SMel Gorman 229910f39042SRik van Riel if (!should_numa_migrate_memory(current, page, curnid, thiscpu)) 2300de1c9ce6SRik van Riel goto out; 2301de1c9ce6SRik van Riel } 2302e42c8ff2SMel Gorman 2303771fb4d8SLee Schermerhorn if (curnid != polnid) 2304771fb4d8SLee Schermerhorn ret = polnid; 2305771fb4d8SLee Schermerhorn out: 2306771fb4d8SLee Schermerhorn mpol_cond_put(pol); 2307771fb4d8SLee Schermerhorn 2308771fb4d8SLee Schermerhorn return ret; 2309771fb4d8SLee Schermerhorn } 2310771fb4d8SLee Schermerhorn 2311c11600e4SDavid Rientjes /* 2312c11600e4SDavid Rientjes * Drop the (possibly final) reference to task->mempolicy. 
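 * (Typically invoked from the task-exit path, e.g. do_exit().)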
It needs to be
2313c11600e4SDavid Rientjes  * dropped after task->mempolicy is set to NULL so that any allocation done as
2314c11600e4SDavid Rientjes  * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2315c11600e4SDavid Rientjes  * policy.
2316c11600e4SDavid Rientjes  */
2317c11600e4SDavid Rientjes void mpol_put_task_policy(struct task_struct *task)
2318c11600e4SDavid Rientjes {
2319c11600e4SDavid Rientjes 	struct mempolicy *pol;
2320c11600e4SDavid Rientjes 
2321c11600e4SDavid Rientjes 	task_lock(task);
2322c11600e4SDavid Rientjes 	pol = task->mempolicy;
2323c11600e4SDavid Rientjes 	task->mempolicy = NULL;
2324c11600e4SDavid Rientjes 	task_unlock(task);
2325c11600e4SDavid Rientjes 	mpol_put(pol);
2326c11600e4SDavid Rientjes }
2327c11600e4SDavid Rientjes 
23281da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n)
23291da177e4SLinus Torvalds {
2330140d5a49SPaul Mundt 	pr_debug("deleting %lx-%lx\n", n->start, n->end);
23311da177e4SLinus Torvalds 	rb_erase(&n->nd, &sp->root);
233263f74ca2SKOSAKI Motohiro 	sp_free(n);
23331da177e4SLinus Torvalds }
23341da177e4SLinus Torvalds 
233542288fe3SMel Gorman static void sp_node_init(struct sp_node *node, unsigned long start,
233642288fe3SMel Gorman 			unsigned long end, struct mempolicy *pol)
233742288fe3SMel Gorman {
233842288fe3SMel Gorman 	node->start = start;
233942288fe3SMel Gorman 	node->end = end;
234042288fe3SMel Gorman 	node->policy = pol;
234142288fe3SMel Gorman }
234242288fe3SMel Gorman 
2343dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2344dbcb0f19SAdrian Bunk 				struct mempolicy *pol)
23451da177e4SLinus Torvalds {
2346869833f2SKOSAKI Motohiro 	struct sp_node *n;
2347869833f2SKOSAKI Motohiro 	struct mempolicy *newpol;
23481da177e4SLinus Torvalds 
2349869833f2SKOSAKI Motohiro 	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
23501da177e4SLinus Torvalds 	if (!n)
23511da177e4SLinus Torvalds 		return NULL;
2352869833f2SKOSAKI Motohiro 
2353869833f2SKOSAKI Motohiro 	newpol = mpol_dup(pol);
2354869833f2SKOSAKI Motohiro 	if (IS_ERR(newpol)) {
2355869833f2SKOSAKI Motohiro 		kmem_cache_free(sn_cache, n);
2356869833f2SKOSAKI Motohiro 		return NULL;
2357869833f2SKOSAKI Motohiro 	}
2358869833f2SKOSAKI Motohiro 	newpol->flags |= MPOL_F_SHARED;
235942288fe3SMel Gorman 	sp_node_init(n, start, end, newpol);
2360869833f2SKOSAKI Motohiro 
23611da177e4SLinus Torvalds 	return n;
23621da177e4SLinus Torvalds }
23631da177e4SLinus Torvalds 
23641da177e4SLinus Torvalds /* Replace a policy range. */
23651da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
23661da177e4SLinus Torvalds 				 unsigned long end, struct sp_node *new)
23671da177e4SLinus Torvalds {
2368b22d127aSMel Gorman 	struct sp_node *n;
236942288fe3SMel Gorman 	struct sp_node *n_new = NULL;
237042288fe3SMel Gorman 	struct mempolicy *mpol_new = NULL;
2371b22d127aSMel Gorman 	int ret = 0;
23721da177e4SLinus Torvalds 
237342288fe3SMel Gorman restart:
23744a8c7bb5SNathan Zimmer 	write_lock(&sp->lock);
23751da177e4SLinus Torvalds 	n = sp_lookup(sp, start, end);
23761da177e4SLinus Torvalds 	/* Take care of old policies in the same range.
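	 * Overlapping entries are trimmed, split, or deleted: a node fully
	 * inside [start, end) is removed, a node overhanging one edge is
	 * clipped, and a node spanning the whole range is split in two
	 * around it.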
	 */
23771da177e4SLinus Torvalds 	while (n && n->start < end) {
23781da177e4SLinus Torvalds 		struct rb_node *next = rb_next(&n->nd);
23791da177e4SLinus Torvalds 		if (n->start >= start) {
23801da177e4SLinus Torvalds 			if (n->end <= end)
23811da177e4SLinus Torvalds 				sp_delete(sp, n);
23821da177e4SLinus Torvalds 			else
23831da177e4SLinus Torvalds 				n->start = end;
23841da177e4SLinus Torvalds 		} else {
23851da177e4SLinus Torvalds 			/* Old policy spanning whole new range. */
23861da177e4SLinus Torvalds 			if (n->end > end) {
238742288fe3SMel Gorman 				if (!n_new)
238842288fe3SMel Gorman 					goto alloc_new;
238942288fe3SMel Gorman 
239042288fe3SMel Gorman 				*mpol_new = *n->policy;
239142288fe3SMel Gorman 				atomic_set(&mpol_new->refcnt, 1);
23927880639cSKOSAKI Motohiro 				sp_node_init(n_new, end, n->end, mpol_new);
23931da177e4SLinus Torvalds 				n->end = start;
23945ca39575SHillf Danton 				sp_insert(sp, n_new);
239542288fe3SMel Gorman 				n_new = NULL;
239642288fe3SMel Gorman 				mpol_new = NULL;
23971da177e4SLinus Torvalds 				break;
23981da177e4SLinus Torvalds 			} else
23991da177e4SLinus Torvalds 				n->end = start;
24001da177e4SLinus Torvalds 		}
24011da177e4SLinus Torvalds 		if (!next)
24021da177e4SLinus Torvalds 			break;
24031da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
24041da177e4SLinus Torvalds 	}
24051da177e4SLinus Torvalds 	if (new)
24061da177e4SLinus Torvalds 		sp_insert(sp, new);
24074a8c7bb5SNathan Zimmer 	write_unlock(&sp->lock);
240842288fe3SMel Gorman 	ret = 0;
240942288fe3SMel Gorman 
241042288fe3SMel Gorman err_out:
241142288fe3SMel Gorman 	if (mpol_new)
241242288fe3SMel Gorman 		mpol_put(mpol_new);
241342288fe3SMel Gorman 	if (n_new)
241442288fe3SMel Gorman 		kmem_cache_free(sn_cache, n_new);
241542288fe3SMel Gorman 
2416b22d127aSMel Gorman 	return ret;
241742288fe3SMel Gorman 
241842288fe3SMel Gorman alloc_new:
24194a8c7bb5SNathan Zimmer 	write_unlock(&sp->lock);
242042288fe3SMel Gorman 	ret = -ENOMEM;
242142288fe3SMel Gorman 	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
242242288fe3SMel Gorman 	if (!n_new)
242342288fe3SMel Gorman 		goto err_out;
242442288fe3SMel Gorman 	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
242542288fe3SMel Gorman 	if (!mpol_new)
242642288fe3SMel Gorman 		goto err_out;
242742288fe3SMel Gorman 	goto restart;
24281da177e4SLinus Torvalds }
24291da177e4SLinus Torvalds 
243071fe804bSLee Schermerhorn /**
243171fe804bSLee Schermerhorn  * mpol_shared_policy_init - initialize shared policy for inode
243271fe804bSLee Schermerhorn  * @sp: pointer to inode shared policy
243371fe804bSLee Schermerhorn  * @mpol: struct mempolicy to install
243471fe804bSLee Schermerhorn  *
243571fe804bSLee Schermerhorn  * Install non-NULL @mpol in inode's shared policy rb-tree.
243671fe804bSLee Schermerhorn  * On entry, the current task has a reference on a non-NULL @mpol.
243771fe804bSLee Schermerhorn  * This must be released on exit.
24384bfc4495SKAMEZAWA Hiroyuki  * This is called during get_inode() calls, so we can use GFP_KERNEL.
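 *
 * A minimal caller sketch, modeled on the tmpfs inode-creation path (the
 * sbinfo/info names are illustrative):
 *
 *	mpol_shared_policy_init(&info->policy, shmem_get_sbmpol(sbinfo));
 *
 * where the second argument carries a reference on the mount's mempolicy,
 * or is NULL when the mount has no mpol= option.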
243971fe804bSLee Schermerhorn */ 244071fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) 24417339ff83SRobin Holt { 244258568d2aSMiao Xie int ret; 244358568d2aSMiao Xie 244471fe804bSLee Schermerhorn sp->root = RB_ROOT; /* empty tree == default mempolicy */ 24454a8c7bb5SNathan Zimmer rwlock_init(&sp->lock); 24467339ff83SRobin Holt 244771fe804bSLee Schermerhorn if (mpol) { 24487339ff83SRobin Holt struct vm_area_struct pvma; 244971fe804bSLee Schermerhorn struct mempolicy *new; 24504bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 24517339ff83SRobin Holt 24524bfc4495SKAMEZAWA Hiroyuki if (!scratch) 24535c0c1654SLee Schermerhorn goto put_mpol; 245471fe804bSLee Schermerhorn /* contextualize the tmpfs mount point mempolicy */ 245571fe804bSLee Schermerhorn new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask); 245615d77835SLee Schermerhorn if (IS_ERR(new)) 24570cae3457SDan Carpenter goto free_scratch; /* no valid nodemask intersection */ 245858568d2aSMiao Xie 245958568d2aSMiao Xie task_lock(current); 24604bfc4495SKAMEZAWA Hiroyuki ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch); 246158568d2aSMiao Xie task_unlock(current); 246215d77835SLee Schermerhorn if (ret) 24635c0c1654SLee Schermerhorn goto put_new; 246471fe804bSLee Schermerhorn 246571fe804bSLee Schermerhorn /* Create pseudo-vma that contains just the policy */ 24667339ff83SRobin Holt memset(&pvma, 0, sizeof(struct vm_area_struct)); 246771fe804bSLee Schermerhorn pvma.vm_end = TASK_SIZE; /* policy covers entire file */ 246871fe804bSLee Schermerhorn mpol_set_shared_policy(sp, &pvma, new); /* adds ref */ 246915d77835SLee Schermerhorn 24705c0c1654SLee Schermerhorn put_new: 247171fe804bSLee Schermerhorn mpol_put(new); /* drop initial ref */ 24720cae3457SDan Carpenter free_scratch: 24734bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 24745c0c1654SLee Schermerhorn put_mpol: 24755c0c1654SLee Schermerhorn mpol_put(mpol); /* drop our incoming ref on sb mpol */ 24767339ff83SRobin Holt } 24777339ff83SRobin Holt } 24787339ff83SRobin Holt 24791da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info, 24801da177e4SLinus Torvalds struct vm_area_struct *vma, struct mempolicy *npol) 24811da177e4SLinus Torvalds { 24821da177e4SLinus Torvalds int err; 24831da177e4SLinus Torvalds struct sp_node *new = NULL; 24841da177e4SLinus Torvalds unsigned long sz = vma_pages(vma); 24851da177e4SLinus Torvalds 2486028fec41SDavid Rientjes pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n", 24871da177e4SLinus Torvalds vma->vm_pgoff, 248845c4745aSLee Schermerhorn sz, npol ? npol->mode : -1, 2489028fec41SDavid Rientjes npol ? npol->flags : -1, 249000ef2d2fSDavid Rientjes npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE); 24911da177e4SLinus Torvalds 24921da177e4SLinus Torvalds if (npol) { 24931da177e4SLinus Torvalds new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol); 24941da177e4SLinus Torvalds if (!new) 24951da177e4SLinus Torvalds return -ENOMEM; 24961da177e4SLinus Torvalds } 24971da177e4SLinus Torvalds err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new); 24981da177e4SLinus Torvalds if (err && new) 249963f74ca2SKOSAKI Motohiro sp_free(new); 25001da177e4SLinus Torvalds return err; 25011da177e4SLinus Torvalds } 25021da177e4SLinus Torvalds 25031da177e4SLinus Torvalds /* Free a backing policy store on inode delete. 
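 * Walks the whole rb-tree under the write lock, dropping each node's
 * policy reference.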
*/ 25041da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p) 25051da177e4SLinus Torvalds { 25061da177e4SLinus Torvalds struct sp_node *n; 25071da177e4SLinus Torvalds struct rb_node *next; 25081da177e4SLinus Torvalds 25091da177e4SLinus Torvalds if (!p->root.rb_node) 25101da177e4SLinus Torvalds return; 25114a8c7bb5SNathan Zimmer write_lock(&p->lock); 25121da177e4SLinus Torvalds next = rb_first(&p->root); 25131da177e4SLinus Torvalds while (next) { 25141da177e4SLinus Torvalds n = rb_entry(next, struct sp_node, nd); 25151da177e4SLinus Torvalds next = rb_next(&n->nd); 251663f74ca2SKOSAKI Motohiro sp_delete(p, n); 25171da177e4SLinus Torvalds } 25184a8c7bb5SNathan Zimmer write_unlock(&p->lock); 25191da177e4SLinus Torvalds } 25201da177e4SLinus Torvalds 25211a687c2eSMel Gorman #ifdef CONFIG_NUMA_BALANCING 2522c297663cSMel Gorman static int __initdata numabalancing_override; 25231a687c2eSMel Gorman 25241a687c2eSMel Gorman static void __init check_numabalancing_enable(void) 25251a687c2eSMel Gorman { 25261a687c2eSMel Gorman bool numabalancing_default = false; 25271a687c2eSMel Gorman 25281a687c2eSMel Gorman if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED)) 25291a687c2eSMel Gorman numabalancing_default = true; 25301a687c2eSMel Gorman 2531c297663cSMel Gorman /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */ 2532c297663cSMel Gorman if (numabalancing_override) 2533c297663cSMel Gorman set_numabalancing_state(numabalancing_override == 1); 2534c297663cSMel Gorman 2535b0dc2b9bSMel Gorman if (num_online_nodes() > 1 && !numabalancing_override) { 2536756a025fSJoe Perches pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n", 2537c297663cSMel Gorman numabalancing_default ? 
"Enabling" : "Disabling"); 25381a687c2eSMel Gorman set_numabalancing_state(numabalancing_default); 25391a687c2eSMel Gorman } 25401a687c2eSMel Gorman } 25411a687c2eSMel Gorman 25421a687c2eSMel Gorman static int __init setup_numabalancing(char *str) 25431a687c2eSMel Gorman { 25441a687c2eSMel Gorman int ret = 0; 25451a687c2eSMel Gorman if (!str) 25461a687c2eSMel Gorman goto out; 25471a687c2eSMel Gorman 25481a687c2eSMel Gorman if (!strcmp(str, "enable")) { 2549c297663cSMel Gorman numabalancing_override = 1; 25501a687c2eSMel Gorman ret = 1; 25511a687c2eSMel Gorman } else if (!strcmp(str, "disable")) { 2552c297663cSMel Gorman numabalancing_override = -1; 25531a687c2eSMel Gorman ret = 1; 25541a687c2eSMel Gorman } 25551a687c2eSMel Gorman out: 25561a687c2eSMel Gorman if (!ret) 25574a404beaSAndrew Morton pr_warn("Unable to parse numa_balancing=\n"); 25581a687c2eSMel Gorman 25591a687c2eSMel Gorman return ret; 25601a687c2eSMel Gorman } 25611a687c2eSMel Gorman __setup("numa_balancing=", setup_numabalancing); 25621a687c2eSMel Gorman #else 25631a687c2eSMel Gorman static inline void __init check_numabalancing_enable(void) 25641a687c2eSMel Gorman { 25651a687c2eSMel Gorman } 25661a687c2eSMel Gorman #endif /* CONFIG_NUMA_BALANCING */ 25671a687c2eSMel Gorman 25681da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */ 25691da177e4SLinus Torvalds void __init numa_policy_init(void) 25701da177e4SLinus Torvalds { 2571b71636e2SPaul Mundt nodemask_t interleave_nodes; 2572b71636e2SPaul Mundt unsigned long largest = 0; 2573b71636e2SPaul Mundt int nid, prefer = 0; 2574b71636e2SPaul Mundt 25751da177e4SLinus Torvalds policy_cache = kmem_cache_create("numa_policy", 25761da177e4SLinus Torvalds sizeof(struct mempolicy), 257720c2df83SPaul Mundt 0, SLAB_PANIC, NULL); 25781da177e4SLinus Torvalds 25791da177e4SLinus Torvalds sn_cache = kmem_cache_create("shared_policy_node", 25801da177e4SLinus Torvalds sizeof(struct sp_node), 258120c2df83SPaul Mundt 0, SLAB_PANIC, NULL); 25821da177e4SLinus Torvalds 25835606e387SMel Gorman for_each_node(nid) { 25845606e387SMel Gorman preferred_node_policy[nid] = (struct mempolicy) { 25855606e387SMel Gorman .refcnt = ATOMIC_INIT(1), 25865606e387SMel Gorman .mode = MPOL_PREFERRED, 25875606e387SMel Gorman .flags = MPOL_F_MOF | MPOL_F_MORON, 25885606e387SMel Gorman .v = { .preferred_node = nid, }, 25895606e387SMel Gorman }; 25905606e387SMel Gorman } 25915606e387SMel Gorman 2592b71636e2SPaul Mundt /* 2593b71636e2SPaul Mundt * Set interleaving policy for system init. Interleaving is only 2594b71636e2SPaul Mundt * enabled across suitably sized nodes (default is >= 16MB), or 2595b71636e2SPaul Mundt * fall back to the largest node if they're all smaller. 2596b71636e2SPaul Mundt */ 2597b71636e2SPaul Mundt nodes_clear(interleave_nodes); 259801f13bd6SLai Jiangshan for_each_node_state(nid, N_MEMORY) { 2599b71636e2SPaul Mundt unsigned long total_pages = node_present_pages(nid); 26001da177e4SLinus Torvalds 2601b71636e2SPaul Mundt /* Preserve the largest node */ 2602b71636e2SPaul Mundt if (largest < total_pages) { 2603b71636e2SPaul Mundt largest = total_pages; 2604b71636e2SPaul Mundt prefer = nid; 2605b71636e2SPaul Mundt } 2606b71636e2SPaul Mundt 2607b71636e2SPaul Mundt /* Interleave this node? 
*/ 2608b71636e2SPaul Mundt if ((total_pages << PAGE_SHIFT) >= (16 << 20)) 2609b71636e2SPaul Mundt node_set(nid, interleave_nodes); 2610b71636e2SPaul Mundt } 2611b71636e2SPaul Mundt 2612b71636e2SPaul Mundt /* All too small, use the largest */ 2613b71636e2SPaul Mundt if (unlikely(nodes_empty(interleave_nodes))) 2614b71636e2SPaul Mundt node_set(prefer, interleave_nodes); 2615b71636e2SPaul Mundt 2616028fec41SDavid Rientjes if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes)) 2617b1de0d13SMitchel Humpherys pr_err("%s: interleaving failed\n", __func__); 26181a687c2eSMel Gorman 26191a687c2eSMel Gorman check_numabalancing_enable(); 26201da177e4SLinus Torvalds } 26211da177e4SLinus Torvalds 26228bccd85fSChristoph Lameter /* Reset policy of current process to default */ 26231da177e4SLinus Torvalds void numa_default_policy(void) 26241da177e4SLinus Torvalds { 2625028fec41SDavid Rientjes do_set_mempolicy(MPOL_DEFAULT, 0, NULL); 26261da177e4SLinus Torvalds } 262768860ec1SPaul Jackson 26284225399aSPaul Jackson /* 2629095f1fc4SLee Schermerhorn * Parse and format mempolicy from/to strings 2630095f1fc4SLee Schermerhorn */ 2631095f1fc4SLee Schermerhorn 2632095f1fc4SLee Schermerhorn /* 2633f2a07f40SHugh Dickins * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag. 26341a75a6c8SChristoph Lameter */ 2635345ace9cSLee Schermerhorn static const char * const policy_modes[] = 2636345ace9cSLee Schermerhorn { 2637345ace9cSLee Schermerhorn [MPOL_DEFAULT] = "default", 2638345ace9cSLee Schermerhorn [MPOL_PREFERRED] = "prefer", 2639345ace9cSLee Schermerhorn [MPOL_BIND] = "bind", 2640345ace9cSLee Schermerhorn [MPOL_INTERLEAVE] = "interleave", 2641d3a71033SLee Schermerhorn [MPOL_LOCAL] = "local", 2642345ace9cSLee Schermerhorn }; 26431a75a6c8SChristoph Lameter 2644095f1fc4SLee Schermerhorn 2645095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS 2646095f1fc4SLee Schermerhorn /** 2647f2a07f40SHugh Dickins * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option. 2648095f1fc4SLee Schermerhorn * @str: string containing mempolicy to parse 264971fe804bSLee Schermerhorn * @mpol: pointer to struct mempolicy pointer, returned on success. 
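 * For illustration, inputs such as "bind:0,2", "interleave=static:0-3",
 * "prefer:1" and "local" all match the format described below.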
2650095f1fc4SLee Schermerhorn  *
2651095f1fc4SLee Schermerhorn  * Format of input:
2652095f1fc4SLee Schermerhorn  *	<mode>[=<flags>][:<nodelist>]
2653095f1fc4SLee Schermerhorn  *
265471fe804bSLee Schermerhorn  * On success, returns 0, else 1
2655095f1fc4SLee Schermerhorn  */
2656a7a88b23SHugh Dickins int mpol_parse_str(char *str, struct mempolicy **mpol)
2657095f1fc4SLee Schermerhorn {
265871fe804bSLee Schermerhorn 	struct mempolicy *new = NULL;
2659b4652e84SLee Schermerhorn 	unsigned short mode;
2660f2a07f40SHugh Dickins 	unsigned short mode_flags;
266171fe804bSLee Schermerhorn 	nodemask_t nodes;
2662095f1fc4SLee Schermerhorn 	char *nodelist = strchr(str, ':');
2663095f1fc4SLee Schermerhorn 	char *flags = strchr(str, '=');
2664095f1fc4SLee Schermerhorn 	int err = 1;
2665095f1fc4SLee Schermerhorn 
2666095f1fc4SLee Schermerhorn 	if (nodelist) {
2667095f1fc4SLee Schermerhorn 		/* NUL-terminate mode or flags string */
2668095f1fc4SLee Schermerhorn 		*nodelist++ = '\0';
266971fe804bSLee Schermerhorn 		if (nodelist_parse(nodelist, nodes))
2670095f1fc4SLee Schermerhorn 			goto out;
267101f13bd6SLai Jiangshan 		if (!nodes_subset(nodes, node_states[N_MEMORY]))
2672095f1fc4SLee Schermerhorn 			goto out;
267371fe804bSLee Schermerhorn 	} else
267471fe804bSLee Schermerhorn 		nodes_clear(nodes);
267571fe804bSLee Schermerhorn 
2676095f1fc4SLee Schermerhorn 	if (flags)
2677095f1fc4SLee Schermerhorn 		*flags++ = '\0';	/* terminate mode string */
2678095f1fc4SLee Schermerhorn 
2679479e2802SPeter Zijlstra 	for (mode = 0; mode < MPOL_MAX; mode++) {
2680345ace9cSLee Schermerhorn 		if (!strcmp(str, policy_modes[mode])) {
2681095f1fc4SLee Schermerhorn 			break;
2682095f1fc4SLee Schermerhorn 		}
2683095f1fc4SLee Schermerhorn 	}
2684a720094dSMel Gorman 	if (mode >= MPOL_MAX)
2685095f1fc4SLee Schermerhorn 		goto out;
2686095f1fc4SLee Schermerhorn 
268771fe804bSLee Schermerhorn 	switch (mode) {
2688095f1fc4SLee Schermerhorn 	case MPOL_PREFERRED:
268971fe804bSLee Schermerhorn 		/*
269071fe804bSLee Schermerhorn 		 * Insist on a nodelist of one node only
269171fe804bSLee Schermerhorn 		 */
2692095f1fc4SLee Schermerhorn 		if (nodelist) {
2693095f1fc4SLee Schermerhorn 			char *rest = nodelist;
2694095f1fc4SLee Schermerhorn 			while (isdigit(*rest))
2695095f1fc4SLee Schermerhorn 				rest++;
2696926f2ae0SKOSAKI Motohiro 			if (*rest)
2697926f2ae0SKOSAKI Motohiro 				goto out;
2698095f1fc4SLee Schermerhorn 		}
2699095f1fc4SLee Schermerhorn 		break;
2700095f1fc4SLee Schermerhorn 	case MPOL_INTERLEAVE:
2701095f1fc4SLee Schermerhorn 		/*
2702095f1fc4SLee Schermerhorn 		 * Default to online nodes with memory if no nodelist
2703095f1fc4SLee Schermerhorn 		 */
2704095f1fc4SLee Schermerhorn 		if (!nodelist)
270501f13bd6SLai Jiangshan 			nodes = node_states[N_MEMORY];
27063f226aa1SLee Schermerhorn 		break;
270771fe804bSLee Schermerhorn 	case MPOL_LOCAL:
27083f226aa1SLee Schermerhorn 		/*
270971fe804bSLee Schermerhorn 		 * Don't allow a nodelist; mpol_new() checks flags
27103f226aa1SLee Schermerhorn 		 */
271171fe804bSLee Schermerhorn 		if (nodelist)
27123f226aa1SLee Schermerhorn 			goto out;
271371fe804bSLee Schermerhorn 		mode = MPOL_PREFERRED;
27143f226aa1SLee Schermerhorn 		break;
2715413b43deSRavikiran G Thirumalai 	case MPOL_DEFAULT:
2716413b43deSRavikiran G Thirumalai 		/*
2717413b43deSRavikiran G Thirumalai 		 * Insist on an empty nodelist
2718413b43deSRavikiran G Thirumalai 		 */
2719413b43deSRavikiran G Thirumalai 		if (!nodelist)
2720413b43deSRavikiran G Thirumalai 			err = 0;
2721413b43deSRavikiran G Thirumalai 		goto out;
2722d69b2e63SKOSAKI Motohiro 	case MPOL_BIND:
272371fe804bSLee Schermerhorn 		/*
2724d69b2e63SKOSAKI Motohiro 		 * Insist on a nodelist
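		 * (a bind policy with no nodes to bind to would be
		 * meaningless)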
272571fe804bSLee Schermerhorn */ 2726d69b2e63SKOSAKI Motohiro if (!nodelist) 2727d69b2e63SKOSAKI Motohiro goto out; 2728095f1fc4SLee Schermerhorn } 2729095f1fc4SLee Schermerhorn 273071fe804bSLee Schermerhorn mode_flags = 0; 2731095f1fc4SLee Schermerhorn if (flags) { 2732095f1fc4SLee Schermerhorn /* 2733095f1fc4SLee Schermerhorn * Currently, we only support two mutually exclusive 2734095f1fc4SLee Schermerhorn * mode flags. 2735095f1fc4SLee Schermerhorn */ 2736095f1fc4SLee Schermerhorn if (!strcmp(flags, "static")) 273771fe804bSLee Schermerhorn mode_flags |= MPOL_F_STATIC_NODES; 2738095f1fc4SLee Schermerhorn else if (!strcmp(flags, "relative")) 273971fe804bSLee Schermerhorn mode_flags |= MPOL_F_RELATIVE_NODES; 2740095f1fc4SLee Schermerhorn else 2741926f2ae0SKOSAKI Motohiro goto out; 2742095f1fc4SLee Schermerhorn } 274371fe804bSLee Schermerhorn 274471fe804bSLee Schermerhorn new = mpol_new(mode, mode_flags, &nodes); 274571fe804bSLee Schermerhorn if (IS_ERR(new)) 2746926f2ae0SKOSAKI Motohiro goto out; 2747926f2ae0SKOSAKI Motohiro 2748f2a07f40SHugh Dickins /* 2749f2a07f40SHugh Dickins * Save nodes for mpol_to_str() to show the tmpfs mount options 2750f2a07f40SHugh Dickins * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo. 2751f2a07f40SHugh Dickins */ 2752f2a07f40SHugh Dickins if (mode != MPOL_PREFERRED) 2753f2a07f40SHugh Dickins new->v.nodes = nodes; 2754f2a07f40SHugh Dickins else if (nodelist) 2755f2a07f40SHugh Dickins new->v.preferred_node = first_node(nodes); 2756f2a07f40SHugh Dickins else 2757f2a07f40SHugh Dickins new->flags |= MPOL_F_LOCAL; 2758f2a07f40SHugh Dickins 2759f2a07f40SHugh Dickins /* 2760f2a07f40SHugh Dickins * Save nodes for contextualization: this will be used to "clone" 2761f2a07f40SHugh Dickins * the mempolicy in a specific context [cpuset] at a later time. 2762f2a07f40SHugh Dickins */ 2763e17f74afSLee Schermerhorn new->w.user_nodemask = nodes; 2764f2a07f40SHugh Dickins 2765926f2ae0SKOSAKI Motohiro err = 0; 276671fe804bSLee Schermerhorn 2767095f1fc4SLee Schermerhorn out: 2768095f1fc4SLee Schermerhorn /* Restore string for error message */ 2769095f1fc4SLee Schermerhorn if (nodelist) 2770095f1fc4SLee Schermerhorn *--nodelist = ':'; 2771095f1fc4SLee Schermerhorn if (flags) 2772095f1fc4SLee Schermerhorn *--flags = '='; 277371fe804bSLee Schermerhorn if (!err) 277471fe804bSLee Schermerhorn *mpol = new; 2775095f1fc4SLee Schermerhorn return err; 2776095f1fc4SLee Schermerhorn } 2777095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */ 2778095f1fc4SLee Schermerhorn 277971fe804bSLee Schermerhorn /** 278071fe804bSLee Schermerhorn * mpol_to_str - format a mempolicy structure for printing 278171fe804bSLee Schermerhorn * @buffer: to contain formatted mempolicy string 278271fe804bSLee Schermerhorn * @maxlen: length of @buffer 278371fe804bSLee Schermerhorn * @pol: pointer to mempolicy to be formatted 278471fe804bSLee Schermerhorn * 2785948927eeSDavid Rientjes * Convert @pol into a string. If @buffer is too short, truncate the string. 2786948927eeSDavid Rientjes * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the 2787948927eeSDavid Rientjes * longest flag, "relative", and to display at least a few node ids. 
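 *
 * A usage sketch (mirroring the /proc/<pid>/numa_maps path; buf is
 * illustrative):
 *
 *	char buf[64];
 *	mpol_to_str(buf, sizeof(buf), pol);
 *
 * which might yield, e.g., "interleave=static:0-3".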
27881a75a6c8SChristoph Lameter */ 2789948927eeSDavid Rientjes void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol) 27901a75a6c8SChristoph Lameter { 27911a75a6c8SChristoph Lameter char *p = buffer; 2792948927eeSDavid Rientjes nodemask_t nodes = NODE_MASK_NONE; 2793948927eeSDavid Rientjes unsigned short mode = MPOL_DEFAULT; 2794948927eeSDavid Rientjes unsigned short flags = 0; 27951a75a6c8SChristoph Lameter 27968790c71aSDavid Rientjes if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) { 2797bea904d5SLee Schermerhorn mode = pol->mode; 2798948927eeSDavid Rientjes flags = pol->flags; 2799948927eeSDavid Rientjes } 2800bea904d5SLee Schermerhorn 28011a75a6c8SChristoph Lameter switch (mode) { 28021a75a6c8SChristoph Lameter case MPOL_DEFAULT: 28031a75a6c8SChristoph Lameter break; 28041a75a6c8SChristoph Lameter case MPOL_PREFERRED: 2805fc36b8d3SLee Schermerhorn if (flags & MPOL_F_LOCAL) 2806f2a07f40SHugh Dickins mode = MPOL_LOCAL; 280753f2556bSLee Schermerhorn else 2808fc36b8d3SLee Schermerhorn node_set(pol->v.preferred_node, nodes); 28091a75a6c8SChristoph Lameter break; 28101a75a6c8SChristoph Lameter case MPOL_BIND: 28111a75a6c8SChristoph Lameter case MPOL_INTERLEAVE: 28121a75a6c8SChristoph Lameter nodes = pol->v.nodes; 28131a75a6c8SChristoph Lameter break; 28141a75a6c8SChristoph Lameter default: 2815948927eeSDavid Rientjes WARN_ON_ONCE(1); 2816948927eeSDavid Rientjes snprintf(p, maxlen, "unknown"); 2817948927eeSDavid Rientjes return; 28181a75a6c8SChristoph Lameter } 28191a75a6c8SChristoph Lameter 2820b7a9f420SDavid Rientjes p += snprintf(p, maxlen, "%s", policy_modes[mode]); 28211a75a6c8SChristoph Lameter 2822fc36b8d3SLee Schermerhorn if (flags & MPOL_MODE_FLAGS) { 2823948927eeSDavid Rientjes p += snprintf(p, buffer + maxlen - p, "="); 2824f5b087b5SDavid Rientjes 28252291990aSLee Schermerhorn /* 28262291990aSLee Schermerhorn * Currently, the only defined flags are mutually exclusive 28272291990aSLee Schermerhorn */ 2828f5b087b5SDavid Rientjes if (flags & MPOL_F_STATIC_NODES) 28292291990aSLee Schermerhorn p += snprintf(p, buffer + maxlen - p, "static"); 28302291990aSLee Schermerhorn else if (flags & MPOL_F_RELATIVE_NODES) 28312291990aSLee Schermerhorn p += snprintf(p, buffer + maxlen - p, "relative"); 2832f5b087b5SDavid Rientjes } 2833f5b087b5SDavid Rientjes 28349e763e0fSTejun Heo if (!nodes_empty(nodes)) 28359e763e0fSTejun Heo p += scnprintf(p, buffer + maxlen - p, ":%*pbl", 28369e763e0fSTejun Heo nodemask_pr_args(&nodes)); 28371a75a6c8SChristoph Lameter } 2838