// SPDX-License-Identifier: GPL-2.0-only
/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave	Allocate memory interleaved over a set of nodes,
 *		with normal fallback if it fails.
 *		For VMA based allocations this interleaves based on the
 *		offset into the backing object or offset into the mapping
 *		for anonymous memory. For process policy a per-process
 *		counter is used.
 *
 * bind		Only allocate memory on a specific set of nodes,
 *		no fallback.
 *		FIXME: memory is allocated starting with the first node
 *		to the last. It would be better if bind would truly restrict
 *		the allocation to memory nodes instead.
 *
 * preferred	Try a specific node first before normal fallback.
 *		As a special case NUMA_NO_NODE here means do the allocation
 *		on the local CPU. This is normally identical to default,
 *		but useful to set in a VMA when you have a non-default
 *		process policy.
 *
 * default	Allocate on the local node first, or when on a VMA
 *		use the process policy. This is what Linux always did
 *		in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non-interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem the kernel's lowmem allocations don't get
 * policied. Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/
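/*
 * Illustrative userspace sketch of the modes above -- not part of this
 * file.  It assumes the set_mempolicy()/mbind() wrappers declared in
 * libnuma's <numaif.h>, and that addr/len describe an existing mapping:
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 1);	// nodes 0 and 1
 *
 *	// interleave this task's future allocations across nodes 0-1
 *	set_mempolicy(MPOL_INTERLEAVE, &mask, 8 * sizeof(mask));
 *
 *	// restrict one mapping to node 0 only; this VMA policy then has
 *	// priority over the process policy for faults in [addr, addr+len)
 *	unsigned long node0 = 1UL << 0;
 *	mbind(addr, len, MPOL_BIND, &node0, 8 * sizeof(node0), 0);
 */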
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/pagewalk.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/ptrace.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>
#include <linux/swapops.h>

#include <asm/tlbflush.h>
#include <linux/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

struct mempolicy *get_task_policy(struct task_struct *p)
{
	struct mempolicy *pol = p->mempolicy;
	int node;

	if (pol)
		return pol;

	node = numa_node_id();
	if (node != NUMA_NO_NODE) {
		pol = &preferred_node_policy[node];
		/* preferred_node_policy is not initialised early in boot */
		if (pol->mode)
			return pol;
	}

	return &default_policy;
}

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
} mpol_ops[MPOL_MAX];

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;

	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}
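/*
 * Worked example (illustrative): with a relative nodemask of {0,2} and
 * three allowed nodes {4,5,6}, nodes_fold() wraps the mask onto a 3-bit
 * space (still {0,2}), and nodes_onto() then maps bit 0 onto node 4 and
 * bit 2 onto node 6, so *ret becomes {4,6}.
 */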
static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!nodes)
		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
	else if (nodes_empty(*nodes))
		return -EINVAL;			/* no allowed nodes */
	else
		pol->v.preferred_node = first_node(*nodes);
	return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy.  mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.  But, we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy.  May also be called holding the mmap_semaphore for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
	if (pol == NULL)
		return 0;
	/* Check N_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_MEMORY]);

	VM_BUG_ON(!nodes);
	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
		nodes = NULL;	/* explicit local allocation */
	else {
		if (pol->flags & MPOL_F_RELATIVE_NODES)
			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
		else
			nodes_and(nsc->mask2, *nodes, nsc->mask1);

		if (mpol_store_user_nodemask(pol))
			pol->w.user_nodemask = *nodes;
		else
			pol->w.cpuset_mems_allowed =
						cpuset_current_mems_allowed;
	}

	if (nodes)
		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	else
		ret = mpol_ops[pol->mode].create(pol, NULL);
	return ret;
}

/*
 * This function just creates a new policy, does some checks and simple
 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);
		}
	} else if (mode == MPOL_LOCAL) {
		if (!nodes_empty(*nodes) ||
		    (flags & MPOL_F_STATIC_NODES) ||
		    (flags & MPOL_F_RELATIVE_NODES))
			return ERR_PTR(-EINVAL);
		mode = MPOL_PREFERRED;
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	return policy;
}

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
{
}

static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed,
			    *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}

	if (nodes_empty(tmp))
		tmp = *nodes;

	pol->v.nodes = tmp;
}
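/*
 * Worked example (illustrative): an MPOL_INTERLEAVE policy created with
 * MPOL_F_STATIC_NODES and user_nodemask {0,1}.  If the task's cpuset is
 * later moved to nodes {1,2}, the rebind above intersects the two masks
 * and interleaving continues over {1}.  Without either MPOL_F_*_NODES
 * flag the old mask is remapped onto the new mems_allowed instead, and
 * an empty result falls back to the new mask itself.
 */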
static void mpol_rebind_preferred(struct mempolicy *pol,
				  const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES) {
		int node = first_node(pol->w.user_nodemask);

		if (node_isset(node, *nodes)) {
			pol->v.preferred_node = node;
			pol->flags &= ~MPOL_F_LOCAL;
		} else
			pol->flags |= MPOL_F_LOCAL;
	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
		pol->v.preferred_node = first_node(tmp);
	} else if (!(pol->flags & MPOL_F_LOCAL)) {
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						   pol->w.cpuset_mems_allowed,
						   *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}
}

/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * Per-vma policies are protected by mmap_sem. Allocations using per-task
 * policies are protected by task->mems_allowed_seq to prevent a premature
 * OOM/allocation failure due to parallel nodemask modification.
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
{
	if (!pol)
		return;
	if (!mpol_store_user_nodemask(pol) && !(pol->flags & MPOL_F_LOCAL) &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;

	mpol_ops[pol->mode].rebind(pol, newmask);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */
void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
{
	mpol_rebind_policy(tsk->mempolicy, new);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
 */
void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new);
	up_write(&mm->mmap_sem);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_interleave,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,
	},
};

static int migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

struct queue_pages {
	struct list_head *pagelist;
	unsigned long flags;
	nodemask_t *nmask;
	unsigned long start;
	unsigned long end;
	struct vm_area_struct *first;
};
/*
 * Check if the page's nid is in qp->nmask.
 *
 * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
 * in the invert of qp->nmask.
 */
static inline bool queue_pages_required(struct page *page,
					struct queue_pages *qp)
{
	int nid = page_to_nid(page);
	unsigned long flags = qp->flags;

	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
}
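/*
 * Illustrative: with *qp->nmask = {0,1}, a page on node 0 passes the
 * check above.  With MPOL_MF_INVERT set, only pages on nodes outside
 * {0,1} pass, which is how misplaced pages are singled out for
 * migration.
 */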
/*
 * queue_pages_pmd() has four possible return values:
 * 0 - pages are placed on the right node or queued successfully.
 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * 2 - THP was split.
 * -EIO - the pmd is a migration entry, or only MPOL_MF_STRICT was
 *        specified and an existing page was already on a node that does
 *        not follow the policy.
 */
static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	int ret = 0;
	struct page *page;
	struct queue_pages *qp = walk->private;
	unsigned long flags;

	if (unlikely(is_pmd_migration_entry(*pmd))) {
		ret = -EIO;
		goto unlock;
	}
	page = pmd_page(*pmd);
	if (is_huge_zero_page(page)) {
		spin_unlock(ptl);
		__split_huge_pmd(walk->vma, pmd, addr, false, NULL);
		ret = 2;
		goto out;
	}
	if (!queue_pages_required(page, qp))
		goto unlock;

	flags = qp->flags;
	/* go to thp migration */
	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
		if (!vma_migratable(walk->vma) ||
		    migrate_page_add(page, qp->pagelist, flags)) {
			ret = 1;
			goto unlock;
		}
	} else
		ret = -EIO;
unlock:
	spin_unlock(ptl);
out:
	return ret;
}

/*
 * Scan through pages checking if pages follow certain conditions,
 * and move them to the pagelist if they do.
 *
 * queue_pages_pte_range() has three possible return values:
 * 0 - pages are placed on the right node or queued successfully.
 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
 *        on a node that does not follow the policy.
 */
static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct page *page;
	struct queue_pages *qp = walk->private;
	unsigned long flags = qp->flags;
	int ret;
	bool has_unmovable = false;
	pte_t *pte;
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
		if (ret != 2)
			return ret;
	}
	/* THP was split, fall through to pte walk */

	if (pmd_trans_unstable(pmd))
		return 0;

	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * vm_normal_page() filters out zero pages, but there might
		 * still be PageReserved pages to skip, perhaps in a VDSO.
		 */
		if (PageReserved(page))
			continue;
		if (!queue_pages_required(page, qp))
			continue;
		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
			/* MPOL_MF_STRICT must be specified if we get here */
			if (!vma_migratable(vma)) {
				has_unmovable = true;
				break;
			}

			/*
			 * Do not abort immediately since there may be
			 * temporary off LRU pages in the range.  Still
			 * need to migrate other LRU pages.
			 */
			if (migrate_page_add(page, qp->pagelist, flags))
				has_unmovable = true;
		} else
			break;
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();

	if (has_unmovable)
		return 1;

	return addr != end ? -EIO : 0;
}
static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
			       unsigned long addr, unsigned long end,
			       struct mm_walk *walk)
{
	int ret = 0;
#ifdef CONFIG_HUGETLB_PAGE
	struct queue_pages *qp = walk->private;
	unsigned long flags = (qp->flags & MPOL_MF_VALID);
	struct page *page;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry))
		goto unlock;
	page = pte_page(entry);
	if (!queue_pages_required(page, qp))
		goto unlock;

	if (flags == MPOL_MF_STRICT) {
		/*
		 * STRICT alone means only detecting misplaced pages and no
		 * need to further check other vmas.
		 */
		ret = -EIO;
		goto unlock;
	}

	if (!vma_migratable(walk->vma)) {
		/*
		 * Must be STRICT with MOVE*, otherwise .test_walk() would
		 * have stopped walking the current vma.
		 * A misplaced page was detected, but allow migrating pages
		 * which have been queued.
		 */
		ret = 1;
		goto unlock;
	}

	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
	if (flags & (MPOL_MF_MOVE_ALL) ||
	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) {
		if (!isolate_huge_page(page, qp->pagelist) &&
		    (flags & MPOL_MF_STRICT))
			/*
			 * Failed to isolate page but allow migrating pages
			 * which have been queued.
			 */
			ret = 1;
	}
unlock:
	spin_unlock(ptl);
#else
	BUG();
#endif
	return ret;
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * This is used to mark a range of virtual addresses to be inaccessible.
 * These are later cleared by a NUMA hinting fault. Depending on these
 * faults, pages may be migrated for better NUMA placement.
 *
 * This is assuming that NUMA faults are handled using PROT_NONE. If
 * an architecture makes a different choice, it will need further
 * changes to the core.
 */
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	int nr_updated;

	nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
	if (nr_updated)
		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);

	return nr_updated;
}
#else
static unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */
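/*
 * .test_walk() callback for walk_page_range(): returning 0 means "walk
 * the page tables of this vma", 1 means "skip this vma" without
 * reporting an error, and a negative errno aborts the whole walk.
 */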
static int queue_pages_test_walk(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct queue_pages *qp = walk->private;
	unsigned long endvma = vma->vm_end;
	unsigned long flags = qp->flags;

	/* range check first */
	VM_BUG_ON_VMA((vma->vm_start > start) || (vma->vm_end < end), vma);

	if (!qp->first) {
		qp->first = vma;
		if (!(flags & MPOL_MF_DISCONTIG_OK) &&
			(qp->start < vma->vm_start))
			/* hole at head side of range */
			return -EFAULT;
	}
	if (!(flags & MPOL_MF_DISCONTIG_OK) &&
		((vma->vm_end < qp->end) &&
		(!vma->vm_next || vma->vm_end < vma->vm_next->vm_start)))
		/* hole at middle or tail of range */
		return -EFAULT;

	/*
	 * Need to check MPOL_MF_STRICT to return -EIO if possible
	 * regardless of vma_migratable
	 */
	if (!vma_migratable(vma) &&
	    !(flags & MPOL_MF_STRICT))
		return 1;

	if (endvma > end)
		endvma = end;

	if (flags & MPOL_MF_LAZY) {
		/* Similar to task_numa_work, skip inaccessible VMAs */
		if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) &&
			!(vma->vm_flags & VM_MIXEDMAP))
			change_prot_numa(vma, start, endvma);
		return 1;
	}

	/* queue pages from current vma */
	if (flags & MPOL_MF_VALID)
		return 0;
	return 1;
}

static const struct mm_walk_ops queue_pages_walk_ops = {
	.hugetlb_entry		= queue_pages_hugetlb,
	.pmd_entry		= queue_pages_pte_range,
	.test_walk		= queue_pages_test_walk,
};

/*
 * Walk through page tables and collect pages to be migrated.
 *
 * If pages found in a given range are on a set of nodes (determined by
 * @nodes and @flags), they are isolated and queued to the pagelist which
 * is passed via @private.
 *
 * queue_pages_range() has three possible return values:
 * 1 - there is an unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * 0 - queue pages successfully or no misplaced page.
 * errno - i.e. misplaced pages with MPOL_MF_STRICT specified (-EIO) or
 *         memory range specified by nodemask and maxnode points outside
 *         your accessible address space (-EFAULT)
 */
static int
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		nodemask_t *nodes, unsigned long flags,
		struct list_head *pagelist)
{
	int err;
	struct queue_pages qp = {
		.pagelist = pagelist,
		.flags = flags,
		.nmask = nodes,
		.start = start,
		.end = end,
		.first = NULL,
	};

	err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);

	if (!qp.first)
		/* whole range in hole */
		err = -EFAULT;

	return err;
}
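/*
 * Illustrative callers: migrate_to_node() below queues everything found
 * on the source node (nmask = {source}, no MPOL_MF_INVERT), while
 * do_mbind() passes MPOL_MF_INVERT so that only pages outside the
 * requested nodemask -- the misplaced ones -- are queued.
 */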
/*
 * Apply policy to a single VMA
 * This must be called with the mmap_sem held for writing.
 */
static int vma_replace_policy(struct vm_area_struct *vma,
				struct mempolicy *pol)
{
	int err;
	struct mempolicy *old;
	struct mempolicy *new;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	new = mpol_dup(pol);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (vma->vm_ops && vma->vm_ops->set_policy) {
		err = vma->vm_ops->set_policy(vma, new);
		if (err)
			goto err_out;
	}

	old = vma->vm_policy;
	vma->vm_policy = new; /* protected by mmap_sem */
	mpol_put(old);

	return 0;
 err_out:
	mpol_put(new);
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end, struct mempolicy *new_pol)
{
	struct vm_area_struct *next;
	struct vm_area_struct *prev;
	struct vm_area_struct *vma;
	int err = 0;
	pgoff_t pgoff;
	unsigned long vmstart;
	unsigned long vmend;

	vma = find_vma(mm, start);
	VM_BUG_ON(!vma);

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
		next = vma->vm_next;
		vmstart = max(start, vma->vm_start);
		vmend   = min(end, vma->vm_end);

		if (mpol_equal(vma_policy(vma), new_pol))
			continue;

		pgoff = vma->vm_pgoff +
			((vmstart - vma->vm_start) >> PAGE_SHIFT);
		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
				 vma->anon_vma, vma->vm_file, pgoff,
				 new_pol, vma->vm_userfaultfd_ctx);
		if (prev) {
			vma = prev;
			next = vma->vm_next;
			if (mpol_equal(vma_policy(vma), new_pol))
				continue;
			/* vma_merge() joined vma && vma->next, case 8 */
			goto replace;
		}
		if (vma->vm_start != vmstart) {
			err = split_vma(vma->vm_mm, vma, vmstart, 1);
			if (err)
				goto out;
		}
		if (vma->vm_end != vmend) {
			err = split_vma(vma->vm_mm, vma, vmend, 0);
			if (err)
				goto out;
		}
 replace:
		err = vma_replace_policy(vma, new_pol);
		if (err)
			goto out;
	}

 out:
	return err;
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new, *old;
	NODEMASK_SCRATCH(scratch);
	int ret;

	if (!scratch)
		return -ENOMEM;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new)) {
		ret = PTR_ERR(new);
		goto out;
	}

	task_lock(current);
	ret = mpol_set_nodemask(new, nodes, scratch);
	if (ret) {
		task_unlock(current);
		mpol_put(new);
		goto out;
	}
	old = current->mempolicy;
	current->mempolicy = new;
	if (new && new->mode == MPOL_INTERLEAVE)
		current->il_prev = MAX_NUMNODES-1;
	task_unlock(current);
	mpol_put(old);
	ret = 0;
out:
	NODEMASK_SCRATCH_FREE(scratch);
	return ret;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (p == &default_policy)
		return;

	switch (p->mode) {
	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		if (!(p->flags & MPOL_F_LOCAL))
			node_set(p->v.preferred_node, *nodes);
		/* else return empty node mask for local allocation */
		break;
	default:
		BUG();
	}
}
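/*
 * Look up the NUMA node of the page backing @addr, or return a negative
 * errno.  A sketch of the locking as understood here: the caller holds
 * mm->mmap_sem for read, get_user_pages_locked() may drop it, and it is
 * released in all cases before returning.
 */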
static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p;
	int err;
	int locked = 1;

	err = get_user_pages_locked(addr & PAGE_MASK, 1, 0, &p, &locked);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	if (locked)
		up_read(&mm->mmap_sem);
	return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;

	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL.  We
		 * want to return MPOL_DEFAULT in this case.
		 */
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			/*
			 * Take a refcount on the mpol, lookup_node()
			 * will drop the mmap_sem, so after calling
			 * lookup_node() only "pol" remains valid, "vma"
			 * is stale.
			 */
			pol_refcount = pol;
			vma = NULL;
			mpol_get(pol);
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = next_node_in(current->il_prev, pol->v.nodes);
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {
			*nmask = pol->w.user_nodemask;
		} else {
			task_lock(current);
			get_policy_nodemask(pol, nmask);
			task_unlock(current);
		}
	}

 out:
	mpol_cond_put(pol);
	if (vma)
		up_read(&mm->mmap_sem);
	if (pol_refcount)
		mpol_put(pol_refcount);
	return err;
}
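/*
 * Illustrative userspace counterpart (assumes <numaif.h>; not part of
 * this file) querying the policy of the mapping that contains addr:
 *
 *	int mode;
 *	unsigned long mask;
 *	get_mempolicy(&mode, &mask, 8 * sizeof(mask), addr, MPOL_F_ADDR);
 */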
#ifdef CONFIG_MIGRATION
/*
 * page migration, thp tail pages can be passed.
 */
static int migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	struct page *head = compound_head(page);
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
		if (!isolate_lru_page(head)) {
			list_add_tail(&head->lru, pagelist);
			mod_node_page_state(page_pgdat(head),
				NR_ISOLATED_ANON + page_is_file_cache(head),
				hpage_nr_pages(head));
		} else if (flags & MPOL_MF_STRICT) {
			/*
			 * Non-movable page may reach here.  And, there may be
			 * temporary off LRU pages or non-LRU movable pages.
			 * Treat them as unmovable pages since they can't be
			 * isolated, so they can't be moved at the moment.  It
			 * should return -EIO for this case too.
			 */
			return -EIO;
		}
	}

	return 0;
}

/* page allocation callback for NUMA node migration */
struct page *alloc_new_node_page(struct page *page, unsigned long node)
{
	if (PageHuge(page))
		return alloc_huge_page_node(page_hstate(compound_head(page)),
					node);
	else if (PageTransHuge(page)) {
		struct page *thp;

		thp = alloc_pages_node(node,
			(GFP_TRANSHUGE | __GFP_THISNODE),
			HPAGE_PMD_ORDER);
		if (!thp)
			return NULL;
		prep_transhuge_page(thp);
		return thp;
	} else
		return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
						    __GFP_THISNODE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;

	nodes_clear(nmask);
	node_set(source, nmask);

	/*
	 * This does not "check" the range but isolates all pages that
	 * need migration.  Between passing in the full user address
	 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
	 */
	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, alloc_new_node_page, NULL, dest,
					MIGRATE_SYNC, MR_SYSCALL);
		if (err)
			putback_movable_pages(&pagelist);
	}

	return err;
}
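/*
 * Illustrative: migrate_to_node(mm, 0, 3, MPOL_MF_MOVE) walks the whole
 * address space and moves every movable page found on node 0 over to
 * node 3.
 */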
/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	int busy = 0;
	int err;
	nodemask_t tmp;

	err = migrate_prep();
	if (err)
		return err;

	down_read(&mm->mmap_sem);

	/*
	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
	 * bit in 'tmp', and return that <source, dest> pair for migration.
	 * The pair of nodemasks 'to' and 'from' define the map.
	 *
	 * If no pair of bits is found that way, fall back to picking some
	 * pair of 'source' and 'dest' bits that are not the same.  If the
	 * 'source' and 'dest' bits are the same, this represents a node
	 * that will be migrating to itself, so no pages need move.
	 *
	 * If no bits are left in 'tmp', or if all remaining bits left
	 * in 'tmp' correspond to the same bit in 'to', return false
	 * (nothing left to migrate).
	 *
	 * This lets us pick a pair of nodes to migrate between, such that
	 * if possible the dest node is not already occupied by some other
	 * source node, minimizing the risk of overloading the memory on a
	 * node that would happen if we migrated incoming memory to a node
	 * before migrating the outgoing memory from that same node.
	 *
	 * A single scan of tmp is sufficient.  As we go, we remember the
	 * most recent <s, d> pair that moved (s != d).  If we find a pair
	 * that not only moved, but what's better, moved to an empty slot
	 * (d is not set in tmp), then we break out then, with that pair.
	 * Otherwise when we finish scanning tmp, we at least have the
	 * most recent <s, d> pair that moved.  If we get all the way through
	 * the scan of tmp without finding any node that moved, much less
	 * moved to an empty node, then there is nothing left worth migrating.
	 */
11447e2ab150SChristoph Lameter 	 */
11457e2ab150SChristoph Lameter 
11460ce72d4fSAndrew Morton 	tmp = *from;
11477e2ab150SChristoph Lameter 	while (!nodes_empty(tmp)) {
11487e2ab150SChristoph Lameter 		int s, d;
1149b76ac7e7SJianguo Wu 		int source = NUMA_NO_NODE;
11507e2ab150SChristoph Lameter 		int dest = 0;
11517e2ab150SChristoph Lameter 
11527e2ab150SChristoph Lameter 		for_each_node_mask(s, tmp) {
11534a5b18ccSLarry Woodman 
11544a5b18ccSLarry Woodman 			/*
11554a5b18ccSLarry Woodman 			 * do_migrate_pages() tries to maintain the relative
11564a5b18ccSLarry Woodman 			 * node relationship of the pages established between
11574a5b18ccSLarry Woodman 			 * threads and memory areas.
11584a5b18ccSLarry Woodman 			 *
11594a5b18ccSLarry Woodman 			 * However, if the number of source nodes is not equal to
11604a5b18ccSLarry Woodman 			 * the number of destination nodes we cannot preserve
11614a5b18ccSLarry Woodman 			 * this node relative relationship. In that case, skip
11624a5b18ccSLarry Woodman 			 * copying memory from a node that is in the destination
11634a5b18ccSLarry Woodman 			 * mask.
11644a5b18ccSLarry Woodman 			 *
11654a5b18ccSLarry Woodman 			 * Example: [2,3,4] -> [3,4,5] moves everything.
11664a5b18ccSLarry Woodman 			 * [0-7] -> [3,4,5] moves only 0,1,2,6,7.
11674a5b18ccSLarry Woodman 			 */
11684a5b18ccSLarry Woodman 
11690ce72d4fSAndrew Morton 			if ((nodes_weight(*from) != nodes_weight(*to)) &&
11700ce72d4fSAndrew Morton 						(node_isset(s, *to)))
11714a5b18ccSLarry Woodman 				continue;
11724a5b18ccSLarry Woodman 
11730ce72d4fSAndrew Morton 			d = node_remap(s, *from, *to);
11747e2ab150SChristoph Lameter 			if (s == d)
11757e2ab150SChristoph Lameter 				continue;
11767e2ab150SChristoph Lameter 
11777e2ab150SChristoph Lameter 			source = s;	/* Node moved. Memorize */
11787e2ab150SChristoph Lameter 			dest = d;
11797e2ab150SChristoph Lameter 
11807e2ab150SChristoph Lameter 			/* dest not in remaining from nodes? */
11817e2ab150SChristoph Lameter 			if (!node_isset(dest, tmp))
11827e2ab150SChristoph Lameter 				break;
11837e2ab150SChristoph Lameter 		}
1184b76ac7e7SJianguo Wu 		if (source == NUMA_NO_NODE)
11857e2ab150SChristoph Lameter 			break;
11867e2ab150SChristoph Lameter 
11877e2ab150SChristoph Lameter 		node_clear(source, tmp);
11887e2ab150SChristoph Lameter 		err = migrate_to_node(mm, source, dest, flags);
11897e2ab150SChristoph Lameter 		if (err > 0)
11907e2ab150SChristoph Lameter 			busy += err;
11917e2ab150SChristoph Lameter 		if (err < 0)
11927e2ab150SChristoph Lameter 			break;
119339743889SChristoph Lameter 	}
119439743889SChristoph Lameter 	up_read(&mm->mmap_sem);
11957e2ab150SChristoph Lameter 	if (err < 0)
11967e2ab150SChristoph Lameter 		return err;
11977e2ab150SChristoph Lameter 	return busy;
1198b20a3503SChristoph Lameter 
119939743889SChristoph Lameter }
120039743889SChristoph Lameter 
12013ad33b24SLee Schermerhorn /*
12023ad33b24SLee Schermerhorn  * Allocate a new page for page migration based on vma policy.
1203d05f0cdcSHugh Dickins  * Start by assuming the page is mapped by the same vma that contains @start.
12043ad33b24SLee Schermerhorn  * Search forward from there, if not. N.B., this assumes that the
12053ad33b24SLee Schermerhorn  * list of pages handed to migrate_pages()--which is how we get here--
12063ad33b24SLee Schermerhorn  * is in virtual address order.
12073ad33b24SLee Schermerhorn */ 1208666feb21SMichal Hocko static struct page *new_page(struct page *page, unsigned long start) 120995a402c3SChristoph Lameter { 1210d05f0cdcSHugh Dickins struct vm_area_struct *vma; 12113ad33b24SLee Schermerhorn unsigned long uninitialized_var(address); 121295a402c3SChristoph Lameter 1213d05f0cdcSHugh Dickins vma = find_vma(current->mm, start); 12143ad33b24SLee Schermerhorn while (vma) { 12153ad33b24SLee Schermerhorn address = page_address_in_vma(page, vma); 12163ad33b24SLee Schermerhorn if (address != -EFAULT) 12173ad33b24SLee Schermerhorn break; 12183ad33b24SLee Schermerhorn vma = vma->vm_next; 12193ad33b24SLee Schermerhorn } 12203ad33b24SLee Schermerhorn 122111c731e8SWanpeng Li if (PageHuge(page)) { 1222389c8178SMichal Hocko return alloc_huge_page_vma(page_hstate(compound_head(page)), 1223389c8178SMichal Hocko vma, address); 122494723aafSMichal Hocko } else if (PageTransHuge(page)) { 1225c8633798SNaoya Horiguchi struct page *thp; 1226c8633798SNaoya Horiguchi 122719deb769SDavid Rientjes thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address, 122819deb769SDavid Rientjes HPAGE_PMD_ORDER); 1229c8633798SNaoya Horiguchi if (!thp) 1230c8633798SNaoya Horiguchi return NULL; 1231c8633798SNaoya Horiguchi prep_transhuge_page(thp); 1232c8633798SNaoya Horiguchi return thp; 123311c731e8SWanpeng Li } 123411c731e8SWanpeng Li /* 123511c731e8SWanpeng Li * if !vma, alloc_page_vma() will use task or system default policy 123611c731e8SWanpeng Li */ 12370f556856SMichal Hocko return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL, 12380f556856SMichal Hocko vma, address); 123995a402c3SChristoph Lameter } 1240b20a3503SChristoph Lameter #else 1241b20a3503SChristoph Lameter 1242a53190a4SYang Shi static int migrate_page_add(struct page *page, struct list_head *pagelist, 1243b20a3503SChristoph Lameter unsigned long flags) 1244b20a3503SChristoph Lameter { 1245a53190a4SYang Shi return -EIO; 1246b20a3503SChristoph Lameter } 1247b20a3503SChristoph Lameter 12480ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, 12490ce72d4fSAndrew Morton const nodemask_t *to, int flags) 1250b20a3503SChristoph Lameter { 1251b20a3503SChristoph Lameter return -ENOSYS; 1252b20a3503SChristoph Lameter } 125395a402c3SChristoph Lameter 1254666feb21SMichal Hocko static struct page *new_page(struct page *page, unsigned long start) 125595a402c3SChristoph Lameter { 125695a402c3SChristoph Lameter return NULL; 125795a402c3SChristoph Lameter } 1258b20a3503SChristoph Lameter #endif 1259b20a3503SChristoph Lameter 1260dbcb0f19SAdrian Bunk static long do_mbind(unsigned long start, unsigned long len, 1261028fec41SDavid Rientjes unsigned short mode, unsigned short mode_flags, 1262028fec41SDavid Rientjes nodemask_t *nmask, unsigned long flags) 12636ce3c4c0SChristoph Lameter { 12646ce3c4c0SChristoph Lameter struct mm_struct *mm = current->mm; 12656ce3c4c0SChristoph Lameter struct mempolicy *new; 12666ce3c4c0SChristoph Lameter unsigned long end; 12676ce3c4c0SChristoph Lameter int err; 1268d8835445SYang Shi int ret; 12696ce3c4c0SChristoph Lameter LIST_HEAD(pagelist); 12706ce3c4c0SChristoph Lameter 1271b24f53a0SLee Schermerhorn if (flags & ~(unsigned long)MPOL_MF_VALID) 12726ce3c4c0SChristoph Lameter return -EINVAL; 127374c00241SChristoph Lameter if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE)) 12746ce3c4c0SChristoph Lameter return -EPERM; 12756ce3c4c0SChristoph Lameter 12766ce3c4c0SChristoph Lameter if (start & ~PAGE_MASK) 12776ce3c4c0SChristoph Lameter return -EINVAL; 
12786ce3c4c0SChristoph Lameter 12796ce3c4c0SChristoph Lameter if (mode == MPOL_DEFAULT) 12806ce3c4c0SChristoph Lameter flags &= ~MPOL_MF_STRICT; 12816ce3c4c0SChristoph Lameter 12826ce3c4c0SChristoph Lameter len = (len + PAGE_SIZE - 1) & PAGE_MASK; 12836ce3c4c0SChristoph Lameter end = start + len; 12846ce3c4c0SChristoph Lameter 12856ce3c4c0SChristoph Lameter if (end < start) 12866ce3c4c0SChristoph Lameter return -EINVAL; 12876ce3c4c0SChristoph Lameter if (end == start) 12886ce3c4c0SChristoph Lameter return 0; 12896ce3c4c0SChristoph Lameter 1290028fec41SDavid Rientjes new = mpol_new(mode, mode_flags, nmask); 12916ce3c4c0SChristoph Lameter if (IS_ERR(new)) 12926ce3c4c0SChristoph Lameter return PTR_ERR(new); 12936ce3c4c0SChristoph Lameter 1294b24f53a0SLee Schermerhorn if (flags & MPOL_MF_LAZY) 1295b24f53a0SLee Schermerhorn new->flags |= MPOL_F_MOF; 1296b24f53a0SLee Schermerhorn 12976ce3c4c0SChristoph Lameter /* 12986ce3c4c0SChristoph Lameter * If we are using the default policy then operation 12996ce3c4c0SChristoph Lameter * on discontinuous address spaces is okay after all 13006ce3c4c0SChristoph Lameter */ 13016ce3c4c0SChristoph Lameter if (!new) 13026ce3c4c0SChristoph Lameter flags |= MPOL_MF_DISCONTIG_OK; 13036ce3c4c0SChristoph Lameter 1304028fec41SDavid Rientjes pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n", 1305028fec41SDavid Rientjes start, start + len, mode, mode_flags, 130600ef2d2fSDavid Rientjes nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE); 13076ce3c4c0SChristoph Lameter 13080aedadf9SChristoph Lameter if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 13090aedadf9SChristoph Lameter 13100aedadf9SChristoph Lameter err = migrate_prep(); 13110aedadf9SChristoph Lameter if (err) 1312b05ca738SKOSAKI Motohiro goto mpol_out; 13130aedadf9SChristoph Lameter } 13144bfc4495SKAMEZAWA Hiroyuki { 13154bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 13164bfc4495SKAMEZAWA Hiroyuki if (scratch) { 13176ce3c4c0SChristoph Lameter down_write(&mm->mmap_sem); 131858568d2aSMiao Xie task_lock(current); 13194bfc4495SKAMEZAWA Hiroyuki err = mpol_set_nodemask(new, nmask, scratch); 132058568d2aSMiao Xie task_unlock(current); 13214bfc4495SKAMEZAWA Hiroyuki if (err) 132258568d2aSMiao Xie up_write(&mm->mmap_sem); 13234bfc4495SKAMEZAWA Hiroyuki } else 13244bfc4495SKAMEZAWA Hiroyuki err = -ENOMEM; 13254bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 13264bfc4495SKAMEZAWA Hiroyuki } 1327b05ca738SKOSAKI Motohiro if (err) 1328b05ca738SKOSAKI Motohiro goto mpol_out; 1329b05ca738SKOSAKI Motohiro 1330d8835445SYang Shi ret = queue_pages_range(mm, start, end, nmask, 13316ce3c4c0SChristoph Lameter flags | MPOL_MF_INVERT, &pagelist); 1332d8835445SYang Shi 1333d8835445SYang Shi if (ret < 0) { 1334a85dfc30SYang Shi err = ret; 1335d8835445SYang Shi goto up_out; 1336d8835445SYang Shi } 1337d8835445SYang Shi 13389d8cebd4SKOSAKI Motohiro err = mbind_range(mm, start, end, new); 13397e2ab150SChristoph Lameter 1340b24f53a0SLee Schermerhorn if (!err) { 1341b24f53a0SLee Schermerhorn int nr_failed = 0; 1342b24f53a0SLee Schermerhorn 1343cf608ac1SMinchan Kim if (!list_empty(&pagelist)) { 1344b24f53a0SLee Schermerhorn WARN_ON_ONCE(flags & MPOL_MF_LAZY); 1345d05f0cdcSHugh Dickins nr_failed = migrate_pages(&pagelist, new_page, NULL, 1346d05f0cdcSHugh Dickins start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND); 1347cf608ac1SMinchan Kim if (nr_failed) 134874060e4dSNaoya Horiguchi putback_movable_pages(&pagelist); 1349cf608ac1SMinchan Kim } 13506ce3c4c0SChristoph Lameter 1351d8835445SYang Shi if ((ret > 0) || (nr_failed && (flags & 
MPOL_MF_STRICT)))
13526ce3c4c0SChristoph Lameter 			err = -EIO;
1353a85dfc30SYang Shi 	} else {
1354d8835445SYang Shi up_out:
1355a85dfc30SYang Shi 		if (!list_empty(&pagelist))
1356a85dfc30SYang Shi 			putback_movable_pages(&pagelist);
1357a85dfc30SYang Shi 	}
1358a85dfc30SYang Shi 
13596ce3c4c0SChristoph Lameter 	up_write(&mm->mmap_sem);
1360b05ca738SKOSAKI Motohiro mpol_out:
1361f0be3d32SLee Schermerhorn 	mpol_put(new);
13626ce3c4c0SChristoph Lameter 	return err;
13636ce3c4c0SChristoph Lameter }
13646ce3c4c0SChristoph Lameter 
136539743889SChristoph Lameter /*
13668bccd85fSChristoph Lameter  * User space interface with variable sized bitmaps for nodelists.
13678bccd85fSChristoph Lameter  */
13688bccd85fSChristoph Lameter 
13698bccd85fSChristoph Lameter /* Copy a node mask from user space. */
137039743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
13718bccd85fSChristoph Lameter 		     unsigned long maxnode)
13728bccd85fSChristoph Lameter {
13738bccd85fSChristoph Lameter 	unsigned long k;
137456521e7aSYisheng Xie 	unsigned long t;
13758bccd85fSChristoph Lameter 	unsigned long nlongs;
13768bccd85fSChristoph Lameter 	unsigned long endmask;
13778bccd85fSChristoph Lameter 
13788bccd85fSChristoph Lameter 	--maxnode;
13798bccd85fSChristoph Lameter 	nodes_clear(*nodes);
13808bccd85fSChristoph Lameter 	if (maxnode == 0 || !nmask)
13818bccd85fSChristoph Lameter 		return 0;
1382a9c930baSAndi Kleen 	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1383636f13c1SChris Wright 		return -EINVAL;
13848bccd85fSChristoph Lameter 
13858bccd85fSChristoph Lameter 	nlongs = BITS_TO_LONGS(maxnode);
13868bccd85fSChristoph Lameter 	if ((maxnode % BITS_PER_LONG) == 0)
13878bccd85fSChristoph Lameter 		endmask = ~0UL;
13888bccd85fSChristoph Lameter 	else
13898bccd85fSChristoph Lameter 		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
13908bccd85fSChristoph Lameter 
139156521e7aSYisheng Xie 	/*
139256521e7aSYisheng Xie 	 * When the user specifies more nodes than supported, just check
139356521e7aSYisheng Xie 	 * that the non-supported part is all zero.
139456521e7aSYisheng Xie 	 *
139556521e7aSYisheng Xie 	 * If maxnode has more longs than MAX_NUMNODES, check
139656521e7aSYisheng Xie 	 * the bits in that area first. And then go through to
139756521e7aSYisheng Xie 	 * check the remaining bits, which are at or above MAX_NUMNODES.
139856521e7aSYisheng Xie 	 * Otherwise, just check bits [MAX_NUMNODES, maxnode).
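	 *
	 * Worked example (editor's note; assumes MAX_NUMNODES == 1024 and
	 * 64-bit longs, neither of which is guaranteed here): a user maxnode
	 * of 1089 becomes 1088 bits after the decrement, so nlongs is 17.
	 * The 17th long lies entirely above MAX_NUMNODES and must be all
	 * zero; nlongs is then clamped to 16 and only those longs are
	 * copied into *nodes.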
139956521e7aSYisheng Xie */ 14008bccd85fSChristoph Lameter if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) { 14018bccd85fSChristoph Lameter for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) { 14028bccd85fSChristoph Lameter if (get_user(t, nmask + k)) 14038bccd85fSChristoph Lameter return -EFAULT; 14048bccd85fSChristoph Lameter if (k == nlongs - 1) { 14058bccd85fSChristoph Lameter if (t & endmask) 14068bccd85fSChristoph Lameter return -EINVAL; 14078bccd85fSChristoph Lameter } else if (t) 14088bccd85fSChristoph Lameter return -EINVAL; 14098bccd85fSChristoph Lameter } 14108bccd85fSChristoph Lameter nlongs = BITS_TO_LONGS(MAX_NUMNODES); 14118bccd85fSChristoph Lameter endmask = ~0UL; 14128bccd85fSChristoph Lameter } 14138bccd85fSChristoph Lameter 141456521e7aSYisheng Xie if (maxnode > MAX_NUMNODES && MAX_NUMNODES % BITS_PER_LONG != 0) { 141556521e7aSYisheng Xie unsigned long valid_mask = endmask; 141656521e7aSYisheng Xie 141756521e7aSYisheng Xie valid_mask &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1); 141856521e7aSYisheng Xie if (get_user(t, nmask + nlongs - 1)) 141956521e7aSYisheng Xie return -EFAULT; 142056521e7aSYisheng Xie if (t & valid_mask) 142156521e7aSYisheng Xie return -EINVAL; 142256521e7aSYisheng Xie } 142356521e7aSYisheng Xie 14248bccd85fSChristoph Lameter if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long))) 14258bccd85fSChristoph Lameter return -EFAULT; 14268bccd85fSChristoph Lameter nodes_addr(*nodes)[nlongs-1] &= endmask; 14278bccd85fSChristoph Lameter return 0; 14288bccd85fSChristoph Lameter } 14298bccd85fSChristoph Lameter 14308bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */ 14318bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode, 14328bccd85fSChristoph Lameter nodemask_t *nodes) 14338bccd85fSChristoph Lameter { 14348bccd85fSChristoph Lameter unsigned long copy = ALIGN(maxnode-1, 64) / 8; 1435050c17f2SRalph Campbell unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long); 14368bccd85fSChristoph Lameter 14378bccd85fSChristoph Lameter if (copy > nbytes) { 14388bccd85fSChristoph Lameter if (copy > PAGE_SIZE) 14398bccd85fSChristoph Lameter return -EINVAL; 14408bccd85fSChristoph Lameter if (clear_user((char __user *)mask + nbytes, copy - nbytes)) 14418bccd85fSChristoph Lameter return -EFAULT; 14428bccd85fSChristoph Lameter copy = nbytes; 14438bccd85fSChristoph Lameter } 14448bccd85fSChristoph Lameter return copy_to_user(mask, nodes_addr(*nodes), copy) ? 
-EFAULT : 0; 14458bccd85fSChristoph Lameter } 14468bccd85fSChristoph Lameter 1447e7dc9ad6SDominik Brodowski static long kernel_mbind(unsigned long start, unsigned long len, 1448e7dc9ad6SDominik Brodowski unsigned long mode, const unsigned long __user *nmask, 1449e7dc9ad6SDominik Brodowski unsigned long maxnode, unsigned int flags) 14508bccd85fSChristoph Lameter { 14518bccd85fSChristoph Lameter nodemask_t nodes; 14528bccd85fSChristoph Lameter int err; 1453028fec41SDavid Rientjes unsigned short mode_flags; 14548bccd85fSChristoph Lameter 1455057d3389SAndrey Konovalov start = untagged_addr(start); 1456028fec41SDavid Rientjes mode_flags = mode & MPOL_MODE_FLAGS; 1457028fec41SDavid Rientjes mode &= ~MPOL_MODE_FLAGS; 1458a3b51e01SDavid Rientjes if (mode >= MPOL_MAX) 1459a3b51e01SDavid Rientjes return -EINVAL; 14604c50bc01SDavid Rientjes if ((mode_flags & MPOL_F_STATIC_NODES) && 14614c50bc01SDavid Rientjes (mode_flags & MPOL_F_RELATIVE_NODES)) 14624c50bc01SDavid Rientjes return -EINVAL; 14638bccd85fSChristoph Lameter err = get_nodes(&nodes, nmask, maxnode); 14648bccd85fSChristoph Lameter if (err) 14658bccd85fSChristoph Lameter return err; 1466028fec41SDavid Rientjes return do_mbind(start, len, mode, mode_flags, &nodes, flags); 14678bccd85fSChristoph Lameter } 14688bccd85fSChristoph Lameter 1469e7dc9ad6SDominik Brodowski SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len, 1470e7dc9ad6SDominik Brodowski unsigned long, mode, const unsigned long __user *, nmask, 1471e7dc9ad6SDominik Brodowski unsigned long, maxnode, unsigned int, flags) 1472e7dc9ad6SDominik Brodowski { 1473e7dc9ad6SDominik Brodowski return kernel_mbind(start, len, mode, nmask, maxnode, flags); 1474e7dc9ad6SDominik Brodowski } 1475e7dc9ad6SDominik Brodowski 14768bccd85fSChristoph Lameter /* Set the process memory policy */ 1477af03c4acSDominik Brodowski static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask, 1478af03c4acSDominik Brodowski unsigned long maxnode) 14798bccd85fSChristoph Lameter { 14808bccd85fSChristoph Lameter int err; 14818bccd85fSChristoph Lameter nodemask_t nodes; 1482028fec41SDavid Rientjes unsigned short flags; 14838bccd85fSChristoph Lameter 1484028fec41SDavid Rientjes flags = mode & MPOL_MODE_FLAGS; 1485028fec41SDavid Rientjes mode &= ~MPOL_MODE_FLAGS; 1486028fec41SDavid Rientjes if ((unsigned int)mode >= MPOL_MAX) 14878bccd85fSChristoph Lameter return -EINVAL; 14884c50bc01SDavid Rientjes if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES)) 14894c50bc01SDavid Rientjes return -EINVAL; 14908bccd85fSChristoph Lameter err = get_nodes(&nodes, nmask, maxnode); 14918bccd85fSChristoph Lameter if (err) 14928bccd85fSChristoph Lameter return err; 1493028fec41SDavid Rientjes return do_set_mempolicy(mode, flags, &nodes); 14948bccd85fSChristoph Lameter } 14958bccd85fSChristoph Lameter 1496af03c4acSDominik Brodowski SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask, 1497af03c4acSDominik Brodowski unsigned long, maxnode) 1498af03c4acSDominik Brodowski { 1499af03c4acSDominik Brodowski return kernel_set_mempolicy(mode, nmask, maxnode); 1500af03c4acSDominik Brodowski } 1501af03c4acSDominik Brodowski 1502b6e9b0baSDominik Brodowski static int kernel_migrate_pages(pid_t pid, unsigned long maxnode, 1503b6e9b0baSDominik Brodowski const unsigned long __user *old_nodes, 1504b6e9b0baSDominik Brodowski const unsigned long __user *new_nodes) 150539743889SChristoph Lameter { 1506596d7cfaSKOSAKI Motohiro struct mm_struct *mm = NULL; 150739743889SChristoph Lameter 
struct task_struct *task; 150839743889SChristoph Lameter nodemask_t task_nodes; 150939743889SChristoph Lameter int err; 1510596d7cfaSKOSAKI Motohiro nodemask_t *old; 1511596d7cfaSKOSAKI Motohiro nodemask_t *new; 1512596d7cfaSKOSAKI Motohiro NODEMASK_SCRATCH(scratch); 151339743889SChristoph Lameter 1514596d7cfaSKOSAKI Motohiro if (!scratch) 1515596d7cfaSKOSAKI Motohiro return -ENOMEM; 151639743889SChristoph Lameter 1517596d7cfaSKOSAKI Motohiro old = &scratch->mask1; 1518596d7cfaSKOSAKI Motohiro new = &scratch->mask2; 1519596d7cfaSKOSAKI Motohiro 1520596d7cfaSKOSAKI Motohiro err = get_nodes(old, old_nodes, maxnode); 152139743889SChristoph Lameter if (err) 1522596d7cfaSKOSAKI Motohiro goto out; 1523596d7cfaSKOSAKI Motohiro 1524596d7cfaSKOSAKI Motohiro err = get_nodes(new, new_nodes, maxnode); 1525596d7cfaSKOSAKI Motohiro if (err) 1526596d7cfaSKOSAKI Motohiro goto out; 152739743889SChristoph Lameter 152839743889SChristoph Lameter /* Find the mm_struct */ 152955cfaa3cSZeng Zhaoming rcu_read_lock(); 1530228ebcbeSPavel Emelyanov task = pid ? find_task_by_vpid(pid) : current; 153139743889SChristoph Lameter if (!task) { 153255cfaa3cSZeng Zhaoming rcu_read_unlock(); 1533596d7cfaSKOSAKI Motohiro err = -ESRCH; 1534596d7cfaSKOSAKI Motohiro goto out; 153539743889SChristoph Lameter } 15363268c63eSChristoph Lameter get_task_struct(task); 153739743889SChristoph Lameter 1538596d7cfaSKOSAKI Motohiro err = -EINVAL; 153939743889SChristoph Lameter 154039743889SChristoph Lameter /* 154131367466SOtto Ebeling * Check if this process has the right to modify the specified process. 154231367466SOtto Ebeling * Use the regular "ptrace_may_access()" checks. 154339743889SChristoph Lameter */ 154431367466SOtto Ebeling if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) { 1545c69e8d9cSDavid Howells rcu_read_unlock(); 154639743889SChristoph Lameter err = -EPERM; 15473268c63eSChristoph Lameter goto out_put; 154839743889SChristoph Lameter } 1549c69e8d9cSDavid Howells rcu_read_unlock(); 155039743889SChristoph Lameter 155139743889SChristoph Lameter task_nodes = cpuset_mems_allowed(task); 155239743889SChristoph Lameter /* Is the user allowed to access the target nodes? */ 1553596d7cfaSKOSAKI Motohiro if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) { 155439743889SChristoph Lameter err = -EPERM; 15553268c63eSChristoph Lameter goto out_put; 155639743889SChristoph Lameter } 155739743889SChristoph Lameter 15580486a38bSYisheng Xie task_nodes = cpuset_mems_allowed(current); 15590486a38bSYisheng Xie nodes_and(*new, *new, task_nodes); 15600486a38bSYisheng Xie if (nodes_empty(*new)) 15613268c63eSChristoph Lameter goto out_put; 15620486a38bSYisheng Xie 156386c3a764SDavid Quigley err = security_task_movememory(task); 156486c3a764SDavid Quigley if (err) 15653268c63eSChristoph Lameter goto out_put; 156686c3a764SDavid Quigley 15673268c63eSChristoph Lameter mm = get_task_mm(task); 15683268c63eSChristoph Lameter put_task_struct(task); 1569f2a9ef88SSasha Levin 1570f2a9ef88SSasha Levin if (!mm) { 1571f2a9ef88SSasha Levin err = -EINVAL; 1572f2a9ef88SSasha Levin goto out; 1573f2a9ef88SSasha Levin } 1574f2a9ef88SSasha Levin 1575596d7cfaSKOSAKI Motohiro err = do_migrate_pages(mm, old, new, 157674c00241SChristoph Lameter capable(CAP_SYS_NICE) ? 
MPOL_MF_MOVE_ALL : MPOL_MF_MOVE); 15773268c63eSChristoph Lameter 157839743889SChristoph Lameter mmput(mm); 15793268c63eSChristoph Lameter out: 1580596d7cfaSKOSAKI Motohiro NODEMASK_SCRATCH_FREE(scratch); 1581596d7cfaSKOSAKI Motohiro 158239743889SChristoph Lameter return err; 15833268c63eSChristoph Lameter 15843268c63eSChristoph Lameter out_put: 15853268c63eSChristoph Lameter put_task_struct(task); 15863268c63eSChristoph Lameter goto out; 15873268c63eSChristoph Lameter 158839743889SChristoph Lameter } 158939743889SChristoph Lameter 1590b6e9b0baSDominik Brodowski SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode, 1591b6e9b0baSDominik Brodowski const unsigned long __user *, old_nodes, 1592b6e9b0baSDominik Brodowski const unsigned long __user *, new_nodes) 1593b6e9b0baSDominik Brodowski { 1594b6e9b0baSDominik Brodowski return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes); 1595b6e9b0baSDominik Brodowski } 1596b6e9b0baSDominik Brodowski 159739743889SChristoph Lameter 15988bccd85fSChristoph Lameter /* Retrieve NUMA policy */ 1599af03c4acSDominik Brodowski static int kernel_get_mempolicy(int __user *policy, 1600af03c4acSDominik Brodowski unsigned long __user *nmask, 1601af03c4acSDominik Brodowski unsigned long maxnode, 1602af03c4acSDominik Brodowski unsigned long addr, 1603af03c4acSDominik Brodowski unsigned long flags) 16048bccd85fSChristoph Lameter { 1605dbcb0f19SAdrian Bunk int err; 1606dbcb0f19SAdrian Bunk int uninitialized_var(pval); 16078bccd85fSChristoph Lameter nodemask_t nodes; 16088bccd85fSChristoph Lameter 1609057d3389SAndrey Konovalov addr = untagged_addr(addr); 1610057d3389SAndrey Konovalov 1611050c17f2SRalph Campbell if (nmask != NULL && maxnode < nr_node_ids) 16128bccd85fSChristoph Lameter return -EINVAL; 16138bccd85fSChristoph Lameter 16148bccd85fSChristoph Lameter err = do_get_mempolicy(&pval, &nodes, addr, flags); 16158bccd85fSChristoph Lameter 16168bccd85fSChristoph Lameter if (err) 16178bccd85fSChristoph Lameter return err; 16188bccd85fSChristoph Lameter 16198bccd85fSChristoph Lameter if (policy && put_user(pval, policy)) 16208bccd85fSChristoph Lameter return -EFAULT; 16218bccd85fSChristoph Lameter 16228bccd85fSChristoph Lameter if (nmask) 16238bccd85fSChristoph Lameter err = copy_nodes_to_user(nmask, maxnode, &nodes); 16248bccd85fSChristoph Lameter 16258bccd85fSChristoph Lameter return err; 16268bccd85fSChristoph Lameter } 16278bccd85fSChristoph Lameter 1628af03c4acSDominik Brodowski SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, 1629af03c4acSDominik Brodowski unsigned long __user *, nmask, unsigned long, maxnode, 1630af03c4acSDominik Brodowski unsigned long, addr, unsigned long, flags) 1631af03c4acSDominik Brodowski { 1632af03c4acSDominik Brodowski return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags); 1633af03c4acSDominik Brodowski } 1634af03c4acSDominik Brodowski 16351da177e4SLinus Torvalds #ifdef CONFIG_COMPAT 16361da177e4SLinus Torvalds 1637c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, 1638c93e0f6cSHeiko Carstens compat_ulong_t __user *, nmask, 1639c93e0f6cSHeiko Carstens compat_ulong_t, maxnode, 1640c93e0f6cSHeiko Carstens compat_ulong_t, addr, compat_ulong_t, flags) 16411da177e4SLinus Torvalds { 16421da177e4SLinus Torvalds long err; 16431da177e4SLinus Torvalds unsigned long __user *nm = NULL; 16441da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 16451da177e4SLinus Torvalds DECLARE_BITMAP(bm, MAX_NUMNODES); 16461da177e4SLinus Torvalds 1647050c17f2SRalph Campbell nr_bits = 
min_t(unsigned long, maxnode-1, nr_node_ids); 16481da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 16491da177e4SLinus Torvalds 16501da177e4SLinus Torvalds if (nmask) 16511da177e4SLinus Torvalds nm = compat_alloc_user_space(alloc_size); 16521da177e4SLinus Torvalds 1653af03c4acSDominik Brodowski err = kernel_get_mempolicy(policy, nm, nr_bits+1, addr, flags); 16541da177e4SLinus Torvalds 16551da177e4SLinus Torvalds if (!err && nmask) { 16562bbff6c7SKAMEZAWA Hiroyuki unsigned long copy_size; 16572bbff6c7SKAMEZAWA Hiroyuki copy_size = min_t(unsigned long, sizeof(bm), alloc_size); 16582bbff6c7SKAMEZAWA Hiroyuki err = copy_from_user(bm, nm, copy_size); 16591da177e4SLinus Torvalds /* ensure entire bitmap is zeroed */ 16601da177e4SLinus Torvalds err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8); 16611da177e4SLinus Torvalds err |= compat_put_bitmap(nmask, bm, nr_bits); 16621da177e4SLinus Torvalds } 16631da177e4SLinus Torvalds 16641da177e4SLinus Torvalds return err; 16651da177e4SLinus Torvalds } 16661da177e4SLinus Torvalds 1667c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask, 1668c93e0f6cSHeiko Carstens compat_ulong_t, maxnode) 16691da177e4SLinus Torvalds { 16701da177e4SLinus Torvalds unsigned long __user *nm = NULL; 16711da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 16721da177e4SLinus Torvalds DECLARE_BITMAP(bm, MAX_NUMNODES); 16731da177e4SLinus Torvalds 16741da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 16751da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 16761da177e4SLinus Torvalds 16771da177e4SLinus Torvalds if (nmask) { 1678cf01fb99SChris Salls if (compat_get_bitmap(bm, nmask, nr_bits)) 16791da177e4SLinus Torvalds return -EFAULT; 1680cf01fb99SChris Salls nm = compat_alloc_user_space(alloc_size); 1681cf01fb99SChris Salls if (copy_to_user(nm, bm, alloc_size)) 1682cf01fb99SChris Salls return -EFAULT; 1683cf01fb99SChris Salls } 16841da177e4SLinus Torvalds 1685af03c4acSDominik Brodowski return kernel_set_mempolicy(mode, nm, nr_bits+1); 16861da177e4SLinus Torvalds } 16871da177e4SLinus Torvalds 1688c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len, 1689c93e0f6cSHeiko Carstens compat_ulong_t, mode, compat_ulong_t __user *, nmask, 1690c93e0f6cSHeiko Carstens compat_ulong_t, maxnode, compat_ulong_t, flags) 16911da177e4SLinus Torvalds { 16921da177e4SLinus Torvalds unsigned long __user *nm = NULL; 16931da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 1694dfcd3c0dSAndi Kleen nodemask_t bm; 16951da177e4SLinus Torvalds 16961da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 16971da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 16981da177e4SLinus Torvalds 16991da177e4SLinus Torvalds if (nmask) { 1700cf01fb99SChris Salls if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits)) 17011da177e4SLinus Torvalds return -EFAULT; 1702cf01fb99SChris Salls nm = compat_alloc_user_space(alloc_size); 1703cf01fb99SChris Salls if (copy_to_user(nm, nodes_addr(bm), alloc_size)) 1704cf01fb99SChris Salls return -EFAULT; 1705cf01fb99SChris Salls } 17061da177e4SLinus Torvalds 1707e7dc9ad6SDominik Brodowski return kernel_mbind(start, len, mode, nm, nr_bits+1, flags); 17081da177e4SLinus Torvalds } 17091da177e4SLinus Torvalds 1710b6e9b0baSDominik Brodowski COMPAT_SYSCALL_DEFINE4(migrate_pages, compat_pid_t, pid, 1711b6e9b0baSDominik Brodowski compat_ulong_t, maxnode, 1712b6e9b0baSDominik 
Brodowski const compat_ulong_t __user *, old_nodes, 1713b6e9b0baSDominik Brodowski const compat_ulong_t __user *, new_nodes) 1714b6e9b0baSDominik Brodowski { 1715b6e9b0baSDominik Brodowski unsigned long __user *old = NULL; 1716b6e9b0baSDominik Brodowski unsigned long __user *new = NULL; 1717b6e9b0baSDominik Brodowski nodemask_t tmp_mask; 1718b6e9b0baSDominik Brodowski unsigned long nr_bits; 1719b6e9b0baSDominik Brodowski unsigned long size; 1720b6e9b0baSDominik Brodowski 1721b6e9b0baSDominik Brodowski nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES); 1722b6e9b0baSDominik Brodowski size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 1723b6e9b0baSDominik Brodowski if (old_nodes) { 1724b6e9b0baSDominik Brodowski if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits)) 1725b6e9b0baSDominik Brodowski return -EFAULT; 1726b6e9b0baSDominik Brodowski old = compat_alloc_user_space(new_nodes ? size * 2 : size); 1727b6e9b0baSDominik Brodowski if (new_nodes) 1728b6e9b0baSDominik Brodowski new = old + size / sizeof(unsigned long); 1729b6e9b0baSDominik Brodowski if (copy_to_user(old, nodes_addr(tmp_mask), size)) 1730b6e9b0baSDominik Brodowski return -EFAULT; 1731b6e9b0baSDominik Brodowski } 1732b6e9b0baSDominik Brodowski if (new_nodes) { 1733b6e9b0baSDominik Brodowski if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits)) 1734b6e9b0baSDominik Brodowski return -EFAULT; 1735b6e9b0baSDominik Brodowski if (new == NULL) 1736b6e9b0baSDominik Brodowski new = compat_alloc_user_space(size); 1737b6e9b0baSDominik Brodowski if (copy_to_user(new, nodes_addr(tmp_mask), size)) 1738b6e9b0baSDominik Brodowski return -EFAULT; 1739b6e9b0baSDominik Brodowski } 1740b6e9b0baSDominik Brodowski return kernel_migrate_pages(pid, nr_bits + 1, old, new); 1741b6e9b0baSDominik Brodowski } 1742b6e9b0baSDominik Brodowski 1743b6e9b0baSDominik Brodowski #endif /* CONFIG_COMPAT */ 17441da177e4SLinus Torvalds 174520ca87f2SLi Xinhai bool vma_migratable(struct vm_area_struct *vma) 174620ca87f2SLi Xinhai { 174720ca87f2SLi Xinhai if (vma->vm_flags & (VM_IO | VM_PFNMAP)) 174820ca87f2SLi Xinhai return false; 174920ca87f2SLi Xinhai 175020ca87f2SLi Xinhai /* 175120ca87f2SLi Xinhai * DAX device mappings require predictable access latency, so avoid 175220ca87f2SLi Xinhai * incurring periodic faults. 175320ca87f2SLi Xinhai */ 175420ca87f2SLi Xinhai if (vma_is_dax(vma)) 175520ca87f2SLi Xinhai return false; 175620ca87f2SLi Xinhai 175720ca87f2SLi Xinhai if (is_vm_hugetlb_page(vma) && 175820ca87f2SLi Xinhai !hugepage_migration_supported(hstate_vma(vma))) 175920ca87f2SLi Xinhai return false; 176020ca87f2SLi Xinhai 176120ca87f2SLi Xinhai /* 176220ca87f2SLi Xinhai * Migration allocates pages in the highest zone. If we cannot 176320ca87f2SLi Xinhai * do so then migration (at least from node to node) is not 176420ca87f2SLi Xinhai * possible. 
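	 *
	 * Illustration (editor's addition; the configuration is hypothetical):
	 * on a 32-bit highmem system policy_zone is ZONE_HIGHMEM, so a file
	 * mapping whose mapping_gfp_mask was restricted to GFP_KERNEL maps to
	 * ZONE_NORMAL < policy_zone and is reported as non-migratable below.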
176520ca87f2SLi Xinhai */ 176620ca87f2SLi Xinhai if (vma->vm_file && 176720ca87f2SLi Xinhai gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping)) 176820ca87f2SLi Xinhai < policy_zone) 176920ca87f2SLi Xinhai return false; 177020ca87f2SLi Xinhai return true; 177120ca87f2SLi Xinhai } 177220ca87f2SLi Xinhai 177374d2c3a0SOleg Nesterov struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, 177474d2c3a0SOleg Nesterov unsigned long addr) 17751da177e4SLinus Torvalds { 17768d90274bSOleg Nesterov struct mempolicy *pol = NULL; 17771da177e4SLinus Torvalds 17781da177e4SLinus Torvalds if (vma) { 1779480eccf9SLee Schermerhorn if (vma->vm_ops && vma->vm_ops->get_policy) { 17808d90274bSOleg Nesterov pol = vma->vm_ops->get_policy(vma, addr); 178100442ad0SMel Gorman } else if (vma->vm_policy) { 17821da177e4SLinus Torvalds pol = vma->vm_policy; 178300442ad0SMel Gorman 178400442ad0SMel Gorman /* 178500442ad0SMel Gorman * shmem_alloc_page() passes MPOL_F_SHARED policy with 178600442ad0SMel Gorman * a pseudo vma whose vma->vm_ops=NULL. Take a reference 178700442ad0SMel Gorman * count on these policies which will be dropped by 178800442ad0SMel Gorman * mpol_cond_put() later 178900442ad0SMel Gorman */ 179000442ad0SMel Gorman if (mpol_needs_cond_ref(pol)) 179100442ad0SMel Gorman mpol_get(pol); 179200442ad0SMel Gorman } 17931da177e4SLinus Torvalds } 1794f15ca78eSOleg Nesterov 179574d2c3a0SOleg Nesterov return pol; 179674d2c3a0SOleg Nesterov } 179774d2c3a0SOleg Nesterov 179874d2c3a0SOleg Nesterov /* 1799dd6eecb9SOleg Nesterov * get_vma_policy(@vma, @addr) 180074d2c3a0SOleg Nesterov * @vma: virtual memory area whose policy is sought 180174d2c3a0SOleg Nesterov * @addr: address in @vma for shared policy lookup 180274d2c3a0SOleg Nesterov * 180374d2c3a0SOleg Nesterov * Returns effective policy for a VMA at specified address. 1804dd6eecb9SOleg Nesterov * Falls back to current->mempolicy or system default policy, as necessary. 180574d2c3a0SOleg Nesterov * Shared policies [those marked as MPOL_F_SHARED] require an extra reference 180674d2c3a0SOleg Nesterov * count--added by the get_policy() vm_op, as appropriate--to protect against 180774d2c3a0SOleg Nesterov * freeing by another task. It is the caller's responsibility to free the 180874d2c3a0SOleg Nesterov * extra reference for shared policies. 
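 *
 * Typical call pattern (editor's sketch of the contract, not a quote
 * from a caller):
 *	pol = get_vma_policy(vma, addr);
 *	... use pol to pick a node or nodemask for the allocation ...
 *	mpol_cond_put(pol);	(drops the reference only for MPOL_F_SHARED)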
180974d2c3a0SOleg Nesterov  */
1810ac79f78dSDavid Rientjes static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1811dd6eecb9SOleg Nesterov 						unsigned long addr)
181274d2c3a0SOleg Nesterov {
181374d2c3a0SOleg Nesterov 	struct mempolicy *pol = __get_vma_policy(vma, addr);
181474d2c3a0SOleg Nesterov 
18158d90274bSOleg Nesterov 	if (!pol)
1816dd6eecb9SOleg Nesterov 		pol = get_task_policy(current);
18178d90274bSOleg Nesterov 
18181da177e4SLinus Torvalds 	return pol;
18191da177e4SLinus Torvalds }
18201da177e4SLinus Torvalds 
18216b6482bbSOleg Nesterov bool vma_policy_mof(struct vm_area_struct *vma)
1822fc314724SMel Gorman {
18236b6482bbSOleg Nesterov 	struct mempolicy *pol;
1824f15ca78eSOleg Nesterov 
1825fc314724SMel Gorman 	if (vma->vm_ops && vma->vm_ops->get_policy) {
1826fc314724SMel Gorman 		bool ret = false;
1827fc314724SMel Gorman 
1828fc314724SMel Gorman 		pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1829fc314724SMel Gorman 		if (pol && (pol->flags & MPOL_F_MOF))
1830fc314724SMel Gorman 			ret = true;
1831fc314724SMel Gorman 		mpol_cond_put(pol);
1832fc314724SMel Gorman 
1833fc314724SMel Gorman 		return ret;
18348d90274bSOleg Nesterov 	}
18358d90274bSOleg Nesterov 
1836fc314724SMel Gorman 	pol = vma->vm_policy;
18378d90274bSOleg Nesterov 	if (!pol)
18386b6482bbSOleg Nesterov 		pol = get_task_policy(current);
1839fc314724SMel Gorman 
1840fc314724SMel Gorman 	return pol->flags & MPOL_F_MOF;
1841fc314724SMel Gorman }
1842fc314724SMel Gorman 
1843d3eb1570SLai Jiangshan static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1844d3eb1570SLai Jiangshan {
1845d3eb1570SLai Jiangshan 	enum zone_type dynamic_policy_zone = policy_zone;
1846d3eb1570SLai Jiangshan 
1847d3eb1570SLai Jiangshan 	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1848d3eb1570SLai Jiangshan 
1849d3eb1570SLai Jiangshan 	/*
1850d3eb1570SLai Jiangshan 	 * If policy->v.nodes has movable memory only,
1851d3eb1570SLai Jiangshan 	 * we apply the policy only when gfp_zone(gfp) == ZONE_MOVABLE.
1852d3eb1570SLai Jiangshan 	 *
1853d3eb1570SLai Jiangshan 	 * policy->v.nodes has already been intersected with node_states[N_MEMORY],
1854d3eb1570SLai Jiangshan 	 * so if the following test fails, it implies
1855d3eb1570SLai Jiangshan 	 * policy->v.nodes has movable memory only.
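	 *
	 * Example (editor's addition): if every node in policy->v.nodes is
	 * movable-only, dynamic_policy_zone becomes ZONE_MOVABLE and only
	 * requests with gfp_zone(gfp) == ZONE_MOVABLE pass the test below,
	 * i.e. only those allocations get the bind policy applied.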
1856d3eb1570SLai Jiangshan */ 1857d3eb1570SLai Jiangshan if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY])) 1858d3eb1570SLai Jiangshan dynamic_policy_zone = ZONE_MOVABLE; 1859d3eb1570SLai Jiangshan 1860d3eb1570SLai Jiangshan return zone >= dynamic_policy_zone; 1861d3eb1570SLai Jiangshan } 1862d3eb1570SLai Jiangshan 186352cd3b07SLee Schermerhorn /* 186452cd3b07SLee Schermerhorn * Return a nodemask representing a mempolicy for filtering nodes for 186552cd3b07SLee Schermerhorn * page allocation 186652cd3b07SLee Schermerhorn */ 186752cd3b07SLee Schermerhorn static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy) 186819770b32SMel Gorman { 186919770b32SMel Gorman /* Lower zones don't get a nodemask applied for MPOL_BIND */ 187045c4745aSLee Schermerhorn if (unlikely(policy->mode == MPOL_BIND) && 1871d3eb1570SLai Jiangshan apply_policy_zone(policy, gfp_zone(gfp)) && 187219770b32SMel Gorman cpuset_nodemask_valid_mems_allowed(&policy->v.nodes)) 187319770b32SMel Gorman return &policy->v.nodes; 187419770b32SMel Gorman 187519770b32SMel Gorman return NULL; 187619770b32SMel Gorman } 187719770b32SMel Gorman 187804ec6264SVlastimil Babka /* Return the node id preferred by the given mempolicy, or the given id */ 187904ec6264SVlastimil Babka static int policy_node(gfp_t gfp, struct mempolicy *policy, 18802f5f9486SAndi Kleen int nd) 18811da177e4SLinus Torvalds { 18826d840958SMichal Hocko if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL)) 18831da177e4SLinus Torvalds nd = policy->v.preferred_node; 18846d840958SMichal Hocko else { 188519770b32SMel Gorman /* 18866d840958SMichal Hocko * __GFP_THISNODE shouldn't even be used with the bind policy 18876d840958SMichal Hocko * because we might easily break the expectation to stay on the 18886d840958SMichal Hocko * requested node and not break the policy. 188919770b32SMel Gorman */ 18906d840958SMichal Hocko WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE)); 18911da177e4SLinus Torvalds } 18926d840958SMichal Hocko 189304ec6264SVlastimil Babka return nd; 18941da177e4SLinus Torvalds } 18951da177e4SLinus Torvalds 18961da177e4SLinus Torvalds /* Do dynamic interleaving for a process */ 18971da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy) 18981da177e4SLinus Torvalds { 189945816682SVlastimil Babka unsigned next; 19001da177e4SLinus Torvalds struct task_struct *me = current; 19011da177e4SLinus Torvalds 190245816682SVlastimil Babka next = next_node_in(me->il_prev, policy->v.nodes); 1903f5b087b5SDavid Rientjes if (next < MAX_NUMNODES) 190445816682SVlastimil Babka me->il_prev = next; 190545816682SVlastimil Babka return next; 19061da177e4SLinus Torvalds } 19071da177e4SLinus Torvalds 1908dc85da15SChristoph Lameter /* 1909dc85da15SChristoph Lameter * Depending on the memory policy provide a node from which to allocate the 1910dc85da15SChristoph Lameter * next slab entry. 
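 *
 * Behaviour sketch (editor's addition): a task with an MPOL_INTERLEAVE
 * policy over nodes {0,2} sees successive calls answered 0, 2, 0, 2, ...,
 * while MPOL_PREFERRED always answers with the preferred node and
 * MPOL_BIND with the first allowed node in the local fallback zonelist.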
1911dc85da15SChristoph Lameter */ 19122a389610SDavid Rientjes unsigned int mempolicy_slab_node(void) 1913dc85da15SChristoph Lameter { 1914e7b691b0SAndi Kleen struct mempolicy *policy; 19152a389610SDavid Rientjes int node = numa_mem_id(); 1916e7b691b0SAndi Kleen 1917e7b691b0SAndi Kleen if (in_interrupt()) 19182a389610SDavid Rientjes return node; 1919e7b691b0SAndi Kleen 1920e7b691b0SAndi Kleen policy = current->mempolicy; 1921fc36b8d3SLee Schermerhorn if (!policy || policy->flags & MPOL_F_LOCAL) 19222a389610SDavid Rientjes return node; 1923765c4507SChristoph Lameter 1924bea904d5SLee Schermerhorn switch (policy->mode) { 1925bea904d5SLee Schermerhorn case MPOL_PREFERRED: 1926fc36b8d3SLee Schermerhorn /* 1927fc36b8d3SLee Schermerhorn * handled MPOL_F_LOCAL above 1928fc36b8d3SLee Schermerhorn */ 1929bea904d5SLee Schermerhorn return policy->v.preferred_node; 1930bea904d5SLee Schermerhorn 1931dc85da15SChristoph Lameter case MPOL_INTERLEAVE: 1932dc85da15SChristoph Lameter return interleave_nodes(policy); 1933dc85da15SChristoph Lameter 1934dd1a239fSMel Gorman case MPOL_BIND: { 1935c33d6c06SMel Gorman struct zoneref *z; 1936c33d6c06SMel Gorman 1937dc85da15SChristoph Lameter /* 1938dc85da15SChristoph Lameter * Follow bind policy behavior and start allocation at the 1939dc85da15SChristoph Lameter * first node. 1940dc85da15SChristoph Lameter */ 194119770b32SMel Gorman struct zonelist *zonelist; 194219770b32SMel Gorman enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL); 1943c9634cf0SAneesh Kumar K.V zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK]; 1944c33d6c06SMel Gorman z = first_zones_zonelist(zonelist, highest_zoneidx, 1945c33d6c06SMel Gorman &policy->v.nodes); 1946c1093b74SPavel Tatashin return z->zone ? zone_to_nid(z->zone) : node; 1947dd1a239fSMel Gorman } 1948dc85da15SChristoph Lameter 1949dc85da15SChristoph Lameter default: 1950bea904d5SLee Schermerhorn BUG(); 1951dc85da15SChristoph Lameter } 1952dc85da15SChristoph Lameter } 1953dc85da15SChristoph Lameter 1954fee83b3aSAndrew Morton /* 1955fee83b3aSAndrew Morton * Do static interleaving for a VMA with known offset @n. Returns the n'th 1956fee83b3aSAndrew Morton * node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the 1957fee83b3aSAndrew Morton * number of present nodes. 
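 *
 * Worked example (editor's note): for pol->v.nodes = {0,2,5} and
 * n = 4, nnodes is 3 and target is 4 % 3 = 1, so we start at
 * first_node() = 0, advance once, and return node 2.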
1958fee83b3aSAndrew Morton  */
195998c70baaSLaurent Dufour static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
19601da177e4SLinus Torvalds {
1961dfcd3c0dSAndi Kleen 	unsigned nnodes = nodes_weight(pol->v.nodes);
1962f5b087b5SDavid Rientjes 	unsigned target;
1963fee83b3aSAndrew Morton 	int i;
1964fee83b3aSAndrew Morton 	int nid;
19651da177e4SLinus Torvalds 
1966f5b087b5SDavid Rientjes 	if (!nnodes)
1967f5b087b5SDavid Rientjes 		return numa_node_id();
1968fee83b3aSAndrew Morton 	target = (unsigned int)n % nnodes;
1969fee83b3aSAndrew Morton 	nid = first_node(pol->v.nodes);
1970fee83b3aSAndrew Morton 	for (i = 0; i < target; i++)
1971dfcd3c0dSAndi Kleen 		nid = next_node(nid, pol->v.nodes);
19721da177e4SLinus Torvalds 	return nid;
19731da177e4SLinus Torvalds }
19741da177e4SLinus Torvalds 
19755da7ca86SChristoph Lameter /* Determine a node number for interleave */
19765da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol,
19775da7ca86SChristoph Lameter 		 struct vm_area_struct *vma, unsigned long addr, int shift)
19785da7ca86SChristoph Lameter {
19795da7ca86SChristoph Lameter 	if (vma) {
19805da7ca86SChristoph Lameter 		unsigned long off;
19815da7ca86SChristoph Lameter 
19823b98b087SNishanth Aravamudan 		/*
19833b98b087SNishanth Aravamudan 		 * for small pages, there is no difference between
19843b98b087SNishanth Aravamudan 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
19853b98b087SNishanth Aravamudan 		 * for huge pages, since vm_pgoff is in units of small
19863b98b087SNishanth Aravamudan 		 * pages, we need to shift off the always 0 bits to get
19873b98b087SNishanth Aravamudan 		 * a useful offset.
19883b98b087SNishanth Aravamudan 		 */
19893b98b087SNishanth Aravamudan 		BUG_ON(shift < PAGE_SHIFT);
19903b98b087SNishanth Aravamudan 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
19915da7ca86SChristoph Lameter 		off += (addr - vma->vm_start) >> shift;
199298c70baaSLaurent Dufour 		return offset_il_node(pol, off);
19935da7ca86SChristoph Lameter 	} else
19945da7ca86SChristoph Lameter 		return interleave_nodes(pol);
19955da7ca86SChristoph Lameter }
19965da7ca86SChristoph Lameter 
199700ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS
1998480eccf9SLee Schermerhorn /*
199904ec6264SVlastimil Babka  * huge_node(@vma, @addr, @gfp_flags, @mpol)
2000b46e14acSFabian Frederick  * @vma: virtual memory area whose policy is sought
2001b46e14acSFabian Frederick  * @addr: address in @vma for shared policy lookup and interleave policy
2002b46e14acSFabian Frederick  * @gfp_flags: for requested zone
2003b46e14acSFabian Frederick  * @mpol: pointer to mempolicy pointer for reference counted mempolicy
2004b46e14acSFabian Frederick  * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
2005480eccf9SLee Schermerhorn  *
200604ec6264SVlastimil Babka  * Returns a nid suitable for a huge page allocation and a pointer
200752cd3b07SLee Schermerhorn  * to the struct mempolicy for conditional unref after allocation.
200852cd3b07SLee Schermerhorn  * If the effective policy is 'BIND', returns a pointer to the mempolicy's
200952cd3b07SLee Schermerhorn  * @nodemask for filtering the zonelist.
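 *
 * Call pattern sketch (editor's addition, loosely modelled on the
 * hugetlb allocator; names and gfp details are illustrative):
 *	nid = huge_node(vma, addr, gfp, &mpol, &nodemask);
 *	page = dequeue_huge_page_nodemask(h, gfp, nid, nodemask);
 *	mpol_cond_put(mpol);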
2010c0ff7453SMiao Xie  *
2011d26914d1SMel Gorman  * Must be protected by read_mems_allowed_begin()
2012480eccf9SLee Schermerhorn  */
201304ec6264SVlastimil Babka int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
201404ec6264SVlastimil Babka 				struct mempolicy **mpol, nodemask_t **nodemask)
20155da7ca86SChristoph Lameter {
201604ec6264SVlastimil Babka 	int nid;
20175da7ca86SChristoph Lameter 
2018dd6eecb9SOleg Nesterov 	*mpol = get_vma_policy(vma, addr);
201919770b32SMel Gorman 	*nodemask = NULL;	/* assume !MPOL_BIND */
20205da7ca86SChristoph Lameter 
202152cd3b07SLee Schermerhorn 	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
202204ec6264SVlastimil Babka 		nid = interleave_nid(*mpol, vma, addr,
202304ec6264SVlastimil Babka 					huge_page_shift(hstate_vma(vma)));
202452cd3b07SLee Schermerhorn 	} else {
202504ec6264SVlastimil Babka 		nid = policy_node(gfp_flags, *mpol, numa_node_id());
202652cd3b07SLee Schermerhorn 		if ((*mpol)->mode == MPOL_BIND)
202752cd3b07SLee Schermerhorn 			*nodemask = &(*mpol)->v.nodes;
2028480eccf9SLee Schermerhorn 	}
202904ec6264SVlastimil Babka 	return nid;
20305da7ca86SChristoph Lameter }
203106808b08SLee Schermerhorn 
203206808b08SLee Schermerhorn /*
203306808b08SLee Schermerhorn  * init_nodemask_of_mempolicy
203406808b08SLee Schermerhorn  *
203506808b08SLee Schermerhorn  * If the current task's mempolicy is "default" [NULL], return 'false'
203606808b08SLee Schermerhorn  * to indicate default policy. Otherwise, extract the policy nodemask
203706808b08SLee Schermerhorn  * for 'bind' or 'interleave' policy into the argument nodemask, or
203806808b08SLee Schermerhorn  * initialize the argument nodemask to contain the single node for
203906808b08SLee Schermerhorn  * 'preferred' or 'local' policy and return 'true' to indicate presence
204006808b08SLee Schermerhorn  * of non-default mempolicy.
204106808b08SLee Schermerhorn  *
204206808b08SLee Schermerhorn  * We don't bother with reference counting the mempolicy [mpol_get/put]
204306808b08SLee Schermerhorn  * because the current task is examining its own mempolicy and a task's
204406808b08SLee Schermerhorn  * mempolicy is only ever changed by the task itself.
204506808b08SLee Schermerhorn  *
204606808b08SLee Schermerhorn  * N.B., it is the caller's responsibility to free a returned nodemask.
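 *
 * Usage sketch (editor's addition, patterned on the hugetlb callers;
 * the exact flags are illustrative):
 *	NODEMASK_ALLOC(nodemask_t, mask, GFP_KERNEL | __GFP_NORETRY);
 *	if (mask && init_nodemask_of_mempolicy(mask))
 *		... allocate only on the nodes set in *mask ...
 *	NODEMASK_FREE(mask);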
204706808b08SLee Schermerhorn  */
204806808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask)
204906808b08SLee Schermerhorn {
205006808b08SLee Schermerhorn 	struct mempolicy *mempolicy;
205106808b08SLee Schermerhorn 	int nid;
205206808b08SLee Schermerhorn 
205306808b08SLee Schermerhorn 	if (!(mask && current->mempolicy))
205406808b08SLee Schermerhorn 		return false;
205506808b08SLee Schermerhorn 
2056c0ff7453SMiao Xie 	task_lock(current);
205706808b08SLee Schermerhorn 	mempolicy = current->mempolicy;
205806808b08SLee Schermerhorn 	switch (mempolicy->mode) {
205906808b08SLee Schermerhorn 	case MPOL_PREFERRED:
206006808b08SLee Schermerhorn 		if (mempolicy->flags & MPOL_F_LOCAL)
206106808b08SLee Schermerhorn 			nid = numa_node_id();
206206808b08SLee Schermerhorn 		else
206306808b08SLee Schermerhorn 			nid = mempolicy->v.preferred_node;
206406808b08SLee Schermerhorn 		init_nodemask_of_node(mask, nid);
206506808b08SLee Schermerhorn 		break;
206606808b08SLee Schermerhorn 
206706808b08SLee Schermerhorn 	case MPOL_BIND:
206806808b08SLee Schermerhorn 		/* Fall through */
206906808b08SLee Schermerhorn 	case MPOL_INTERLEAVE:
207006808b08SLee Schermerhorn 		*mask = mempolicy->v.nodes;
207106808b08SLee Schermerhorn 		break;
207206808b08SLee Schermerhorn 
207306808b08SLee Schermerhorn 	default:
207406808b08SLee Schermerhorn 		BUG();
207506808b08SLee Schermerhorn 	}
2076c0ff7453SMiao Xie 	task_unlock(current);
207706808b08SLee Schermerhorn 
207806808b08SLee Schermerhorn 	return true;
207906808b08SLee Schermerhorn }
208000ac59adSChen, Kenneth W #endif
20815da7ca86SChristoph Lameter 
20826f48d0ebSDavid Rientjes /*
20836f48d0ebSDavid Rientjes  * mempolicy_nodemask_intersects
20846f48d0ebSDavid Rientjes  *
20856f48d0ebSDavid Rientjes  * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
20866f48d0ebSDavid Rientjes  * policy. Otherwise, check for intersection between mask and the policy
20876f48d0ebSDavid Rientjes  * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local'
20886f48d0ebSDavid Rientjes  * policy, always return true since it may allocate elsewhere on fallback.
20896f48d0ebSDavid Rientjes  *
20906f48d0ebSDavid Rientjes  * Takes task_lock(tsk) to prevent freeing of its mempolicy.
20916f48d0ebSDavid Rientjes  */
20926f48d0ebSDavid Rientjes bool mempolicy_nodemask_intersects(struct task_struct *tsk,
20936f48d0ebSDavid Rientjes 					const nodemask_t *mask)
20946f48d0ebSDavid Rientjes {
20956f48d0ebSDavid Rientjes 	struct mempolicy *mempolicy;
20966f48d0ebSDavid Rientjes 	bool ret = true;
20976f48d0ebSDavid Rientjes 
20986f48d0ebSDavid Rientjes 	if (!mask)
20996f48d0ebSDavid Rientjes 		return ret;
21006f48d0ebSDavid Rientjes 	task_lock(tsk);
21016f48d0ebSDavid Rientjes 	mempolicy = tsk->mempolicy;
21026f48d0ebSDavid Rientjes 	if (!mempolicy)
21036f48d0ebSDavid Rientjes 		goto out;
21046f48d0ebSDavid Rientjes 
21056f48d0ebSDavid Rientjes 	switch (mempolicy->mode) {
21066f48d0ebSDavid Rientjes 	case MPOL_PREFERRED:
21076f48d0ebSDavid Rientjes 		/*
21086f48d0ebSDavid Rientjes 		 * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to
21096f48d0ebSDavid Rientjes 		 * allocate from, they may fall back to other nodes when OOM.
21106f48d0ebSDavid Rientjes 		 * Thus, it's possible for tsk to have allocated memory from
21116f48d0ebSDavid Rientjes 		 * nodes in mask.
21126f48d0ebSDavid Rientjes 		 */
21136f48d0ebSDavid Rientjes 		break;
21146f48d0ebSDavid Rientjes 	case MPOL_BIND:
21156f48d0ebSDavid Rientjes 	case MPOL_INTERLEAVE:
21166f48d0ebSDavid Rientjes 		ret = nodes_intersects(mempolicy->v.nodes, *mask);
21176f48d0ebSDavid Rientjes 		break;
21186f48d0ebSDavid Rientjes 	default:
21196f48d0ebSDavid Rientjes 		BUG();
21206f48d0ebSDavid Rientjes 	}
21216f48d0ebSDavid Rientjes out:
21226f48d0ebSDavid Rientjes 	task_unlock(tsk);
21236f48d0ebSDavid Rientjes 	return ret;
21246f48d0ebSDavid Rientjes }
21256f48d0ebSDavid Rientjes 
21261da177e4SLinus Torvalds /* Allocate a page in interleaved policy.
21271da177e4SLinus Torvalds    Own path because it needs to do special accounting. */
2128662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2129662f3a0bSAndi Kleen 					unsigned nid)
21301da177e4SLinus Torvalds {
21311da177e4SLinus Torvalds 	struct page *page;
21321da177e4SLinus Torvalds 
213304ec6264SVlastimil Babka 	page = __alloc_pages(gfp, order, nid);
21344518085eSKemi Wang 	/* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
21354518085eSKemi Wang 	if (!static_branch_likely(&vm_numa_stat_key))
21364518085eSKemi Wang 		return page;
2137de55c8b2SAndrey Ryabinin 	if (page && page_to_nid(page) == nid) {
2138de55c8b2SAndrey Ryabinin 		preempt_disable();
2139de55c8b2SAndrey Ryabinin 		__inc_numa_state(page_zone(page), NUMA_INTERLEAVE_HIT);
2140de55c8b2SAndrey Ryabinin 		preempt_enable();
2141de55c8b2SAndrey Ryabinin 	}
21421da177e4SLinus Torvalds 	return page;
21431da177e4SLinus Torvalds }
21441da177e4SLinus Torvalds 
21451da177e4SLinus Torvalds /**
21460bbbc0b3SAndrea Arcangeli  * 	alloc_pages_vma	- Allocate a page for a VMA.
21471da177e4SLinus Torvalds  *
21481da177e4SLinus Torvalds  * 	@gfp:
21491da177e4SLinus Torvalds  *      %GFP_USER    user allocation,
21501da177e4SLinus Torvalds  *      %GFP_KERNEL  kernel allocations,
21511da177e4SLinus Torvalds  *      %GFP_HIGHMEM highmem/user allocations,
21521da177e4SLinus Torvalds  *      %GFP_FS      allocation should not call back into a file system.
21531da177e4SLinus Torvalds  *      %GFP_ATOMIC  don't sleep.
21541da177e4SLinus Torvalds  *
21550bbbc0b3SAndrea Arcangeli  *	@order: Order of the GFP allocation.
21561da177e4SLinus Torvalds  * 	@vma:  Pointer to VMA or NULL if not available.
21571da177e4SLinus Torvalds  *	@addr: Virtual Address of the allocation. Must be inside the VMA.
2158be97a41bSVlastimil Babka  *	@node: Which node to prefer for allocation (modulo policy).
215919deb769SDavid Rientjes  *	@hugepage: for hugepages try only the preferred node if possible
21601da177e4SLinus Torvalds  *
21611da177e4SLinus Torvalds  * 	This function allocates a page from the kernel page pool and applies
21621da177e4SLinus Torvalds  *	a NUMA policy associated with the VMA or the current process.
21631da177e4SLinus Torvalds  *	When VMA is not NULL, the caller must hold down_read on the mmap_sem of the
21641da177e4SLinus Torvalds  *	mm_struct of the VMA to prevent it from going away. Should be used for
2165be97a41bSVlastimil Babka  *	all allocations for pages that will be mapped into user space. Returns
2166be97a41bSVlastimil Babka  *	NULL when no page can be allocated.
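 *
 *	Minimal usage sketch (editor's addition; the parameter values are
 *	illustrative, and broadly what the alloc_page_vma() wrapper passes):
 *		page = alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, addr,
 *				       numa_node_id(), false);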
21671da177e4SLinus Torvalds */ 21681da177e4SLinus Torvalds struct page * 21690bbbc0b3SAndrea Arcangeli alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, 217019deb769SDavid Rientjes unsigned long addr, int node, bool hugepage) 21711da177e4SLinus Torvalds { 2172cc9a6c87SMel Gorman struct mempolicy *pol; 2173c0ff7453SMiao Xie struct page *page; 217404ec6264SVlastimil Babka int preferred_nid; 2175be97a41bSVlastimil Babka nodemask_t *nmask; 21761da177e4SLinus Torvalds 2177dd6eecb9SOleg Nesterov pol = get_vma_policy(vma, addr); 2178cc9a6c87SMel Gorman 2179be97a41bSVlastimil Babka if (pol->mode == MPOL_INTERLEAVE) { 21801da177e4SLinus Torvalds unsigned nid; 21815da7ca86SChristoph Lameter 21828eac563cSAndi Kleen nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order); 218352cd3b07SLee Schermerhorn mpol_cond_put(pol); 21840bbbc0b3SAndrea Arcangeli page = alloc_page_interleave(gfp, order, nid); 2185be97a41bSVlastimil Babka goto out; 21861da177e4SLinus Torvalds } 21871da177e4SLinus Torvalds 218819deb769SDavid Rientjes if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) { 218919deb769SDavid Rientjes int hpage_node = node; 219019deb769SDavid Rientjes 219119deb769SDavid Rientjes /* 219219deb769SDavid Rientjes * For hugepage allocation and non-interleave policy which 219319deb769SDavid Rientjes * allows the current node (or other explicitly preferred 219419deb769SDavid Rientjes * node) we only try to allocate from the current/preferred 219519deb769SDavid Rientjes * node and don't fall back to other nodes, as the cost of 219619deb769SDavid Rientjes * remote accesses would likely offset THP benefits. 219719deb769SDavid Rientjes * 219819deb769SDavid Rientjes * If the policy is interleave, or does not allow the current 219919deb769SDavid Rientjes * node in its nodemask, we allocate the standard way. 220019deb769SDavid Rientjes */ 220119deb769SDavid Rientjes if (pol->mode == MPOL_PREFERRED && !(pol->flags & MPOL_F_LOCAL)) 220219deb769SDavid Rientjes hpage_node = pol->v.preferred_node; 220319deb769SDavid Rientjes 220419deb769SDavid Rientjes nmask = policy_nodemask(gfp, pol); 220519deb769SDavid Rientjes if (!nmask || node_isset(hpage_node, *nmask)) { 220619deb769SDavid Rientjes mpol_cond_put(pol); 2207cc638f32SVlastimil Babka /* 2208cc638f32SVlastimil Babka * First, try to allocate THP only on local node, but 2209cc638f32SVlastimil Babka * don't reclaim unnecessarily, just compact. 2210cc638f32SVlastimil Babka */ 221119deb769SDavid Rientjes page = __alloc_pages_node(hpage_node, 2212cc638f32SVlastimil Babka gfp | __GFP_THISNODE | __GFP_NORETRY, order); 221376e654ccSDavid Rientjes 221476e654ccSDavid Rientjes /* 221576e654ccSDavid Rientjes * If hugepage allocations are configured to always 221676e654ccSDavid Rientjes * synchronous compact or the vma has been madvised 221776e654ccSDavid Rientjes * to prefer hugepage backing, retry allowing remote 2218cc638f32SVlastimil Babka * memory with both reclaim and compact as well. 
221976e654ccSDavid Rientjes 			 */
222076e654ccSDavid Rientjes 			if (!page && (gfp & __GFP_DIRECT_RECLAIM))
222176e654ccSDavid Rientjes 				page = __alloc_pages_node(hpage_node,
2222cc638f32SVlastimil Babka 								gfp, order);
222376e654ccSDavid Rientjes 
222419deb769SDavid Rientjes 			goto out;
222519deb769SDavid Rientjes 		}
222619deb769SDavid Rientjes 	}
222719deb769SDavid Rientjes 
2228077fcf11SAneesh Kumar K.V 	nmask = policy_nodemask(gfp, pol);
222904ec6264SVlastimil Babka 	preferred_nid = policy_node(gfp, pol, node);
223004ec6264SVlastimil Babka 	page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
2231d51e9894SVlastimil Babka 	mpol_cond_put(pol);
2232be97a41bSVlastimil Babka out:
2233077fcf11SAneesh Kumar K.V 	return page;
2234077fcf11SAneesh Kumar K.V }
223569262215SChristoph Hellwig EXPORT_SYMBOL(alloc_pages_vma);
2236077fcf11SAneesh Kumar K.V 
22371da177e4SLinus Torvalds /**
22381da177e4SLinus Torvalds  * 	alloc_pages_current - Allocate pages.
22391da177e4SLinus Torvalds  *
22401da177e4SLinus Torvalds  *	@gfp:
22411da177e4SLinus Torvalds  *		%GFP_USER   user allocation,
22421da177e4SLinus Torvalds  *      	%GFP_KERNEL kernel allocation,
22431da177e4SLinus Torvalds  *      	%GFP_HIGHMEM highmem allocation,
22441da177e4SLinus Torvalds  *      	%GFP_FS     don't call back into a file system.
22451da177e4SLinus Torvalds  *      	%GFP_ATOMIC don't sleep.
22461da177e4SLinus Torvalds  *	@order: Power of two of allocation size in pages. 0 is a single page.
22471da177e4SLinus Torvalds  *
22481da177e4SLinus Torvalds  *	Allocate a page from the kernel page pool and, when not in
22491da177e4SLinus Torvalds  *	interrupt context, apply the current process NUMA policy.
22501da177e4SLinus Torvalds  *	Returns NULL when no page can be allocated.
22511da177e4SLinus Torvalds  */
2252dd0fc66fSAl Viro struct page *alloc_pages_current(gfp_t gfp, unsigned order)
22531da177e4SLinus Torvalds {
22548d90274bSOleg Nesterov 	struct mempolicy *pol = &default_policy;
2255c0ff7453SMiao Xie 	struct page *page;
22561da177e4SLinus Torvalds 
22578d90274bSOleg Nesterov 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
22588d90274bSOleg Nesterov 		pol = get_task_policy(current);
225952cd3b07SLee Schermerhorn 
226052cd3b07SLee Schermerhorn 	/*
226152cd3b07SLee Schermerhorn 	 * No reference counting needed for current->mempolicy
226252cd3b07SLee Schermerhorn 	 * nor system default_policy
226352cd3b07SLee Schermerhorn 	 */
226445c4745aSLee Schermerhorn 	if (pol->mode == MPOL_INTERLEAVE)
2265c0ff7453SMiao Xie 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2266c0ff7453SMiao Xie 	else
2267c0ff7453SMiao Xie 		page = __alloc_pages_nodemask(gfp, order,
226804ec6264SVlastimil Babka 				policy_node(gfp, pol, numa_node_id()),
22695c4b4be3SAndi Kleen 				policy_nodemask(gfp, pol));
2270cc9a6c87SMel Gorman 
2271c0ff7453SMiao Xie 	return page;
22721da177e4SLinus Torvalds }
22731da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_pages_current);
22741da177e4SLinus Torvalds 
2275ef0855d3SOleg Nesterov int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2276ef0855d3SOleg Nesterov {
2277ef0855d3SOleg Nesterov 	struct mempolicy *pol = mpol_dup(vma_policy(src));
2278ef0855d3SOleg Nesterov 
2279ef0855d3SOleg Nesterov 	if (IS_ERR(pol))
2280ef0855d3SOleg Nesterov 		return PTR_ERR(pol);
2281ef0855d3SOleg Nesterov 	dst->vm_policy = pol;
2282ef0855d3SOleg Nesterov 	return 0;
2283ef0855d3SOleg Nesterov }
2284ef0855d3SOleg Nesterov 
22854225399aSPaul Jackson /*
2286846a16bfSLee Schermerhorn  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
22874225399aSPaul Jackson  * rebinds the mempolicy it's
copying by calling mpol_rebind_policy() 22884225399aSPaul Jackson * with the mems_allowed returned by cpuset_mems_allowed(). This 22894225399aSPaul Jackson * keeps mempolicies cpuset relative after its cpuset moves. See 22904225399aSPaul Jackson * further kernel/cpuset.c update_nodemask(). 2291708c1bbcSMiao Xie * 2292708c1bbcSMiao Xie * current's mempolicy may be rebound by another task (the task that changes 2293708c1bbcSMiao Xie * the cpuset's mems), so we need not do the rebind work for the current task. 22944225399aSPaul Jackson */ 22954225399aSPaul Jackson 2296846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */ 2297846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old) 22981da177e4SLinus Torvalds { 22991da177e4SLinus Torvalds struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL); 23001da177e4SLinus Torvalds 23011da177e4SLinus Torvalds if (!new) 23021da177e4SLinus Torvalds return ERR_PTR(-ENOMEM); 2303708c1bbcSMiao Xie 2304708c1bbcSMiao Xie /* task's mempolicy is protected by alloc_lock */ 2305708c1bbcSMiao Xie if (old == current->mempolicy) { 2306708c1bbcSMiao Xie task_lock(current); 2307708c1bbcSMiao Xie *new = *old; 2308708c1bbcSMiao Xie task_unlock(current); 2309708c1bbcSMiao Xie } else 2310708c1bbcSMiao Xie *new = *old; 2311708c1bbcSMiao Xie 23124225399aSPaul Jackson if (current_cpuset_is_being_rebound()) { 23134225399aSPaul Jackson nodemask_t mems = cpuset_mems_allowed(current); 2314213980c0SVlastimil Babka mpol_rebind_policy(new, &mems); 23154225399aSPaul Jackson } 23161da177e4SLinus Torvalds atomic_set(&new->refcnt, 1); 23171da177e4SLinus Torvalds return new; 23181da177e4SLinus Torvalds } 23191da177e4SLinus Torvalds 23201da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */ 2321fcfb4dccSKOSAKI Motohiro bool __mpol_equal(struct mempolicy *a, struct mempolicy *b) 23221da177e4SLinus Torvalds { 23231da177e4SLinus Torvalds if (!a || !b) 2324fcfb4dccSKOSAKI Motohiro return false; 232545c4745aSLee Schermerhorn if (a->mode != b->mode) 2326fcfb4dccSKOSAKI Motohiro return false; 232719800502SBob Liu if (a->flags != b->flags) 2328fcfb4dccSKOSAKI Motohiro return false; 232919800502SBob Liu if (mpol_store_user_nodemask(a)) 233019800502SBob Liu if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask)) 2331fcfb4dccSKOSAKI Motohiro return false; 233219800502SBob Liu 233345c4745aSLee Schermerhorn switch (a->mode) { 233419770b32SMel Gorman case MPOL_BIND: 233519770b32SMel Gorman /* Fall through */ 23361da177e4SLinus Torvalds case MPOL_INTERLEAVE: 2337fcfb4dccSKOSAKI Motohiro return !!nodes_equal(a->v.nodes, b->v.nodes); 23381da177e4SLinus Torvalds case MPOL_PREFERRED: 23398970a63eSYisheng Xie /* a's ->flags is the same as b's */ 23408970a63eSYisheng Xie if (a->flags & MPOL_F_LOCAL) 23418970a63eSYisheng Xie return true; 234275719661SNamhyung Kim return a->v.preferred_node == b->v.preferred_node; 23431da177e4SLinus Torvalds default: 23441da177e4SLinus Torvalds BUG(); 2345fcfb4dccSKOSAKI Motohiro return false; 23461da177e4SLinus Torvalds } 23471da177e4SLinus Torvalds } 23481da177e4SLinus Torvalds 23491da177e4SLinus Torvalds /* 23501da177e4SLinus Torvalds * Shared memory backing store policy support. 23511da177e4SLinus Torvalds * 23521da177e4SLinus Torvalds * Remember policies even when nobody has shared memory mapped. 23531da177e4SLinus Torvalds * The policies are kept in a Red-Black tree linked from the inode.
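 *
 * Each sp_node in that tree covers a [start, end) range of file pgoffs,
 * so (illustrative example) a policy installed for a 16-page mapping at
 * vm_pgoff 0 occupies [0, 16) and is found by sp_lookup(sp, 0, 1).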
23544a8c7bb5SNathan Zimmer * They are protected by the sp->lock rwlock, which should be held 23551da177e4SLinus Torvalds * for any accesses to the tree. 23561da177e4SLinus Torvalds */ 23571da177e4SLinus Torvalds 23584a8c7bb5SNathan Zimmer /* 23594a8c7bb5SNathan Zimmer * lookup first element intersecting start-end. Caller holds sp->lock for 23604a8c7bb5SNathan Zimmer * reading or for writing 23614a8c7bb5SNathan Zimmer */ 23621da177e4SLinus Torvalds static struct sp_node * 23631da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end) 23641da177e4SLinus Torvalds { 23651da177e4SLinus Torvalds struct rb_node *n = sp->root.rb_node; 23661da177e4SLinus Torvalds 23671da177e4SLinus Torvalds while (n) { 23681da177e4SLinus Torvalds struct sp_node *p = rb_entry(n, struct sp_node, nd); 23691da177e4SLinus Torvalds 23701da177e4SLinus Torvalds if (start >= p->end) 23711da177e4SLinus Torvalds n = n->rb_right; 23721da177e4SLinus Torvalds else if (end <= p->start) 23731da177e4SLinus Torvalds n = n->rb_left; 23741da177e4SLinus Torvalds else 23751da177e4SLinus Torvalds break; 23761da177e4SLinus Torvalds } 23771da177e4SLinus Torvalds if (!n) 23781da177e4SLinus Torvalds return NULL; 23791da177e4SLinus Torvalds for (;;) { 23801da177e4SLinus Torvalds struct sp_node *w = NULL; 23811da177e4SLinus Torvalds struct rb_node *prev = rb_prev(n); 23821da177e4SLinus Torvalds if (!prev) 23831da177e4SLinus Torvalds break; 23841da177e4SLinus Torvalds w = rb_entry(prev, struct sp_node, nd); 23851da177e4SLinus Torvalds if (w->end <= start) 23861da177e4SLinus Torvalds break; 23871da177e4SLinus Torvalds n = prev; 23881da177e4SLinus Torvalds } 23891da177e4SLinus Torvalds return rb_entry(n, struct sp_node, nd); 23901da177e4SLinus Torvalds } 23911da177e4SLinus Torvalds 23924a8c7bb5SNathan Zimmer /* 23934a8c7bb5SNathan Zimmer * Insert a new shared policy into the list. Caller holds sp->lock for 23944a8c7bb5SNathan Zimmer * writing. 23954a8c7bb5SNathan Zimmer */ 23961da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new) 23971da177e4SLinus Torvalds { 23981da177e4SLinus Torvalds struct rb_node **p = &sp->root.rb_node; 23991da177e4SLinus Torvalds struct rb_node *parent = NULL; 24001da177e4SLinus Torvalds struct sp_node *nd; 24011da177e4SLinus Torvalds 24021da177e4SLinus Torvalds while (*p) { 24031da177e4SLinus Torvalds parent = *p; 24041da177e4SLinus Torvalds nd = rb_entry(parent, struct sp_node, nd); 24051da177e4SLinus Torvalds if (new->start < nd->start) 24061da177e4SLinus Torvalds p = &(*p)->rb_left; 24071da177e4SLinus Torvalds else if (new->end > nd->end) 24081da177e4SLinus Torvalds p = &(*p)->rb_right; 24091da177e4SLinus Torvalds else 24101da177e4SLinus Torvalds BUG(); 24111da177e4SLinus Torvalds } 24121da177e4SLinus Torvalds rb_link_node(&new->nd, parent, p); 24131da177e4SLinus Torvalds rb_insert_color(&new->nd, &sp->root); 2414140d5a49SPaul Mundt pr_debug("inserting %lx-%lx: %d\n", new->start, new->end, 241545c4745aSLee Schermerhorn new->policy ? 
new->policy->mode : 0); 24161da177e4SLinus Torvalds } 24171da177e4SLinus Torvalds 24181da177e4SLinus Torvalds /* Find shared policy intersecting idx */ 24191da177e4SLinus Torvalds struct mempolicy * 24201da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx) 24211da177e4SLinus Torvalds { 24221da177e4SLinus Torvalds struct mempolicy *pol = NULL; 24231da177e4SLinus Torvalds struct sp_node *sn; 24241da177e4SLinus Torvalds 24251da177e4SLinus Torvalds if (!sp->root.rb_node) 24261da177e4SLinus Torvalds return NULL; 24274a8c7bb5SNathan Zimmer read_lock(&sp->lock); 24281da177e4SLinus Torvalds sn = sp_lookup(sp, idx, idx+1); 24291da177e4SLinus Torvalds if (sn) { 24301da177e4SLinus Torvalds mpol_get(sn->policy); 24311da177e4SLinus Torvalds pol = sn->policy; 24321da177e4SLinus Torvalds } 24334a8c7bb5SNathan Zimmer read_unlock(&sp->lock); 24341da177e4SLinus Torvalds return pol; 24351da177e4SLinus Torvalds } 24361da177e4SLinus Torvalds 243763f74ca2SKOSAKI Motohiro static void sp_free(struct sp_node *n) 243863f74ca2SKOSAKI Motohiro { 243963f74ca2SKOSAKI Motohiro mpol_put(n->policy); 244063f74ca2SKOSAKI Motohiro kmem_cache_free(sn_cache, n); 244163f74ca2SKOSAKI Motohiro } 244263f74ca2SKOSAKI Motohiro 2443771fb4d8SLee Schermerhorn /** 2444771fb4d8SLee Schermerhorn * mpol_misplaced - check whether current page node is valid in policy 2445771fb4d8SLee Schermerhorn * 2446b46e14acSFabian Frederick * @page: page to be checked 2447b46e14acSFabian Frederick * @vma: vm area where page mapped 2448b46e14acSFabian Frederick * @addr: virtual address where page mapped 2449771fb4d8SLee Schermerhorn * 2450771fb4d8SLee Schermerhorn * Lookup current policy node id for vma,addr and "compare to" page's 2451771fb4d8SLee Schermerhorn * node id. 2452771fb4d8SLee Schermerhorn * 2453771fb4d8SLee Schermerhorn * Returns: 2454771fb4d8SLee Schermerhorn * -1 - not misplaced, page is in the right node 2455771fb4d8SLee Schermerhorn * node - node id where the page should be 2456771fb4d8SLee Schermerhorn * 2457771fb4d8SLee Schermerhorn * Policy determination "mimics" alloc_page_vma(). 2458771fb4d8SLee Schermerhorn * Called from fault path where we know the vma and faulting address. 
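 *
 * Illustrative caller sketch (the NUMA hinting fault path does roughly
 * this; shown here only as an example, not a quote of the fault code):
 *
 *	int target = mpol_misplaced(page, vma, addr);
 *	if (target != -1)
 *		migrate_misplaced_page(page, vma, target);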
2459771fb4d8SLee Schermerhorn */ 2460771fb4d8SLee Schermerhorn int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr) 2461771fb4d8SLee Schermerhorn { 2462771fb4d8SLee Schermerhorn struct mempolicy *pol; 2463c33d6c06SMel Gorman struct zoneref *z; 2464771fb4d8SLee Schermerhorn int curnid = page_to_nid(page); 2465771fb4d8SLee Schermerhorn unsigned long pgoff; 246690572890SPeter Zijlstra int thiscpu = raw_smp_processor_id(); 246790572890SPeter Zijlstra int thisnid = cpu_to_node(thiscpu); 246898fa15f3SAnshuman Khandual int polnid = NUMA_NO_NODE; 2469771fb4d8SLee Schermerhorn int ret = -1; 2470771fb4d8SLee Schermerhorn 2471dd6eecb9SOleg Nesterov pol = get_vma_policy(vma, addr); 2472771fb4d8SLee Schermerhorn if (!(pol->flags & MPOL_F_MOF)) 2473771fb4d8SLee Schermerhorn goto out; 2474771fb4d8SLee Schermerhorn 2475771fb4d8SLee Schermerhorn switch (pol->mode) { 2476771fb4d8SLee Schermerhorn case MPOL_INTERLEAVE: 2477771fb4d8SLee Schermerhorn pgoff = vma->vm_pgoff; 2478771fb4d8SLee Schermerhorn pgoff += (addr - vma->vm_start) >> PAGE_SHIFT; 247998c70baaSLaurent Dufour polnid = offset_il_node(pol, pgoff); 2480771fb4d8SLee Schermerhorn break; 2481771fb4d8SLee Schermerhorn 2482771fb4d8SLee Schermerhorn case MPOL_PREFERRED: 2483771fb4d8SLee Schermerhorn if (pol->flags & MPOL_F_LOCAL) 2484771fb4d8SLee Schermerhorn polnid = numa_node_id(); 2485771fb4d8SLee Schermerhorn else 2486771fb4d8SLee Schermerhorn polnid = pol->v.preferred_node; 2487771fb4d8SLee Schermerhorn break; 2488771fb4d8SLee Schermerhorn 2489771fb4d8SLee Schermerhorn case MPOL_BIND: 2490c33d6c06SMel Gorman 2491771fb4d8SLee Schermerhorn /* 2492771fb4d8SLee Schermerhorn * allows binding to multiple nodes. 2493771fb4d8SLee Schermerhorn * use current page if in policy nodemask, 2494771fb4d8SLee Schermerhorn * else select nearest allowed node, if any. 2495771fb4d8SLee Schermerhorn * If no allowed nodes, use current [!misplaced]. 2496771fb4d8SLee Schermerhorn */ 2497771fb4d8SLee Schermerhorn if (node_isset(curnid, pol->v.nodes)) 2498771fb4d8SLee Schermerhorn goto out; 2499c33d6c06SMel Gorman z = first_zones_zonelist( 2500771fb4d8SLee Schermerhorn node_zonelist(numa_node_id(), GFP_HIGHUSER), 2501771fb4d8SLee Schermerhorn gfp_zone(GFP_HIGHUSER), 2502c33d6c06SMel Gorman &pol->v.nodes); 2503c1093b74SPavel Tatashin polnid = zone_to_nid(z->zone); 2504771fb4d8SLee Schermerhorn break; 2505771fb4d8SLee Schermerhorn 2506771fb4d8SLee Schermerhorn default: 2507771fb4d8SLee Schermerhorn BUG(); 2508771fb4d8SLee Schermerhorn } 25095606e387SMel Gorman 25105606e387SMel Gorman /* Migrate the page towards the node whose CPU is referencing it */ 2511e42c8ff2SMel Gorman if (pol->flags & MPOL_F_MORON) { 251290572890SPeter Zijlstra polnid = thisnid; 25135606e387SMel Gorman 251410f39042SRik van Riel if (!should_numa_migrate_memory(current, page, curnid, thiscpu)) 2515de1c9ce6SRik van Riel goto out; 2516de1c9ce6SRik van Riel } 2517e42c8ff2SMel Gorman 2518771fb4d8SLee Schermerhorn if (curnid != polnid) 2519771fb4d8SLee Schermerhorn ret = polnid; 2520771fb4d8SLee Schermerhorn out: 2521771fb4d8SLee Schermerhorn mpol_cond_put(pol); 2522771fb4d8SLee Schermerhorn 2523771fb4d8SLee Schermerhorn return ret; 2524771fb4d8SLee Schermerhorn } 2525771fb4d8SLee Schermerhorn 2526c11600e4SDavid Rientjes /* 2527c11600e4SDavid Rientjes * Drop the (possibly final) reference to task->mempolicy. 
It needs to be 2528c11600e4SDavid Rientjes * dropped after task->mempolicy is set to NULL so that any allocation done as 2529c11600e4SDavid Rientjes * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed 2530c11600e4SDavid Rientjes * policy. 2531c11600e4SDavid Rientjes */ 2532c11600e4SDavid Rientjes void mpol_put_task_policy(struct task_struct *task) 2533c11600e4SDavid Rientjes { 2534c11600e4SDavid Rientjes struct mempolicy *pol; 2535c11600e4SDavid Rientjes 2536c11600e4SDavid Rientjes task_lock(task); 2537c11600e4SDavid Rientjes pol = task->mempolicy; 2538c11600e4SDavid Rientjes task->mempolicy = NULL; 2539c11600e4SDavid Rientjes task_unlock(task); 2540c11600e4SDavid Rientjes mpol_put(pol); 2541c11600e4SDavid Rientjes } 2542c11600e4SDavid Rientjes 25431da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n) 25441da177e4SLinus Torvalds { 2545140d5a49SPaul Mundt pr_debug("deleting %lx-%lx\n", n->start, n->end); 25461da177e4SLinus Torvalds rb_erase(&n->nd, &sp->root); 254763f74ca2SKOSAKI Motohiro sp_free(n); 25481da177e4SLinus Torvalds } 25491da177e4SLinus Torvalds 255042288fe3SMel Gorman static void sp_node_init(struct sp_node *node, unsigned long start, 255142288fe3SMel Gorman unsigned long end, struct mempolicy *pol) 255242288fe3SMel Gorman { 255342288fe3SMel Gorman node->start = start; 255442288fe3SMel Gorman node->end = end; 255542288fe3SMel Gorman node->policy = pol; 255642288fe3SMel Gorman } 255742288fe3SMel Gorman 2558dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end, 2559dbcb0f19SAdrian Bunk struct mempolicy *pol) 25601da177e4SLinus Torvalds { 2561869833f2SKOSAKI Motohiro struct sp_node *n; 2562869833f2SKOSAKI Motohiro struct mempolicy *newpol; 25631da177e4SLinus Torvalds 2564869833f2SKOSAKI Motohiro n = kmem_cache_alloc(sn_cache, GFP_KERNEL); 25651da177e4SLinus Torvalds if (!n) 25661da177e4SLinus Torvalds return NULL; 2567869833f2SKOSAKI Motohiro 2568869833f2SKOSAKI Motohiro newpol = mpol_dup(pol); 2569869833f2SKOSAKI Motohiro if (IS_ERR(newpol)) { 2570869833f2SKOSAKI Motohiro kmem_cache_free(sn_cache, n); 2571869833f2SKOSAKI Motohiro return NULL; 2572869833f2SKOSAKI Motohiro } 2573869833f2SKOSAKI Motohiro newpol->flags |= MPOL_F_SHARED; 257442288fe3SMel Gorman sp_node_init(n, start, end, newpol); 2575869833f2SKOSAKI Motohiro 25761da177e4SLinus Torvalds return n; 25771da177e4SLinus Torvalds } 25781da177e4SLinus Torvalds 25791da177e4SLinus Torvalds /* Replace a policy range. */ 25801da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start, 25811da177e4SLinus Torvalds unsigned long end, struct sp_node *new) 25821da177e4SLinus Torvalds { 2583b22d127aSMel Gorman struct sp_node *n; 258442288fe3SMel Gorman struct sp_node *n_new = NULL; 258542288fe3SMel Gorman struct mempolicy *mpol_new = NULL; 2586b22d127aSMel Gorman int ret = 0; 25871da177e4SLinus Torvalds 258842288fe3SMel Gorman restart: 25894a8c7bb5SNathan Zimmer write_lock(&sp->lock); 25901da177e4SLinus Torvalds n = sp_lookup(sp, start, end); 25911da177e4SLinus Torvalds /* Take care of old policies in the same range.
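 *
 * The loop below distinguishes these cases for an existing node n
 * against the new [start, end) (illustration of the code that follows):
 *
 *	n entirely inside [start, end)		-> sp_delete(sp, n)
 *	n overlaps only the start		-> n->end = start
 *	n overlaps only the end			-> n->start = end
 *	n strictly contains [start, end)	-> split n into two nodes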
*/ 25921da177e4SLinus Torvalds while (n && n->start < end) { 25931da177e4SLinus Torvalds struct rb_node *next = rb_next(&n->nd); 25941da177e4SLinus Torvalds if (n->start >= start) { 25951da177e4SLinus Torvalds if (n->end <= end) 25961da177e4SLinus Torvalds sp_delete(sp, n); 25971da177e4SLinus Torvalds else 25981da177e4SLinus Torvalds n->start = end; 25991da177e4SLinus Torvalds } else { 26001da177e4SLinus Torvalds /* Old policy spanning whole new range. */ 26011da177e4SLinus Torvalds if (n->end > end) { 260242288fe3SMel Gorman if (!n_new) 260342288fe3SMel Gorman goto alloc_new; 260442288fe3SMel Gorman 260542288fe3SMel Gorman *mpol_new = *n->policy; 260642288fe3SMel Gorman atomic_set(&mpol_new->refcnt, 1); 26077880639cSKOSAKI Motohiro sp_node_init(n_new, end, n->end, mpol_new); 26081da177e4SLinus Torvalds n->end = start; 26095ca39575SHillf Danton sp_insert(sp, n_new); 261042288fe3SMel Gorman n_new = NULL; 261142288fe3SMel Gorman mpol_new = NULL; 26121da177e4SLinus Torvalds break; 26131da177e4SLinus Torvalds } else 26141da177e4SLinus Torvalds n->end = start; 26151da177e4SLinus Torvalds } 26161da177e4SLinus Torvalds if (!next) 26171da177e4SLinus Torvalds break; 26181da177e4SLinus Torvalds n = rb_entry(next, struct sp_node, nd); 26191da177e4SLinus Torvalds } 26201da177e4SLinus Torvalds if (new) 26211da177e4SLinus Torvalds sp_insert(sp, new); 26224a8c7bb5SNathan Zimmer write_unlock(&sp->lock); 262342288fe3SMel Gorman ret = 0; 262442288fe3SMel Gorman 262542288fe3SMel Gorman err_out: 262642288fe3SMel Gorman if (mpol_new) 262742288fe3SMel Gorman mpol_put(mpol_new); 262842288fe3SMel Gorman if (n_new) 262942288fe3SMel Gorman kmem_cache_free(sn_cache, n_new); 263042288fe3SMel Gorman 2631b22d127aSMel Gorman return ret; 263242288fe3SMel Gorman 263342288fe3SMel Gorman alloc_new: 26344a8c7bb5SNathan Zimmer write_unlock(&sp->lock); 263542288fe3SMel Gorman ret = -ENOMEM; 263642288fe3SMel Gorman n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL); 263742288fe3SMel Gorman if (!n_new) 263842288fe3SMel Gorman goto err_out; 263942288fe3SMel Gorman mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL); 264042288fe3SMel Gorman if (!mpol_new) 264142288fe3SMel Gorman goto err_out; 264242288fe3SMel Gorman goto restart; 26431da177e4SLinus Torvalds } 26441da177e4SLinus Torvalds 264571fe804bSLee Schermerhorn /** 264671fe804bSLee Schermerhorn * mpol_shared_policy_init - initialize shared policy for inode 264771fe804bSLee Schermerhorn * @sp: pointer to inode shared policy 264871fe804bSLee Schermerhorn * @mpol: struct mempolicy to install 264971fe804bSLee Schermerhorn * 265071fe804bSLee Schermerhorn * Install non-NULL @mpol in inode's shared policy rb-tree. 265171fe804bSLee Schermerhorn * On entry, the current task has a reference on a non-NULL @mpol. 265271fe804bSLee Schermerhorn * This must be released on exit. 26534bfc4495SKAMEZAWA Hiroyuki * This is called during get_inode(), so GFP_KERNEL can be used.
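 *
 * Typical caller sketch (loosely from mm/shmem.c, shown only for
 * illustration): a new tmpfs inode passes the superblock mempolicy in,
 *
 *	mpol_shared_policy_init(&info->policy, shmem_get_sbmpol(sbinfo));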
265471fe804bSLee Schermerhorn */ 265571fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) 26567339ff83SRobin Holt { 265758568d2aSMiao Xie int ret; 265858568d2aSMiao Xie 265971fe804bSLee Schermerhorn sp->root = RB_ROOT; /* empty tree == default mempolicy */ 26604a8c7bb5SNathan Zimmer rwlock_init(&sp->lock); 26617339ff83SRobin Holt 266271fe804bSLee Schermerhorn if (mpol) { 26637339ff83SRobin Holt struct vm_area_struct pvma; 266471fe804bSLee Schermerhorn struct mempolicy *new; 26654bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 26667339ff83SRobin Holt 26674bfc4495SKAMEZAWA Hiroyuki if (!scratch) 26685c0c1654SLee Schermerhorn goto put_mpol; 266971fe804bSLee Schermerhorn /* contextualize the tmpfs mount point mempolicy */ 267071fe804bSLee Schermerhorn new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask); 267115d77835SLee Schermerhorn if (IS_ERR(new)) 26720cae3457SDan Carpenter goto free_scratch; /* no valid nodemask intersection */ 267358568d2aSMiao Xie 267458568d2aSMiao Xie task_lock(current); 26754bfc4495SKAMEZAWA Hiroyuki ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch); 267658568d2aSMiao Xie task_unlock(current); 267715d77835SLee Schermerhorn if (ret) 26785c0c1654SLee Schermerhorn goto put_new; 267971fe804bSLee Schermerhorn 268071fe804bSLee Schermerhorn /* Create pseudo-vma that contains just the policy */ 26812c4541e2SKirill A. Shutemov vma_init(&pvma, NULL); 268271fe804bSLee Schermerhorn pvma.vm_end = TASK_SIZE; /* policy covers entire file */ 268371fe804bSLee Schermerhorn mpol_set_shared_policy(sp, &pvma, new); /* adds ref */ 268415d77835SLee Schermerhorn 26855c0c1654SLee Schermerhorn put_new: 268671fe804bSLee Schermerhorn mpol_put(new); /* drop initial ref */ 26870cae3457SDan Carpenter free_scratch: 26884bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 26895c0c1654SLee Schermerhorn put_mpol: 26905c0c1654SLee Schermerhorn mpol_put(mpol); /* drop our incoming ref on sb mpol */ 26917339ff83SRobin Holt } 26927339ff83SRobin Holt } 26937339ff83SRobin Holt 26941da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info, 26951da177e4SLinus Torvalds struct vm_area_struct *vma, struct mempolicy *npol) 26961da177e4SLinus Torvalds { 26971da177e4SLinus Torvalds int err; 26981da177e4SLinus Torvalds struct sp_node *new = NULL; 26991da177e4SLinus Torvalds unsigned long sz = vma_pages(vma); 27001da177e4SLinus Torvalds 2701028fec41SDavid Rientjes pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n", 27021da177e4SLinus Torvalds vma->vm_pgoff, 270345c4745aSLee Schermerhorn sz, npol ? npol->mode : -1, 2704028fec41SDavid Rientjes npol ? npol->flags : -1, 270500ef2d2fSDavid Rientjes npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE); 27061da177e4SLinus Torvalds 27071da177e4SLinus Torvalds if (npol) { 27081da177e4SLinus Torvalds new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol); 27091da177e4SLinus Torvalds if (!new) 27101da177e4SLinus Torvalds return -ENOMEM; 27111da177e4SLinus Torvalds } 27121da177e4SLinus Torvalds err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new); 27131da177e4SLinus Torvalds if (err && new) 271463f74ca2SKOSAKI Motohiro sp_free(new); 27151da177e4SLinus Torvalds return err; 27161da177e4SLinus Torvalds } 27171da177e4SLinus Torvalds 27181da177e4SLinus Torvalds /* Free a backing policy store on inode delete. 
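 *
 * Illustrative caller (sketch): tmpfs inode eviction does roughly
 *
 *	mpol_free_shared_policy(&SHMEM_I(inode)->policy);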
*/ 27191da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p) 27201da177e4SLinus Torvalds { 27211da177e4SLinus Torvalds struct sp_node *n; 27221da177e4SLinus Torvalds struct rb_node *next; 27231da177e4SLinus Torvalds 27241da177e4SLinus Torvalds if (!p->root.rb_node) 27251da177e4SLinus Torvalds return; 27264a8c7bb5SNathan Zimmer write_lock(&p->lock); 27271da177e4SLinus Torvalds next = rb_first(&p->root); 27281da177e4SLinus Torvalds while (next) { 27291da177e4SLinus Torvalds n = rb_entry(next, struct sp_node, nd); 27301da177e4SLinus Torvalds next = rb_next(&n->nd); 273163f74ca2SKOSAKI Motohiro sp_delete(p, n); 27321da177e4SLinus Torvalds } 27334a8c7bb5SNathan Zimmer write_unlock(&p->lock); 27341da177e4SLinus Torvalds } 27351da177e4SLinus Torvalds 27361a687c2eSMel Gorman #ifdef CONFIG_NUMA_BALANCING 2737c297663cSMel Gorman static int __initdata numabalancing_override; 27381a687c2eSMel Gorman 27391a687c2eSMel Gorman static void __init check_numabalancing_enable(void) 27401a687c2eSMel Gorman { 27411a687c2eSMel Gorman bool numabalancing_default = false; 27421a687c2eSMel Gorman 27431a687c2eSMel Gorman if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED)) 27441a687c2eSMel Gorman numabalancing_default = true; 27451a687c2eSMel Gorman 2746c297663cSMel Gorman /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */ 2747c297663cSMel Gorman if (numabalancing_override) 2748c297663cSMel Gorman set_numabalancing_state(numabalancing_override == 1); 2749c297663cSMel Gorman 2750b0dc2b9bSMel Gorman if (num_online_nodes() > 1 && !numabalancing_override) { 2751756a025fSJoe Perches pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n", 2752c297663cSMel Gorman numabalancing_default ? 
"Enabling" : "Disabling"); 27531a687c2eSMel Gorman set_numabalancing_state(numabalancing_default); 27541a687c2eSMel Gorman } 27551a687c2eSMel Gorman } 27561a687c2eSMel Gorman 27571a687c2eSMel Gorman static int __init setup_numabalancing(char *str) 27581a687c2eSMel Gorman { 27591a687c2eSMel Gorman int ret = 0; 27601a687c2eSMel Gorman if (!str) 27611a687c2eSMel Gorman goto out; 27621a687c2eSMel Gorman 27631a687c2eSMel Gorman if (!strcmp(str, "enable")) { 2764c297663cSMel Gorman numabalancing_override = 1; 27651a687c2eSMel Gorman ret = 1; 27661a687c2eSMel Gorman } else if (!strcmp(str, "disable")) { 2767c297663cSMel Gorman numabalancing_override = -1; 27681a687c2eSMel Gorman ret = 1; 27691a687c2eSMel Gorman } 27701a687c2eSMel Gorman out: 27711a687c2eSMel Gorman if (!ret) 27724a404beaSAndrew Morton pr_warn("Unable to parse numa_balancing=\n"); 27731a687c2eSMel Gorman 27741a687c2eSMel Gorman return ret; 27751a687c2eSMel Gorman } 27761a687c2eSMel Gorman __setup("numa_balancing=", setup_numabalancing); 27771a687c2eSMel Gorman #else 27781a687c2eSMel Gorman static inline void __init check_numabalancing_enable(void) 27791a687c2eSMel Gorman { 27801a687c2eSMel Gorman } 27811a687c2eSMel Gorman #endif /* CONFIG_NUMA_BALANCING */ 27821a687c2eSMel Gorman 27831da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */ 27841da177e4SLinus Torvalds void __init numa_policy_init(void) 27851da177e4SLinus Torvalds { 2786b71636e2SPaul Mundt nodemask_t interleave_nodes; 2787b71636e2SPaul Mundt unsigned long largest = 0; 2788b71636e2SPaul Mundt int nid, prefer = 0; 2789b71636e2SPaul Mundt 27901da177e4SLinus Torvalds policy_cache = kmem_cache_create("numa_policy", 27911da177e4SLinus Torvalds sizeof(struct mempolicy), 279220c2df83SPaul Mundt 0, SLAB_PANIC, NULL); 27931da177e4SLinus Torvalds 27941da177e4SLinus Torvalds sn_cache = kmem_cache_create("shared_policy_node", 27951da177e4SLinus Torvalds sizeof(struct sp_node), 279620c2df83SPaul Mundt 0, SLAB_PANIC, NULL); 27971da177e4SLinus Torvalds 27985606e387SMel Gorman for_each_node(nid) { 27995606e387SMel Gorman preferred_node_policy[nid] = (struct mempolicy) { 28005606e387SMel Gorman .refcnt = ATOMIC_INIT(1), 28015606e387SMel Gorman .mode = MPOL_PREFERRED, 28025606e387SMel Gorman .flags = MPOL_F_MOF | MPOL_F_MORON, 28035606e387SMel Gorman .v = { .preferred_node = nid, }, 28045606e387SMel Gorman }; 28055606e387SMel Gorman } 28065606e387SMel Gorman 2807b71636e2SPaul Mundt /* 2808b71636e2SPaul Mundt * Set interleaving policy for system init. Interleaving is only 2809b71636e2SPaul Mundt * enabled across suitably sized nodes (default is >= 16MB), or 2810b71636e2SPaul Mundt * fall back to the largest node if they're all smaller. 2811b71636e2SPaul Mundt */ 2812b71636e2SPaul Mundt nodes_clear(interleave_nodes); 281301f13bd6SLai Jiangshan for_each_node_state(nid, N_MEMORY) { 2814b71636e2SPaul Mundt unsigned long total_pages = node_present_pages(nid); 28151da177e4SLinus Torvalds 2816b71636e2SPaul Mundt /* Preserve the largest node */ 2817b71636e2SPaul Mundt if (largest < total_pages) { 2818b71636e2SPaul Mundt largest = total_pages; 2819b71636e2SPaul Mundt prefer = nid; 2820b71636e2SPaul Mundt } 2821b71636e2SPaul Mundt 2822b71636e2SPaul Mundt /* Interleave this node? 
*/ 2823b71636e2SPaul Mundt if ((total_pages << PAGE_SHIFT) >= (16 << 20)) 2824b71636e2SPaul Mundt node_set(nid, interleave_nodes); 2825b71636e2SPaul Mundt } 2826b71636e2SPaul Mundt 2827b71636e2SPaul Mundt /* All too small, use the largest */ 2828b71636e2SPaul Mundt if (unlikely(nodes_empty(interleave_nodes))) 2829b71636e2SPaul Mundt node_set(prefer, interleave_nodes); 2830b71636e2SPaul Mundt 2831028fec41SDavid Rientjes if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes)) 2832b1de0d13SMitchel Humpherys pr_err("%s: interleaving failed\n", __func__); 28331a687c2eSMel Gorman 28341a687c2eSMel Gorman check_numabalancing_enable(); 28351da177e4SLinus Torvalds } 28361da177e4SLinus Torvalds 28378bccd85fSChristoph Lameter /* Reset policy of current process to default */ 28381da177e4SLinus Torvalds void numa_default_policy(void) 28391da177e4SLinus Torvalds { 2840028fec41SDavid Rientjes do_set_mempolicy(MPOL_DEFAULT, 0, NULL); 28411da177e4SLinus Torvalds } 284268860ec1SPaul Jackson 28434225399aSPaul Jackson /* 2844095f1fc4SLee Schermerhorn * Parse and format mempolicy from/to strings 2845095f1fc4SLee Schermerhorn */ 2846095f1fc4SLee Schermerhorn 2847095f1fc4SLee Schermerhorn /* 2848f2a07f40SHugh Dickins * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag. 28491a75a6c8SChristoph Lameter */ 2850345ace9cSLee Schermerhorn static const char * const policy_modes[] = 2851345ace9cSLee Schermerhorn { 2852345ace9cSLee Schermerhorn [MPOL_DEFAULT] = "default", 2853345ace9cSLee Schermerhorn [MPOL_PREFERRED] = "prefer", 2854345ace9cSLee Schermerhorn [MPOL_BIND] = "bind", 2855345ace9cSLee Schermerhorn [MPOL_INTERLEAVE] = "interleave", 2856d3a71033SLee Schermerhorn [MPOL_LOCAL] = "local", 2857345ace9cSLee Schermerhorn }; 28581a75a6c8SChristoph Lameter 2859095f1fc4SLee Schermerhorn 2860095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS 2861095f1fc4SLee Schermerhorn /** 2862f2a07f40SHugh Dickins * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option. 2863095f1fc4SLee Schermerhorn * @str: string containing mempolicy to parse 286471fe804bSLee Schermerhorn * @mpol: pointer to struct mempolicy pointer, returned on success. 
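 *
 * Example inputs (illustrative, per the format below): "interleave:0-3",
 * "prefer=static:1", "bind:1,3", "local".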
2865095f1fc4SLee Schermerhorn * 2866095f1fc4SLee Schermerhorn * Format of input: 2867095f1fc4SLee Schermerhorn * <mode>[=<flags>][:<nodelist>] 2868095f1fc4SLee Schermerhorn * 286971fe804bSLee Schermerhorn * On success, returns 0, else 1 2870095f1fc4SLee Schermerhorn */ 2871a7a88b23SHugh Dickins int mpol_parse_str(char *str, struct mempolicy **mpol) 2872095f1fc4SLee Schermerhorn { 287371fe804bSLee Schermerhorn struct mempolicy *new = NULL; 2874f2a07f40SHugh Dickins unsigned short mode_flags; 287571fe804bSLee Schermerhorn nodemask_t nodes; 2876095f1fc4SLee Schermerhorn char *nodelist = strchr(str, ':'); 2877095f1fc4SLee Schermerhorn char *flags = strchr(str, '='); 2878dedf2c73Szhong jiang int err = 1, mode; 2879095f1fc4SLee Schermerhorn 2880c7a91bc7SDan Carpenter if (flags) 2881c7a91bc7SDan Carpenter *flags++ = '\0'; /* terminate mode string */ 2882c7a91bc7SDan Carpenter 2883095f1fc4SLee Schermerhorn if (nodelist) { 2884095f1fc4SLee Schermerhorn /* NUL-terminate mode or flags string */ 2885095f1fc4SLee Schermerhorn *nodelist++ = '\0'; 288671fe804bSLee Schermerhorn if (nodelist_parse(nodelist, nodes)) 2887095f1fc4SLee Schermerhorn goto out; 288801f13bd6SLai Jiangshan if (!nodes_subset(nodes, node_states[N_MEMORY])) 2889095f1fc4SLee Schermerhorn goto out; 289071fe804bSLee Schermerhorn } else 289171fe804bSLee Schermerhorn nodes_clear(nodes); 289271fe804bSLee Schermerhorn 2893dedf2c73Szhong jiang mode = match_string(policy_modes, MPOL_MAX, str); 2894dedf2c73Szhong jiang if (mode < 0) 2895095f1fc4SLee Schermerhorn goto out; 2896095f1fc4SLee Schermerhorn 289771fe804bSLee Schermerhorn switch (mode) { 2898095f1fc4SLee Schermerhorn case MPOL_PREFERRED: 289971fe804bSLee Schermerhorn /* 2900aa9f7d51SRandy Dunlap * Insist on a nodelist of one node only, although later 2901aa9f7d51SRandy Dunlap * we use first_node(nodes) to grab a single node, so here 2902aa9f7d51SRandy Dunlap * nodelist (or nodes) cannot be empty. 
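 *
 * e.g. "prefer:2" parses here, while "prefer:0-3" is rejected because
 * a non-digit follows the node id.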
290371fe804bSLee Schermerhorn */ 2904095f1fc4SLee Schermerhorn if (nodelist) { 2905095f1fc4SLee Schermerhorn char *rest = nodelist; 2906095f1fc4SLee Schermerhorn while (isdigit(*rest)) 2907095f1fc4SLee Schermerhorn rest++; 2908926f2ae0SKOSAKI Motohiro if (*rest) 2909926f2ae0SKOSAKI Motohiro goto out; 2910aa9f7d51SRandy Dunlap if (nodes_empty(nodes)) 2911aa9f7d51SRandy Dunlap goto out; 2912095f1fc4SLee Schermerhorn } 2913095f1fc4SLee Schermerhorn break; 2914095f1fc4SLee Schermerhorn case MPOL_INTERLEAVE: 2915095f1fc4SLee Schermerhorn /* 2916095f1fc4SLee Schermerhorn * Default to online nodes with memory if no nodelist 2917095f1fc4SLee Schermerhorn */ 2918095f1fc4SLee Schermerhorn if (!nodelist) 291901f13bd6SLai Jiangshan nodes = node_states[N_MEMORY]; 29203f226aa1SLee Schermerhorn break; 292171fe804bSLee Schermerhorn case MPOL_LOCAL: 29223f226aa1SLee Schermerhorn /* 292371fe804bSLee Schermerhorn * Don't allow a nodelist; mpol_new() checks flags 29243f226aa1SLee Schermerhorn */ 292571fe804bSLee Schermerhorn if (nodelist) 29263f226aa1SLee Schermerhorn goto out; 292771fe804bSLee Schermerhorn mode = MPOL_PREFERRED; 29283f226aa1SLee Schermerhorn break; 2929413b43deSRavikiran G Thirumalai case MPOL_DEFAULT: 2930413b43deSRavikiran G Thirumalai /* 2931413b43deSRavikiran G Thirumalai * Insist on an empty nodelist 2932413b43deSRavikiran G Thirumalai */ 2933413b43deSRavikiran G Thirumalai if (!nodelist) 2934413b43deSRavikiran G Thirumalai err = 0; 2935413b43deSRavikiran G Thirumalai goto out; 2936d69b2e63SKOSAKI Motohiro case MPOL_BIND: 293771fe804bSLee Schermerhorn /* 2938d69b2e63SKOSAKI Motohiro * Insist on a nodelist 293971fe804bSLee Schermerhorn */ 2940d69b2e63SKOSAKI Motohiro if (!nodelist) 2941d69b2e63SKOSAKI Motohiro goto out; 2942095f1fc4SLee Schermerhorn } 2943095f1fc4SLee Schermerhorn 294471fe804bSLee Schermerhorn mode_flags = 0; 2945095f1fc4SLee Schermerhorn if (flags) { 2946095f1fc4SLee Schermerhorn /* 2947095f1fc4SLee Schermerhorn * Currently, we only support two mutually exclusive 2948095f1fc4SLee Schermerhorn * mode flags. 2949095f1fc4SLee Schermerhorn */ 2950095f1fc4SLee Schermerhorn if (!strcmp(flags, "static")) 295171fe804bSLee Schermerhorn mode_flags |= MPOL_F_STATIC_NODES; 2952095f1fc4SLee Schermerhorn else if (!strcmp(flags, "relative")) 295371fe804bSLee Schermerhorn mode_flags |= MPOL_F_RELATIVE_NODES; 2954095f1fc4SLee Schermerhorn else 2955926f2ae0SKOSAKI Motohiro goto out; 2956095f1fc4SLee Schermerhorn } 295771fe804bSLee Schermerhorn 295871fe804bSLee Schermerhorn new = mpol_new(mode, mode_flags, &nodes); 295971fe804bSLee Schermerhorn if (IS_ERR(new)) 2960926f2ae0SKOSAKI Motohiro goto out; 2961926f2ae0SKOSAKI Motohiro 2962f2a07f40SHugh Dickins /* 2963f2a07f40SHugh Dickins * Save nodes for mpol_to_str() to show the tmpfs mount options 2964f2a07f40SHugh Dickins * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo. 2965f2a07f40SHugh Dickins */ 2966f2a07f40SHugh Dickins if (mode != MPOL_PREFERRED) 2967f2a07f40SHugh Dickins new->v.nodes = nodes; 2968f2a07f40SHugh Dickins else if (nodelist) 2969f2a07f40SHugh Dickins new->v.preferred_node = first_node(nodes); 2970f2a07f40SHugh Dickins else 2971f2a07f40SHugh Dickins new->flags |= MPOL_F_LOCAL; 2972f2a07f40SHugh Dickins 2973f2a07f40SHugh Dickins /* 2974f2a07f40SHugh Dickins * Save nodes for contextualization: this will be used to "clone" 2975f2a07f40SHugh Dickins * the mempolicy in a specific context [cpuset] at a later time.
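 *
 * e.g. (illustrative) a tmpfs mount created with mpol=bind:0-1 keeps
 * "0-1" here so the policy can later be re-evaluated against the mems
 * allowed by the mounting task's cpuset.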
2976f2a07f40SHugh Dickins */ 2977e17f74afSLee Schermerhorn new->w.user_nodemask = nodes; 2978f2a07f40SHugh Dickins 2979926f2ae0SKOSAKI Motohiro err = 0; 298071fe804bSLee Schermerhorn 2981095f1fc4SLee Schermerhorn out: 2982095f1fc4SLee Schermerhorn /* Restore string for error message */ 2983095f1fc4SLee Schermerhorn if (nodelist) 2984095f1fc4SLee Schermerhorn *--nodelist = ':'; 2985095f1fc4SLee Schermerhorn if (flags) 2986095f1fc4SLee Schermerhorn *--flags = '='; 298771fe804bSLee Schermerhorn if (!err) 298871fe804bSLee Schermerhorn *mpol = new; 2989095f1fc4SLee Schermerhorn return err; 2990095f1fc4SLee Schermerhorn } 2991095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */ 2992095f1fc4SLee Schermerhorn 299371fe804bSLee Schermerhorn /** 299471fe804bSLee Schermerhorn * mpol_to_str - format a mempolicy structure for printing 299571fe804bSLee Schermerhorn * @buffer: to contain formatted mempolicy string 299671fe804bSLee Schermerhorn * @maxlen: length of @buffer 299771fe804bSLee Schermerhorn * @pol: pointer to mempolicy to be formatted 299871fe804bSLee Schermerhorn * 2999948927eeSDavid Rientjes * Convert @pol into a string. If @buffer is too short, truncate the string. 3000948927eeSDavid Rientjes * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the 3001948927eeSDavid Rientjes * longest flag, "relative", and to display at least a few node ids. 30021a75a6c8SChristoph Lameter */ 3003948927eeSDavid Rientjes void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol) 30041a75a6c8SChristoph Lameter { 30051a75a6c8SChristoph Lameter char *p = buffer; 3006948927eeSDavid Rientjes nodemask_t nodes = NODE_MASK_NONE; 3007948927eeSDavid Rientjes unsigned short mode = MPOL_DEFAULT; 3008948927eeSDavid Rientjes unsigned short flags = 0; 30091a75a6c8SChristoph Lameter 30108790c71aSDavid Rientjes if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) { 3011bea904d5SLee Schermerhorn mode = pol->mode; 3012948927eeSDavid Rientjes flags = pol->flags; 3013948927eeSDavid Rientjes } 3014bea904d5SLee Schermerhorn 30151a75a6c8SChristoph Lameter switch (mode) { 30161a75a6c8SChristoph Lameter case MPOL_DEFAULT: 30171a75a6c8SChristoph Lameter break; 30181a75a6c8SChristoph Lameter case MPOL_PREFERRED: 3019fc36b8d3SLee Schermerhorn if (flags & MPOL_F_LOCAL) 3020f2a07f40SHugh Dickins mode = MPOL_LOCAL; 302153f2556bSLee Schermerhorn else 3022fc36b8d3SLee Schermerhorn node_set(pol->v.preferred_node, nodes); 30231a75a6c8SChristoph Lameter break; 30241a75a6c8SChristoph Lameter case MPOL_BIND: 30251a75a6c8SChristoph Lameter case MPOL_INTERLEAVE: 30261a75a6c8SChristoph Lameter nodes = pol->v.nodes; 30271a75a6c8SChristoph Lameter break; 30281a75a6c8SChristoph Lameter default: 3029948927eeSDavid Rientjes WARN_ON_ONCE(1); 3030948927eeSDavid Rientjes snprintf(p, maxlen, "unknown"); 3031948927eeSDavid Rientjes return; 30321a75a6c8SChristoph Lameter } 30331a75a6c8SChristoph Lameter 3034b7a9f420SDavid Rientjes p += snprintf(p, maxlen, "%s", policy_modes[mode]); 30351a75a6c8SChristoph Lameter 3036fc36b8d3SLee Schermerhorn if (flags & MPOL_MODE_FLAGS) { 3037948927eeSDavid Rientjes p += snprintf(p, buffer + maxlen - p, "="); 3038f5b087b5SDavid Rientjes 30392291990aSLee Schermerhorn /* 30402291990aSLee Schermerhorn * Currently, the only defined flags are mutually exclusive 30412291990aSLee Schermerhorn */ 3042f5b087b5SDavid Rientjes if (flags & MPOL_F_STATIC_NODES) 30432291990aSLee Schermerhorn p += snprintf(p, buffer + maxlen - p, "static"); 30442291990aSLee Schermerhorn else if (flags & 
MPOL_F_RELATIVE_NODES) 30452291990aSLee Schermerhorn p += snprintf(p, buffer + maxlen - p, "relative"); 3046f5b087b5SDavid Rientjes } 3047f5b087b5SDavid Rientjes 30489e763e0fSTejun Heo if (!nodes_empty(nodes)) 30499e763e0fSTejun Heo p += scnprintf(p, buffer + maxlen - p, ":%*pbl", 30509e763e0fSTejun Heo nodemask_pr_args(&nodes)); 30511a75a6c8SChristoph Lameter }
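
/*
 * mpol_to_str() output examples (illustrative): an interleave policy
 * over nodes 0-3 with MPOL_F_STATIC_NODES formats as
 * "interleave=static:0-3"; a preferred policy with MPOL_F_LOCAL
 * formats as "local".
 */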