// SPDX-License-Identifier: GPL-2.0-only
/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a process counter
 *                is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead.
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case NUMA_NO_NODE here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non-default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non-interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem, kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/pagewalk.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/ptrace.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>
#include <linux/swapops.h>

#include <asm/tlbflush.h>
#include <linux/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)    /* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)          /* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;
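
/*
 * For orientation: the policies documented at the top of this file are
 * selected from userspace via set_mempolicy(2) and mbind(2). A minimal
 * sketch (illustrative userspace code, not part of this file; 'addr' and
 * 'len' are assumed to describe an existing mapping):
 *
 *      unsigned long mask = (1UL << 0) | (1UL << 1);
 *      // interleave future process allocations across nodes 0 and 1
 *      set_mempolicy(MPOL_INTERLEAVE, &mask, sizeof(mask) * 8);
 *      // restrict one mapping to node 0, with no fallback
 *      unsigned long node0 = 1UL << 0;
 *      mbind(addr, len, MPOL_BIND, &node0, sizeof(node0) * 8, 0);
 */
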
/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
        .refcnt = ATOMIC_INIT(1), /* never free it */
        .mode = MPOL_PREFERRED,
        .flags = MPOL_F_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

struct mempolicy *get_task_policy(struct task_struct *p)
{
        struct mempolicy *pol = p->mempolicy;
        int node;

        if (pol)
                return pol;

        node = numa_node_id();
        if (node != NUMA_NO_NODE) {
                pol = &preferred_node_policy[node];
                /* preferred_node_policy is not initialised early in boot */
                if (pol->mode)
                        return pol;
        }

        return &default_policy;
}

static const struct mempolicy_operations {
        int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
        void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
} mpol_ops[MPOL_MAX];

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
        return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
                                   const nodemask_t *rel)
{
        nodemask_t tmp;
        nodes_fold(tmp, *orig, nodes_weight(*rel));
        nodes_onto(*ret, tmp, *rel);
}

static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
        if (nodes_empty(*nodes))
                return -EINVAL;
        pol->v.nodes = *nodes;
        return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
        if (!nodes)
                pol->flags |= MPOL_F_LOCAL;     /* local allocation */
        else if (nodes_empty(*nodes))
                return -EINVAL;                 /* no allowed nodes */
        else
                pol->v.preferred_node = first_node(*nodes);
        return 0;
}
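
/*
 * Worked example for mpol_relative_nodemask() above (illustrative, not
 * normative): with *orig = {0,2} and *rel = {4,5,6} (weight 3),
 * nodes_fold() wraps orig modulo 3, leaving tmp = {0,2}; nodes_onto()
 * then maps bit n of tmp onto the n-th set bit of *rel, so *ret = {4,6}.
 * This is how MPOL_F_RELATIVE_NODES masks are rescaled onto the nodes
 * that are actually allowed.
 */
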
static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
        if (nodes_empty(*nodes))
                return -EINVAL;
        pol->v.nodes = *nodes;
        return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy. mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags. But, we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy. May also be called holding the mmap_semaphore for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
                     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
        int ret;

        /* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
        if (pol == NULL)
                return 0;
        /* Check N_MEMORY */
        nodes_and(nsc->mask1,
                  cpuset_current_mems_allowed, node_states[N_MEMORY]);

        VM_BUG_ON(!nodes);
        if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
                nodes = NULL;   /* explicit local allocation */
        else {
                if (pol->flags & MPOL_F_RELATIVE_NODES)
                        mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
                else
                        nodes_and(nsc->mask2, *nodes, nsc->mask1);

                if (mpol_store_user_nodemask(pol))
                        pol->w.user_nodemask = *nodes;
                else
                        pol->w.cpuset_mems_allowed =
                                                cpuset_current_mems_allowed;
        }

        if (nodes)
                ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
        else
                ret = mpol_ops[pol->mode].create(pol, NULL);
        return ret;
}

/*
 * This function just creates a new policy, does some checks and simple
 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
                                  nodemask_t *nodes)
{
        struct mempolicy *policy;

        pr_debug("setting mode %d flags %d nodes[0] %lx\n",
                 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
        if (mode == MPOL_DEFAULT) {
                if (nodes && !nodes_empty(*nodes))
                        return ERR_PTR(-EINVAL);
                return NULL;
        }
        VM_BUG_ON(!nodes);

        /*
         * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
         * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
         * All other modes require a valid pointer to a non-empty nodemask.
         */
        if (mode == MPOL_PREFERRED) {
                if (nodes_empty(*nodes)) {
                        if (((flags & MPOL_F_STATIC_NODES) ||
                             (flags & MPOL_F_RELATIVE_NODES)))
                                return ERR_PTR(-EINVAL);
                }
        } else if (mode == MPOL_LOCAL) {
                if (!nodes_empty(*nodes) ||
                    (flags & MPOL_F_STATIC_NODES) ||
                    (flags & MPOL_F_RELATIVE_NODES))
                        return ERR_PTR(-EINVAL);
                mode = MPOL_PREFERRED;
        } else if (nodes_empty(*nodes))
                return ERR_PTR(-EINVAL);
        policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
        if (!policy)
                return ERR_PTR(-ENOMEM);
        atomic_set(&policy->refcnt, 1);
        policy->mode = mode;
        policy->flags = flags;

        return policy;
}
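
/*
 * A minimal sketch of the intended mpol_new()/mpol_set_nodemask() pairing
 * (this mirrors do_set_mempolicy() below; locking checks and error
 * handling are elided for brevity):
 *
 *      NODEMASK_SCRATCH(scratch);
 *      struct mempolicy *new = mpol_new(mode, flags, nodes);
 *      task_lock(current);
 *      ret = mpol_set_nodemask(new, nodes, scratch);
 *      task_unlock(current);
 *      NODEMASK_SCRATCH_FREE(scratch);
 */
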
/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
        if (!atomic_dec_and_test(&p->refcnt))
                return;
        kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
{
}

static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
{
        nodemask_t tmp;

        if (pol->flags & MPOL_F_STATIC_NODES)
                nodes_and(tmp, pol->w.user_nodemask, *nodes);
        else if (pol->flags & MPOL_F_RELATIVE_NODES)
                mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
        else {
                nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed,
                                                                *nodes);
                pol->w.cpuset_mems_allowed = *nodes;
        }

        if (nodes_empty(tmp))
                tmp = *nodes;

        pol->v.nodes = tmp;
}

static void mpol_rebind_preferred(struct mempolicy *pol,
                                                const nodemask_t *nodes)
{
        nodemask_t tmp;

        if (pol->flags & MPOL_F_STATIC_NODES) {
                int node = first_node(pol->w.user_nodemask);

                if (node_isset(node, *nodes)) {
                        pol->v.preferred_node = node;
                        pol->flags &= ~MPOL_F_LOCAL;
                } else
                        pol->flags |= MPOL_F_LOCAL;
        } else if (pol->flags & MPOL_F_RELATIVE_NODES) {
                mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
                pol->v.preferred_node = first_node(tmp);
        } else if (!(pol->flags & MPOL_F_LOCAL)) {
                pol->v.preferred_node = node_remap(pol->v.preferred_node,
                                                   pol->w.cpuset_mems_allowed,
                                                   *nodes);
                pol->w.cpuset_mems_allowed = *nodes;
        }
}

/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * Per-vma policies are protected by mmap_sem. Allocations using per-task
 * policies are protected by task->mems_allowed_seq to prevent a premature
 * OOM/allocation failure due to parallel nodemask modification.
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
{
        if (!pol)
                return;
        if (!mpol_store_user_nodemask(pol) && !(pol->flags & MPOL_F_LOCAL) &&
            nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
                return;

        mpol_ops[pol->mode].rebind(pol, newmask);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */
void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
{
        mpol_rebind_policy(tsk->mempolicy, new);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm. Takes mm->mmap_sem during call.
 */
void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
        struct vm_area_struct *vma;

        down_write(&mm->mmap_sem);
        for (vma = mm->mmap; vma; vma = vma->vm_next)
                mpol_rebind_policy(vma->vm_policy, new);
        up_write(&mm->mmap_sem);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
        [MPOL_DEFAULT] = {
                .rebind = mpol_rebind_default,
        },
        [MPOL_INTERLEAVE] = {
                .create = mpol_new_interleave,
                .rebind = mpol_rebind_nodemask,
        },
        [MPOL_PREFERRED] = {
                .create = mpol_new_preferred,
                .rebind = mpol_rebind_preferred,
        },
        [MPOL_BIND] = {
                .create = mpol_new_bind,
                .rebind = mpol_rebind_nodemask,
        },
};

static int migrate_page_add(struct page *page, struct list_head *pagelist,
                                unsigned long flags);

struct queue_pages {
        struct list_head *pagelist;
        unsigned long flags;
        nodemask_t *nmask;
        struct vm_area_struct *prev;
};
/*
 * Check if the page's nid is in qp->nmask.
 *
 * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
 * in the invert of qp->nmask.
 */
static inline bool queue_pages_required(struct page *page,
                                        struct queue_pages *qp)
{
        int nid = page_to_nid(page);
        unsigned long flags = qp->flags;

        return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
}

/*
 * queue_pages_pmd() has four possible return values:
 * 0 - pages are placed on the right node or queued successfully.
 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * 2 - THP was split.
 * -EIO - the pmd is a migration entry, or only MPOL_MF_STRICT was specified
 *        and an existing page was already on a node that does not follow
 *        the policy.
 */
static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
                                unsigned long end, struct mm_walk *walk)
{
        int ret = 0;
        struct page *page;
        struct queue_pages *qp = walk->private;
        unsigned long flags;

        if (unlikely(is_pmd_migration_entry(*pmd))) {
                ret = -EIO;
                goto unlock;
        }
        page = pmd_page(*pmd);
        if (is_huge_zero_page(page)) {
                spin_unlock(ptl);
                __split_huge_pmd(walk->vma, pmd, addr, false, NULL);
                ret = 2;
                goto out;
        }
        if (!queue_pages_required(page, qp))
                goto unlock;

        flags = qp->flags;
        /* go to thp migration */
        if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
                if (!vma_migratable(walk->vma) ||
                    migrate_page_add(page, qp->pagelist, flags)) {
                        ret = 1;
                        goto unlock;
                }
        } else
                ret = -EIO;
unlock:
        spin_unlock(ptl);
out:
        return ret;
}

/*
 * Scan through pages checking if pages follow certain conditions,
 * and move them to the pagelist if they do.
 *
 * queue_pages_pte_range() has three possible return values:
 * 0 - pages are placed on the right node or queued successfully.
 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
 *        on a node that does not follow the policy.
 */
static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
                        unsigned long end, struct mm_walk *walk)
{
        struct vm_area_struct *vma = walk->vma;
        struct page *page;
        struct queue_pages *qp = walk->private;
        unsigned long flags = qp->flags;
        int ret;
        bool has_unmovable = false;
        pte_t *pte;
        spinlock_t *ptl;

        ptl = pmd_trans_huge_lock(pmd, vma);
        if (ptl) {
                ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
                if (ret != 2)
                        return ret;
        }
        /* THP was split, fall through to pte walk */

        if (pmd_trans_unstable(pmd))
                return 0;

        pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
        for (; addr != end; pte++, addr += PAGE_SIZE) {
                if (!pte_present(*pte))
                        continue;
                page = vm_normal_page(vma, addr, *pte);
                if (!page)
                        continue;
                /*
                 * vm_normal_page() filters out zero pages, but there might
                 * still be PageReserved pages to skip, perhaps in a VDSO.
                 */
                if (PageReserved(page))
                        continue;
                if (!queue_pages_required(page, qp))
                        continue;
                if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
                        /* MPOL_MF_STRICT must be specified if we get here */
                        if (!vma_migratable(vma)) {
                                has_unmovable = true;
                                break;
                        }

                        /*
                         * Do not abort immediately since there may be
                         * temporary off LRU pages in the range. Still
                         * need to migrate other LRU pages.
                         */
                        if (migrate_page_add(page, qp->pagelist, flags))
                                has_unmovable = true;
                } else
                        break;
        }
        pte_unmap_unlock(pte - 1, ptl);
        cond_resched();

        if (has_unmovable)
                return 1;

        return addr != end ? -EIO : 0;
}
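
/*
 * Illustration of the queue_pages_required() test used above (true means
 * the page matches the request and is a queueing candidate), assuming
 * *qp->nmask = {1}:
 *
 *      page on node 1, !MPOL_MF_INVERT -> true
 *      page on node 0, !MPOL_MF_INVERT -> false
 *      page on node 1,  MPOL_MF_INVERT -> false
 *      page on node 0,  MPOL_MF_INVERT -> true
 */
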
static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
                               unsigned long addr, unsigned long end,
                               struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
        struct queue_pages *qp = walk->private;
        unsigned long flags = qp->flags;
        struct page *page;
        spinlock_t *ptl;
        pte_t entry;

        ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
        entry = huge_ptep_get(pte);
        if (!pte_present(entry))
                goto unlock;
        page = pte_page(entry);
        if (!queue_pages_required(page, qp))
                goto unlock;
        /* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
        if (flags & (MPOL_MF_MOVE_ALL) ||
            (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
                isolate_huge_page(page, qp->pagelist);
unlock:
        spin_unlock(ptl);
#else
        BUG();
#endif
        return 0;
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * This is used to mark a range of virtual addresses to be inaccessible.
 * These are later cleared by a NUMA hinting fault. Depending on these
 * faults, pages may be migrated for better NUMA placement.
 *
 * This is assuming that NUMA faults are handled using PROT_NONE. If
 * an architecture makes a different choice, it will need further
 * changes to the core.
 */
unsigned long change_prot_numa(struct vm_area_struct *vma,
                        unsigned long addr, unsigned long end)
{
        int nr_updated;

        nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
        if (nr_updated)
                count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);

        return nr_updated;
}
#else
static unsigned long change_prot_numa(struct vm_area_struct *vma,
                        unsigned long addr, unsigned long end)
{
        return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

static int queue_pages_test_walk(unsigned long start, unsigned long end,
                                struct mm_walk *walk)
{
        struct vm_area_struct *vma = walk->vma;
        struct queue_pages *qp = walk->private;
        unsigned long endvma = vma->vm_end;
        unsigned long flags = qp->flags;

        /*
         * Need to check MPOL_MF_STRICT to return -EIO if possible,
         * regardless of vma_migratable().
         */
        if (!vma_migratable(vma) &&
            !(flags & MPOL_MF_STRICT))
                return 1;

        if (endvma > end)
                endvma = end;
        if (vma->vm_start > start)
                start = vma->vm_start;

        if (!(flags & MPOL_MF_DISCONTIG_OK)) {
                if (!vma->vm_next && vma->vm_end < end)
                        return -EFAULT;
                if (qp->prev && qp->prev->vm_end < vma->vm_start)
                        return -EFAULT;
        }

        qp->prev = vma;

        if (flags & MPOL_MF_LAZY) {
                /* Similar to task_numa_work, skip inaccessible VMAs */
                if (!is_vm_hugetlb_page(vma) &&
                        (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) &&
                        !(vma->vm_flags & VM_MIXEDMAP))
                        change_prot_numa(vma, start, endvma);
                return 1;
        }

        /* queue pages from current vma */
        if (flags & MPOL_MF_VALID)
                return 0;
        return 1;
}

static const struct mm_walk_ops queue_pages_walk_ops = {
        .hugetlb_entry          = queue_pages_hugetlb,
        .pmd_entry              = queue_pages_pte_range,
        .test_walk              = queue_pages_test_walk,
};
/*
 * Walk through page tables and collect pages to be migrated.
 *
 * Pages found in the given range that are on the set of nodes determined
 * by @nodes and @flags are isolated and queued on @pagelist.
 *
 * queue_pages_range() has three possible return values:
 * 1 - there is an unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * 0 - queue pages successfully or no misplaced page.
 * errno - i.e. misplaced pages with MPOL_MF_STRICT specified (-EIO) or
 *         memory range specified by nodemask and maxnode points outside
 *         your accessible address space (-EFAULT)
 */
static int
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
                nodemask_t *nodes, unsigned long flags,
                struct list_head *pagelist)
{
        struct queue_pages qp = {
                .pagelist = pagelist,
                .flags = flags,
                .nmask = nodes,
                .prev = NULL,
        };

        return walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);
}

/*
 * Apply policy to a single VMA
 * This must be called with the mmap_sem held for writing.
 */
static int vma_replace_policy(struct vm_area_struct *vma,
                                                struct mempolicy *pol)
{
        int err;
        struct mempolicy *old;
        struct mempolicy *new;

        pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
                 vma->vm_start, vma->vm_end, vma->vm_pgoff,
                 vma->vm_ops, vma->vm_file,
                 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

        new = mpol_dup(pol);
        if (IS_ERR(new))
                return PTR_ERR(new);

        if (vma->vm_ops && vma->vm_ops->set_policy) {
                err = vma->vm_ops->set_policy(vma, new);
                if (err)
                        goto err_out;
        }

        old = vma->vm_policy;
        vma->vm_policy = new; /* protected by mmap_sem */
        mpol_put(old);

        return 0;
 err_out:
        mpol_put(new);
        return err;
}
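
/*
 * Example of what mbind_range() below does (a sketch of behaviour, not a
 * guarantee of the resulting vma layout): applying a policy to
 * [start, end) that lies strictly inside one vma [A, B) splits twice,
 *
 *      [A, B)  ->  [A, start) [start, end) [end, B)
 *
 * and only the middle vma gets the new policy via vma_replace_policy();
 * vma_merge() re-joins neighbours whenever adjacent vmas end up with
 * equal policies.
 */
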
/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
                       unsigned long end, struct mempolicy *new_pol)
{
        struct vm_area_struct *next;
        struct vm_area_struct *prev;
        struct vm_area_struct *vma;
        int err = 0;
        pgoff_t pgoff;
        unsigned long vmstart;
        unsigned long vmend;

        vma = find_vma(mm, start);
        if (!vma || vma->vm_start > start)
                return -EFAULT;

        prev = vma->vm_prev;
        if (start > vma->vm_start)
                prev = vma;

        for (; vma && vma->vm_start < end; prev = vma, vma = next) {
                next = vma->vm_next;
                vmstart = max(start, vma->vm_start);
                vmend = min(end, vma->vm_end);

                if (mpol_equal(vma_policy(vma), new_pol))
                        continue;

                pgoff = vma->vm_pgoff +
                        ((vmstart - vma->vm_start) >> PAGE_SHIFT);
                prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
                                 vma->anon_vma, vma->vm_file, pgoff,
                                 new_pol, vma->vm_userfaultfd_ctx);
                if (prev) {
                        vma = prev;
                        next = vma->vm_next;
                        if (mpol_equal(vma_policy(vma), new_pol))
                                continue;
                        /* vma_merge() joined vma && vma->next, case 8 */
                        goto replace;
                }
                if (vma->vm_start != vmstart) {
                        err = split_vma(vma->vm_mm, vma, vmstart, 1);
                        if (err)
                                goto out;
                }
                if (vma->vm_end != vmend) {
                        err = split_vma(vma->vm_mm, vma, vmend, 0);
                        if (err)
                                goto out;
                }
 replace:
                err = vma_replace_policy(vma, new_pol);
                if (err)
                        goto out;
        }

 out:
        return err;
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
                             nodemask_t *nodes)
{
        struct mempolicy *new, *old;
        NODEMASK_SCRATCH(scratch);
        int ret;

        if (!scratch)
                return -ENOMEM;

        new = mpol_new(mode, flags, nodes);
        if (IS_ERR(new)) {
                ret = PTR_ERR(new);
                goto out;
        }

        task_lock(current);
        ret = mpol_set_nodemask(new, nodes, scratch);
        if (ret) {
                task_unlock(current);
                mpol_put(new);
                goto out;
        }
        old = current->mempolicy;
        current->mempolicy = new;
        if (new && new->mode == MPOL_INTERLEAVE)
                current->il_prev = MAX_NUMNODES-1;
        task_unlock(current);
        mpol_put(old);
        ret = 0;
out:
        NODEMASK_SCRATCH_FREE(scratch);
        return ret;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
        nodes_clear(*nodes);
        if (p == &default_policy)
                return;

        switch (p->mode) {
        case MPOL_BIND:
                /* Fall through */
        case MPOL_INTERLEAVE:
                *nodes = p->v.nodes;
                break;
        case MPOL_PREFERRED:
                if (!(p->flags & MPOL_F_LOCAL))
                        node_set(p->v.preferred_node, *nodes);
                /* else return empty node mask for local allocation */
                break;
        default:
                BUG();
        }
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
        struct page *p;
        int err;

        int locked = 1;
        err = get_user_pages_locked(addr & PAGE_MASK, 1, 0, &p, &locked);
        if (err >= 0) {
                err = page_to_nid(p);
                put_page(p);
        }
        if (locked)
                up_read(&mm->mmap_sem);
        return err;
}
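
/*
 * Userspace view of the query below (illustrative; 'addr' is an assumed
 * mapped address):
 *
 *      int node;
 *      // which node backs the page at addr?
 *      get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR);
 *      // which nodes may the calling task allocate from?
 *      unsigned long mask;
 *      get_mempolicy(NULL, &mask, sizeof(mask) * 8, NULL,
 *                    MPOL_F_MEMS_ALLOWED);
 */
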
/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
                             unsigned long addr, unsigned long flags)
{
        int err;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma = NULL;
        struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;

        if (flags &
                ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
                return -EINVAL;

        if (flags & MPOL_F_MEMS_ALLOWED) {
                if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
                        return -EINVAL;
                *policy = 0;    /* just so it's initialized */
                task_lock(current);
                *nmask = cpuset_current_mems_allowed;
                task_unlock(current);
                return 0;
        }

        if (flags & MPOL_F_ADDR) {
                /*
                 * Do NOT fall back to task policy if the
                 * vma/shared policy at addr is NULL. We
                 * want to return MPOL_DEFAULT in this case.
                 */
                down_read(&mm->mmap_sem);
                vma = find_vma_intersection(mm, addr, addr+1);
                if (!vma) {
                        up_read(&mm->mmap_sem);
                        return -EFAULT;
                }
                if (vma->vm_ops && vma->vm_ops->get_policy)
                        pol = vma->vm_ops->get_policy(vma, addr);
                else
                        pol = vma->vm_policy;
        } else if (addr)
                return -EINVAL;

        if (!pol)
                pol = &default_policy;  /* indicates default behavior */

        if (flags & MPOL_F_NODE) {
                if (flags & MPOL_F_ADDR) {
                        /*
                         * Take a refcount on the mpol, lookup_node()
                         * will drop the mmap_sem, so after calling
                         * lookup_node() only "pol" remains valid, "vma"
                         * is stale.
                         */
                        pol_refcount = pol;
                        vma = NULL;
                        mpol_get(pol);
                        err = lookup_node(mm, addr);
                        if (err < 0)
                                goto out;
                        *policy = err;
                } else if (pol == current->mempolicy &&
                                pol->mode == MPOL_INTERLEAVE) {
                        *policy = next_node_in(current->il_prev, pol->v.nodes);
                } else {
                        err = -EINVAL;
                        goto out;
                }
        } else {
                *policy = pol == &default_policy ? MPOL_DEFAULT :
                                                pol->mode;
                /*
                 * Internal mempolicy flags must be masked off before exposing
                 * the policy to userspace.
                 */
                *policy |= (pol->flags & MPOL_MODE_FLAGS);
        }

        err = 0;
        if (nmask) {
                if (mpol_store_user_nodemask(pol)) {
                        *nmask = pol->w.user_nodemask;
                } else {
                        task_lock(current);
                        get_policy_nodemask(pol, nmask);
                        task_unlock(current);
                }
        }

 out:
        mpol_cond_put(pol);
        if (vma)
                up_read(&mm->mmap_sem);
        if (pol_refcount)
                mpol_put(pol_refcount);
        return err;
}

#ifdef CONFIG_MIGRATION
/*
 * page migration, thp tail pages can be passed.
 */
static int migrate_page_add(struct page *page, struct list_head *pagelist,
                                unsigned long flags)
{
        struct page *head = compound_head(page);
        /*
         * Avoid migrating a page that is shared with others.
         */
        if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
                if (!isolate_lru_page(head)) {
                        list_add_tail(&head->lru, pagelist);
                        mod_node_page_state(page_pgdat(head),
                                NR_ISOLATED_ANON + page_is_file_cache(head),
                                hpage_nr_pages(head));
                } else if (flags & MPOL_MF_STRICT) {
                        /*
                         * Non-movable page may reach here. And, there may be
                         * temporary off LRU pages or non-LRU movable pages.
                         * Treat them as unmovable pages since they can't be
                         * isolated, so they can't be moved at the moment. It
                         * should return -EIO for this case too.
                         */
                        return -EIO;
                }
        }

        return 0;
}

/* page allocation callback for NUMA node migration */
struct page *alloc_new_node_page(struct page *page, unsigned long node)
{
        if (PageHuge(page))
                return alloc_huge_page_node(page_hstate(compound_head(page)),
                                        node);
        else if (PageTransHuge(page)) {
                struct page *thp;

                thp = alloc_pages_node(node,
                        (GFP_TRANSHUGE | __GFP_THISNODE),
                        HPAGE_PMD_ORDER);
                if (!thp)
                        return NULL;
                prep_transhuge_page(thp);
                return thp;
        } else
                return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
                                                    __GFP_THISNODE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
                           int flags)
{
        nodemask_t nmask;
        LIST_HEAD(pagelist);
        int err = 0;

        nodes_clear(nmask);
        node_set(source, nmask);

        /*
         * This does not "check" the range but isolates all pages that
         * need migration. Between passing in the full user address
         * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
         */
        VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
        queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
                        flags | MPOL_MF_DISCONTIG_OK, &pagelist);

        if (!list_empty(&pagelist)) {
                err = migrate_pages(&pagelist, alloc_new_node_page, NULL, dest,
                                        MIGRATE_SYNC, MR_SYSCALL);
                if (err)
                        putback_movable_pages(&pagelist);
        }

        return err;
}
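
/*
 * For orientation, migrate_to_node() above is the whole single-node
 * migration pipeline in two calls (shown schematically):
 *
 *      queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
 *                        flags | MPOL_MF_DISCONTIG_OK, &pagelist);
 *      migrate_pages(&pagelist, alloc_new_node_page, NULL, dest,
 *                    MIGRATE_SYNC, MR_SYSCALL);
 *
 * alloc_new_node_page() serves as the new_page_t callback that sources
 * each replacement page from the destination node.
 */
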
/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
                     const nodemask_t *to, int flags)
{
        int busy = 0;
        int err;
        nodemask_t tmp;

        err = migrate_prep();
        if (err)
                return err;

        down_read(&mm->mmap_sem);

        /*
         * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
         * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
         * bit in 'tmp', and return that <source, dest> pair for migration.
         * The pair of nodemasks 'to' and 'from' define the map.
         *
         * If no pair of bits is found that way, fall back to picking some
         * pair of 'source' and 'dest' bits that are not the same. If the
         * 'source' and 'dest' bits are the same, this represents a node
         * that will be migrating to itself, so no pages need move.
         *
         * If no bits are left in 'tmp', or if all remaining bits left
         * in 'tmp' correspond to the same bit in 'to', return false
         * (nothing left to migrate).
         *
         * This lets us pick a pair of nodes to migrate between, such that
         * if possible the dest node is not already occupied by some other
         * source node, minimizing the risk of overloading the memory on a
         * node that would happen if we migrated incoming memory to a node
         * before migrating outgoing memory from that same node.
         *
         * A single scan of tmp is sufficient. As we go, we remember the
         * most recent <s, d> pair that moved (s != d). If we find a pair
         * that not only moved, but what's better, moved to an empty slot
         * (d is not set in tmp), then we break out then, with that pair.
         * Otherwise when we finish scanning tmp, we at least have the
         * most recent <s, d> pair that moved. If we get all the way through
         * the scan of tmp without finding any node that moved, much less
         * moved to an empty node, then there is nothing left worth migrating.
11017e2ab150SChristoph Lameter */
11027e2ab150SChristoph Lameter 
11030ce72d4fSAndrew Morton tmp = *from;
11047e2ab150SChristoph Lameter while (!nodes_empty(tmp)) {
11057e2ab150SChristoph Lameter int s, d;
1106b76ac7e7SJianguo Wu int source = NUMA_NO_NODE;
11077e2ab150SChristoph Lameter int dest = 0;
11087e2ab150SChristoph Lameter 
11097e2ab150SChristoph Lameter for_each_node_mask(s, tmp) {
11104a5b18ccSLarry Woodman 
11114a5b18ccSLarry Woodman /*
11124a5b18ccSLarry Woodman * do_migrate_pages() tries to maintain the relative
11134a5b18ccSLarry Woodman * node relationship of the pages established between
11144a5b18ccSLarry Woodman * threads and memory areas.
11154a5b18ccSLarry Woodman *
11164a5b18ccSLarry Woodman * However if the number of source nodes is not equal to
11174a5b18ccSLarry Woodman * the number of destination nodes we cannot preserve
11184a5b18ccSLarry Woodman * this node relative relationship. In that case, skip
11194a5b18ccSLarry Woodman * copying memory from a node that is in the destination
11204a5b18ccSLarry Woodman * mask.
11214a5b18ccSLarry Woodman *
11224a5b18ccSLarry Woodman * Example: [2,3,4] -> [3,4,5] moves everything.
11234a5b18ccSLarry Woodman * [0-7] -> [3,4,5] moves only 0,1,2,6,7.
11244a5b18ccSLarry Woodman */
11254a5b18ccSLarry Woodman 
11260ce72d4fSAndrew Morton if ((nodes_weight(*from) != nodes_weight(*to)) &&
11270ce72d4fSAndrew Morton (node_isset(s, *to)))
11284a5b18ccSLarry Woodman continue;
11294a5b18ccSLarry Woodman 
11300ce72d4fSAndrew Morton d = node_remap(s, *from, *to);
11317e2ab150SChristoph Lameter if (s == d)
11327e2ab150SChristoph Lameter continue;
11337e2ab150SChristoph Lameter 
11347e2ab150SChristoph Lameter source = s; /* Node moved. Memorize */
11357e2ab150SChristoph Lameter dest = d;
11367e2ab150SChristoph Lameter 
11377e2ab150SChristoph Lameter /* dest not in remaining from nodes? */
11387e2ab150SChristoph Lameter if (!node_isset(dest, tmp))
11397e2ab150SChristoph Lameter break;
11407e2ab150SChristoph Lameter }
1141b76ac7e7SJianguo Wu if (source == NUMA_NO_NODE)
11427e2ab150SChristoph Lameter break;
11437e2ab150SChristoph Lameter 
11447e2ab150SChristoph Lameter node_clear(source, tmp);
11457e2ab150SChristoph Lameter err = migrate_to_node(mm, source, dest, flags);
11467e2ab150SChristoph Lameter if (err > 0)
11477e2ab150SChristoph Lameter busy += err;
11487e2ab150SChristoph Lameter if (err < 0)
11497e2ab150SChristoph Lameter break;
115039743889SChristoph Lameter }
115139743889SChristoph Lameter up_read(&mm->mmap_sem);
11527e2ab150SChristoph Lameter if (err < 0)
11537e2ab150SChristoph Lameter return err;
11547e2ab150SChristoph Lameter return busy;
1155b20a3503SChristoph Lameter 
115639743889SChristoph Lameter }
115739743889SChristoph Lameter 
11583ad33b24SLee Schermerhorn /*
11593ad33b24SLee Schermerhorn * Allocate a new page for page migration based on vma policy.
1160d05f0cdcSHugh Dickins * Start by assuming the page is mapped by the same vma as contains @start.
11613ad33b24SLee Schermerhorn * Search forward from there, if not. N.B., this assumes that the
11623ad33b24SLee Schermerhorn * list of pages handed to migrate_pages()--which is how we get here--
11633ad33b24SLee Schermerhorn * is in virtual address order.
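 *
 * Illustrative note (added, not in the original comment): if @start
 * lies in a VMA covering [0x1000, 0x5000) but the page is actually
 * mapped at 0x8000 in the next VMA, page_address_in_vma() returns
 * -EFAULT for the first VMA and the loop walks vma->vm_next until
 * the address resolves.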
11643ad33b24SLee Schermerhorn */ 1165666feb21SMichal Hocko static struct page *new_page(struct page *page, unsigned long start) 116695a402c3SChristoph Lameter { 1167d05f0cdcSHugh Dickins struct vm_area_struct *vma; 11683ad33b24SLee Schermerhorn unsigned long uninitialized_var(address); 116995a402c3SChristoph Lameter 1170d05f0cdcSHugh Dickins vma = find_vma(current->mm, start); 11713ad33b24SLee Schermerhorn while (vma) { 11723ad33b24SLee Schermerhorn address = page_address_in_vma(page, vma); 11733ad33b24SLee Schermerhorn if (address != -EFAULT) 11743ad33b24SLee Schermerhorn break; 11753ad33b24SLee Schermerhorn vma = vma->vm_next; 11763ad33b24SLee Schermerhorn } 11773ad33b24SLee Schermerhorn 117811c731e8SWanpeng Li if (PageHuge(page)) { 1179389c8178SMichal Hocko return alloc_huge_page_vma(page_hstate(compound_head(page)), 1180389c8178SMichal Hocko vma, address); 118194723aafSMichal Hocko } else if (PageTransHuge(page)) { 1182c8633798SNaoya Horiguchi struct page *thp; 1183c8633798SNaoya Horiguchi 118419deb769SDavid Rientjes thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address, 118519deb769SDavid Rientjes HPAGE_PMD_ORDER); 1186c8633798SNaoya Horiguchi if (!thp) 1187c8633798SNaoya Horiguchi return NULL; 1188c8633798SNaoya Horiguchi prep_transhuge_page(thp); 1189c8633798SNaoya Horiguchi return thp; 119011c731e8SWanpeng Li } 119111c731e8SWanpeng Li /* 119211c731e8SWanpeng Li * if !vma, alloc_page_vma() will use task or system default policy 119311c731e8SWanpeng Li */ 11940f556856SMichal Hocko return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL, 11950f556856SMichal Hocko vma, address); 119695a402c3SChristoph Lameter } 1197b20a3503SChristoph Lameter #else 1198b20a3503SChristoph Lameter 1199a53190a4SYang Shi static int migrate_page_add(struct page *page, struct list_head *pagelist, 1200b20a3503SChristoph Lameter unsigned long flags) 1201b20a3503SChristoph Lameter { 1202a53190a4SYang Shi return -EIO; 1203b20a3503SChristoph Lameter } 1204b20a3503SChristoph Lameter 12050ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, 12060ce72d4fSAndrew Morton const nodemask_t *to, int flags) 1207b20a3503SChristoph Lameter { 1208b20a3503SChristoph Lameter return -ENOSYS; 1209b20a3503SChristoph Lameter } 121095a402c3SChristoph Lameter 1211666feb21SMichal Hocko static struct page *new_page(struct page *page, unsigned long start) 121295a402c3SChristoph Lameter { 121395a402c3SChristoph Lameter return NULL; 121495a402c3SChristoph Lameter } 1215b20a3503SChristoph Lameter #endif 1216b20a3503SChristoph Lameter 1217dbcb0f19SAdrian Bunk static long do_mbind(unsigned long start, unsigned long len, 1218028fec41SDavid Rientjes unsigned short mode, unsigned short mode_flags, 1219028fec41SDavid Rientjes nodemask_t *nmask, unsigned long flags) 12206ce3c4c0SChristoph Lameter { 12216ce3c4c0SChristoph Lameter struct mm_struct *mm = current->mm; 12226ce3c4c0SChristoph Lameter struct mempolicy *new; 12236ce3c4c0SChristoph Lameter unsigned long end; 12246ce3c4c0SChristoph Lameter int err; 1225d8835445SYang Shi int ret; 12266ce3c4c0SChristoph Lameter LIST_HEAD(pagelist); 12276ce3c4c0SChristoph Lameter 1228b24f53a0SLee Schermerhorn if (flags & ~(unsigned long)MPOL_MF_VALID) 12296ce3c4c0SChristoph Lameter return -EINVAL; 123074c00241SChristoph Lameter if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE)) 12316ce3c4c0SChristoph Lameter return -EPERM; 12326ce3c4c0SChristoph Lameter 12336ce3c4c0SChristoph Lameter if (start & ~PAGE_MASK) 12346ce3c4c0SChristoph Lameter return -EINVAL; 
12356ce3c4c0SChristoph Lameter 12366ce3c4c0SChristoph Lameter if (mode == MPOL_DEFAULT) 12376ce3c4c0SChristoph Lameter flags &= ~MPOL_MF_STRICT; 12386ce3c4c0SChristoph Lameter 12396ce3c4c0SChristoph Lameter len = (len + PAGE_SIZE - 1) & PAGE_MASK; 12406ce3c4c0SChristoph Lameter end = start + len; 12416ce3c4c0SChristoph Lameter 12426ce3c4c0SChristoph Lameter if (end < start) 12436ce3c4c0SChristoph Lameter return -EINVAL; 12446ce3c4c0SChristoph Lameter if (end == start) 12456ce3c4c0SChristoph Lameter return 0; 12466ce3c4c0SChristoph Lameter 1247028fec41SDavid Rientjes new = mpol_new(mode, mode_flags, nmask); 12486ce3c4c0SChristoph Lameter if (IS_ERR(new)) 12496ce3c4c0SChristoph Lameter return PTR_ERR(new); 12506ce3c4c0SChristoph Lameter 1251b24f53a0SLee Schermerhorn if (flags & MPOL_MF_LAZY) 1252b24f53a0SLee Schermerhorn new->flags |= MPOL_F_MOF; 1253b24f53a0SLee Schermerhorn 12546ce3c4c0SChristoph Lameter /* 12556ce3c4c0SChristoph Lameter * If we are using the default policy then operation 12566ce3c4c0SChristoph Lameter * on discontinuous address spaces is okay after all 12576ce3c4c0SChristoph Lameter */ 12586ce3c4c0SChristoph Lameter if (!new) 12596ce3c4c0SChristoph Lameter flags |= MPOL_MF_DISCONTIG_OK; 12606ce3c4c0SChristoph Lameter 1261028fec41SDavid Rientjes pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n", 1262028fec41SDavid Rientjes start, start + len, mode, mode_flags, 126300ef2d2fSDavid Rientjes nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE); 12646ce3c4c0SChristoph Lameter 12650aedadf9SChristoph Lameter if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 12660aedadf9SChristoph Lameter 12670aedadf9SChristoph Lameter err = migrate_prep(); 12680aedadf9SChristoph Lameter if (err) 1269b05ca738SKOSAKI Motohiro goto mpol_out; 12700aedadf9SChristoph Lameter } 12714bfc4495SKAMEZAWA Hiroyuki { 12724bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 12734bfc4495SKAMEZAWA Hiroyuki if (scratch) { 12746ce3c4c0SChristoph Lameter down_write(&mm->mmap_sem); 127558568d2aSMiao Xie task_lock(current); 12764bfc4495SKAMEZAWA Hiroyuki err = mpol_set_nodemask(new, nmask, scratch); 127758568d2aSMiao Xie task_unlock(current); 12784bfc4495SKAMEZAWA Hiroyuki if (err) 127958568d2aSMiao Xie up_write(&mm->mmap_sem); 12804bfc4495SKAMEZAWA Hiroyuki } else 12814bfc4495SKAMEZAWA Hiroyuki err = -ENOMEM; 12824bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 12834bfc4495SKAMEZAWA Hiroyuki } 1284b05ca738SKOSAKI Motohiro if (err) 1285b05ca738SKOSAKI Motohiro goto mpol_out; 1286b05ca738SKOSAKI Motohiro 1287d8835445SYang Shi ret = queue_pages_range(mm, start, end, nmask, 12886ce3c4c0SChristoph Lameter flags | MPOL_MF_INVERT, &pagelist); 1289d8835445SYang Shi 1290d8835445SYang Shi if (ret < 0) { 1291*a85dfc30SYang Shi err = ret; 1292d8835445SYang Shi goto up_out; 1293d8835445SYang Shi } 1294d8835445SYang Shi 12959d8cebd4SKOSAKI Motohiro err = mbind_range(mm, start, end, new); 12967e2ab150SChristoph Lameter 1297b24f53a0SLee Schermerhorn if (!err) { 1298b24f53a0SLee Schermerhorn int nr_failed = 0; 1299b24f53a0SLee Schermerhorn 1300cf608ac1SMinchan Kim if (!list_empty(&pagelist)) { 1301b24f53a0SLee Schermerhorn WARN_ON_ONCE(flags & MPOL_MF_LAZY); 1302d05f0cdcSHugh Dickins nr_failed = migrate_pages(&pagelist, new_page, NULL, 1303d05f0cdcSHugh Dickins start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND); 1304cf608ac1SMinchan Kim if (nr_failed) 130574060e4dSNaoya Horiguchi putback_movable_pages(&pagelist); 1306cf608ac1SMinchan Kim } 13076ce3c4c0SChristoph Lameter 1308d8835445SYang Shi if ((ret > 0) || (nr_failed && (flags & 
MPOL_MF_STRICT)))
13096ce3c4c0SChristoph Lameter err = -EIO;
1310*a85dfc30SYang Shi } else {
1311d8835445SYang Shi up_out:
1312*a85dfc30SYang Shi if (!list_empty(&pagelist))
1313*a85dfc30SYang Shi putback_movable_pages(&pagelist);
1314*a85dfc30SYang Shi }
1315*a85dfc30SYang Shi 
13166ce3c4c0SChristoph Lameter up_write(&mm->mmap_sem);
1317b05ca738SKOSAKI Motohiro mpol_out:
1318f0be3d32SLee Schermerhorn mpol_put(new);
13196ce3c4c0SChristoph Lameter return err;
13206ce3c4c0SChristoph Lameter }
13216ce3c4c0SChristoph Lameter 
132239743889SChristoph Lameter /*
13238bccd85fSChristoph Lameter * User space interface with variable sized bitmaps for nodelists.
13248bccd85fSChristoph Lameter */
13258bccd85fSChristoph Lameter 
13268bccd85fSChristoph Lameter /* Copy a node mask from user space. */
132739743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
13288bccd85fSChristoph Lameter unsigned long maxnode)
13298bccd85fSChristoph Lameter {
13308bccd85fSChristoph Lameter unsigned long k;
133156521e7aSYisheng Xie unsigned long t;
13328bccd85fSChristoph Lameter unsigned long nlongs;
13338bccd85fSChristoph Lameter unsigned long endmask;
13348bccd85fSChristoph Lameter 
13358bccd85fSChristoph Lameter --maxnode;
13368bccd85fSChristoph Lameter nodes_clear(*nodes);
13378bccd85fSChristoph Lameter if (maxnode == 0 || !nmask)
13388bccd85fSChristoph Lameter return 0;
1339a9c930baSAndi Kleen if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1340636f13c1SChris Wright return -EINVAL;
13418bccd85fSChristoph Lameter 
13428bccd85fSChristoph Lameter nlongs = BITS_TO_LONGS(maxnode);
13438bccd85fSChristoph Lameter if ((maxnode % BITS_PER_LONG) == 0)
13448bccd85fSChristoph Lameter endmask = ~0UL;
13458bccd85fSChristoph Lameter else
13468bccd85fSChristoph Lameter endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
13478bccd85fSChristoph Lameter 
134856521e7aSYisheng Xie /*
134956521e7aSYisheng Xie * When the user specifies more nodes than supported, just check
135056521e7aSYisheng Xie * if the non supported part is all zero.
135156521e7aSYisheng Xie *
135256521e7aSYisheng Xie * If maxnode has more longs than MAX_NUMNODES, check
135356521e7aSYisheng Xie * the bits in that area first. And then go through to
135456521e7aSYisheng Xie * check the remaining bits, which are equal to or bigger than
135556521e7aSYisheng Xie * MAX_NUMNODES. Otherwise, just check bits [MAX_NUMNODES, maxnode).
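 *
 * Worked example (added for illustration, assuming a config with
 * MAX_NUMNODES == 64): a user maxnode of 129 becomes 128 after the
 * decrement, giving nlongs == 2 > BITS_TO_LONGS(64) == 1. The loop
 * below then reads the second user word and returns -EINVAL if any
 * bit for the unsupported nodes 64..127 is set; otherwise nlongs and
 * endmask are clamped back to the supported range.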
135656521e7aSYisheng Xie */ 13578bccd85fSChristoph Lameter if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) { 13588bccd85fSChristoph Lameter for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) { 13598bccd85fSChristoph Lameter if (get_user(t, nmask + k)) 13608bccd85fSChristoph Lameter return -EFAULT; 13618bccd85fSChristoph Lameter if (k == nlongs - 1) { 13628bccd85fSChristoph Lameter if (t & endmask) 13638bccd85fSChristoph Lameter return -EINVAL; 13648bccd85fSChristoph Lameter } else if (t) 13658bccd85fSChristoph Lameter return -EINVAL; 13668bccd85fSChristoph Lameter } 13678bccd85fSChristoph Lameter nlongs = BITS_TO_LONGS(MAX_NUMNODES); 13688bccd85fSChristoph Lameter endmask = ~0UL; 13698bccd85fSChristoph Lameter } 13708bccd85fSChristoph Lameter 137156521e7aSYisheng Xie if (maxnode > MAX_NUMNODES && MAX_NUMNODES % BITS_PER_LONG != 0) { 137256521e7aSYisheng Xie unsigned long valid_mask = endmask; 137356521e7aSYisheng Xie 137456521e7aSYisheng Xie valid_mask &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1); 137556521e7aSYisheng Xie if (get_user(t, nmask + nlongs - 1)) 137656521e7aSYisheng Xie return -EFAULT; 137756521e7aSYisheng Xie if (t & valid_mask) 137856521e7aSYisheng Xie return -EINVAL; 137956521e7aSYisheng Xie } 138056521e7aSYisheng Xie 13818bccd85fSChristoph Lameter if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long))) 13828bccd85fSChristoph Lameter return -EFAULT; 13838bccd85fSChristoph Lameter nodes_addr(*nodes)[nlongs-1] &= endmask; 13848bccd85fSChristoph Lameter return 0; 13858bccd85fSChristoph Lameter } 13868bccd85fSChristoph Lameter 13878bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */ 13888bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode, 13898bccd85fSChristoph Lameter nodemask_t *nodes) 13908bccd85fSChristoph Lameter { 13918bccd85fSChristoph Lameter unsigned long copy = ALIGN(maxnode-1, 64) / 8; 1392050c17f2SRalph Campbell unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long); 13938bccd85fSChristoph Lameter 13948bccd85fSChristoph Lameter if (copy > nbytes) { 13958bccd85fSChristoph Lameter if (copy > PAGE_SIZE) 13968bccd85fSChristoph Lameter return -EINVAL; 13978bccd85fSChristoph Lameter if (clear_user((char __user *)mask + nbytes, copy - nbytes)) 13988bccd85fSChristoph Lameter return -EFAULT; 13998bccd85fSChristoph Lameter copy = nbytes; 14008bccd85fSChristoph Lameter } 14018bccd85fSChristoph Lameter return copy_to_user(mask, nodes_addr(*nodes), copy) ? 
-EFAULT : 0; 14028bccd85fSChristoph Lameter } 14038bccd85fSChristoph Lameter 1404e7dc9ad6SDominik Brodowski static long kernel_mbind(unsigned long start, unsigned long len, 1405e7dc9ad6SDominik Brodowski unsigned long mode, const unsigned long __user *nmask, 1406e7dc9ad6SDominik Brodowski unsigned long maxnode, unsigned int flags) 14078bccd85fSChristoph Lameter { 14088bccd85fSChristoph Lameter nodemask_t nodes; 14098bccd85fSChristoph Lameter int err; 1410028fec41SDavid Rientjes unsigned short mode_flags; 14118bccd85fSChristoph Lameter 1412057d3389SAndrey Konovalov start = untagged_addr(start); 1413028fec41SDavid Rientjes mode_flags = mode & MPOL_MODE_FLAGS; 1414028fec41SDavid Rientjes mode &= ~MPOL_MODE_FLAGS; 1415a3b51e01SDavid Rientjes if (mode >= MPOL_MAX) 1416a3b51e01SDavid Rientjes return -EINVAL; 14174c50bc01SDavid Rientjes if ((mode_flags & MPOL_F_STATIC_NODES) && 14184c50bc01SDavid Rientjes (mode_flags & MPOL_F_RELATIVE_NODES)) 14194c50bc01SDavid Rientjes return -EINVAL; 14208bccd85fSChristoph Lameter err = get_nodes(&nodes, nmask, maxnode); 14218bccd85fSChristoph Lameter if (err) 14228bccd85fSChristoph Lameter return err; 1423028fec41SDavid Rientjes return do_mbind(start, len, mode, mode_flags, &nodes, flags); 14248bccd85fSChristoph Lameter } 14258bccd85fSChristoph Lameter 1426e7dc9ad6SDominik Brodowski SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len, 1427e7dc9ad6SDominik Brodowski unsigned long, mode, const unsigned long __user *, nmask, 1428e7dc9ad6SDominik Brodowski unsigned long, maxnode, unsigned int, flags) 1429e7dc9ad6SDominik Brodowski { 1430e7dc9ad6SDominik Brodowski return kernel_mbind(start, len, mode, nmask, maxnode, flags); 1431e7dc9ad6SDominik Brodowski } 1432e7dc9ad6SDominik Brodowski 14338bccd85fSChristoph Lameter /* Set the process memory policy */ 1434af03c4acSDominik Brodowski static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask, 1435af03c4acSDominik Brodowski unsigned long maxnode) 14368bccd85fSChristoph Lameter { 14378bccd85fSChristoph Lameter int err; 14388bccd85fSChristoph Lameter nodemask_t nodes; 1439028fec41SDavid Rientjes unsigned short flags; 14408bccd85fSChristoph Lameter 1441028fec41SDavid Rientjes flags = mode & MPOL_MODE_FLAGS; 1442028fec41SDavid Rientjes mode &= ~MPOL_MODE_FLAGS; 1443028fec41SDavid Rientjes if ((unsigned int)mode >= MPOL_MAX) 14448bccd85fSChristoph Lameter return -EINVAL; 14454c50bc01SDavid Rientjes if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES)) 14464c50bc01SDavid Rientjes return -EINVAL; 14478bccd85fSChristoph Lameter err = get_nodes(&nodes, nmask, maxnode); 14488bccd85fSChristoph Lameter if (err) 14498bccd85fSChristoph Lameter return err; 1450028fec41SDavid Rientjes return do_set_mempolicy(mode, flags, &nodes); 14518bccd85fSChristoph Lameter } 14528bccd85fSChristoph Lameter 1453af03c4acSDominik Brodowski SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask, 1454af03c4acSDominik Brodowski unsigned long, maxnode) 1455af03c4acSDominik Brodowski { 1456af03c4acSDominik Brodowski return kernel_set_mempolicy(mode, nmask, maxnode); 1457af03c4acSDominik Brodowski } 1458af03c4acSDominik Brodowski 1459b6e9b0baSDominik Brodowski static int kernel_migrate_pages(pid_t pid, unsigned long maxnode, 1460b6e9b0baSDominik Brodowski const unsigned long __user *old_nodes, 1461b6e9b0baSDominik Brodowski const unsigned long __user *new_nodes) 146239743889SChristoph Lameter { 1463596d7cfaSKOSAKI Motohiro struct mm_struct *mm = NULL; 146439743889SChristoph Lameter 
struct task_struct *task; 146539743889SChristoph Lameter nodemask_t task_nodes; 146639743889SChristoph Lameter int err; 1467596d7cfaSKOSAKI Motohiro nodemask_t *old; 1468596d7cfaSKOSAKI Motohiro nodemask_t *new; 1469596d7cfaSKOSAKI Motohiro NODEMASK_SCRATCH(scratch); 147039743889SChristoph Lameter 1471596d7cfaSKOSAKI Motohiro if (!scratch) 1472596d7cfaSKOSAKI Motohiro return -ENOMEM; 147339743889SChristoph Lameter 1474596d7cfaSKOSAKI Motohiro old = &scratch->mask1; 1475596d7cfaSKOSAKI Motohiro new = &scratch->mask2; 1476596d7cfaSKOSAKI Motohiro 1477596d7cfaSKOSAKI Motohiro err = get_nodes(old, old_nodes, maxnode); 147839743889SChristoph Lameter if (err) 1479596d7cfaSKOSAKI Motohiro goto out; 1480596d7cfaSKOSAKI Motohiro 1481596d7cfaSKOSAKI Motohiro err = get_nodes(new, new_nodes, maxnode); 1482596d7cfaSKOSAKI Motohiro if (err) 1483596d7cfaSKOSAKI Motohiro goto out; 148439743889SChristoph Lameter 148539743889SChristoph Lameter /* Find the mm_struct */ 148655cfaa3cSZeng Zhaoming rcu_read_lock(); 1487228ebcbeSPavel Emelyanov task = pid ? find_task_by_vpid(pid) : current; 148839743889SChristoph Lameter if (!task) { 148955cfaa3cSZeng Zhaoming rcu_read_unlock(); 1490596d7cfaSKOSAKI Motohiro err = -ESRCH; 1491596d7cfaSKOSAKI Motohiro goto out; 149239743889SChristoph Lameter } 14933268c63eSChristoph Lameter get_task_struct(task); 149439743889SChristoph Lameter 1495596d7cfaSKOSAKI Motohiro err = -EINVAL; 149639743889SChristoph Lameter 149739743889SChristoph Lameter /* 149831367466SOtto Ebeling * Check if this process has the right to modify the specified process. 149931367466SOtto Ebeling * Use the regular "ptrace_may_access()" checks. 150039743889SChristoph Lameter */ 150131367466SOtto Ebeling if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) { 1502c69e8d9cSDavid Howells rcu_read_unlock(); 150339743889SChristoph Lameter err = -EPERM; 15043268c63eSChristoph Lameter goto out_put; 150539743889SChristoph Lameter } 1506c69e8d9cSDavid Howells rcu_read_unlock(); 150739743889SChristoph Lameter 150839743889SChristoph Lameter task_nodes = cpuset_mems_allowed(task); 150939743889SChristoph Lameter /* Is the user allowed to access the target nodes? */ 1510596d7cfaSKOSAKI Motohiro if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) { 151139743889SChristoph Lameter err = -EPERM; 15123268c63eSChristoph Lameter goto out_put; 151339743889SChristoph Lameter } 151439743889SChristoph Lameter 15150486a38bSYisheng Xie task_nodes = cpuset_mems_allowed(current); 15160486a38bSYisheng Xie nodes_and(*new, *new, task_nodes); 15170486a38bSYisheng Xie if (nodes_empty(*new)) 15183268c63eSChristoph Lameter goto out_put; 15190486a38bSYisheng Xie 152086c3a764SDavid Quigley err = security_task_movememory(task); 152186c3a764SDavid Quigley if (err) 15223268c63eSChristoph Lameter goto out_put; 152386c3a764SDavid Quigley 15243268c63eSChristoph Lameter mm = get_task_mm(task); 15253268c63eSChristoph Lameter put_task_struct(task); 1526f2a9ef88SSasha Levin 1527f2a9ef88SSasha Levin if (!mm) { 1528f2a9ef88SSasha Levin err = -EINVAL; 1529f2a9ef88SSasha Levin goto out; 1530f2a9ef88SSasha Levin } 1531f2a9ef88SSasha Levin 1532596d7cfaSKOSAKI Motohiro err = do_migrate_pages(mm, old, new, 153374c00241SChristoph Lameter capable(CAP_SYS_NICE) ? 
MPOL_MF_MOVE_ALL : MPOL_MF_MOVE); 15343268c63eSChristoph Lameter 153539743889SChristoph Lameter mmput(mm); 15363268c63eSChristoph Lameter out: 1537596d7cfaSKOSAKI Motohiro NODEMASK_SCRATCH_FREE(scratch); 1538596d7cfaSKOSAKI Motohiro 153939743889SChristoph Lameter return err; 15403268c63eSChristoph Lameter 15413268c63eSChristoph Lameter out_put: 15423268c63eSChristoph Lameter put_task_struct(task); 15433268c63eSChristoph Lameter goto out; 15443268c63eSChristoph Lameter 154539743889SChristoph Lameter } 154639743889SChristoph Lameter 1547b6e9b0baSDominik Brodowski SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode, 1548b6e9b0baSDominik Brodowski const unsigned long __user *, old_nodes, 1549b6e9b0baSDominik Brodowski const unsigned long __user *, new_nodes) 1550b6e9b0baSDominik Brodowski { 1551b6e9b0baSDominik Brodowski return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes); 1552b6e9b0baSDominik Brodowski } 1553b6e9b0baSDominik Brodowski 155439743889SChristoph Lameter 15558bccd85fSChristoph Lameter /* Retrieve NUMA policy */ 1556af03c4acSDominik Brodowski static int kernel_get_mempolicy(int __user *policy, 1557af03c4acSDominik Brodowski unsigned long __user *nmask, 1558af03c4acSDominik Brodowski unsigned long maxnode, 1559af03c4acSDominik Brodowski unsigned long addr, 1560af03c4acSDominik Brodowski unsigned long flags) 15618bccd85fSChristoph Lameter { 1562dbcb0f19SAdrian Bunk int err; 1563dbcb0f19SAdrian Bunk int uninitialized_var(pval); 15648bccd85fSChristoph Lameter nodemask_t nodes; 15658bccd85fSChristoph Lameter 1566057d3389SAndrey Konovalov addr = untagged_addr(addr); 1567057d3389SAndrey Konovalov 1568050c17f2SRalph Campbell if (nmask != NULL && maxnode < nr_node_ids) 15698bccd85fSChristoph Lameter return -EINVAL; 15708bccd85fSChristoph Lameter 15718bccd85fSChristoph Lameter err = do_get_mempolicy(&pval, &nodes, addr, flags); 15728bccd85fSChristoph Lameter 15738bccd85fSChristoph Lameter if (err) 15748bccd85fSChristoph Lameter return err; 15758bccd85fSChristoph Lameter 15768bccd85fSChristoph Lameter if (policy && put_user(pval, policy)) 15778bccd85fSChristoph Lameter return -EFAULT; 15788bccd85fSChristoph Lameter 15798bccd85fSChristoph Lameter if (nmask) 15808bccd85fSChristoph Lameter err = copy_nodes_to_user(nmask, maxnode, &nodes); 15818bccd85fSChristoph Lameter 15828bccd85fSChristoph Lameter return err; 15838bccd85fSChristoph Lameter } 15848bccd85fSChristoph Lameter 1585af03c4acSDominik Brodowski SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, 1586af03c4acSDominik Brodowski unsigned long __user *, nmask, unsigned long, maxnode, 1587af03c4acSDominik Brodowski unsigned long, addr, unsigned long, flags) 1588af03c4acSDominik Brodowski { 1589af03c4acSDominik Brodowski return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags); 1590af03c4acSDominik Brodowski } 1591af03c4acSDominik Brodowski 15921da177e4SLinus Torvalds #ifdef CONFIG_COMPAT 15931da177e4SLinus Torvalds 1594c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, 1595c93e0f6cSHeiko Carstens compat_ulong_t __user *, nmask, 1596c93e0f6cSHeiko Carstens compat_ulong_t, maxnode, 1597c93e0f6cSHeiko Carstens compat_ulong_t, addr, compat_ulong_t, flags) 15981da177e4SLinus Torvalds { 15991da177e4SLinus Torvalds long err; 16001da177e4SLinus Torvalds unsigned long __user *nm = NULL; 16011da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 16021da177e4SLinus Torvalds DECLARE_BITMAP(bm, MAX_NUMNODES); 16031da177e4SLinus Torvalds 1604050c17f2SRalph Campbell nr_bits = 
min_t(unsigned long, maxnode-1, nr_node_ids); 16051da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 16061da177e4SLinus Torvalds 16071da177e4SLinus Torvalds if (nmask) 16081da177e4SLinus Torvalds nm = compat_alloc_user_space(alloc_size); 16091da177e4SLinus Torvalds 1610af03c4acSDominik Brodowski err = kernel_get_mempolicy(policy, nm, nr_bits+1, addr, flags); 16111da177e4SLinus Torvalds 16121da177e4SLinus Torvalds if (!err && nmask) { 16132bbff6c7SKAMEZAWA Hiroyuki unsigned long copy_size; 16142bbff6c7SKAMEZAWA Hiroyuki copy_size = min_t(unsigned long, sizeof(bm), alloc_size); 16152bbff6c7SKAMEZAWA Hiroyuki err = copy_from_user(bm, nm, copy_size); 16161da177e4SLinus Torvalds /* ensure entire bitmap is zeroed */ 16171da177e4SLinus Torvalds err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8); 16181da177e4SLinus Torvalds err |= compat_put_bitmap(nmask, bm, nr_bits); 16191da177e4SLinus Torvalds } 16201da177e4SLinus Torvalds 16211da177e4SLinus Torvalds return err; 16221da177e4SLinus Torvalds } 16231da177e4SLinus Torvalds 1624c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask, 1625c93e0f6cSHeiko Carstens compat_ulong_t, maxnode) 16261da177e4SLinus Torvalds { 16271da177e4SLinus Torvalds unsigned long __user *nm = NULL; 16281da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 16291da177e4SLinus Torvalds DECLARE_BITMAP(bm, MAX_NUMNODES); 16301da177e4SLinus Torvalds 16311da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 16321da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 16331da177e4SLinus Torvalds 16341da177e4SLinus Torvalds if (nmask) { 1635cf01fb99SChris Salls if (compat_get_bitmap(bm, nmask, nr_bits)) 16361da177e4SLinus Torvalds return -EFAULT; 1637cf01fb99SChris Salls nm = compat_alloc_user_space(alloc_size); 1638cf01fb99SChris Salls if (copy_to_user(nm, bm, alloc_size)) 1639cf01fb99SChris Salls return -EFAULT; 1640cf01fb99SChris Salls } 16411da177e4SLinus Torvalds 1642af03c4acSDominik Brodowski return kernel_set_mempolicy(mode, nm, nr_bits+1); 16431da177e4SLinus Torvalds } 16441da177e4SLinus Torvalds 1645c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len, 1646c93e0f6cSHeiko Carstens compat_ulong_t, mode, compat_ulong_t __user *, nmask, 1647c93e0f6cSHeiko Carstens compat_ulong_t, maxnode, compat_ulong_t, flags) 16481da177e4SLinus Torvalds { 16491da177e4SLinus Torvalds unsigned long __user *nm = NULL; 16501da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 1651dfcd3c0dSAndi Kleen nodemask_t bm; 16521da177e4SLinus Torvalds 16531da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 16541da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 16551da177e4SLinus Torvalds 16561da177e4SLinus Torvalds if (nmask) { 1657cf01fb99SChris Salls if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits)) 16581da177e4SLinus Torvalds return -EFAULT; 1659cf01fb99SChris Salls nm = compat_alloc_user_space(alloc_size); 1660cf01fb99SChris Salls if (copy_to_user(nm, nodes_addr(bm), alloc_size)) 1661cf01fb99SChris Salls return -EFAULT; 1662cf01fb99SChris Salls } 16631da177e4SLinus Torvalds 1664e7dc9ad6SDominik Brodowski return kernel_mbind(start, len, mode, nm, nr_bits+1, flags); 16651da177e4SLinus Torvalds } 16661da177e4SLinus Torvalds 1667b6e9b0baSDominik Brodowski COMPAT_SYSCALL_DEFINE4(migrate_pages, compat_pid_t, pid, 1668b6e9b0baSDominik Brodowski compat_ulong_t, maxnode, 1669b6e9b0baSDominik 
Brodowski const compat_ulong_t __user *, old_nodes, 1670b6e9b0baSDominik Brodowski const compat_ulong_t __user *, new_nodes) 1671b6e9b0baSDominik Brodowski { 1672b6e9b0baSDominik Brodowski unsigned long __user *old = NULL; 1673b6e9b0baSDominik Brodowski unsigned long __user *new = NULL; 1674b6e9b0baSDominik Brodowski nodemask_t tmp_mask; 1675b6e9b0baSDominik Brodowski unsigned long nr_bits; 1676b6e9b0baSDominik Brodowski unsigned long size; 1677b6e9b0baSDominik Brodowski 1678b6e9b0baSDominik Brodowski nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES); 1679b6e9b0baSDominik Brodowski size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 1680b6e9b0baSDominik Brodowski if (old_nodes) { 1681b6e9b0baSDominik Brodowski if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits)) 1682b6e9b0baSDominik Brodowski return -EFAULT; 1683b6e9b0baSDominik Brodowski old = compat_alloc_user_space(new_nodes ? size * 2 : size); 1684b6e9b0baSDominik Brodowski if (new_nodes) 1685b6e9b0baSDominik Brodowski new = old + size / sizeof(unsigned long); 1686b6e9b0baSDominik Brodowski if (copy_to_user(old, nodes_addr(tmp_mask), size)) 1687b6e9b0baSDominik Brodowski return -EFAULT; 1688b6e9b0baSDominik Brodowski } 1689b6e9b0baSDominik Brodowski if (new_nodes) { 1690b6e9b0baSDominik Brodowski if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits)) 1691b6e9b0baSDominik Brodowski return -EFAULT; 1692b6e9b0baSDominik Brodowski if (new == NULL) 1693b6e9b0baSDominik Brodowski new = compat_alloc_user_space(size); 1694b6e9b0baSDominik Brodowski if (copy_to_user(new, nodes_addr(tmp_mask), size)) 1695b6e9b0baSDominik Brodowski return -EFAULT; 1696b6e9b0baSDominik Brodowski } 1697b6e9b0baSDominik Brodowski return kernel_migrate_pages(pid, nr_bits + 1, old, new); 1698b6e9b0baSDominik Brodowski } 1699b6e9b0baSDominik Brodowski 1700b6e9b0baSDominik Brodowski #endif /* CONFIG_COMPAT */ 17011da177e4SLinus Torvalds 170274d2c3a0SOleg Nesterov struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, 170374d2c3a0SOleg Nesterov unsigned long addr) 17041da177e4SLinus Torvalds { 17058d90274bSOleg Nesterov struct mempolicy *pol = NULL; 17061da177e4SLinus Torvalds 17071da177e4SLinus Torvalds if (vma) { 1708480eccf9SLee Schermerhorn if (vma->vm_ops && vma->vm_ops->get_policy) { 17098d90274bSOleg Nesterov pol = vma->vm_ops->get_policy(vma, addr); 171000442ad0SMel Gorman } else if (vma->vm_policy) { 17111da177e4SLinus Torvalds pol = vma->vm_policy; 171200442ad0SMel Gorman 171300442ad0SMel Gorman /* 171400442ad0SMel Gorman * shmem_alloc_page() passes MPOL_F_SHARED policy with 171500442ad0SMel Gorman * a pseudo vma whose vma->vm_ops=NULL. Take a reference 171600442ad0SMel Gorman * count on these policies which will be dropped by 171700442ad0SMel Gorman * mpol_cond_put() later 171800442ad0SMel Gorman */ 171900442ad0SMel Gorman if (mpol_needs_cond_ref(pol)) 172000442ad0SMel Gorman mpol_get(pol); 172100442ad0SMel Gorman } 17221da177e4SLinus Torvalds } 1723f15ca78eSOleg Nesterov 172474d2c3a0SOleg Nesterov return pol; 172574d2c3a0SOleg Nesterov } 172674d2c3a0SOleg Nesterov 172774d2c3a0SOleg Nesterov /* 1728dd6eecb9SOleg Nesterov * get_vma_policy(@vma, @addr) 172974d2c3a0SOleg Nesterov * @vma: virtual memory area whose policy is sought 173074d2c3a0SOleg Nesterov * @addr: address in @vma for shared policy lookup 173174d2c3a0SOleg Nesterov * 173274d2c3a0SOleg Nesterov * Returns effective policy for a VMA at specified address. 1733dd6eecb9SOleg Nesterov * Falls back to current->mempolicy or system default policy, as necessary. 
173474d2c3a0SOleg Nesterov * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
173574d2c3a0SOleg Nesterov * count--added by the get_policy() vm_op, as appropriate--to protect against
173674d2c3a0SOleg Nesterov * freeing by another task. It is the caller's responsibility to free the
173774d2c3a0SOleg Nesterov * extra reference for shared policies.
173874d2c3a0SOleg Nesterov */
1739ac79f78dSDavid Rientjes static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1740dd6eecb9SOleg Nesterov unsigned long addr)
174174d2c3a0SOleg Nesterov {
174274d2c3a0SOleg Nesterov struct mempolicy *pol = __get_vma_policy(vma, addr);
174374d2c3a0SOleg Nesterov 
17448d90274bSOleg Nesterov if (!pol)
1745dd6eecb9SOleg Nesterov pol = get_task_policy(current);
17468d90274bSOleg Nesterov 
17471da177e4SLinus Torvalds return pol;
17481da177e4SLinus Torvalds }
17491da177e4SLinus Torvalds 
17506b6482bbSOleg Nesterov bool vma_policy_mof(struct vm_area_struct *vma)
1751fc314724SMel Gorman {
17526b6482bbSOleg Nesterov struct mempolicy *pol;
1753f15ca78eSOleg Nesterov 
1754fc314724SMel Gorman if (vma->vm_ops && vma->vm_ops->get_policy) {
1755fc314724SMel Gorman bool ret = false;
1756fc314724SMel Gorman 
1757fc314724SMel Gorman pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1758fc314724SMel Gorman if (pol && (pol->flags & MPOL_F_MOF))
1759fc314724SMel Gorman ret = true;
1760fc314724SMel Gorman mpol_cond_put(pol);
1761fc314724SMel Gorman 
1762fc314724SMel Gorman return ret;
17638d90274bSOleg Nesterov }
17648d90274bSOleg Nesterov 
1765fc314724SMel Gorman pol = vma->vm_policy;
17668d90274bSOleg Nesterov if (!pol)
17676b6482bbSOleg Nesterov pol = get_task_policy(current);
1768fc314724SMel Gorman 
1769fc314724SMel Gorman return pol->flags & MPOL_F_MOF;
1770fc314724SMel Gorman }
1771fc314724SMel Gorman 
1772d3eb1570SLai Jiangshan static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1773d3eb1570SLai Jiangshan {
1774d3eb1570SLai Jiangshan enum zone_type dynamic_policy_zone = policy_zone;
1775d3eb1570SLai Jiangshan 
1776d3eb1570SLai Jiangshan BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1777d3eb1570SLai Jiangshan 
1778d3eb1570SLai Jiangshan /*
1779d3eb1570SLai Jiangshan * if policy->v.nodes has movable memory only,
1780d3eb1570SLai Jiangshan * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
1781d3eb1570SLai Jiangshan *
1782d3eb1570SLai Jiangshan * policy->v.nodes intersects with node_states[N_MEMORY],
1783d3eb1570SLai Jiangshan * so if the following test fails, it implies
1784d3eb1570SLai Jiangshan * policy->v.nodes has movable memory only.
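 *
 * Illustrative consequence (added for clarity): with a bind mask of
 * movable-only nodes, dynamic_policy_zone becomes ZONE_MOVABLE, so a
 * GFP_KERNEL allocation (gfp_zone() == ZONE_NORMAL) ignores the mask
 * while a GFP_HIGHUSER_MOVABLE allocation honours it.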
1785d3eb1570SLai Jiangshan */ 1786d3eb1570SLai Jiangshan if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY])) 1787d3eb1570SLai Jiangshan dynamic_policy_zone = ZONE_MOVABLE; 1788d3eb1570SLai Jiangshan 1789d3eb1570SLai Jiangshan return zone >= dynamic_policy_zone; 1790d3eb1570SLai Jiangshan } 1791d3eb1570SLai Jiangshan 179252cd3b07SLee Schermerhorn /* 179352cd3b07SLee Schermerhorn * Return a nodemask representing a mempolicy for filtering nodes for 179452cd3b07SLee Schermerhorn * page allocation 179552cd3b07SLee Schermerhorn */ 179652cd3b07SLee Schermerhorn static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy) 179719770b32SMel Gorman { 179819770b32SMel Gorman /* Lower zones don't get a nodemask applied for MPOL_BIND */ 179945c4745aSLee Schermerhorn if (unlikely(policy->mode == MPOL_BIND) && 1800d3eb1570SLai Jiangshan apply_policy_zone(policy, gfp_zone(gfp)) && 180119770b32SMel Gorman cpuset_nodemask_valid_mems_allowed(&policy->v.nodes)) 180219770b32SMel Gorman return &policy->v.nodes; 180319770b32SMel Gorman 180419770b32SMel Gorman return NULL; 180519770b32SMel Gorman } 180619770b32SMel Gorman 180704ec6264SVlastimil Babka /* Return the node id preferred by the given mempolicy, or the given id */ 180804ec6264SVlastimil Babka static int policy_node(gfp_t gfp, struct mempolicy *policy, 18092f5f9486SAndi Kleen int nd) 18101da177e4SLinus Torvalds { 18116d840958SMichal Hocko if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL)) 18121da177e4SLinus Torvalds nd = policy->v.preferred_node; 18136d840958SMichal Hocko else { 181419770b32SMel Gorman /* 18156d840958SMichal Hocko * __GFP_THISNODE shouldn't even be used with the bind policy 18166d840958SMichal Hocko * because we might easily break the expectation to stay on the 18176d840958SMichal Hocko * requested node and not break the policy. 181819770b32SMel Gorman */ 18196d840958SMichal Hocko WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE)); 18201da177e4SLinus Torvalds } 18216d840958SMichal Hocko 182204ec6264SVlastimil Babka return nd; 18231da177e4SLinus Torvalds } 18241da177e4SLinus Torvalds 18251da177e4SLinus Torvalds /* Do dynamic interleaving for a process */ 18261da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy) 18271da177e4SLinus Torvalds { 182845816682SVlastimil Babka unsigned next; 18291da177e4SLinus Torvalds struct task_struct *me = current; 18301da177e4SLinus Torvalds 183145816682SVlastimil Babka next = next_node_in(me->il_prev, policy->v.nodes); 1832f5b087b5SDavid Rientjes if (next < MAX_NUMNODES) 183345816682SVlastimil Babka me->il_prev = next; 183445816682SVlastimil Babka return next; 18351da177e4SLinus Torvalds } 18361da177e4SLinus Torvalds 1837dc85da15SChristoph Lameter /* 1838dc85da15SChristoph Lameter * Depending on the memory policy provide a node from which to allocate the 1839dc85da15SChristoph Lameter * next slab entry. 
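 *
 * Example (added for illustration): under MPOL_BIND over nodes {2,3}
 * with the current CPU on node 0, the MPOL_BIND case below walks node
 * 0's fallback zonelist and returns whichever of nodes 2 and 3 is
 * nearest; under MPOL_INTERLEAVE the allowed nodes are simply rotated
 * through, one slab allocation at a time.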
1840dc85da15SChristoph Lameter */ 18412a389610SDavid Rientjes unsigned int mempolicy_slab_node(void) 1842dc85da15SChristoph Lameter { 1843e7b691b0SAndi Kleen struct mempolicy *policy; 18442a389610SDavid Rientjes int node = numa_mem_id(); 1845e7b691b0SAndi Kleen 1846e7b691b0SAndi Kleen if (in_interrupt()) 18472a389610SDavid Rientjes return node; 1848e7b691b0SAndi Kleen 1849e7b691b0SAndi Kleen policy = current->mempolicy; 1850fc36b8d3SLee Schermerhorn if (!policy || policy->flags & MPOL_F_LOCAL) 18512a389610SDavid Rientjes return node; 1852765c4507SChristoph Lameter 1853bea904d5SLee Schermerhorn switch (policy->mode) { 1854bea904d5SLee Schermerhorn case MPOL_PREFERRED: 1855fc36b8d3SLee Schermerhorn /* 1856fc36b8d3SLee Schermerhorn * handled MPOL_F_LOCAL above 1857fc36b8d3SLee Schermerhorn */ 1858bea904d5SLee Schermerhorn return policy->v.preferred_node; 1859bea904d5SLee Schermerhorn 1860dc85da15SChristoph Lameter case MPOL_INTERLEAVE: 1861dc85da15SChristoph Lameter return interleave_nodes(policy); 1862dc85da15SChristoph Lameter 1863dd1a239fSMel Gorman case MPOL_BIND: { 1864c33d6c06SMel Gorman struct zoneref *z; 1865c33d6c06SMel Gorman 1866dc85da15SChristoph Lameter /* 1867dc85da15SChristoph Lameter * Follow bind policy behavior and start allocation at the 1868dc85da15SChristoph Lameter * first node. 1869dc85da15SChristoph Lameter */ 187019770b32SMel Gorman struct zonelist *zonelist; 187119770b32SMel Gorman enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL); 1872c9634cf0SAneesh Kumar K.V zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK]; 1873c33d6c06SMel Gorman z = first_zones_zonelist(zonelist, highest_zoneidx, 1874c33d6c06SMel Gorman &policy->v.nodes); 1875c1093b74SPavel Tatashin return z->zone ? zone_to_nid(z->zone) : node; 1876dd1a239fSMel Gorman } 1877dc85da15SChristoph Lameter 1878dc85da15SChristoph Lameter default: 1879bea904d5SLee Schermerhorn BUG(); 1880dc85da15SChristoph Lameter } 1881dc85da15SChristoph Lameter } 1882dc85da15SChristoph Lameter 1883fee83b3aSAndrew Morton /* 1884fee83b3aSAndrew Morton * Do static interleaving for a VMA with known offset @n. Returns the n'th 1885fee83b3aSAndrew Morton * node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the 1886fee83b3aSAndrew Morton * number of present nodes. 
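 *
 * Worked example (added for illustration): with pol->v.nodes = {0,2,5}
 * and n = 7, nnodes = 3 and target = 7 % 3 = 1, so the walk below
 * starts at node 0, takes one next_node() step, and returns node 2.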
1887fee83b3aSAndrew Morton */ 188898c70baaSLaurent Dufour static unsigned offset_il_node(struct mempolicy *pol, unsigned long n) 18891da177e4SLinus Torvalds { 1890dfcd3c0dSAndi Kleen unsigned nnodes = nodes_weight(pol->v.nodes); 1891f5b087b5SDavid Rientjes unsigned target; 1892fee83b3aSAndrew Morton int i; 1893fee83b3aSAndrew Morton int nid; 18941da177e4SLinus Torvalds 1895f5b087b5SDavid Rientjes if (!nnodes) 1896f5b087b5SDavid Rientjes return numa_node_id(); 1897fee83b3aSAndrew Morton target = (unsigned int)n % nnodes; 1898fee83b3aSAndrew Morton nid = first_node(pol->v.nodes); 1899fee83b3aSAndrew Morton for (i = 0; i < target; i++) 1900dfcd3c0dSAndi Kleen nid = next_node(nid, pol->v.nodes); 19011da177e4SLinus Torvalds return nid; 19021da177e4SLinus Torvalds } 19031da177e4SLinus Torvalds 19045da7ca86SChristoph Lameter /* Determine a node number for interleave */ 19055da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol, 19065da7ca86SChristoph Lameter struct vm_area_struct *vma, unsigned long addr, int shift) 19075da7ca86SChristoph Lameter { 19085da7ca86SChristoph Lameter if (vma) { 19095da7ca86SChristoph Lameter unsigned long off; 19105da7ca86SChristoph Lameter 19113b98b087SNishanth Aravamudan /* 19123b98b087SNishanth Aravamudan * for small pages, there is no difference between 19133b98b087SNishanth Aravamudan * shift and PAGE_SHIFT, so the bit-shift is safe. 19143b98b087SNishanth Aravamudan * for huge pages, since vm_pgoff is in units of small 19153b98b087SNishanth Aravamudan * pages, we need to shift off the always 0 bits to get 19163b98b087SNishanth Aravamudan * a useful offset. 19173b98b087SNishanth Aravamudan */ 19183b98b087SNishanth Aravamudan BUG_ON(shift < PAGE_SHIFT); 19193b98b087SNishanth Aravamudan off = vma->vm_pgoff >> (shift - PAGE_SHIFT); 19205da7ca86SChristoph Lameter off += (addr - vma->vm_start) >> shift; 192198c70baaSLaurent Dufour return offset_il_node(pol, off); 19225da7ca86SChristoph Lameter } else 19235da7ca86SChristoph Lameter return interleave_nodes(pol); 19245da7ca86SChristoph Lameter } 19255da7ca86SChristoph Lameter 192600ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS 1927480eccf9SLee Schermerhorn /* 192804ec6264SVlastimil Babka * huge_node(@vma, @addr, @gfp_flags, @mpol) 1929b46e14acSFabian Frederick * @vma: virtual memory area whose policy is sought 1930b46e14acSFabian Frederick * @addr: address in @vma for shared policy lookup and interleave policy 1931b46e14acSFabian Frederick * @gfp_flags: for requested zone 1932b46e14acSFabian Frederick * @mpol: pointer to mempolicy pointer for reference counted mempolicy 1933b46e14acSFabian Frederick * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask 1934480eccf9SLee Schermerhorn * 193504ec6264SVlastimil Babka * Returns a nid suitable for a huge page allocation and a pointer 193652cd3b07SLee Schermerhorn * to the struct mempolicy for conditional unref after allocation. 193752cd3b07SLee Schermerhorn * If the effective policy is 'BIND, returns a pointer to the mempolicy's 193852cd3b07SLee Schermerhorn * @nodemask for filtering the zonelist. 
1939c0ff7453SMiao Xie *
1940d26914d1SMel Gorman * Must be protected by read_mems_allowed_begin()
1941480eccf9SLee Schermerhorn */
194204ec6264SVlastimil Babka int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
194304ec6264SVlastimil Babka struct mempolicy **mpol, nodemask_t **nodemask)
19445da7ca86SChristoph Lameter {
194504ec6264SVlastimil Babka int nid;
19465da7ca86SChristoph Lameter 
1947dd6eecb9SOleg Nesterov *mpol = get_vma_policy(vma, addr);
194819770b32SMel Gorman *nodemask = NULL; /* assume !MPOL_BIND */
19495da7ca86SChristoph Lameter 
195052cd3b07SLee Schermerhorn if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
195104ec6264SVlastimil Babka nid = interleave_nid(*mpol, vma, addr,
195204ec6264SVlastimil Babka huge_page_shift(hstate_vma(vma)));
195352cd3b07SLee Schermerhorn } else {
195404ec6264SVlastimil Babka nid = policy_node(gfp_flags, *mpol, numa_node_id());
195552cd3b07SLee Schermerhorn if ((*mpol)->mode == MPOL_BIND)
195652cd3b07SLee Schermerhorn *nodemask = &(*mpol)->v.nodes;
1957480eccf9SLee Schermerhorn }
195804ec6264SVlastimil Babka return nid;
19595da7ca86SChristoph Lameter }
196006808b08SLee Schermerhorn 
196106808b08SLee Schermerhorn /*
196206808b08SLee Schermerhorn * init_nodemask_of_mempolicy
196306808b08SLee Schermerhorn *
196406808b08SLee Schermerhorn * If the current task's mempolicy is "default" [NULL], return 'false'
196506808b08SLee Schermerhorn * to indicate default policy. Otherwise, extract the policy nodemask
196606808b08SLee Schermerhorn * for 'bind' or 'interleave' policy into the argument nodemask, or
196706808b08SLee Schermerhorn * initialize the argument nodemask to contain the single node for
196806808b08SLee Schermerhorn * 'preferred' or 'local' policy and return 'true' to indicate presence
196906808b08SLee Schermerhorn * of non-default mempolicy.
197006808b08SLee Schermerhorn *
197106808b08SLee Schermerhorn * We don't bother with reference counting the mempolicy [mpol_get/put]
197206808b08SLee Schermerhorn * because the current task is examining its own mempolicy and a task's
197306808b08SLee Schermerhorn * mempolicy is only ever changed by the task itself.
197406808b08SLee Schermerhorn *
197506808b08SLee Schermerhorn * N.B., it is the caller's responsibility to free a returned nodemask.
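 *
 * Example (added for illustration): under MPOL_INTERLEAVE across nodes
 * {0,1}, *mask is set to {0,1} and the function returns true; with no
 * task mempolicy installed it returns false and *mask is untouched.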
197606808b08SLee Schermerhorn */
197706808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask)
197806808b08SLee Schermerhorn {
197906808b08SLee Schermerhorn struct mempolicy *mempolicy;
198006808b08SLee Schermerhorn int nid;
198106808b08SLee Schermerhorn 
198206808b08SLee Schermerhorn if (!(mask && current->mempolicy))
198306808b08SLee Schermerhorn return false;
198406808b08SLee Schermerhorn 
1985c0ff7453SMiao Xie task_lock(current);
198606808b08SLee Schermerhorn mempolicy = current->mempolicy;
198706808b08SLee Schermerhorn switch (mempolicy->mode) {
198806808b08SLee Schermerhorn case MPOL_PREFERRED:
198906808b08SLee Schermerhorn if (mempolicy->flags & MPOL_F_LOCAL)
199006808b08SLee Schermerhorn nid = numa_node_id();
199106808b08SLee Schermerhorn else
199206808b08SLee Schermerhorn nid = mempolicy->v.preferred_node;
199306808b08SLee Schermerhorn init_nodemask_of_node(mask, nid);
199406808b08SLee Schermerhorn break;
199506808b08SLee Schermerhorn 
199606808b08SLee Schermerhorn case MPOL_BIND:
199706808b08SLee Schermerhorn /* Fall through */
199806808b08SLee Schermerhorn case MPOL_INTERLEAVE:
199906808b08SLee Schermerhorn *mask = mempolicy->v.nodes;
200006808b08SLee Schermerhorn break;
200106808b08SLee Schermerhorn 
200206808b08SLee Schermerhorn default:
200306808b08SLee Schermerhorn BUG();
200406808b08SLee Schermerhorn }
2005c0ff7453SMiao Xie task_unlock(current);
200606808b08SLee Schermerhorn 
200706808b08SLee Schermerhorn return true;
200806808b08SLee Schermerhorn }
200900ac59adSChen, Kenneth W #endif
20105da7ca86SChristoph Lameter 
20116f48d0ebSDavid Rientjes /*
20126f48d0ebSDavid Rientjes * mempolicy_nodemask_intersects
20136f48d0ebSDavid Rientjes *
20146f48d0ebSDavid Rientjes * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
20156f48d0ebSDavid Rientjes * policy. Otherwise, check for intersection between mask and the policy
20166f48d0ebSDavid Rientjes * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local'
20176f48d0ebSDavid Rientjes * policy, always return true since it may allocate elsewhere on fallback.
20186f48d0ebSDavid Rientjes *
20196f48d0ebSDavid Rientjes * Takes task_lock(tsk) to prevent freeing of its mempolicy.
20206f48d0ebSDavid Rientjes */
20216f48d0ebSDavid Rientjes bool mempolicy_nodemask_intersects(struct task_struct *tsk,
20226f48d0ebSDavid Rientjes const nodemask_t *mask)
20236f48d0ebSDavid Rientjes {
20246f48d0ebSDavid Rientjes struct mempolicy *mempolicy;
20256f48d0ebSDavid Rientjes bool ret = true;
20266f48d0ebSDavid Rientjes 
20276f48d0ebSDavid Rientjes if (!mask)
20286f48d0ebSDavid Rientjes return ret;
20296f48d0ebSDavid Rientjes task_lock(tsk);
20306f48d0ebSDavid Rientjes mempolicy = tsk->mempolicy;
20316f48d0ebSDavid Rientjes if (!mempolicy)
20326f48d0ebSDavid Rientjes goto out;
20336f48d0ebSDavid Rientjes 
20346f48d0ebSDavid Rientjes switch (mempolicy->mode) {
20356f48d0ebSDavid Rientjes case MPOL_PREFERRED:
20366f48d0ebSDavid Rientjes /*
20376f48d0ebSDavid Rientjes * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to
20386f48d0ebSDavid Rientjes * allocate from; they may fall back to other nodes when OOM.
20396f48d0ebSDavid Rientjes * Thus, it's possible for tsk to have allocated memory from
20406f48d0ebSDavid Rientjes * nodes in mask.
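 *
 * (Contextual note, added: this conservatism matters to callers such
 * as the OOM killer, which uses the intersection test to judge
 * whether killing tsk could free memory on the nodes under pressure.)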
20416f48d0ebSDavid Rientjes */
20426f48d0ebSDavid Rientjes break;
20436f48d0ebSDavid Rientjes case MPOL_BIND:
20446f48d0ebSDavid Rientjes case MPOL_INTERLEAVE:
20456f48d0ebSDavid Rientjes ret = nodes_intersects(mempolicy->v.nodes, *mask);
20466f48d0ebSDavid Rientjes break;
20476f48d0ebSDavid Rientjes default:
20486f48d0ebSDavid Rientjes BUG();
20496f48d0ebSDavid Rientjes }
20506f48d0ebSDavid Rientjes out:
20516f48d0ebSDavid Rientjes task_unlock(tsk);
20526f48d0ebSDavid Rientjes return ret;
20536f48d0ebSDavid Rientjes }
20546f48d0ebSDavid Rientjes 
20551da177e4SLinus Torvalds /* Allocate a page using interleave policy.
20561da177e4SLinus Torvalds Own path because it needs to do special accounting. */
2057662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2058662f3a0bSAndi Kleen unsigned nid)
20591da177e4SLinus Torvalds {
20601da177e4SLinus Torvalds struct page *page;
20611da177e4SLinus Torvalds 
206204ec6264SVlastimil Babka page = __alloc_pages(gfp, order, nid);
20634518085eSKemi Wang /* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
20644518085eSKemi Wang if (!static_branch_likely(&vm_numa_stat_key))
20654518085eSKemi Wang return page;
2066de55c8b2SAndrey Ryabinin if (page && page_to_nid(page) == nid) {
2067de55c8b2SAndrey Ryabinin preempt_disable();
2068de55c8b2SAndrey Ryabinin __inc_numa_state(page_zone(page), NUMA_INTERLEAVE_HIT);
2069de55c8b2SAndrey Ryabinin preempt_enable();
2070de55c8b2SAndrey Ryabinin }
20711da177e4SLinus Torvalds return page;
20721da177e4SLinus Torvalds }
20731da177e4SLinus Torvalds 
20741da177e4SLinus Torvalds /**
20750bbbc0b3SAndrea Arcangeli * alloc_pages_vma - Allocate a page for a VMA.
20761da177e4SLinus Torvalds *
20771da177e4SLinus Torvalds * @gfp:
20781da177e4SLinus Torvalds * %GFP_USER user allocation.
20791da177e4SLinus Torvalds * %GFP_KERNEL kernel allocations,
20801da177e4SLinus Torvalds * %GFP_HIGHMEM highmem/user allocations,
20811da177e4SLinus Torvalds * %GFP_FS allocation should not call back into a file system.
20821da177e4SLinus Torvalds * %GFP_ATOMIC don't sleep.
20831da177e4SLinus Torvalds *
20840bbbc0b3SAndrea Arcangeli * @order: Order of the GFP allocation.
20851da177e4SLinus Torvalds * @vma: Pointer to VMA or NULL if not available.
20861da177e4SLinus Torvalds * @addr: Virtual Address of the allocation. Must be inside the VMA.
2087be97a41bSVlastimil Babka * @node: Which node to prefer for allocation (modulo policy).
208819deb769SDavid Rientjes * @hugepage: for hugepages try only the preferred node if possible
20891da177e4SLinus Torvalds *
20901da177e4SLinus Torvalds * This function allocates a page from the kernel page pool and applies
20911da177e4SLinus Torvalds * a NUMA policy associated with the VMA or the current process.
20921da177e4SLinus Torvalds * When VMA is not NULL caller must hold down_read on the mmap_sem of the
20931da177e4SLinus Torvalds * mm_struct of the VMA to prevent it from going away. Should be used for
2094be97a41bSVlastimil Babka * all allocations for pages that will be mapped into user space. Returns
2095be97a41bSVlastimil Babka * NULL when no page can be allocated.
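 *
 * Illustrative call (added; using numa_node_id() as the preference
 * hint is an assumption for the example, not taken from this file):
 *
 *	page = alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, addr,
 *			       numa_node_id(), false);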
20961da177e4SLinus Torvalds */ 20971da177e4SLinus Torvalds struct page * 20980bbbc0b3SAndrea Arcangeli alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, 209919deb769SDavid Rientjes unsigned long addr, int node, bool hugepage) 21001da177e4SLinus Torvalds { 2101cc9a6c87SMel Gorman struct mempolicy *pol; 2102c0ff7453SMiao Xie struct page *page; 210304ec6264SVlastimil Babka int preferred_nid; 2104be97a41bSVlastimil Babka nodemask_t *nmask; 21051da177e4SLinus Torvalds 2106dd6eecb9SOleg Nesterov pol = get_vma_policy(vma, addr); 2107cc9a6c87SMel Gorman 2108be97a41bSVlastimil Babka if (pol->mode == MPOL_INTERLEAVE) { 21091da177e4SLinus Torvalds unsigned nid; 21105da7ca86SChristoph Lameter 21118eac563cSAndi Kleen nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order); 211252cd3b07SLee Schermerhorn mpol_cond_put(pol); 21130bbbc0b3SAndrea Arcangeli page = alloc_page_interleave(gfp, order, nid); 2114be97a41bSVlastimil Babka goto out; 21151da177e4SLinus Torvalds } 21161da177e4SLinus Torvalds 211719deb769SDavid Rientjes if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) { 211819deb769SDavid Rientjes int hpage_node = node; 211919deb769SDavid Rientjes 212019deb769SDavid Rientjes /* 212119deb769SDavid Rientjes * For hugepage allocation and non-interleave policy which 212219deb769SDavid Rientjes * allows the current node (or other explicitly preferred 212319deb769SDavid Rientjes * node) we only try to allocate from the current/preferred 212419deb769SDavid Rientjes * node and don't fall back to other nodes, as the cost of 212519deb769SDavid Rientjes * remote accesses would likely offset THP benefits. 212619deb769SDavid Rientjes * 212719deb769SDavid Rientjes * If the policy is interleave, or does not allow the current 212819deb769SDavid Rientjes * node in its nodemask, we allocate the standard way. 212919deb769SDavid Rientjes */ 213019deb769SDavid Rientjes if (pol->mode == MPOL_PREFERRED && !(pol->flags & MPOL_F_LOCAL)) 213119deb769SDavid Rientjes hpage_node = pol->v.preferred_node; 213219deb769SDavid Rientjes 213319deb769SDavid Rientjes nmask = policy_nodemask(gfp, pol); 213419deb769SDavid Rientjes if (!nmask || node_isset(hpage_node, *nmask)) { 213519deb769SDavid Rientjes mpol_cond_put(pol); 213619deb769SDavid Rientjes page = __alloc_pages_node(hpage_node, 213719deb769SDavid Rientjes gfp | __GFP_THISNODE, order); 213876e654ccSDavid Rientjes 213976e654ccSDavid Rientjes /* 214076e654ccSDavid Rientjes * If hugepage allocations are configured to always 214176e654ccSDavid Rientjes * synchronous compact or the vma has been madvised 214276e654ccSDavid Rientjes * to prefer hugepage backing, retry allowing remote 214376e654ccSDavid Rientjes * memory as well. 
214476e654ccSDavid Rientjes */
214576e654ccSDavid Rientjes if (!page && (gfp & __GFP_DIRECT_RECLAIM))
214676e654ccSDavid Rientjes page = __alloc_pages_node(hpage_node,
214776e654ccSDavid Rientjes gfp | __GFP_NORETRY, order);
214876e654ccSDavid Rientjes 
214919deb769SDavid Rientjes goto out;
215019deb769SDavid Rientjes }
215119deb769SDavid Rientjes }
215219deb769SDavid Rientjes 
2153077fcf11SAneesh Kumar K.V nmask = policy_nodemask(gfp, pol);
215404ec6264SVlastimil Babka preferred_nid = policy_node(gfp, pol, node);
215504ec6264SVlastimil Babka page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
2156d51e9894SVlastimil Babka mpol_cond_put(pol);
2157be97a41bSVlastimil Babka out:
2158077fcf11SAneesh Kumar K.V return page;
2159077fcf11SAneesh Kumar K.V }
216069262215SChristoph Hellwig EXPORT_SYMBOL(alloc_pages_vma);
2161077fcf11SAneesh Kumar K.V 
21621da177e4SLinus Torvalds /**
21631da177e4SLinus Torvalds * alloc_pages_current - Allocate pages.
21641da177e4SLinus Torvalds *
21651da177e4SLinus Torvalds * @gfp:
21661da177e4SLinus Torvalds * %GFP_USER user allocation,
21671da177e4SLinus Torvalds * %GFP_KERNEL kernel allocation,
21681da177e4SLinus Torvalds * %GFP_HIGHMEM highmem allocation,
21691da177e4SLinus Torvalds * %GFP_FS don't call back into a file system.
21701da177e4SLinus Torvalds * %GFP_ATOMIC don't sleep.
21711da177e4SLinus Torvalds * @order: Power of two of allocation size in pages. 0 is a single page.
21721da177e4SLinus Torvalds *
21731da177e4SLinus Torvalds * Allocate a page from the kernel page pool, applying the current
21741da177e4SLinus Torvalds * process's NUMA policy when not in interrupt context.
21751da177e4SLinus Torvalds * Returns NULL when no page can be allocated.
21761da177e4SLinus Torvalds */
2177dd0fc66fSAl Viro struct page *alloc_pages_current(gfp_t gfp, unsigned order)
21781da177e4SLinus Torvalds {
21798d90274bSOleg Nesterov struct mempolicy *pol = &default_policy;
2180c0ff7453SMiao Xie struct page *page;
21811da177e4SLinus Torvalds 
21828d90274bSOleg Nesterov if (!in_interrupt() && !(gfp & __GFP_THISNODE))
21838d90274bSOleg Nesterov pol = get_task_policy(current);
218452cd3b07SLee Schermerhorn 
218552cd3b07SLee Schermerhorn /*
218652cd3b07SLee Schermerhorn * No reference counting needed for current->mempolicy
218752cd3b07SLee Schermerhorn * nor system default_policy
218852cd3b07SLee Schermerhorn */
218945c4745aSLee Schermerhorn if (pol->mode == MPOL_INTERLEAVE)
2190c0ff7453SMiao Xie page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2191c0ff7453SMiao Xie else
2192c0ff7453SMiao Xie page = __alloc_pages_nodemask(gfp, order,
219304ec6264SVlastimil Babka policy_node(gfp, pol, numa_node_id()),
21945c4b4be3SAndi Kleen policy_nodemask(gfp, pol));
2195cc9a6c87SMel Gorman 
2196c0ff7453SMiao Xie return page;
21971da177e4SLinus Torvalds }
21981da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_pages_current);
21991da177e4SLinus Torvalds 
2200ef0855d3SOleg Nesterov int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2201ef0855d3SOleg Nesterov {
2202ef0855d3SOleg Nesterov struct mempolicy *pol = mpol_dup(vma_policy(src));
2203ef0855d3SOleg Nesterov 
2204ef0855d3SOleg Nesterov if (IS_ERR(pol))
2205ef0855d3SOleg Nesterov return PTR_ERR(pol);
2206ef0855d3SOleg Nesterov dst->vm_policy = pol;
2207ef0855d3SOleg Nesterov return 0;
2208ef0855d3SOleg Nesterov }
2209ef0855d3SOleg Nesterov 
22104225399aSPaul Jackson /*
2211846a16bfSLee Schermerhorn * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
22124225399aSPaul Jackson * rebinds the
mempolicy it's copying by calling mpol_rebind_policy() 22134225399aSPaul Jackson * with the mems_allowed returned by cpuset_mems_allowed(). This 22144225399aSPaul Jackson * keeps mempolicies cpuset-relative after its cpuset moves. See 22154225399aSPaul Jackson * further kernel/cpuset.c update_nodemask(). 2216708c1bbcSMiao Xie * 2217708c1bbcSMiao Xie * current's mempolicy may be rebound by another task (the task that changes 2218708c1bbcSMiao Xie * the cpuset's mems), so we needn't do the rebind work for the current task. 22194225399aSPaul Jackson */ 22204225399aSPaul Jackson 2221846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */ 2222846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old) 22231da177e4SLinus Torvalds { 22241da177e4SLinus Torvalds struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL); 22251da177e4SLinus Torvalds 22261da177e4SLinus Torvalds if (!new) 22271da177e4SLinus Torvalds return ERR_PTR(-ENOMEM); 2228708c1bbcSMiao Xie 2229708c1bbcSMiao Xie /* task's mempolicy is protected by alloc_lock */ 2230708c1bbcSMiao Xie if (old == current->mempolicy) { 2231708c1bbcSMiao Xie task_lock(current); 2232708c1bbcSMiao Xie *new = *old; 2233708c1bbcSMiao Xie task_unlock(current); 2234708c1bbcSMiao Xie } else 2235708c1bbcSMiao Xie *new = *old; 2236708c1bbcSMiao Xie 22374225399aSPaul Jackson if (current_cpuset_is_being_rebound()) { 22384225399aSPaul Jackson nodemask_t mems = cpuset_mems_allowed(current); 2239213980c0SVlastimil Babka mpol_rebind_policy(new, &mems); 22404225399aSPaul Jackson } 22411da177e4SLinus Torvalds atomic_set(&new->refcnt, 1); 22421da177e4SLinus Torvalds return new; 22431da177e4SLinus Torvalds } 22441da177e4SLinus Torvalds 22451da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */ 2246fcfb4dccSKOSAKI Motohiro bool __mpol_equal(struct mempolicy *a, struct mempolicy *b) 22471da177e4SLinus Torvalds { 22481da177e4SLinus Torvalds if (!a || !b) 2249fcfb4dccSKOSAKI Motohiro return false; 225045c4745aSLee Schermerhorn if (a->mode != b->mode) 2251fcfb4dccSKOSAKI Motohiro return false; 225219800502SBob Liu if (a->flags != b->flags) 2253fcfb4dccSKOSAKI Motohiro return false; 225419800502SBob Liu if (mpol_store_user_nodemask(a)) 225519800502SBob Liu if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask)) 2256fcfb4dccSKOSAKI Motohiro return false; 225719800502SBob Liu 225845c4745aSLee Schermerhorn switch (a->mode) { 225919770b32SMel Gorman case MPOL_BIND: 226019770b32SMel Gorman /* Fall through */ 22611da177e4SLinus Torvalds case MPOL_INTERLEAVE: 2262fcfb4dccSKOSAKI Motohiro return !!nodes_equal(a->v.nodes, b->v.nodes); 22631da177e4SLinus Torvalds case MPOL_PREFERRED: 22648970a63eSYisheng Xie /* a's ->flags is the same as b's */ 22658970a63eSYisheng Xie if (a->flags & MPOL_F_LOCAL) 22668970a63eSYisheng Xie return true; 226775719661SNamhyung Kim return a->v.preferred_node == b->v.preferred_node; 22681da177e4SLinus Torvalds default: 22691da177e4SLinus Torvalds BUG(); 2270fcfb4dccSKOSAKI Motohiro return false; 22711da177e4SLinus Torvalds } 22721da177e4SLinus Torvalds } 22731da177e4SLinus Torvalds 22741da177e4SLinus Torvalds /* 22751da177e4SLinus Torvalds * Shared memory backing store policy support. 22761da177e4SLinus Torvalds * 22771da177e4SLinus Torvalds * Remember policies even when nobody has shared memory mapped. 22781da177e4SLinus Torvalds * The policies are kept in a Red-Black tree linked from the inode.
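 * For example (illustrative): two mbind() calls on disjoint ranges of a
 * shared tmpfs mapping leave behind two sp_node entries, each pairing a
 * page-offset range with its own mempolicy.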
22794a8c7bb5SNathan Zimmer * They are protected by the sp->lock rwlock, which should be held 22801da177e4SLinus Torvalds * for any accesses to the tree. 22811da177e4SLinus Torvalds */ 22821da177e4SLinus Torvalds 22834a8c7bb5SNathan Zimmer /* 22844a8c7bb5SNathan Zimmer * lookup first element intersecting start-end. Caller holds sp->lock for 22854a8c7bb5SNathan Zimmer * reading or for writing 22864a8c7bb5SNathan Zimmer */ 22871da177e4SLinus Torvalds static struct sp_node * 22881da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end) 22891da177e4SLinus Torvalds { 22901da177e4SLinus Torvalds struct rb_node *n = sp->root.rb_node; 22911da177e4SLinus Torvalds 22921da177e4SLinus Torvalds while (n) { 22931da177e4SLinus Torvalds struct sp_node *p = rb_entry(n, struct sp_node, nd); 22941da177e4SLinus Torvalds 22951da177e4SLinus Torvalds if (start >= p->end) 22961da177e4SLinus Torvalds n = n->rb_right; 22971da177e4SLinus Torvalds else if (end <= p->start) 22981da177e4SLinus Torvalds n = n->rb_left; 22991da177e4SLinus Torvalds else 23001da177e4SLinus Torvalds break; 23011da177e4SLinus Torvalds } 23021da177e4SLinus Torvalds if (!n) 23031da177e4SLinus Torvalds return NULL; 23041da177e4SLinus Torvalds for (;;) { 23051da177e4SLinus Torvalds struct sp_node *w = NULL; 23061da177e4SLinus Torvalds struct rb_node *prev = rb_prev(n); 23071da177e4SLinus Torvalds if (!prev) 23081da177e4SLinus Torvalds break; 23091da177e4SLinus Torvalds w = rb_entry(prev, struct sp_node, nd); 23101da177e4SLinus Torvalds if (w->end <= start) 23111da177e4SLinus Torvalds break; 23121da177e4SLinus Torvalds n = prev; 23131da177e4SLinus Torvalds } 23141da177e4SLinus Torvalds return rb_entry(n, struct sp_node, nd); 23151da177e4SLinus Torvalds } 23161da177e4SLinus Torvalds 23174a8c7bb5SNathan Zimmer /* 23184a8c7bb5SNathan Zimmer * Insert a new shared policy into the list. Caller holds sp->lock for 23194a8c7bb5SNathan Zimmer * writing. 23204a8c7bb5SNathan Zimmer */ 23211da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new) 23221da177e4SLinus Torvalds { 23231da177e4SLinus Torvalds struct rb_node **p = &sp->root.rb_node; 23241da177e4SLinus Torvalds struct rb_node *parent = NULL; 23251da177e4SLinus Torvalds struct sp_node *nd; 23261da177e4SLinus Torvalds 23271da177e4SLinus Torvalds while (*p) { 23281da177e4SLinus Torvalds parent = *p; 23291da177e4SLinus Torvalds nd = rb_entry(parent, struct sp_node, nd); 23301da177e4SLinus Torvalds if (new->start < nd->start) 23311da177e4SLinus Torvalds p = &(*p)->rb_left; 23321da177e4SLinus Torvalds else if (new->end > nd->end) 23331da177e4SLinus Torvalds p = &(*p)->rb_right; 23341da177e4SLinus Torvalds else 23351da177e4SLinus Torvalds BUG(); 23361da177e4SLinus Torvalds } 23371da177e4SLinus Torvalds rb_link_node(&new->nd, parent, p); 23381da177e4SLinus Torvalds rb_insert_color(&new->nd, &sp->root); 2339140d5a49SPaul Mundt pr_debug("inserting %lx-%lx: %d\n", new->start, new->end, 234045c4745aSLee Schermerhorn new->policy ? 
new->policy->mode : 0); 23411da177e4SLinus Torvalds } 23421da177e4SLinus Torvalds 23431da177e4SLinus Torvalds /* Find shared policy intersecting idx */ 23441da177e4SLinus Torvalds struct mempolicy * 23451da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx) 23461da177e4SLinus Torvalds { 23471da177e4SLinus Torvalds struct mempolicy *pol = NULL; 23481da177e4SLinus Torvalds struct sp_node *sn; 23491da177e4SLinus Torvalds 23501da177e4SLinus Torvalds if (!sp->root.rb_node) 23511da177e4SLinus Torvalds return NULL; 23524a8c7bb5SNathan Zimmer read_lock(&sp->lock); 23531da177e4SLinus Torvalds sn = sp_lookup(sp, idx, idx+1); 23541da177e4SLinus Torvalds if (sn) { 23551da177e4SLinus Torvalds mpol_get(sn->policy); 23561da177e4SLinus Torvalds pol = sn->policy; 23571da177e4SLinus Torvalds } 23584a8c7bb5SNathan Zimmer read_unlock(&sp->lock); 23591da177e4SLinus Torvalds return pol; 23601da177e4SLinus Torvalds } 23611da177e4SLinus Torvalds 236263f74ca2SKOSAKI Motohiro static void sp_free(struct sp_node *n) 236363f74ca2SKOSAKI Motohiro { 236463f74ca2SKOSAKI Motohiro mpol_put(n->policy); 236563f74ca2SKOSAKI Motohiro kmem_cache_free(sn_cache, n); 236663f74ca2SKOSAKI Motohiro } 236763f74ca2SKOSAKI Motohiro 2368771fb4d8SLee Schermerhorn /** 2369771fb4d8SLee Schermerhorn * mpol_misplaced - check whether current page node is valid in policy 2370771fb4d8SLee Schermerhorn * 2371b46e14acSFabian Frederick * @page: page to be checked 2372b46e14acSFabian Frederick * @vma: vm area where page mapped 2373b46e14acSFabian Frederick * @addr: virtual address where page mapped 2374771fb4d8SLee Schermerhorn * 2375771fb4d8SLee Schermerhorn * Lookup current policy node id for vma,addr and "compare to" page's 2376771fb4d8SLee Schermerhorn * node id. 2377771fb4d8SLee Schermerhorn * 2378771fb4d8SLee Schermerhorn * Returns: 2379771fb4d8SLee Schermerhorn * -1 - not misplaced, page is in the right node 2380771fb4d8SLee Schermerhorn * node - node id where the page should be 2381771fb4d8SLee Schermerhorn * 2382771fb4d8SLee Schermerhorn * Policy determination "mimics" alloc_page_vma(). 2383771fb4d8SLee Schermerhorn * Called from fault path where we know the vma and faulting address. 
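 *
 * For example (illustrative): under an MPOL_BIND policy over nodes 0-1
 * with MPOL_F_MOF set, a page resident on node 3 returns the nearest
 * allowed node id, while a page already on node 0 returns -1.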
2384771fb4d8SLee Schermerhorn */ 2385771fb4d8SLee Schermerhorn int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr) 2386771fb4d8SLee Schermerhorn { 2387771fb4d8SLee Schermerhorn struct mempolicy *pol; 2388c33d6c06SMel Gorman struct zoneref *z; 2389771fb4d8SLee Schermerhorn int curnid = page_to_nid(page); 2390771fb4d8SLee Schermerhorn unsigned long pgoff; 239190572890SPeter Zijlstra int thiscpu = raw_smp_processor_id(); 239290572890SPeter Zijlstra int thisnid = cpu_to_node(thiscpu); 239398fa15f3SAnshuman Khandual int polnid = NUMA_NO_NODE; 2394771fb4d8SLee Schermerhorn int ret = -1; 2395771fb4d8SLee Schermerhorn 2396dd6eecb9SOleg Nesterov pol = get_vma_policy(vma, addr); 2397771fb4d8SLee Schermerhorn if (!(pol->flags & MPOL_F_MOF)) 2398771fb4d8SLee Schermerhorn goto out; 2399771fb4d8SLee Schermerhorn 2400771fb4d8SLee Schermerhorn switch (pol->mode) { 2401771fb4d8SLee Schermerhorn case MPOL_INTERLEAVE: 2402771fb4d8SLee Schermerhorn pgoff = vma->vm_pgoff; 2403771fb4d8SLee Schermerhorn pgoff += (addr - vma->vm_start) >> PAGE_SHIFT; 240498c70baaSLaurent Dufour polnid = offset_il_node(pol, pgoff); 2405771fb4d8SLee Schermerhorn break; 2406771fb4d8SLee Schermerhorn 2407771fb4d8SLee Schermerhorn case MPOL_PREFERRED: 2408771fb4d8SLee Schermerhorn if (pol->flags & MPOL_F_LOCAL) 2409771fb4d8SLee Schermerhorn polnid = numa_node_id(); 2410771fb4d8SLee Schermerhorn else 2411771fb4d8SLee Schermerhorn polnid = pol->v.preferred_node; 2412771fb4d8SLee Schermerhorn break; 2413771fb4d8SLee Schermerhorn 2414771fb4d8SLee Schermerhorn case MPOL_BIND: 2415c33d6c06SMel Gorman 2416771fb4d8SLee Schermerhorn /* 2417771fb4d8SLee Schermerhorn * allows binding to multiple nodes. 2418771fb4d8SLee Schermerhorn * use current page if in policy nodemask, 2419771fb4d8SLee Schermerhorn * else select nearest allowed node, if any. 2420771fb4d8SLee Schermerhorn * If no allowed nodes, use current [!misplaced]. 2421771fb4d8SLee Schermerhorn */ 2422771fb4d8SLee Schermerhorn if (node_isset(curnid, pol->v.nodes)) 2423771fb4d8SLee Schermerhorn goto out; 2424c33d6c06SMel Gorman z = first_zones_zonelist( 2425771fb4d8SLee Schermerhorn node_zonelist(numa_node_id(), GFP_HIGHUSER), 2426771fb4d8SLee Schermerhorn gfp_zone(GFP_HIGHUSER), 2427c33d6c06SMel Gorman &pol->v.nodes); 2428c1093b74SPavel Tatashin polnid = zone_to_nid(z->zone); 2429771fb4d8SLee Schermerhorn break; 2430771fb4d8SLee Schermerhorn 2431771fb4d8SLee Schermerhorn default: 2432771fb4d8SLee Schermerhorn BUG(); 2433771fb4d8SLee Schermerhorn } 24345606e387SMel Gorman 24355606e387SMel Gorman /* Migrate the page towards the node whose CPU is referencing it */ 2436e42c8ff2SMel Gorman if (pol->flags & MPOL_F_MORON) { 243790572890SPeter Zijlstra polnid = thisnid; 24385606e387SMel Gorman 243910f39042SRik van Riel if (!should_numa_migrate_memory(current, page, curnid, thiscpu)) 2440de1c9ce6SRik van Riel goto out; 2441de1c9ce6SRik van Riel } 2442e42c8ff2SMel Gorman 2443771fb4d8SLee Schermerhorn if (curnid != polnid) 2444771fb4d8SLee Schermerhorn ret = polnid; 2445771fb4d8SLee Schermerhorn out: 2446771fb4d8SLee Schermerhorn mpol_cond_put(pol); 2447771fb4d8SLee Schermerhorn 2448771fb4d8SLee Schermerhorn return ret; 2449771fb4d8SLee Schermerhorn } 2450771fb4d8SLee Schermerhorn 2451c11600e4SDavid Rientjes /* 2452c11600e4SDavid Rientjes * Drop the (possibly final) reference to task->mempolicy. 
It needs to be 2453c11600e4SDavid Rientjes * dropped after task->mempolicy is set to NULL so that any allocation done as 2454c11600e4SDavid Rientjes * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed 2455c11600e4SDavid Rientjes * policy. 2456c11600e4SDavid Rientjes */ 2457c11600e4SDavid Rientjes void mpol_put_task_policy(struct task_struct *task) 2458c11600e4SDavid Rientjes { 2459c11600e4SDavid Rientjes struct mempolicy *pol; 2460c11600e4SDavid Rientjes 2461c11600e4SDavid Rientjes task_lock(task); 2462c11600e4SDavid Rientjes pol = task->mempolicy; 2463c11600e4SDavid Rientjes task->mempolicy = NULL; 2464c11600e4SDavid Rientjes task_unlock(task); 2465c11600e4SDavid Rientjes mpol_put(pol); 2466c11600e4SDavid Rientjes } 2467c11600e4SDavid Rientjes 24681da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n) 24691da177e4SLinus Torvalds { 2470140d5a49SPaul Mundt pr_debug("deleting %lx-%lx\n", n->start, n->end); 24711da177e4SLinus Torvalds rb_erase(&n->nd, &sp->root); 247263f74ca2SKOSAKI Motohiro sp_free(n); 24731da177e4SLinus Torvalds } 24741da177e4SLinus Torvalds 247542288fe3SMel Gorman static void sp_node_init(struct sp_node *node, unsigned long start, 247642288fe3SMel Gorman unsigned long end, struct mempolicy *pol) 247742288fe3SMel Gorman { 247842288fe3SMel Gorman node->start = start; 247942288fe3SMel Gorman node->end = end; 248042288fe3SMel Gorman node->policy = pol; 248142288fe3SMel Gorman } 248242288fe3SMel Gorman 2483dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end, 2484dbcb0f19SAdrian Bunk struct mempolicy *pol) 24851da177e4SLinus Torvalds { 2486869833f2SKOSAKI Motohiro struct sp_node *n; 2487869833f2SKOSAKI Motohiro struct mempolicy *newpol; 24881da177e4SLinus Torvalds 2489869833f2SKOSAKI Motohiro n = kmem_cache_alloc(sn_cache, GFP_KERNEL); 24901da177e4SLinus Torvalds if (!n) 24911da177e4SLinus Torvalds return NULL; 2492869833f2SKOSAKI Motohiro 2493869833f2SKOSAKI Motohiro newpol = mpol_dup(pol); 2494869833f2SKOSAKI Motohiro if (IS_ERR(newpol)) { 2495869833f2SKOSAKI Motohiro kmem_cache_free(sn_cache, n); 2496869833f2SKOSAKI Motohiro return NULL; 2497869833f2SKOSAKI Motohiro } 2498869833f2SKOSAKI Motohiro newpol->flags |= MPOL_F_SHARED; 249942288fe3SMel Gorman sp_node_init(n, start, end, newpol); 2500869833f2SKOSAKI Motohiro 25011da177e4SLinus Torvalds return n; 25021da177e4SLinus Torvalds } 25031da177e4SLinus Torvalds 25041da177e4SLinus Torvalds /* Replace a policy range. */ 25051da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start, 25061da177e4SLinus Torvalds unsigned long end, struct sp_node *new) 25071da177e4SLinus Torvalds { 2508b22d127aSMel Gorman struct sp_node *n; 250942288fe3SMel Gorman struct sp_node *n_new = NULL; 251042288fe3SMel Gorman struct mempolicy *mpol_new = NULL; 2511b22d127aSMel Gorman int ret = 0; 25121da177e4SLinus Torvalds 251342288fe3SMel Gorman restart: 25144a8c7bb5SNathan Zimmer write_lock(&sp->lock); 25151da177e4SLinus Torvalds n = sp_lookup(sp, start, end); 25161da177e4SLinus Torvalds /* Take care of old policies in the same range.
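 * Illustrative walk: replacing [2,5) in a tree holding a single node
 * [0,8) trims that node to [0,2) and inserts a clone covering [5,8)
 * (the preallocated n_new/mpol_new pair below), so no old range is
 * silently widened.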
*/ 25171da177e4SLinus Torvalds while (n && n->start < end) { 25181da177e4SLinus Torvalds struct rb_node *next = rb_next(&n->nd); 25191da177e4SLinus Torvalds if (n->start >= start) { 25201da177e4SLinus Torvalds if (n->end <= end) 25211da177e4SLinus Torvalds sp_delete(sp, n); 25221da177e4SLinus Torvalds else 25231da177e4SLinus Torvalds n->start = end; 25241da177e4SLinus Torvalds } else { 25251da177e4SLinus Torvalds /* Old policy spanning whole new range. */ 25261da177e4SLinus Torvalds if (n->end > end) { 252742288fe3SMel Gorman if (!n_new) 252842288fe3SMel Gorman goto alloc_new; 252942288fe3SMel Gorman 253042288fe3SMel Gorman *mpol_new = *n->policy; 253142288fe3SMel Gorman atomic_set(&mpol_new->refcnt, 1); 25327880639cSKOSAKI Motohiro sp_node_init(n_new, end, n->end, mpol_new); 25331da177e4SLinus Torvalds n->end = start; 25345ca39575SHillf Danton sp_insert(sp, n_new); 253542288fe3SMel Gorman n_new = NULL; 253642288fe3SMel Gorman mpol_new = NULL; 25371da177e4SLinus Torvalds break; 25381da177e4SLinus Torvalds } else 25391da177e4SLinus Torvalds n->end = start; 25401da177e4SLinus Torvalds } 25411da177e4SLinus Torvalds if (!next) 25421da177e4SLinus Torvalds break; 25431da177e4SLinus Torvalds n = rb_entry(next, struct sp_node, nd); 25441da177e4SLinus Torvalds } 25451da177e4SLinus Torvalds if (new) 25461da177e4SLinus Torvalds sp_insert(sp, new); 25474a8c7bb5SNathan Zimmer write_unlock(&sp->lock); 254842288fe3SMel Gorman ret = 0; 254942288fe3SMel Gorman 255042288fe3SMel Gorman err_out: 255142288fe3SMel Gorman if (mpol_new) 255242288fe3SMel Gorman mpol_put(mpol_new); 255342288fe3SMel Gorman if (n_new) 255442288fe3SMel Gorman kmem_cache_free(sn_cache, n_new); 255542288fe3SMel Gorman 2556b22d127aSMel Gorman return ret; 255742288fe3SMel Gorman 255842288fe3SMel Gorman alloc_new: 25594a8c7bb5SNathan Zimmer write_unlock(&sp->lock); 256042288fe3SMel Gorman ret = -ENOMEM; 256142288fe3SMel Gorman n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL); 256242288fe3SMel Gorman if (!n_new) 256342288fe3SMel Gorman goto err_out; 256442288fe3SMel Gorman mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL); 256542288fe3SMel Gorman if (!mpol_new) 256642288fe3SMel Gorman goto err_out; 256742288fe3SMel Gorman goto restart; 25681da177e4SLinus Torvalds } 25691da177e4SLinus Torvalds 257071fe804bSLee Schermerhorn /** 257171fe804bSLee Schermerhorn * mpol_shared_policy_init - initialize shared policy for inode 257271fe804bSLee Schermerhorn * @sp: pointer to inode shared policy 257371fe804bSLee Schermerhorn * @mpol: struct mempolicy to install 257471fe804bSLee Schermerhorn * 257571fe804bSLee Schermerhorn * Install non-NULL @mpol in inode's shared policy rb-tree. 257671fe804bSLee Schermerhorn * On entry, the current task has a reference on a non-NULL @mpol. 257771fe804bSLee Schermerhorn * This must be released on exit. 25784bfc4495SKAMEZAWA Hiroyuki * This is called from get_inode() and we can use GFP_KERNEL.
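 *
 * Illustrative caller (assumed): a tmpfs mount with mpol=interleave:0-2
 * ends up here for each new inode, handing over the parsed superblock
 * mempolicy.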
257971fe804bSLee Schermerhorn */ 258071fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) 25817339ff83SRobin Holt { 258258568d2aSMiao Xie int ret; 258358568d2aSMiao Xie 258471fe804bSLee Schermerhorn sp->root = RB_ROOT; /* empty tree == default mempolicy */ 25854a8c7bb5SNathan Zimmer rwlock_init(&sp->lock); 25867339ff83SRobin Holt 258771fe804bSLee Schermerhorn if (mpol) { 25887339ff83SRobin Holt struct vm_area_struct pvma; 258971fe804bSLee Schermerhorn struct mempolicy *new; 25904bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 25917339ff83SRobin Holt 25924bfc4495SKAMEZAWA Hiroyuki if (!scratch) 25935c0c1654SLee Schermerhorn goto put_mpol; 259471fe804bSLee Schermerhorn /* contextualize the tmpfs mount point mempolicy */ 259571fe804bSLee Schermerhorn new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask); 259615d77835SLee Schermerhorn if (IS_ERR(new)) 25970cae3457SDan Carpenter goto free_scratch; /* no valid nodemask intersection */ 259858568d2aSMiao Xie 259958568d2aSMiao Xie task_lock(current); 26004bfc4495SKAMEZAWA Hiroyuki ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch); 260158568d2aSMiao Xie task_unlock(current); 260215d77835SLee Schermerhorn if (ret) 26035c0c1654SLee Schermerhorn goto put_new; 260471fe804bSLee Schermerhorn 260571fe804bSLee Schermerhorn /* Create pseudo-vma that contains just the policy */ 26062c4541e2SKirill A. Shutemov vma_init(&pvma, NULL); 260771fe804bSLee Schermerhorn pvma.vm_end = TASK_SIZE; /* policy covers entire file */ 260871fe804bSLee Schermerhorn mpol_set_shared_policy(sp, &pvma, new); /* adds ref */ 260915d77835SLee Schermerhorn 26105c0c1654SLee Schermerhorn put_new: 261171fe804bSLee Schermerhorn mpol_put(new); /* drop initial ref */ 26120cae3457SDan Carpenter free_scratch: 26134bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 26145c0c1654SLee Schermerhorn put_mpol: 26155c0c1654SLee Schermerhorn mpol_put(mpol); /* drop our incoming ref on sb mpol */ 26167339ff83SRobin Holt } 26177339ff83SRobin Holt } 26187339ff83SRobin Holt 26191da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info, 26201da177e4SLinus Torvalds struct vm_area_struct *vma, struct mempolicy *npol) 26211da177e4SLinus Torvalds { 26221da177e4SLinus Torvalds int err; 26231da177e4SLinus Torvalds struct sp_node *new = NULL; 26241da177e4SLinus Torvalds unsigned long sz = vma_pages(vma); 26251da177e4SLinus Torvalds 2626028fec41SDavid Rientjes pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n", 26271da177e4SLinus Torvalds vma->vm_pgoff, 262845c4745aSLee Schermerhorn sz, npol ? npol->mode : -1, 2629028fec41SDavid Rientjes npol ? npol->flags : -1, 263000ef2d2fSDavid Rientjes npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE); 26311da177e4SLinus Torvalds 26321da177e4SLinus Torvalds if (npol) { 26331da177e4SLinus Torvalds new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol); 26341da177e4SLinus Torvalds if (!new) 26351da177e4SLinus Torvalds return -ENOMEM; 26361da177e4SLinus Torvalds } 26371da177e4SLinus Torvalds err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new); 26381da177e4SLinus Torvalds if (err && new) 263963f74ca2SKOSAKI Motohiro sp_free(new); 26401da177e4SLinus Torvalds return err; 26411da177e4SLinus Torvalds } 26421da177e4SLinus Torvalds 26431da177e4SLinus Torvalds /* Free a backing policy store on inode delete. 
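 * (illustrative: shmem's inode eviction path is the expected caller)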
*/ 26441da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p) 26451da177e4SLinus Torvalds { 26461da177e4SLinus Torvalds struct sp_node *n; 26471da177e4SLinus Torvalds struct rb_node *next; 26481da177e4SLinus Torvalds 26491da177e4SLinus Torvalds if (!p->root.rb_node) 26501da177e4SLinus Torvalds return; 26514a8c7bb5SNathan Zimmer write_lock(&p->lock); 26521da177e4SLinus Torvalds next = rb_first(&p->root); 26531da177e4SLinus Torvalds while (next) { 26541da177e4SLinus Torvalds n = rb_entry(next, struct sp_node, nd); 26551da177e4SLinus Torvalds next = rb_next(&n->nd); 265663f74ca2SKOSAKI Motohiro sp_delete(p, n); 26571da177e4SLinus Torvalds } 26584a8c7bb5SNathan Zimmer write_unlock(&p->lock); 26591da177e4SLinus Torvalds } 26601da177e4SLinus Torvalds 26611a687c2eSMel Gorman #ifdef CONFIG_NUMA_BALANCING 2662c297663cSMel Gorman static int __initdata numabalancing_override; 26631a687c2eSMel Gorman 26641a687c2eSMel Gorman static void __init check_numabalancing_enable(void) 26651a687c2eSMel Gorman { 26661a687c2eSMel Gorman bool numabalancing_default = false; 26671a687c2eSMel Gorman 26681a687c2eSMel Gorman if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED)) 26691a687c2eSMel Gorman numabalancing_default = true; 26701a687c2eSMel Gorman 2671c297663cSMel Gorman /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */ 2672c297663cSMel Gorman if (numabalancing_override) 2673c297663cSMel Gorman set_numabalancing_state(numabalancing_override == 1); 2674c297663cSMel Gorman 2675b0dc2b9bSMel Gorman if (num_online_nodes() > 1 && !numabalancing_override) { 2676756a025fSJoe Perches pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n", 2677c297663cSMel Gorman numabalancing_default ? 
"Enabling" : "Disabling"); 26781a687c2eSMel Gorman set_numabalancing_state(numabalancing_default); 26791a687c2eSMel Gorman } 26801a687c2eSMel Gorman } 26811a687c2eSMel Gorman 26821a687c2eSMel Gorman static int __init setup_numabalancing(char *str) 26831a687c2eSMel Gorman { 26841a687c2eSMel Gorman int ret = 0; 26851a687c2eSMel Gorman if (!str) 26861a687c2eSMel Gorman goto out; 26871a687c2eSMel Gorman 26881a687c2eSMel Gorman if (!strcmp(str, "enable")) { 2689c297663cSMel Gorman numabalancing_override = 1; 26901a687c2eSMel Gorman ret = 1; 26911a687c2eSMel Gorman } else if (!strcmp(str, "disable")) { 2692c297663cSMel Gorman numabalancing_override = -1; 26931a687c2eSMel Gorman ret = 1; 26941a687c2eSMel Gorman } 26951a687c2eSMel Gorman out: 26961a687c2eSMel Gorman if (!ret) 26974a404beaSAndrew Morton pr_warn("Unable to parse numa_balancing=\n"); 26981a687c2eSMel Gorman 26991a687c2eSMel Gorman return ret; 27001a687c2eSMel Gorman } 27011a687c2eSMel Gorman __setup("numa_balancing=", setup_numabalancing); 27021a687c2eSMel Gorman #else 27031a687c2eSMel Gorman static inline void __init check_numabalancing_enable(void) 27041a687c2eSMel Gorman { 27051a687c2eSMel Gorman } 27061a687c2eSMel Gorman #endif /* CONFIG_NUMA_BALANCING */ 27071a687c2eSMel Gorman 27081da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */ 27091da177e4SLinus Torvalds void __init numa_policy_init(void) 27101da177e4SLinus Torvalds { 2711b71636e2SPaul Mundt nodemask_t interleave_nodes; 2712b71636e2SPaul Mundt unsigned long largest = 0; 2713b71636e2SPaul Mundt int nid, prefer = 0; 2714b71636e2SPaul Mundt 27151da177e4SLinus Torvalds policy_cache = kmem_cache_create("numa_policy", 27161da177e4SLinus Torvalds sizeof(struct mempolicy), 271720c2df83SPaul Mundt 0, SLAB_PANIC, NULL); 27181da177e4SLinus Torvalds 27191da177e4SLinus Torvalds sn_cache = kmem_cache_create("shared_policy_node", 27201da177e4SLinus Torvalds sizeof(struct sp_node), 272120c2df83SPaul Mundt 0, SLAB_PANIC, NULL); 27221da177e4SLinus Torvalds 27235606e387SMel Gorman for_each_node(nid) { 27245606e387SMel Gorman preferred_node_policy[nid] = (struct mempolicy) { 27255606e387SMel Gorman .refcnt = ATOMIC_INIT(1), 27265606e387SMel Gorman .mode = MPOL_PREFERRED, 27275606e387SMel Gorman .flags = MPOL_F_MOF | MPOL_F_MORON, 27285606e387SMel Gorman .v = { .preferred_node = nid, }, 27295606e387SMel Gorman }; 27305606e387SMel Gorman } 27315606e387SMel Gorman 2732b71636e2SPaul Mundt /* 2733b71636e2SPaul Mundt * Set interleaving policy for system init. Interleaving is only 2734b71636e2SPaul Mundt * enabled across suitably sized nodes (default is >= 16MB), or 2735b71636e2SPaul Mundt * fall back to the largest node if they're all smaller. 2736b71636e2SPaul Mundt */ 2737b71636e2SPaul Mundt nodes_clear(interleave_nodes); 273801f13bd6SLai Jiangshan for_each_node_state(nid, N_MEMORY) { 2739b71636e2SPaul Mundt unsigned long total_pages = node_present_pages(nid); 27401da177e4SLinus Torvalds 2741b71636e2SPaul Mundt /* Preserve the largest node */ 2742b71636e2SPaul Mundt if (largest < total_pages) { 2743b71636e2SPaul Mundt largest = total_pages; 2744b71636e2SPaul Mundt prefer = nid; 2745b71636e2SPaul Mundt } 2746b71636e2SPaul Mundt 2747b71636e2SPaul Mundt /* Interleave this node? 
*/ 2748b71636e2SPaul Mundt if ((total_pages << PAGE_SHIFT) >= (16 << 20)) 2749b71636e2SPaul Mundt node_set(nid, interleave_nodes); 2750b71636e2SPaul Mundt } 2751b71636e2SPaul Mundt 2752b71636e2SPaul Mundt /* All too small, use the largest */ 2753b71636e2SPaul Mundt if (unlikely(nodes_empty(interleave_nodes))) 2754b71636e2SPaul Mundt node_set(prefer, interleave_nodes); 2755b71636e2SPaul Mundt 2756028fec41SDavid Rientjes if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes)) 2757b1de0d13SMitchel Humpherys pr_err("%s: interleaving failed\n", __func__); 27581a687c2eSMel Gorman 27591a687c2eSMel Gorman check_numabalancing_enable(); 27601da177e4SLinus Torvalds } 27611da177e4SLinus Torvalds 27628bccd85fSChristoph Lameter /* Reset policy of current process to default */ 27631da177e4SLinus Torvalds void numa_default_policy(void) 27641da177e4SLinus Torvalds { 2765028fec41SDavid Rientjes do_set_mempolicy(MPOL_DEFAULT, 0, NULL); 27661da177e4SLinus Torvalds } 276768860ec1SPaul Jackson 27684225399aSPaul Jackson /* 2769095f1fc4SLee Schermerhorn * Parse and format mempolicy from/to strings 2770095f1fc4SLee Schermerhorn */ 2771095f1fc4SLee Schermerhorn 2772095f1fc4SLee Schermerhorn /* 2773f2a07f40SHugh Dickins * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag. 27741a75a6c8SChristoph Lameter */ 2775345ace9cSLee Schermerhorn static const char * const policy_modes[] = 2776345ace9cSLee Schermerhorn { 2777345ace9cSLee Schermerhorn [MPOL_DEFAULT] = "default", 2778345ace9cSLee Schermerhorn [MPOL_PREFERRED] = "prefer", 2779345ace9cSLee Schermerhorn [MPOL_BIND] = "bind", 2780345ace9cSLee Schermerhorn [MPOL_INTERLEAVE] = "interleave", 2781d3a71033SLee Schermerhorn [MPOL_LOCAL] = "local", 2782345ace9cSLee Schermerhorn }; 27831a75a6c8SChristoph Lameter 2784095f1fc4SLee Schermerhorn 2785095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS 2786095f1fc4SLee Schermerhorn /** 2787f2a07f40SHugh Dickins * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option. 2788095f1fc4SLee Schermerhorn * @str: string containing mempolicy to parse 278971fe804bSLee Schermerhorn * @mpol: pointer to struct mempolicy pointer, returned on success. 
2790095f1fc4SLee Schermerhorn * 2791095f1fc4SLee Schermerhorn * Format of input: 2792095f1fc4SLee Schermerhorn * <mode>[=<flags>][:<nodelist>] 2793095f1fc4SLee Schermerhorn * 279471fe804bSLee Schermerhorn * On success, returns 0, else 1 2795095f1fc4SLee Schermerhorn */ 2796a7a88b23SHugh Dickins int mpol_parse_str(char *str, struct mempolicy **mpol) 2797095f1fc4SLee Schermerhorn { 279871fe804bSLee Schermerhorn struct mempolicy *new = NULL; 2799f2a07f40SHugh Dickins unsigned short mode_flags; 280071fe804bSLee Schermerhorn nodemask_t nodes; 2801095f1fc4SLee Schermerhorn char *nodelist = strchr(str, ':'); 2802095f1fc4SLee Schermerhorn char *flags = strchr(str, '='); 2803dedf2c73Szhong jiang int err = 1, mode; 2804095f1fc4SLee Schermerhorn 2805095f1fc4SLee Schermerhorn if (nodelist) { 2806095f1fc4SLee Schermerhorn /* NUL-terminate mode or flags string */ 2807095f1fc4SLee Schermerhorn *nodelist++ = '\0'; 280871fe804bSLee Schermerhorn if (nodelist_parse(nodelist, nodes)) 2809095f1fc4SLee Schermerhorn goto out; 281001f13bd6SLai Jiangshan if (!nodes_subset(nodes, node_states[N_MEMORY])) 2811095f1fc4SLee Schermerhorn goto out; 281271fe804bSLee Schermerhorn } else 281371fe804bSLee Schermerhorn nodes_clear(nodes); 281471fe804bSLee Schermerhorn 2815095f1fc4SLee Schermerhorn if (flags) 2816095f1fc4SLee Schermerhorn *flags++ = '\0'; /* terminate mode string */ 2817095f1fc4SLee Schermerhorn 2818dedf2c73Szhong jiang mode = match_string(policy_modes, MPOL_MAX, str); 2819dedf2c73Szhong jiang if (mode < 0) 2820095f1fc4SLee Schermerhorn goto out; 2821095f1fc4SLee Schermerhorn 282271fe804bSLee Schermerhorn switch (mode) { 2823095f1fc4SLee Schermerhorn case MPOL_PREFERRED: 282471fe804bSLee Schermerhorn /* 282571fe804bSLee Schermerhorn * Insist on a nodelist of one node only 282671fe804bSLee Schermerhorn */ 2827095f1fc4SLee Schermerhorn if (nodelist) { 2828095f1fc4SLee Schermerhorn char *rest = nodelist; 2829095f1fc4SLee Schermerhorn while (isdigit(*rest)) 2830095f1fc4SLee Schermerhorn rest++; 2831926f2ae0SKOSAKI Motohiro if (*rest) 2832926f2ae0SKOSAKI Motohiro goto out; 2833095f1fc4SLee Schermerhorn } 2834095f1fc4SLee Schermerhorn break; 2835095f1fc4SLee Schermerhorn case MPOL_INTERLEAVE: 2836095f1fc4SLee Schermerhorn /* 2837095f1fc4SLee Schermerhorn * Default to online nodes with memory if no nodelist 2838095f1fc4SLee Schermerhorn */ 2839095f1fc4SLee Schermerhorn if (!nodelist) 284001f13bd6SLai Jiangshan nodes = node_states[N_MEMORY]; 28413f226aa1SLee Schermerhorn break; 284271fe804bSLee Schermerhorn case MPOL_LOCAL: 28433f226aa1SLee Schermerhorn /* 284471fe804bSLee Schermerhorn * Don't allow a nodelist; mpol_new() checks flags 28453f226aa1SLee Schermerhorn */ 284671fe804bSLee Schermerhorn if (nodelist) 28473f226aa1SLee Schermerhorn goto out; 284871fe804bSLee Schermerhorn mode = MPOL_PREFERRED; 28493f226aa1SLee Schermerhorn break; 2850413b43deSRavikiran G Thirumalai case MPOL_DEFAULT: 2851413b43deSRavikiran G Thirumalai /* 2852413b43deSRavikiran G Thirumalai * Insist on an empty nodelist 2853413b43deSRavikiran G Thirumalai */ 2854413b43deSRavikiran G Thirumalai if (!nodelist) 2855413b43deSRavikiran G Thirumalai err = 0; 2856413b43deSRavikiran G Thirumalai goto out; 2857d69b2e63SKOSAKI Motohiro case MPOL_BIND: 285871fe804bSLee Schermerhorn /* 2859d69b2e63SKOSAKI Motohiro * Insist on a nodelist 286071fe804bSLee Schermerhorn */ 2861d69b2e63SKOSAKI Motohiro if (!nodelist) 2862d69b2e63SKOSAKI Motohiro goto out; 2863095f1fc4SLee Schermerhorn } 2864095f1fc4SLee Schermerhorn 286571fe804bSLee Schermerhorn mode_flags = 0;
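/*
 * Illustrative inputs at this point (assumed, not exhaustive):
 * "interleave=static:0-3" arrives with mode == MPOL_INTERLEAVE,
 * flags == "static" and nodes spanning 0-3; "prefer:1" arrives
 * with flags == NULL.
 */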
2866095f1fc4SLee Schermerhorn if (flags) { 2867095f1fc4SLee Schermerhorn /* 2868095f1fc4SLee Schermerhorn * Currently, we only support two mutually exclusive 2869095f1fc4SLee Schermerhorn * mode flags. 2870095f1fc4SLee Schermerhorn */ 2871095f1fc4SLee Schermerhorn if (!strcmp(flags, "static")) 287271fe804bSLee Schermerhorn mode_flags |= MPOL_F_STATIC_NODES; 2873095f1fc4SLee Schermerhorn else if (!strcmp(flags, "relative")) 287471fe804bSLee Schermerhorn mode_flags |= MPOL_F_RELATIVE_NODES; 2875095f1fc4SLee Schermerhorn else 2876926f2ae0SKOSAKI Motohiro goto out; 2877095f1fc4SLee Schermerhorn } 287871fe804bSLee Schermerhorn 287971fe804bSLee Schermerhorn new = mpol_new(mode, mode_flags, &nodes); 288071fe804bSLee Schermerhorn if (IS_ERR(new)) 2881926f2ae0SKOSAKI Motohiro goto out; 2882926f2ae0SKOSAKI Motohiro 2883f2a07f40SHugh Dickins /* 2884f2a07f40SHugh Dickins * Save nodes for mpol_to_str() to show the tmpfs mount options 2885f2a07f40SHugh Dickins * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo. 2886f2a07f40SHugh Dickins */ 2887f2a07f40SHugh Dickins if (mode != MPOL_PREFERRED) 2888f2a07f40SHugh Dickins new->v.nodes = nodes; 2889f2a07f40SHugh Dickins else if (nodelist) 2890f2a07f40SHugh Dickins new->v.preferred_node = first_node(nodes); 2891f2a07f40SHugh Dickins else 2892f2a07f40SHugh Dickins new->flags |= MPOL_F_LOCAL; 2893f2a07f40SHugh Dickins 2894f2a07f40SHugh Dickins /* 2895f2a07f40SHugh Dickins * Save nodes for contextualization: this will be used to "clone" 2896f2a07f40SHugh Dickins * the mempolicy in a specific context [cpuset] at a later time. 2897f2a07f40SHugh Dickins */ 2898e17f74afSLee Schermerhorn new->w.user_nodemask = nodes; 2899f2a07f40SHugh Dickins 2900926f2ae0SKOSAKI Motohiro err = 0; 290171fe804bSLee Schermerhorn 2902095f1fc4SLee Schermerhorn out: 2903095f1fc4SLee Schermerhorn /* Restore string for error message */ 2904095f1fc4SLee Schermerhorn if (nodelist) 2905095f1fc4SLee Schermerhorn *--nodelist = ':'; 2906095f1fc4SLee Schermerhorn if (flags) 2907095f1fc4SLee Schermerhorn *--flags = '='; 290871fe804bSLee Schermerhorn if (!err) 290971fe804bSLee Schermerhorn *mpol = new; 2910095f1fc4SLee Schermerhorn return err; 2911095f1fc4SLee Schermerhorn } 2912095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */ 2913095f1fc4SLee Schermerhorn 291471fe804bSLee Schermerhorn /** 291571fe804bSLee Schermerhorn * mpol_to_str - format a mempolicy structure for printing 291671fe804bSLee Schermerhorn * @buffer: to contain formatted mempolicy string 291771fe804bSLee Schermerhorn * @maxlen: length of @buffer 291871fe804bSLee Schermerhorn * @pol: pointer to mempolicy to be formatted 291971fe804bSLee Schermerhorn * 2920948927eeSDavid Rientjes * Convert @pol into a string. If @buffer is too short, truncate the string. 2921948927eeSDavid Rientjes * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the 2922948927eeSDavid Rientjes * longest flag, "relative", and to display at least a few node ids. 
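 *
 * Example results (illustrative): "interleave=relative:0-3", "prefer:2",
 * "bind:1,3" or plain "local".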
29231a75a6c8SChristoph Lameter */ 2924948927eeSDavid Rientjes void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol) 29251a75a6c8SChristoph Lameter { 29261a75a6c8SChristoph Lameter char *p = buffer; 2927948927eeSDavid Rientjes nodemask_t nodes = NODE_MASK_NONE; 2928948927eeSDavid Rientjes unsigned short mode = MPOL_DEFAULT; 2929948927eeSDavid Rientjes unsigned short flags = 0; 29301a75a6c8SChristoph Lameter 29318790c71aSDavid Rientjes if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) { 2932bea904d5SLee Schermerhorn mode = pol->mode; 2933948927eeSDavid Rientjes flags = pol->flags; 2934948927eeSDavid Rientjes } 2935bea904d5SLee Schermerhorn 29361a75a6c8SChristoph Lameter switch (mode) { 29371a75a6c8SChristoph Lameter case MPOL_DEFAULT: 29381a75a6c8SChristoph Lameter break; 29391a75a6c8SChristoph Lameter case MPOL_PREFERRED: 2940fc36b8d3SLee Schermerhorn if (flags & MPOL_F_LOCAL) 2941f2a07f40SHugh Dickins mode = MPOL_LOCAL; 294253f2556bSLee Schermerhorn else 2943fc36b8d3SLee Schermerhorn node_set(pol->v.preferred_node, nodes); 29441a75a6c8SChristoph Lameter break; 29451a75a6c8SChristoph Lameter case MPOL_BIND: 29461a75a6c8SChristoph Lameter case MPOL_INTERLEAVE: 29471a75a6c8SChristoph Lameter nodes = pol->v.nodes; 29481a75a6c8SChristoph Lameter break; 29491a75a6c8SChristoph Lameter default: 2950948927eeSDavid Rientjes WARN_ON_ONCE(1); 2951948927eeSDavid Rientjes snprintf(p, maxlen, "unknown"); 2952948927eeSDavid Rientjes return; 29531a75a6c8SChristoph Lameter } 29541a75a6c8SChristoph Lameter 2955b7a9f420SDavid Rientjes p += snprintf(p, maxlen, "%s", policy_modes[mode]); 29561a75a6c8SChristoph Lameter 2957fc36b8d3SLee Schermerhorn if (flags & MPOL_MODE_FLAGS) { 2958948927eeSDavid Rientjes p += snprintf(p, buffer + maxlen - p, "="); 2959f5b087b5SDavid Rientjes 29602291990aSLee Schermerhorn /* 29612291990aSLee Schermerhorn * Currently, the only defined flags are mutually exclusive 29622291990aSLee Schermerhorn */ 2963f5b087b5SDavid Rientjes if (flags & MPOL_F_STATIC_NODES) 29642291990aSLee Schermerhorn p += snprintf(p, buffer + maxlen - p, "static"); 29652291990aSLee Schermerhorn else if (flags & MPOL_F_RELATIVE_NODES) 29662291990aSLee Schermerhorn p += snprintf(p, buffer + maxlen - p, "relative"); 2967f5b087b5SDavid Rientjes } 2968f5b087b5SDavid Rientjes 29699e763e0fSTejun Heo if (!nodes_empty(nodes)) 29709e763e0fSTejun Heo p += scnprintf(p, buffer + maxlen - p, ":%*pbl", 29719e763e0fSTejun Heo nodemask_pr_args(&nodes)); 29721a75a6c8SChristoph Lameter } 2973