// SPDX-License-Identifier: GPL-2.0-only
/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a process counter
 *                is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead.
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case NUMA_NO_NODE here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
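 *
 * For illustration only (a userspace sketch, not part of the description
 * above): assuming the syscall wrappers declared in libnuma's <numaif.h>,
 * a task could request these policies roughly as follows, where addr and
 * length name a hypothetical existing mapping and error handling is omitted.
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 1);	   nodes 0 and 1
 *
 *	Interleave all future allocations of this task over nodes 0-1:
 *	set_mempolicy(MPOL_INTERLEAVE, &mask, 8 * sizeof(mask));
 *
 *	Bind one existing mapping strictly to node 0:
 *	unsigned long node0 = 1UL << 0;
 *	mbind(addr, length, MPOL_BIND, &node0, 8 * sizeof(node0),
 *	      MPOL_MF_STRICT);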
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/pagewalk.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/ptrace.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>
#include <linux/swapops.h>

#include <asm/tlbflush.h>
#include <linux/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone.
   A specific allocation for a zone below that is not policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

struct mempolicy *get_task_policy(struct task_struct *p)
{
	struct mempolicy *pol = p->mempolicy;
	int node;

	if (pol)
		return pol;

	node = numa_node_id();
	if (node != NUMA_NO_NODE) {
		pol = &preferred_node_policy[node];
		/* preferred_node_policy is not initialised early in boot */
		if (pol->mode)
			return pol;
	}

	return &default_policy;
}

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
} mpol_ops[MPOL_MAX];

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}

static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!nodes)
		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
	else if (nodes_empty(*nodes))
		return -EINVAL;			/* no allowed nodes */
	else
		pol->v.preferred_node = first_node(*nodes);
	return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const
nodemask_t *nodes) 18737012946SDavid Rientjes { 188859f7ef1SZhihui Zhang if (nodes_empty(*nodes)) 18937012946SDavid Rientjes return -EINVAL; 19037012946SDavid Rientjes pol->v.nodes = *nodes; 19137012946SDavid Rientjes return 0; 19237012946SDavid Rientjes } 19337012946SDavid Rientjes 19458568d2aSMiao Xie /* 19558568d2aSMiao Xie * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if 19658568d2aSMiao Xie * any, for the new policy. mpol_new() has already validated the nodes 19758568d2aSMiao Xie * parameter with respect to the policy mode and flags. But, we need to 19858568d2aSMiao Xie * handle an empty nodemask with MPOL_PREFERRED here. 19958568d2aSMiao Xie * 20058568d2aSMiao Xie * Must be called holding task's alloc_lock to protect task's mems_allowed 20158568d2aSMiao Xie * and mempolicy. May also be called holding the mmap_semaphore for write. 20258568d2aSMiao Xie */ 2034bfc4495SKAMEZAWA Hiroyuki static int mpol_set_nodemask(struct mempolicy *pol, 2044bfc4495SKAMEZAWA Hiroyuki const nodemask_t *nodes, struct nodemask_scratch *nsc) 20558568d2aSMiao Xie { 20658568d2aSMiao Xie int ret; 20758568d2aSMiao Xie 20858568d2aSMiao Xie /* if mode is MPOL_DEFAULT, pol is NULL. This is right. */ 20958568d2aSMiao Xie if (pol == NULL) 21058568d2aSMiao Xie return 0; 21101f13bd6SLai Jiangshan /* Check N_MEMORY */ 2124bfc4495SKAMEZAWA Hiroyuki nodes_and(nsc->mask1, 21301f13bd6SLai Jiangshan cpuset_current_mems_allowed, node_states[N_MEMORY]); 21458568d2aSMiao Xie 21558568d2aSMiao Xie VM_BUG_ON(!nodes); 21658568d2aSMiao Xie if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes)) 21758568d2aSMiao Xie nodes = NULL; /* explicit local allocation */ 21858568d2aSMiao Xie else { 21958568d2aSMiao Xie if (pol->flags & MPOL_F_RELATIVE_NODES) 2204bfc4495SKAMEZAWA Hiroyuki mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1); 22158568d2aSMiao Xie else 2224bfc4495SKAMEZAWA Hiroyuki nodes_and(nsc->mask2, *nodes, nsc->mask1); 2234bfc4495SKAMEZAWA Hiroyuki 22458568d2aSMiao Xie if (mpol_store_user_nodemask(pol)) 22558568d2aSMiao Xie pol->w.user_nodemask = *nodes; 22658568d2aSMiao Xie else 22758568d2aSMiao Xie pol->w.cpuset_mems_allowed = 22858568d2aSMiao Xie cpuset_current_mems_allowed; 22958568d2aSMiao Xie } 23058568d2aSMiao Xie 2314bfc4495SKAMEZAWA Hiroyuki if (nodes) 2324bfc4495SKAMEZAWA Hiroyuki ret = mpol_ops[pol->mode].create(pol, &nsc->mask2); 2334bfc4495SKAMEZAWA Hiroyuki else 2344bfc4495SKAMEZAWA Hiroyuki ret = mpol_ops[pol->mode].create(pol, NULL); 23558568d2aSMiao Xie return ret; 23658568d2aSMiao Xie } 23758568d2aSMiao Xie 23858568d2aSMiao Xie /* 23958568d2aSMiao Xie * This function just creates a new policy, does some check and simple 24058568d2aSMiao Xie * initialization. You must invoke mpol_set_nodemask() to set nodes. 24158568d2aSMiao Xie */ 242028fec41SDavid Rientjes static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags, 243028fec41SDavid Rientjes nodemask_t *nodes) 2441da177e4SLinus Torvalds { 2451da177e4SLinus Torvalds struct mempolicy *policy; 2461da177e4SLinus Torvalds 247028fec41SDavid Rientjes pr_debug("setting mode %d flags %d nodes[0] %lx\n", 24800ef2d2fSDavid Rientjes mode, flags, nodes ? 
nodes_addr(*nodes)[0] : NUMA_NO_NODE); 249140d5a49SPaul Mundt 2503e1f0645SDavid Rientjes if (mode == MPOL_DEFAULT) { 2513e1f0645SDavid Rientjes if (nodes && !nodes_empty(*nodes)) 25237012946SDavid Rientjes return ERR_PTR(-EINVAL); 253d3a71033SLee Schermerhorn return NULL; 25437012946SDavid Rientjes } 2553e1f0645SDavid Rientjes VM_BUG_ON(!nodes); 2563e1f0645SDavid Rientjes 2573e1f0645SDavid Rientjes /* 2583e1f0645SDavid Rientjes * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or 2593e1f0645SDavid Rientjes * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation). 2603e1f0645SDavid Rientjes * All other modes require a valid pointer to a non-empty nodemask. 2613e1f0645SDavid Rientjes */ 2623e1f0645SDavid Rientjes if (mode == MPOL_PREFERRED) { 2633e1f0645SDavid Rientjes if (nodes_empty(*nodes)) { 2643e1f0645SDavid Rientjes if (((flags & MPOL_F_STATIC_NODES) || 2653e1f0645SDavid Rientjes (flags & MPOL_F_RELATIVE_NODES))) 2663e1f0645SDavid Rientjes return ERR_PTR(-EINVAL); 2673e1f0645SDavid Rientjes } 268479e2802SPeter Zijlstra } else if (mode == MPOL_LOCAL) { 2698d303e44SPiotr Kwapulinski if (!nodes_empty(*nodes) || 2708d303e44SPiotr Kwapulinski (flags & MPOL_F_STATIC_NODES) || 2718d303e44SPiotr Kwapulinski (flags & MPOL_F_RELATIVE_NODES)) 272479e2802SPeter Zijlstra return ERR_PTR(-EINVAL); 273479e2802SPeter Zijlstra mode = MPOL_PREFERRED; 2743e1f0645SDavid Rientjes } else if (nodes_empty(*nodes)) 2753e1f0645SDavid Rientjes return ERR_PTR(-EINVAL); 2761da177e4SLinus Torvalds policy = kmem_cache_alloc(policy_cache, GFP_KERNEL); 2771da177e4SLinus Torvalds if (!policy) 2781da177e4SLinus Torvalds return ERR_PTR(-ENOMEM); 2791da177e4SLinus Torvalds atomic_set(&policy->refcnt, 1); 28045c4745aSLee Schermerhorn policy->mode = mode; 28137012946SDavid Rientjes policy->flags = flags; 2823e1f0645SDavid Rientjes 28337012946SDavid Rientjes return policy; 28437012946SDavid Rientjes } 28537012946SDavid Rientjes 28652cd3b07SLee Schermerhorn /* Slow path of a mpol destructor. 
*/ 28752cd3b07SLee Schermerhorn void __mpol_put(struct mempolicy *p) 28852cd3b07SLee Schermerhorn { 28952cd3b07SLee Schermerhorn if (!atomic_dec_and_test(&p->refcnt)) 29052cd3b07SLee Schermerhorn return; 29152cd3b07SLee Schermerhorn kmem_cache_free(policy_cache, p); 29252cd3b07SLee Schermerhorn } 29352cd3b07SLee Schermerhorn 294213980c0SVlastimil Babka static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes) 29537012946SDavid Rientjes { 29637012946SDavid Rientjes } 29737012946SDavid Rientjes 298213980c0SVlastimil Babka static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes) 2991d0d2680SDavid Rientjes { 3001d0d2680SDavid Rientjes nodemask_t tmp; 3011d0d2680SDavid Rientjes 30237012946SDavid Rientjes if (pol->flags & MPOL_F_STATIC_NODES) 30337012946SDavid Rientjes nodes_and(tmp, pol->w.user_nodemask, *nodes); 30437012946SDavid Rientjes else if (pol->flags & MPOL_F_RELATIVE_NODES) 30537012946SDavid Rientjes mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes); 3061d0d2680SDavid Rientjes else { 307213980c0SVlastimil Babka nodes_remap(tmp, pol->v.nodes,pol->w.cpuset_mems_allowed, 308213980c0SVlastimil Babka *nodes); 30929b190faSzhong jiang pol->w.cpuset_mems_allowed = *nodes; 3101d0d2680SDavid Rientjes } 31137012946SDavid Rientjes 312708c1bbcSMiao Xie if (nodes_empty(tmp)) 313708c1bbcSMiao Xie tmp = *nodes; 314708c1bbcSMiao Xie 3151d0d2680SDavid Rientjes pol->v.nodes = tmp; 31637012946SDavid Rientjes } 31737012946SDavid Rientjes 31837012946SDavid Rientjes static void mpol_rebind_preferred(struct mempolicy *pol, 319213980c0SVlastimil Babka const nodemask_t *nodes) 32037012946SDavid Rientjes { 32137012946SDavid Rientjes nodemask_t tmp; 32237012946SDavid Rientjes 32337012946SDavid Rientjes if (pol->flags & MPOL_F_STATIC_NODES) { 3241d0d2680SDavid Rientjes int node = first_node(pol->w.user_nodemask); 3251d0d2680SDavid Rientjes 326fc36b8d3SLee Schermerhorn if (node_isset(node, *nodes)) { 3271d0d2680SDavid Rientjes pol->v.preferred_node = node; 328fc36b8d3SLee Schermerhorn pol->flags &= ~MPOL_F_LOCAL; 329fc36b8d3SLee Schermerhorn } else 330fc36b8d3SLee Schermerhorn pol->flags |= MPOL_F_LOCAL; 33137012946SDavid Rientjes } else if (pol->flags & MPOL_F_RELATIVE_NODES) { 33237012946SDavid Rientjes mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes); 3331d0d2680SDavid Rientjes pol->v.preferred_node = first_node(tmp); 334fc36b8d3SLee Schermerhorn } else if (!(pol->flags & MPOL_F_LOCAL)) { 3351d0d2680SDavid Rientjes pol->v.preferred_node = node_remap(pol->v.preferred_node, 33637012946SDavid Rientjes pol->w.cpuset_mems_allowed, 33737012946SDavid Rientjes *nodes); 33837012946SDavid Rientjes pol->w.cpuset_mems_allowed = *nodes; 3391d0d2680SDavid Rientjes } 3401d0d2680SDavid Rientjes } 34137012946SDavid Rientjes 342708c1bbcSMiao Xie /* 343708c1bbcSMiao Xie * mpol_rebind_policy - Migrate a policy to a different set of nodes 344708c1bbcSMiao Xie * 345213980c0SVlastimil Babka * Per-vma policies are protected by mmap_sem. Allocations using per-task 346213980c0SVlastimil Babka * policies are protected by task->mems_allowed_seq to prevent a premature 347213980c0SVlastimil Babka * OOM/allocation failure due to parallel nodemask modification. 
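 *
 * As an illustration (example node numbers chosen arbitrarily) of what the
 * mode specific rebind callbacks above do: take an MPOL_INTERLEAVE policy
 * created over nodes 0-1 while the cpuset allowed nodes 0-3, then let the
 * cpuset be changed to allow only nodes 4-7.
 *
 *	no mode flags:		nodes_remap() maps {0,1} -> {4,5}
 *	MPOL_F_RELATIVE_NODES:	{0,1} is folded onto the new set -> {4,5}
 *	MPOL_F_STATIC_NODES:	{0,1} & {4-7} is empty, so the policy
 *				falls back to the whole new mask {4-7}
 *
 * (The exact results follow from mpol_rebind_nodemask() above.)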
348708c1bbcSMiao Xie */ 349213980c0SVlastimil Babka static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask) 35037012946SDavid Rientjes { 35137012946SDavid Rientjes if (!pol) 35237012946SDavid Rientjes return; 3532e25644eSVlastimil Babka if (!mpol_store_user_nodemask(pol) && !(pol->flags & MPOL_F_LOCAL) && 35437012946SDavid Rientjes nodes_equal(pol->w.cpuset_mems_allowed, *newmask)) 35537012946SDavid Rientjes return; 356708c1bbcSMiao Xie 357213980c0SVlastimil Babka mpol_ops[pol->mode].rebind(pol, newmask); 3581d0d2680SDavid Rientjes } 3591d0d2680SDavid Rientjes 3601d0d2680SDavid Rientjes /* 3611d0d2680SDavid Rientjes * Wrapper for mpol_rebind_policy() that just requires task 3621d0d2680SDavid Rientjes * pointer, and updates task mempolicy. 36358568d2aSMiao Xie * 36458568d2aSMiao Xie * Called with task's alloc_lock held. 3651d0d2680SDavid Rientjes */ 3661d0d2680SDavid Rientjes 367213980c0SVlastimil Babka void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new) 3681d0d2680SDavid Rientjes { 369213980c0SVlastimil Babka mpol_rebind_policy(tsk->mempolicy, new); 3701d0d2680SDavid Rientjes } 3711d0d2680SDavid Rientjes 3721d0d2680SDavid Rientjes /* 3731d0d2680SDavid Rientjes * Rebind each vma in mm to new nodemask. 3741d0d2680SDavid Rientjes * 3751d0d2680SDavid Rientjes * Call holding a reference to mm. Takes mm->mmap_sem during call. 3761d0d2680SDavid Rientjes */ 3771d0d2680SDavid Rientjes 3781d0d2680SDavid Rientjes void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new) 3791d0d2680SDavid Rientjes { 3801d0d2680SDavid Rientjes struct vm_area_struct *vma; 3811d0d2680SDavid Rientjes 3821d0d2680SDavid Rientjes down_write(&mm->mmap_sem); 3831d0d2680SDavid Rientjes for (vma = mm->mmap; vma; vma = vma->vm_next) 384213980c0SVlastimil Babka mpol_rebind_policy(vma->vm_policy, new); 3851d0d2680SDavid Rientjes up_write(&mm->mmap_sem); 3861d0d2680SDavid Rientjes } 3871d0d2680SDavid Rientjes 38837012946SDavid Rientjes static const struct mempolicy_operations mpol_ops[MPOL_MAX] = { 38937012946SDavid Rientjes [MPOL_DEFAULT] = { 39037012946SDavid Rientjes .rebind = mpol_rebind_default, 39137012946SDavid Rientjes }, 39237012946SDavid Rientjes [MPOL_INTERLEAVE] = { 39337012946SDavid Rientjes .create = mpol_new_interleave, 39437012946SDavid Rientjes .rebind = mpol_rebind_nodemask, 39537012946SDavid Rientjes }, 39637012946SDavid Rientjes [MPOL_PREFERRED] = { 39737012946SDavid Rientjes .create = mpol_new_preferred, 39837012946SDavid Rientjes .rebind = mpol_rebind_preferred, 39937012946SDavid Rientjes }, 40037012946SDavid Rientjes [MPOL_BIND] = { 40137012946SDavid Rientjes .create = mpol_new_bind, 40237012946SDavid Rientjes .rebind = mpol_rebind_nodemask, 40337012946SDavid Rientjes }, 40437012946SDavid Rientjes }; 40537012946SDavid Rientjes 406a53190a4SYang Shi static int migrate_page_add(struct page *page, struct list_head *pagelist, 407fc301289SChristoph Lameter unsigned long flags); 4081a75a6c8SChristoph Lameter 4096f4576e3SNaoya Horiguchi struct queue_pages { 4106f4576e3SNaoya Horiguchi struct list_head *pagelist; 4116f4576e3SNaoya Horiguchi unsigned long flags; 4126f4576e3SNaoya Horiguchi nodemask_t *nmask; 413*f18da660SLi Xinhai unsigned long start; 414*f18da660SLi Xinhai unsigned long end; 415*f18da660SLi Xinhai struct vm_area_struct *first; 4166f4576e3SNaoya Horiguchi }; 4176f4576e3SNaoya Horiguchi 41898094945SNaoya Horiguchi /* 41988aaa2a1SNaoya Horiguchi * Check if the page's nid is in qp->nmask. 
42088aaa2a1SNaoya Horiguchi * 42188aaa2a1SNaoya Horiguchi * If MPOL_MF_INVERT is set in qp->flags, check if the nid is 42288aaa2a1SNaoya Horiguchi * in the invert of qp->nmask. 42388aaa2a1SNaoya Horiguchi */ 42488aaa2a1SNaoya Horiguchi static inline bool queue_pages_required(struct page *page, 42588aaa2a1SNaoya Horiguchi struct queue_pages *qp) 42688aaa2a1SNaoya Horiguchi { 42788aaa2a1SNaoya Horiguchi int nid = page_to_nid(page); 42888aaa2a1SNaoya Horiguchi unsigned long flags = qp->flags; 42988aaa2a1SNaoya Horiguchi 43088aaa2a1SNaoya Horiguchi return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT); 43188aaa2a1SNaoya Horiguchi } 43288aaa2a1SNaoya Horiguchi 433a7f40cfeSYang Shi /* 434d8835445SYang Shi * queue_pages_pmd() has four possible return values: 435d8835445SYang Shi * 0 - pages are placed on the right node or queued successfully. 436d8835445SYang Shi * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were 437d8835445SYang Shi * specified. 438d8835445SYang Shi * 2 - THP was split. 439d8835445SYang Shi * -EIO - is migration entry or only MPOL_MF_STRICT was specified and an 440d8835445SYang Shi * existing page was already on a node that does not follow the 441d8835445SYang Shi * policy. 442a7f40cfeSYang Shi */ 443c8633798SNaoya Horiguchi static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr, 444c8633798SNaoya Horiguchi unsigned long end, struct mm_walk *walk) 445c8633798SNaoya Horiguchi { 446c8633798SNaoya Horiguchi int ret = 0; 447c8633798SNaoya Horiguchi struct page *page; 448c8633798SNaoya Horiguchi struct queue_pages *qp = walk->private; 449c8633798SNaoya Horiguchi unsigned long flags; 450c8633798SNaoya Horiguchi 451c8633798SNaoya Horiguchi if (unlikely(is_pmd_migration_entry(*pmd))) { 452a7f40cfeSYang Shi ret = -EIO; 453c8633798SNaoya Horiguchi goto unlock; 454c8633798SNaoya Horiguchi } 455c8633798SNaoya Horiguchi page = pmd_page(*pmd); 456c8633798SNaoya Horiguchi if (is_huge_zero_page(page)) { 457c8633798SNaoya Horiguchi spin_unlock(ptl); 458c8633798SNaoya Horiguchi __split_huge_pmd(walk->vma, pmd, addr, false, NULL); 459d8835445SYang Shi ret = 2; 460c8633798SNaoya Horiguchi goto out; 461c8633798SNaoya Horiguchi } 462d8835445SYang Shi if (!queue_pages_required(page, qp)) 463c8633798SNaoya Horiguchi goto unlock; 464c8633798SNaoya Horiguchi 465c8633798SNaoya Horiguchi flags = qp->flags; 466c8633798SNaoya Horiguchi /* go to thp migration */ 467a7f40cfeSYang Shi if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 468a53190a4SYang Shi if (!vma_migratable(walk->vma) || 469a53190a4SYang Shi migrate_page_add(page, qp->pagelist, flags)) { 470d8835445SYang Shi ret = 1; 471a7f40cfeSYang Shi goto unlock; 472a7f40cfeSYang Shi } 473a7f40cfeSYang Shi } else 474a7f40cfeSYang Shi ret = -EIO; 475c8633798SNaoya Horiguchi unlock: 476c8633798SNaoya Horiguchi spin_unlock(ptl); 477c8633798SNaoya Horiguchi out: 478c8633798SNaoya Horiguchi return ret; 479c8633798SNaoya Horiguchi } 480c8633798SNaoya Horiguchi 48188aaa2a1SNaoya Horiguchi /* 48298094945SNaoya Horiguchi * Scan through pages checking if pages follow certain conditions, 48398094945SNaoya Horiguchi * and move them to the pagelist if they do. 484d8835445SYang Shi * 485d8835445SYang Shi * queue_pages_pte_range() has three possible return values: 486d8835445SYang Shi * 0 - pages are placed on the right node or queued successfully. 487d8835445SYang Shi * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were 488d8835445SYang Shi * specified. 
489d8835445SYang Shi * -EIO - only MPOL_MF_STRICT was specified and an existing page was already 490d8835445SYang Shi * on a node that does not follow the policy. 49198094945SNaoya Horiguchi */ 4926f4576e3SNaoya Horiguchi static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr, 4936f4576e3SNaoya Horiguchi unsigned long end, struct mm_walk *walk) 4941da177e4SLinus Torvalds { 4956f4576e3SNaoya Horiguchi struct vm_area_struct *vma = walk->vma; 4966f4576e3SNaoya Horiguchi struct page *page; 4976f4576e3SNaoya Horiguchi struct queue_pages *qp = walk->private; 4986f4576e3SNaoya Horiguchi unsigned long flags = qp->flags; 499c8633798SNaoya Horiguchi int ret; 500d8835445SYang Shi bool has_unmovable = false; 50191612e0dSHugh Dickins pte_t *pte; 502705e87c0SHugh Dickins spinlock_t *ptl; 503941150a3SHugh Dickins 504c8633798SNaoya Horiguchi ptl = pmd_trans_huge_lock(pmd, vma); 505c8633798SNaoya Horiguchi if (ptl) { 506c8633798SNaoya Horiguchi ret = queue_pages_pmd(pmd, ptl, addr, end, walk); 507d8835445SYang Shi if (ret != 2) 508a7f40cfeSYang Shi return ret; 509248db92dSKirill A. Shutemov } 510d8835445SYang Shi /* THP was split, fall through to pte walk */ 51191612e0dSHugh Dickins 512337d9abfSNaoya Horiguchi if (pmd_trans_unstable(pmd)) 513337d9abfSNaoya Horiguchi return 0; 51494723aafSMichal Hocko 5156f4576e3SNaoya Horiguchi pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); 5166f4576e3SNaoya Horiguchi for (; addr != end; pte++, addr += PAGE_SIZE) { 51791612e0dSHugh Dickins if (!pte_present(*pte)) 51891612e0dSHugh Dickins continue; 5196aab341eSLinus Torvalds page = vm_normal_page(vma, addr, *pte); 5206aab341eSLinus Torvalds if (!page) 52191612e0dSHugh Dickins continue; 522053837fcSNick Piggin /* 52362b61f61SHugh Dickins * vm_normal_page() filters out zero pages, but there might 52462b61f61SHugh Dickins * still be PageReserved pages to skip, perhaps in a VDSO. 525053837fcSNick Piggin */ 526b79bc0a0SHugh Dickins if (PageReserved(page)) 527f4598c8bSChristoph Lameter continue; 52888aaa2a1SNaoya Horiguchi if (!queue_pages_required(page, qp)) 52938e35860SChristoph Lameter continue; 530a7f40cfeSYang Shi if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 531d8835445SYang Shi /* MPOL_MF_STRICT must be specified if we get here */ 532d8835445SYang Shi if (!vma_migratable(vma)) { 533d8835445SYang Shi has_unmovable = true; 534a7f40cfeSYang Shi break; 535d8835445SYang Shi } 536a53190a4SYang Shi 537a53190a4SYang Shi /* 538a53190a4SYang Shi * Do not abort immediately since there may be 539a53190a4SYang Shi * temporary off LRU pages in the range. Still 540a53190a4SYang Shi * need migrate other LRU pages. 541a53190a4SYang Shi */ 542a53190a4SYang Shi if (migrate_page_add(page, qp->pagelist, flags)) 543a53190a4SYang Shi has_unmovable = true; 544a7f40cfeSYang Shi } else 545a7f40cfeSYang Shi break; 5466f4576e3SNaoya Horiguchi } 5476f4576e3SNaoya Horiguchi pte_unmap_unlock(pte - 1, ptl); 5486f4576e3SNaoya Horiguchi cond_resched(); 549d8835445SYang Shi 550d8835445SYang Shi if (has_unmovable) 551d8835445SYang Shi return 1; 552d8835445SYang Shi 553a7f40cfeSYang Shi return addr != end ? 
-EIO : 0; 55491612e0dSHugh Dickins } 55591612e0dSHugh Dickins 5566f4576e3SNaoya Horiguchi static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask, 5576f4576e3SNaoya Horiguchi unsigned long addr, unsigned long end, 5586f4576e3SNaoya Horiguchi struct mm_walk *walk) 559e2d8cf40SNaoya Horiguchi { 560e2d8cf40SNaoya Horiguchi #ifdef CONFIG_HUGETLB_PAGE 5616f4576e3SNaoya Horiguchi struct queue_pages *qp = walk->private; 5626f4576e3SNaoya Horiguchi unsigned long flags = qp->flags; 563e2d8cf40SNaoya Horiguchi struct page *page; 564cb900f41SKirill A. Shutemov spinlock_t *ptl; 565d4c54919SNaoya Horiguchi pte_t entry; 566e2d8cf40SNaoya Horiguchi 5676f4576e3SNaoya Horiguchi ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte); 5686f4576e3SNaoya Horiguchi entry = huge_ptep_get(pte); 569d4c54919SNaoya Horiguchi if (!pte_present(entry)) 570d4c54919SNaoya Horiguchi goto unlock; 571d4c54919SNaoya Horiguchi page = pte_page(entry); 57288aaa2a1SNaoya Horiguchi if (!queue_pages_required(page, qp)) 573e2d8cf40SNaoya Horiguchi goto unlock; 574e2d8cf40SNaoya Horiguchi /* With MPOL_MF_MOVE, we migrate only unshared hugepage. */ 575e2d8cf40SNaoya Horiguchi if (flags & (MPOL_MF_MOVE_ALL) || 576e2d8cf40SNaoya Horiguchi (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) 5776f4576e3SNaoya Horiguchi isolate_huge_page(page, qp->pagelist); 578e2d8cf40SNaoya Horiguchi unlock: 579cb900f41SKirill A. Shutemov spin_unlock(ptl); 580e2d8cf40SNaoya Horiguchi #else 581e2d8cf40SNaoya Horiguchi BUG(); 582e2d8cf40SNaoya Horiguchi #endif 58391612e0dSHugh Dickins return 0; 5841da177e4SLinus Torvalds } 5851da177e4SLinus Torvalds 5865877231fSAneesh Kumar K.V #ifdef CONFIG_NUMA_BALANCING 587b24f53a0SLee Schermerhorn /* 5884b10e7d5SMel Gorman * This is used to mark a range of virtual addresses to be inaccessible. 5894b10e7d5SMel Gorman * These are later cleared by a NUMA hinting fault. Depending on these 5904b10e7d5SMel Gorman * faults, pages may be migrated for better NUMA placement. 5914b10e7d5SMel Gorman * 5924b10e7d5SMel Gorman * This is assuming that NUMA faults are handled using PROT_NONE. If 5934b10e7d5SMel Gorman * an architecture makes a different choice, it will need further 5944b10e7d5SMel Gorman * changes to the core. 
595b24f53a0SLee Schermerhorn */ 5964b10e7d5SMel Gorman unsigned long change_prot_numa(struct vm_area_struct *vma, 5974b10e7d5SMel Gorman unsigned long addr, unsigned long end) 598b24f53a0SLee Schermerhorn { 5994b10e7d5SMel Gorman int nr_updated; 600b24f53a0SLee Schermerhorn 6014d942466SMel Gorman nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1); 60203c5a6e1SMel Gorman if (nr_updated) 60303c5a6e1SMel Gorman count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated); 604b24f53a0SLee Schermerhorn 6054b10e7d5SMel Gorman return nr_updated; 606b24f53a0SLee Schermerhorn } 607b24f53a0SLee Schermerhorn #else 608b24f53a0SLee Schermerhorn static unsigned long change_prot_numa(struct vm_area_struct *vma, 609b24f53a0SLee Schermerhorn unsigned long addr, unsigned long end) 610b24f53a0SLee Schermerhorn { 611b24f53a0SLee Schermerhorn return 0; 612b24f53a0SLee Schermerhorn } 6135877231fSAneesh Kumar K.V #endif /* CONFIG_NUMA_BALANCING */ 614b24f53a0SLee Schermerhorn 6156f4576e3SNaoya Horiguchi static int queue_pages_test_walk(unsigned long start, unsigned long end, 6166f4576e3SNaoya Horiguchi struct mm_walk *walk) 6171da177e4SLinus Torvalds { 6186f4576e3SNaoya Horiguchi struct vm_area_struct *vma = walk->vma; 6196f4576e3SNaoya Horiguchi struct queue_pages *qp = walk->private; 6205b952b3cSAndi Kleen unsigned long endvma = vma->vm_end; 6216f4576e3SNaoya Horiguchi unsigned long flags = qp->flags; 622dc9aa5b9SChristoph Lameter 623a18b3ac2SLi Xinhai /* range check first */ 624*f18da660SLi Xinhai VM_BUG_ON((vma->vm_start > start) || (vma->vm_end < end)); 625*f18da660SLi Xinhai 626*f18da660SLi Xinhai if (!qp->first) { 627*f18da660SLi Xinhai qp->first = vma; 628*f18da660SLi Xinhai if (!(flags & MPOL_MF_DISCONTIG_OK) && 629*f18da660SLi Xinhai (qp->start < vma->vm_start)) 630*f18da660SLi Xinhai /* hole at head side of range */ 631a18b3ac2SLi Xinhai return -EFAULT; 632a18b3ac2SLi Xinhai } 633*f18da660SLi Xinhai if (!(flags & MPOL_MF_DISCONTIG_OK) && 634*f18da660SLi Xinhai ((vma->vm_end < qp->end) && 635*f18da660SLi Xinhai (!vma->vm_next || vma->vm_end < vma->vm_next->vm_start))) 636*f18da660SLi Xinhai /* hole at middle or tail of range */ 637*f18da660SLi Xinhai return -EFAULT; 638a18b3ac2SLi Xinhai 639a7f40cfeSYang Shi /* 640a7f40cfeSYang Shi * Need check MPOL_MF_STRICT to return -EIO if possible 641a7f40cfeSYang Shi * regardless of vma_migratable 642a7f40cfeSYang Shi */ 643a7f40cfeSYang Shi if (!vma_migratable(vma) && 644a7f40cfeSYang Shi !(flags & MPOL_MF_STRICT)) 64548684a65SNaoya Horiguchi return 1; 64648684a65SNaoya Horiguchi 6475b952b3cSAndi Kleen if (endvma > end) 6485b952b3cSAndi Kleen endvma = end; 649b24f53a0SLee Schermerhorn 650b24f53a0SLee Schermerhorn if (flags & MPOL_MF_LAZY) { 6512c0346a3SMel Gorman /* Similar to task_numa_work, skip inaccessible VMAs */ 6524355c018SLiang Chen if (!is_vm_hugetlb_page(vma) && 6534355c018SLiang Chen (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) && 6544355c018SLiang Chen !(vma->vm_flags & VM_MIXEDMAP)) 655b24f53a0SLee Schermerhorn change_prot_numa(vma, start, endvma); 6566f4576e3SNaoya Horiguchi return 1; 657b24f53a0SLee Schermerhorn } 658b24f53a0SLee Schermerhorn 6596f4576e3SNaoya Horiguchi /* queue pages from current vma */ 660a7f40cfeSYang Shi if (flags & MPOL_MF_VALID) 6616f4576e3SNaoya Horiguchi return 0; 6626f4576e3SNaoya Horiguchi return 1; 6636f4576e3SNaoya Horiguchi } 664b24f53a0SLee Schermerhorn 6657b86ac33SChristoph Hellwig static const struct mm_walk_ops queue_pages_walk_ops = { 6667b86ac33SChristoph Hellwig .hugetlb_entry = queue_pages_hugetlb, 
6677b86ac33SChristoph Hellwig .pmd_entry = queue_pages_pte_range, 6687b86ac33SChristoph Hellwig .test_walk = queue_pages_test_walk, 6697b86ac33SChristoph Hellwig }; 6707b86ac33SChristoph Hellwig 6716f4576e3SNaoya Horiguchi /* 6726f4576e3SNaoya Horiguchi * Walk through page tables and collect pages to be migrated. 6736f4576e3SNaoya Horiguchi * 6746f4576e3SNaoya Horiguchi * If pages found in a given range are on a set of nodes (determined by 6756f4576e3SNaoya Horiguchi * @nodes and @flags,) it's isolated and queued to the pagelist which is 676d8835445SYang Shi * passed via @private. 677d8835445SYang Shi * 678d8835445SYang Shi * queue_pages_range() has three possible return values: 679d8835445SYang Shi * 1 - there is unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were 680d8835445SYang Shi * specified. 681d8835445SYang Shi * 0 - queue pages successfully or no misplaced page. 682a85dfc30SYang Shi * errno - i.e. misplaced pages with MPOL_MF_STRICT specified (-EIO) or 683a85dfc30SYang Shi * memory range specified by nodemask and maxnode points outside 684a85dfc30SYang Shi * your accessible address space (-EFAULT) 6856f4576e3SNaoya Horiguchi */ 6866f4576e3SNaoya Horiguchi static int 6876f4576e3SNaoya Horiguchi queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end, 6886f4576e3SNaoya Horiguchi nodemask_t *nodes, unsigned long flags, 6896f4576e3SNaoya Horiguchi struct list_head *pagelist) 6906f4576e3SNaoya Horiguchi { 691*f18da660SLi Xinhai int err; 6926f4576e3SNaoya Horiguchi struct queue_pages qp = { 6936f4576e3SNaoya Horiguchi .pagelist = pagelist, 6946f4576e3SNaoya Horiguchi .flags = flags, 6956f4576e3SNaoya Horiguchi .nmask = nodes, 696*f18da660SLi Xinhai .start = start, 697*f18da660SLi Xinhai .end = end, 698*f18da660SLi Xinhai .first = NULL, 6996f4576e3SNaoya Horiguchi }; 7006f4576e3SNaoya Horiguchi 701*f18da660SLi Xinhai err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp); 702*f18da660SLi Xinhai 703*f18da660SLi Xinhai if (!qp.first) 704*f18da660SLi Xinhai /* whole range in hole */ 705*f18da660SLi Xinhai err = -EFAULT; 706*f18da660SLi Xinhai 707*f18da660SLi Xinhai return err; 7081da177e4SLinus Torvalds } 7091da177e4SLinus Torvalds 710869833f2SKOSAKI Motohiro /* 711869833f2SKOSAKI Motohiro * Apply policy to a single VMA 712869833f2SKOSAKI Motohiro * This must be called with the mmap_sem held for writing. 713869833f2SKOSAKI Motohiro */ 714869833f2SKOSAKI Motohiro static int vma_replace_policy(struct vm_area_struct *vma, 715869833f2SKOSAKI Motohiro struct mempolicy *pol) 7168d34694cSKOSAKI Motohiro { 717869833f2SKOSAKI Motohiro int err; 718869833f2SKOSAKI Motohiro struct mempolicy *old; 719869833f2SKOSAKI Motohiro struct mempolicy *new; 7208d34694cSKOSAKI Motohiro 7218d34694cSKOSAKI Motohiro pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n", 7228d34694cSKOSAKI Motohiro vma->vm_start, vma->vm_end, vma->vm_pgoff, 7238d34694cSKOSAKI Motohiro vma->vm_ops, vma->vm_file, 7248d34694cSKOSAKI Motohiro vma->vm_ops ? 
vma->vm_ops->set_policy : NULL); 7258d34694cSKOSAKI Motohiro 726869833f2SKOSAKI Motohiro new = mpol_dup(pol); 727869833f2SKOSAKI Motohiro if (IS_ERR(new)) 728869833f2SKOSAKI Motohiro return PTR_ERR(new); 729869833f2SKOSAKI Motohiro 730869833f2SKOSAKI Motohiro if (vma->vm_ops && vma->vm_ops->set_policy) { 7318d34694cSKOSAKI Motohiro err = vma->vm_ops->set_policy(vma, new); 732869833f2SKOSAKI Motohiro if (err) 733869833f2SKOSAKI Motohiro goto err_out; 7348d34694cSKOSAKI Motohiro } 735869833f2SKOSAKI Motohiro 736869833f2SKOSAKI Motohiro old = vma->vm_policy; 737869833f2SKOSAKI Motohiro vma->vm_policy = new; /* protected by mmap_sem */ 738869833f2SKOSAKI Motohiro mpol_put(old); 739869833f2SKOSAKI Motohiro 740869833f2SKOSAKI Motohiro return 0; 741869833f2SKOSAKI Motohiro err_out: 742869833f2SKOSAKI Motohiro mpol_put(new); 7438d34694cSKOSAKI Motohiro return err; 7448d34694cSKOSAKI Motohiro } 7458d34694cSKOSAKI Motohiro 7461da177e4SLinus Torvalds /* Step 2: apply policy to a range and do splits. */ 7479d8cebd4SKOSAKI Motohiro static int mbind_range(struct mm_struct *mm, unsigned long start, 7489d8cebd4SKOSAKI Motohiro unsigned long end, struct mempolicy *new_pol) 7491da177e4SLinus Torvalds { 7501da177e4SLinus Torvalds struct vm_area_struct *next; 7519d8cebd4SKOSAKI Motohiro struct vm_area_struct *prev; 7529d8cebd4SKOSAKI Motohiro struct vm_area_struct *vma; 7539d8cebd4SKOSAKI Motohiro int err = 0; 754e26a5114SKOSAKI Motohiro pgoff_t pgoff; 7559d8cebd4SKOSAKI Motohiro unsigned long vmstart; 7569d8cebd4SKOSAKI Motohiro unsigned long vmend; 7571da177e4SLinus Torvalds 758097d5910SLinus Torvalds vma = find_vma(mm, start); 759*f18da660SLi Xinhai VM_BUG_ON(!vma); 7609d8cebd4SKOSAKI Motohiro 761097d5910SLinus Torvalds prev = vma->vm_prev; 762e26a5114SKOSAKI Motohiro if (start > vma->vm_start) 763e26a5114SKOSAKI Motohiro prev = vma; 764e26a5114SKOSAKI Motohiro 7659d8cebd4SKOSAKI Motohiro for (; vma && vma->vm_start < end; prev = vma, vma = next) { 7661da177e4SLinus Torvalds next = vma->vm_next; 7679d8cebd4SKOSAKI Motohiro vmstart = max(start, vma->vm_start); 7689d8cebd4SKOSAKI Motohiro vmend = min(end, vma->vm_end); 7699d8cebd4SKOSAKI Motohiro 770e26a5114SKOSAKI Motohiro if (mpol_equal(vma_policy(vma), new_pol)) 771e26a5114SKOSAKI Motohiro continue; 772e26a5114SKOSAKI Motohiro 773e26a5114SKOSAKI Motohiro pgoff = vma->vm_pgoff + 774e26a5114SKOSAKI Motohiro ((vmstart - vma->vm_start) >> PAGE_SHIFT); 7759d8cebd4SKOSAKI Motohiro prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags, 776e26a5114SKOSAKI Motohiro vma->anon_vma, vma->vm_file, pgoff, 77719a809afSAndrea Arcangeli new_pol, vma->vm_userfaultfd_ctx); 7789d8cebd4SKOSAKI Motohiro if (prev) { 7799d8cebd4SKOSAKI Motohiro vma = prev; 7809d8cebd4SKOSAKI Motohiro next = vma->vm_next; 7813964acd0SOleg Nesterov if (mpol_equal(vma_policy(vma), new_pol)) 7829d8cebd4SKOSAKI Motohiro continue; 7833964acd0SOleg Nesterov /* vma_merge() joined vma && vma->next, case 8 */ 7843964acd0SOleg Nesterov goto replace; 7851da177e4SLinus Torvalds } 7869d8cebd4SKOSAKI Motohiro if (vma->vm_start != vmstart) { 7879d8cebd4SKOSAKI Motohiro err = split_vma(vma->vm_mm, vma, vmstart, 1); 7889d8cebd4SKOSAKI Motohiro if (err) 7899d8cebd4SKOSAKI Motohiro goto out; 7909d8cebd4SKOSAKI Motohiro } 7919d8cebd4SKOSAKI Motohiro if (vma->vm_end != vmend) { 7929d8cebd4SKOSAKI Motohiro err = split_vma(vma->vm_mm, vma, vmend, 0); 7939d8cebd4SKOSAKI Motohiro if (err) 7949d8cebd4SKOSAKI Motohiro goto out; 7959d8cebd4SKOSAKI Motohiro } 7963964acd0SOleg Nesterov replace: 797869833f2SKOSAKI Motohiro 
err = vma_replace_policy(vma, new_pol); 7989d8cebd4SKOSAKI Motohiro if (err) 7999d8cebd4SKOSAKI Motohiro goto out; 8009d8cebd4SKOSAKI Motohiro } 8019d8cebd4SKOSAKI Motohiro 8029d8cebd4SKOSAKI Motohiro out: 8031da177e4SLinus Torvalds return err; 8041da177e4SLinus Torvalds } 8051da177e4SLinus Torvalds 8061da177e4SLinus Torvalds /* Set the process memory policy */ 807028fec41SDavid Rientjes static long do_set_mempolicy(unsigned short mode, unsigned short flags, 808028fec41SDavid Rientjes nodemask_t *nodes) 8091da177e4SLinus Torvalds { 81058568d2aSMiao Xie struct mempolicy *new, *old; 8114bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 81258568d2aSMiao Xie int ret; 8131da177e4SLinus Torvalds 8144bfc4495SKAMEZAWA Hiroyuki if (!scratch) 8154bfc4495SKAMEZAWA Hiroyuki return -ENOMEM; 816f4e53d91SLee Schermerhorn 8174bfc4495SKAMEZAWA Hiroyuki new = mpol_new(mode, flags, nodes); 8184bfc4495SKAMEZAWA Hiroyuki if (IS_ERR(new)) { 8194bfc4495SKAMEZAWA Hiroyuki ret = PTR_ERR(new); 8204bfc4495SKAMEZAWA Hiroyuki goto out; 8214bfc4495SKAMEZAWA Hiroyuki } 8222c7c3a7dSOleg Nesterov 82358568d2aSMiao Xie task_lock(current); 8244bfc4495SKAMEZAWA Hiroyuki ret = mpol_set_nodemask(new, nodes, scratch); 82558568d2aSMiao Xie if (ret) { 82658568d2aSMiao Xie task_unlock(current); 82758568d2aSMiao Xie mpol_put(new); 8284bfc4495SKAMEZAWA Hiroyuki goto out; 82958568d2aSMiao Xie } 83058568d2aSMiao Xie old = current->mempolicy; 8311da177e4SLinus Torvalds current->mempolicy = new; 83245816682SVlastimil Babka if (new && new->mode == MPOL_INTERLEAVE) 83345816682SVlastimil Babka current->il_prev = MAX_NUMNODES-1; 83458568d2aSMiao Xie task_unlock(current); 83558568d2aSMiao Xie mpol_put(old); 8364bfc4495SKAMEZAWA Hiroyuki ret = 0; 8374bfc4495SKAMEZAWA Hiroyuki out: 8384bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 8394bfc4495SKAMEZAWA Hiroyuki return ret; 8401da177e4SLinus Torvalds } 8411da177e4SLinus Torvalds 842bea904d5SLee Schermerhorn /* 843bea904d5SLee Schermerhorn * Return nodemask for policy for get_mempolicy() query 84458568d2aSMiao Xie * 84558568d2aSMiao Xie * Called with task's alloc_lock held 846bea904d5SLee Schermerhorn */ 847bea904d5SLee Schermerhorn static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes) 8481da177e4SLinus Torvalds { 849dfcd3c0dSAndi Kleen nodes_clear(*nodes); 850bea904d5SLee Schermerhorn if (p == &default_policy) 851bea904d5SLee Schermerhorn return; 852bea904d5SLee Schermerhorn 85345c4745aSLee Schermerhorn switch (p->mode) { 85419770b32SMel Gorman case MPOL_BIND: 85519770b32SMel Gorman /* Fall through */ 8561da177e4SLinus Torvalds case MPOL_INTERLEAVE: 857dfcd3c0dSAndi Kleen *nodes = p->v.nodes; 8581da177e4SLinus Torvalds break; 8591da177e4SLinus Torvalds case MPOL_PREFERRED: 860fc36b8d3SLee Schermerhorn if (!(p->flags & MPOL_F_LOCAL)) 861dfcd3c0dSAndi Kleen node_set(p->v.preferred_node, *nodes); 86253f2556bSLee Schermerhorn /* else return empty node mask for local allocation */ 8631da177e4SLinus Torvalds break; 8641da177e4SLinus Torvalds default: 8651da177e4SLinus Torvalds BUG(); 8661da177e4SLinus Torvalds } 8671da177e4SLinus Torvalds } 8681da177e4SLinus Torvalds 8693b9aadf7SAndrea Arcangeli static int lookup_node(struct mm_struct *mm, unsigned long addr) 8701da177e4SLinus Torvalds { 8711da177e4SLinus Torvalds struct page *p; 8721da177e4SLinus Torvalds int err; 8731da177e4SLinus Torvalds 8743b9aadf7SAndrea Arcangeli int locked = 1; 8753b9aadf7SAndrea Arcangeli err = get_user_pages_locked(addr & PAGE_MASK, 1, 0, &p, &locked); 8761da177e4SLinus Torvalds if (err >= 0) 
{ 8771da177e4SLinus Torvalds err = page_to_nid(p); 8781da177e4SLinus Torvalds put_page(p); 8791da177e4SLinus Torvalds } 8803b9aadf7SAndrea Arcangeli if (locked) 8813b9aadf7SAndrea Arcangeli up_read(&mm->mmap_sem); 8821da177e4SLinus Torvalds return err; 8831da177e4SLinus Torvalds } 8841da177e4SLinus Torvalds 8851da177e4SLinus Torvalds /* Retrieve NUMA policy */ 886dbcb0f19SAdrian Bunk static long do_get_mempolicy(int *policy, nodemask_t *nmask, 8871da177e4SLinus Torvalds unsigned long addr, unsigned long flags) 8881da177e4SLinus Torvalds { 8898bccd85fSChristoph Lameter int err; 8901da177e4SLinus Torvalds struct mm_struct *mm = current->mm; 8911da177e4SLinus Torvalds struct vm_area_struct *vma = NULL; 8923b9aadf7SAndrea Arcangeli struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL; 8931da177e4SLinus Torvalds 894754af6f5SLee Schermerhorn if (flags & 895754af6f5SLee Schermerhorn ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED)) 8961da177e4SLinus Torvalds return -EINVAL; 897754af6f5SLee Schermerhorn 898754af6f5SLee Schermerhorn if (flags & MPOL_F_MEMS_ALLOWED) { 899754af6f5SLee Schermerhorn if (flags & (MPOL_F_NODE|MPOL_F_ADDR)) 900754af6f5SLee Schermerhorn return -EINVAL; 901754af6f5SLee Schermerhorn *policy = 0; /* just so it's initialized */ 90258568d2aSMiao Xie task_lock(current); 903754af6f5SLee Schermerhorn *nmask = cpuset_current_mems_allowed; 90458568d2aSMiao Xie task_unlock(current); 905754af6f5SLee Schermerhorn return 0; 906754af6f5SLee Schermerhorn } 907754af6f5SLee Schermerhorn 9081da177e4SLinus Torvalds if (flags & MPOL_F_ADDR) { 909bea904d5SLee Schermerhorn /* 910bea904d5SLee Schermerhorn * Do NOT fall back to task policy if the 911bea904d5SLee Schermerhorn * vma/shared policy at addr is NULL. We 912bea904d5SLee Schermerhorn * want to return MPOL_DEFAULT in this case. 913bea904d5SLee Schermerhorn */ 9141da177e4SLinus Torvalds down_read(&mm->mmap_sem); 9151da177e4SLinus Torvalds vma = find_vma_intersection(mm, addr, addr+1); 9161da177e4SLinus Torvalds if (!vma) { 9171da177e4SLinus Torvalds up_read(&mm->mmap_sem); 9181da177e4SLinus Torvalds return -EFAULT; 9191da177e4SLinus Torvalds } 9201da177e4SLinus Torvalds if (vma->vm_ops && vma->vm_ops->get_policy) 9211da177e4SLinus Torvalds pol = vma->vm_ops->get_policy(vma, addr); 9221da177e4SLinus Torvalds else 9231da177e4SLinus Torvalds pol = vma->vm_policy; 9241da177e4SLinus Torvalds } else if (addr) 9251da177e4SLinus Torvalds return -EINVAL; 9261da177e4SLinus Torvalds 9271da177e4SLinus Torvalds if (!pol) 928bea904d5SLee Schermerhorn pol = &default_policy; /* indicates default behavior */ 9291da177e4SLinus Torvalds 9301da177e4SLinus Torvalds if (flags & MPOL_F_NODE) { 9311da177e4SLinus Torvalds if (flags & MPOL_F_ADDR) { 9323b9aadf7SAndrea Arcangeli /* 9333b9aadf7SAndrea Arcangeli * Take a refcount on the mpol, lookup_node() 9343b9aadf7SAndrea Arcangeli * wil drop the mmap_sem, so after calling 9353b9aadf7SAndrea Arcangeli * lookup_node() only "pol" remains valid, "vma" 9363b9aadf7SAndrea Arcangeli * is stale. 
9373b9aadf7SAndrea Arcangeli */ 9383b9aadf7SAndrea Arcangeli pol_refcount = pol; 9393b9aadf7SAndrea Arcangeli vma = NULL; 9403b9aadf7SAndrea Arcangeli mpol_get(pol); 9413b9aadf7SAndrea Arcangeli err = lookup_node(mm, addr); 9421da177e4SLinus Torvalds if (err < 0) 9431da177e4SLinus Torvalds goto out; 9448bccd85fSChristoph Lameter *policy = err; 9451da177e4SLinus Torvalds } else if (pol == current->mempolicy && 94645c4745aSLee Schermerhorn pol->mode == MPOL_INTERLEAVE) { 94745816682SVlastimil Babka *policy = next_node_in(current->il_prev, pol->v.nodes); 9481da177e4SLinus Torvalds } else { 9491da177e4SLinus Torvalds err = -EINVAL; 9501da177e4SLinus Torvalds goto out; 9511da177e4SLinus Torvalds } 952bea904d5SLee Schermerhorn } else { 953bea904d5SLee Schermerhorn *policy = pol == &default_policy ? MPOL_DEFAULT : 954bea904d5SLee Schermerhorn pol->mode; 955d79df630SDavid Rientjes /* 956d79df630SDavid Rientjes * Internal mempolicy flags must be masked off before exposing 957d79df630SDavid Rientjes * the policy to userspace. 958d79df630SDavid Rientjes */ 959d79df630SDavid Rientjes *policy |= (pol->flags & MPOL_MODE_FLAGS); 960bea904d5SLee Schermerhorn } 9611da177e4SLinus Torvalds 9621da177e4SLinus Torvalds err = 0; 96358568d2aSMiao Xie if (nmask) { 964c6b6ef8bSLee Schermerhorn if (mpol_store_user_nodemask(pol)) { 965c6b6ef8bSLee Schermerhorn *nmask = pol->w.user_nodemask; 966c6b6ef8bSLee Schermerhorn } else { 96758568d2aSMiao Xie task_lock(current); 968bea904d5SLee Schermerhorn get_policy_nodemask(pol, nmask); 96958568d2aSMiao Xie task_unlock(current); 97058568d2aSMiao Xie } 971c6b6ef8bSLee Schermerhorn } 9721da177e4SLinus Torvalds 9731da177e4SLinus Torvalds out: 97452cd3b07SLee Schermerhorn mpol_cond_put(pol); 9751da177e4SLinus Torvalds if (vma) 9763b9aadf7SAndrea Arcangeli up_read(&mm->mmap_sem); 9773b9aadf7SAndrea Arcangeli if (pol_refcount) 9783b9aadf7SAndrea Arcangeli mpol_put(pol_refcount); 9791da177e4SLinus Torvalds return err; 9801da177e4SLinus Torvalds } 9811da177e4SLinus Torvalds 982b20a3503SChristoph Lameter #ifdef CONFIG_MIGRATION 9838bccd85fSChristoph Lameter /* 984c8633798SNaoya Horiguchi * page migration, thp tail pages can be passed. 9856ce3c4c0SChristoph Lameter */ 986a53190a4SYang Shi static int migrate_page_add(struct page *page, struct list_head *pagelist, 987fc301289SChristoph Lameter unsigned long flags) 9886ce3c4c0SChristoph Lameter { 989c8633798SNaoya Horiguchi struct page *head = compound_head(page); 9906ce3c4c0SChristoph Lameter /* 991fc301289SChristoph Lameter * Avoid migrating a page that is shared with others. 9926ce3c4c0SChristoph Lameter */ 993c8633798SNaoya Horiguchi if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) { 994c8633798SNaoya Horiguchi if (!isolate_lru_page(head)) { 995c8633798SNaoya Horiguchi list_add_tail(&head->lru, pagelist); 996c8633798SNaoya Horiguchi mod_node_page_state(page_pgdat(head), 997c8633798SNaoya Horiguchi NR_ISOLATED_ANON + page_is_file_cache(head), 998c8633798SNaoya Horiguchi hpage_nr_pages(head)); 999a53190a4SYang Shi } else if (flags & MPOL_MF_STRICT) { 1000a53190a4SYang Shi /* 1001a53190a4SYang Shi * Non-movable page may reach here. And, there may be 1002a53190a4SYang Shi * temporary off LRU pages or non-LRU movable pages. 1003a53190a4SYang Shi * Treat them as unmovable pages since they can't be 1004a53190a4SYang Shi * isolated, so they can't be moved at the moment. It 1005a53190a4SYang Shi * should return -EIO for this case too. 
1006a53190a4SYang Shi */ 1007a53190a4SYang Shi return -EIO; 100862695a84SNick Piggin } 100962695a84SNick Piggin } 1010a53190a4SYang Shi 1011a53190a4SYang Shi return 0; 10126ce3c4c0SChristoph Lameter } 10136ce3c4c0SChristoph Lameter 1014a49bd4d7SMichal Hocko /* page allocation callback for NUMA node migration */ 1015666feb21SMichal Hocko struct page *alloc_new_node_page(struct page *page, unsigned long node) 101695a402c3SChristoph Lameter { 1017e2d8cf40SNaoya Horiguchi if (PageHuge(page)) 1018e2d8cf40SNaoya Horiguchi return alloc_huge_page_node(page_hstate(compound_head(page)), 1019e2d8cf40SNaoya Horiguchi node); 102094723aafSMichal Hocko else if (PageTransHuge(page)) { 1021c8633798SNaoya Horiguchi struct page *thp; 1022c8633798SNaoya Horiguchi 1023c8633798SNaoya Horiguchi thp = alloc_pages_node(node, 1024c8633798SNaoya Horiguchi (GFP_TRANSHUGE | __GFP_THISNODE), 1025c8633798SNaoya Horiguchi HPAGE_PMD_ORDER); 1026c8633798SNaoya Horiguchi if (!thp) 1027c8633798SNaoya Horiguchi return NULL; 1028c8633798SNaoya Horiguchi prep_transhuge_page(thp); 1029c8633798SNaoya Horiguchi return thp; 1030c8633798SNaoya Horiguchi } else 103196db800fSVlastimil Babka return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE | 1032b360edb4SDavid Rientjes __GFP_THISNODE, 0); 103395a402c3SChristoph Lameter } 103495a402c3SChristoph Lameter 10356ce3c4c0SChristoph Lameter /* 10367e2ab150SChristoph Lameter * Migrate pages from one node to a target node. 10377e2ab150SChristoph Lameter * Returns error or the number of pages not migrated. 10387e2ab150SChristoph Lameter */ 1039dbcb0f19SAdrian Bunk static int migrate_to_node(struct mm_struct *mm, int source, int dest, 1040dbcb0f19SAdrian Bunk int flags) 10417e2ab150SChristoph Lameter { 10427e2ab150SChristoph Lameter nodemask_t nmask; 10437e2ab150SChristoph Lameter LIST_HEAD(pagelist); 10447e2ab150SChristoph Lameter int err = 0; 10457e2ab150SChristoph Lameter 10467e2ab150SChristoph Lameter nodes_clear(nmask); 10477e2ab150SChristoph Lameter node_set(source, nmask); 10487e2ab150SChristoph Lameter 104908270807SMinchan Kim /* 105008270807SMinchan Kim * This does not "check" the range but isolates all pages that 105108270807SMinchan Kim * need migration. Between passing in the full user address 105208270807SMinchan Kim * space range and MPOL_MF_DISCONTIG_OK, this call can not fail. 105308270807SMinchan Kim */ 105408270807SMinchan Kim VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))); 105598094945SNaoya Horiguchi queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask, 10567e2ab150SChristoph Lameter flags | MPOL_MF_DISCONTIG_OK, &pagelist); 10577e2ab150SChristoph Lameter 1058cf608ac1SMinchan Kim if (!list_empty(&pagelist)) { 1059a49bd4d7SMichal Hocko err = migrate_pages(&pagelist, alloc_new_node_page, NULL, dest, 10609c620e2bSHugh Dickins MIGRATE_SYNC, MR_SYSCALL); 1061cf608ac1SMinchan Kim if (err) 1062e2d8cf40SNaoya Horiguchi putback_movable_pages(&pagelist); 1063cf608ac1SMinchan Kim } 106495a402c3SChristoph Lameter 10657e2ab150SChristoph Lameter return err; 10667e2ab150SChristoph Lameter } 10677e2ab150SChristoph Lameter 10687e2ab150SChristoph Lameter /* 10697e2ab150SChristoph Lameter * Move pages between the two nodesets so as to preserve the physical 10707e2ab150SChristoph Lameter * layout as much as possible. 107139743889SChristoph Lameter * 107239743889SChristoph Lameter * Returns the number of page that could not be moved. 
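 *
 * For illustration only (userspace sketch, assuming the wrappers declared
 * in libnuma's <numaif.h>): the matching syscall moves as many of a task's
 * pages as possible from one nodemask to another, e.g.
 *
 *	unsigned long from = 1UL << 0;		old node 0
 *	unsigned long to   = 1UL << 2;		new node 2
 *	long left = migrate_pages(pid, 8 * sizeof(from), &from, &to);
 *
 * where a return value > 0 is the number of pages that could not be moved.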
107339743889SChristoph Lameter */ 10740ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, 10750ce72d4fSAndrew Morton const nodemask_t *to, int flags) 107639743889SChristoph Lameter { 10777e2ab150SChristoph Lameter int busy = 0; 10780aedadf9SChristoph Lameter int err; 10797e2ab150SChristoph Lameter nodemask_t tmp; 108039743889SChristoph Lameter 10810aedadf9SChristoph Lameter err = migrate_prep(); 10820aedadf9SChristoph Lameter if (err) 10830aedadf9SChristoph Lameter return err; 10840aedadf9SChristoph Lameter 108539743889SChristoph Lameter down_read(&mm->mmap_sem); 1086d4984711SChristoph Lameter 10877e2ab150SChristoph Lameter /* 10887e2ab150SChristoph Lameter * Find a 'source' bit set in 'tmp' whose corresponding 'dest' 10897e2ab150SChristoph Lameter * bit in 'to' is not also set in 'tmp'. Clear the found 'source' 10907e2ab150SChristoph Lameter * bit in 'tmp', and return that <source, dest> pair for migration. 10917e2ab150SChristoph Lameter * The pair of nodemasks 'to' and 'from' define the map. 10927e2ab150SChristoph Lameter * 10937e2ab150SChristoph Lameter * If no pair of bits is found that way, fallback to picking some 10947e2ab150SChristoph Lameter * pair of 'source' and 'dest' bits that are not the same. If the 10957e2ab150SChristoph Lameter * 'source' and 'dest' bits are the same, this represents a node 10967e2ab150SChristoph Lameter * that will be migrating to itself, so no pages need move. 10977e2ab150SChristoph Lameter * 10987e2ab150SChristoph Lameter * If no bits are left in 'tmp', or if all remaining bits left 10997e2ab150SChristoph Lameter * in 'tmp' correspond to the same bit in 'to', return false 11007e2ab150SChristoph Lameter * (nothing left to migrate). 11017e2ab150SChristoph Lameter * 11027e2ab150SChristoph Lameter * This lets us pick a pair of nodes to migrate between, such that 11037e2ab150SChristoph Lameter * if possible the dest node is not already occupied by some other 11047e2ab150SChristoph Lameter * source node, minimizing the risk of overloading the memory on a 11057e2ab150SChristoph Lameter * node that would happen if we migrated incoming memory to a node 11067e2ab150SChristoph Lameter * before migrating outgoing memory source that same node. 11077e2ab150SChristoph Lameter * 11087e2ab150SChristoph Lameter * A single scan of tmp is sufficient. As we go, we remember the 11097e2ab150SChristoph Lameter * most recent <s, d> pair that moved (s != d). If we find a pair 11107e2ab150SChristoph Lameter * that not only moved, but what's better, moved to an empty slot 11117e2ab150SChristoph Lameter * (d is not set in tmp), then we break out then, with that pair. 1112ae0e47f0SJustin P. Mattock * Otherwise when we finish scanning from_tmp, we at least have the 11137e2ab150SChristoph Lameter * most recent <s, d> pair that moved. If we get all the way through 11147e2ab150SChristoph Lameter * the scan of tmp without finding any node that moved, much less 11157e2ab150SChristoph Lameter * moved to an empty node, then there is nothing left worth migrating. 
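 *
 * Illustrative trace (example node numbers only) of the scan described
 * above, for the overlapping case [2,3,4] -> [3,4,5]:
 *
 *	pass 1: tmp = {2,3,4}; (2->3) and (3->4) both target nodes that
 *		still have pages to move, (4->5) targets an empty slot,
 *		so migrate 4->5 first
 *	pass 2: tmp = {2,3};   (3->4) now targets an empty slot -> 3->4
 *	pass 3: tmp = {2};     finally 2->3
 *
 * so each destination is drained of outgoing pages before it is filled.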
11167e2ab150SChristoph Lameter */ 11177e2ab150SChristoph Lameter 11180ce72d4fSAndrew Morton tmp = *from; 11197e2ab150SChristoph Lameter while (!nodes_empty(tmp)) { 11207e2ab150SChristoph Lameter int s,d; 1121b76ac7e7SJianguo Wu int source = NUMA_NO_NODE; 11227e2ab150SChristoph Lameter int dest = 0; 11237e2ab150SChristoph Lameter 11247e2ab150SChristoph Lameter for_each_node_mask(s, tmp) { 11254a5b18ccSLarry Woodman 11264a5b18ccSLarry Woodman /* 11274a5b18ccSLarry Woodman * do_migrate_pages() tries to maintain the relative 11284a5b18ccSLarry Woodman * node relationship of the pages established between 11294a5b18ccSLarry Woodman * threads and memory areas. 11304a5b18ccSLarry Woodman * 11314a5b18ccSLarry Woodman * However if the number of source nodes is not equal to 11324a5b18ccSLarry Woodman * the number of destination nodes we can not preserve 11334a5b18ccSLarry Woodman * this node relative relationship. In that case, skip 11344a5b18ccSLarry Woodman * copying memory from a node that is in the destination 11354a5b18ccSLarry Woodman * mask. 11364a5b18ccSLarry Woodman * 11374a5b18ccSLarry Woodman * Example: [2,3,4] -> [3,4,5] moves everything. 11384a5b18ccSLarry Woodman * [0-7] - > [3,4,5] moves only 0,1,2,6,7. 11394a5b18ccSLarry Woodman */ 11404a5b18ccSLarry Woodman 11410ce72d4fSAndrew Morton if ((nodes_weight(*from) != nodes_weight(*to)) && 11420ce72d4fSAndrew Morton (node_isset(s, *to))) 11434a5b18ccSLarry Woodman continue; 11444a5b18ccSLarry Woodman 11450ce72d4fSAndrew Morton d = node_remap(s, *from, *to); 11467e2ab150SChristoph Lameter if (s == d) 11477e2ab150SChristoph Lameter continue; 11487e2ab150SChristoph Lameter 11497e2ab150SChristoph Lameter source = s; /* Node moved. Memorize */ 11507e2ab150SChristoph Lameter dest = d; 11517e2ab150SChristoph Lameter 11527e2ab150SChristoph Lameter /* dest not in remaining from nodes? */ 11537e2ab150SChristoph Lameter if (!node_isset(dest, tmp)) 11547e2ab150SChristoph Lameter break; 11557e2ab150SChristoph Lameter } 1156b76ac7e7SJianguo Wu if (source == NUMA_NO_NODE) 11577e2ab150SChristoph Lameter break; 11587e2ab150SChristoph Lameter 11597e2ab150SChristoph Lameter node_clear(source, tmp); 11607e2ab150SChristoph Lameter err = migrate_to_node(mm, source, dest, flags); 11617e2ab150SChristoph Lameter if (err > 0) 11627e2ab150SChristoph Lameter busy += err; 11637e2ab150SChristoph Lameter if (err < 0) 11647e2ab150SChristoph Lameter break; 116539743889SChristoph Lameter } 116639743889SChristoph Lameter up_read(&mm->mmap_sem); 11677e2ab150SChristoph Lameter if (err < 0) 11687e2ab150SChristoph Lameter return err; 11697e2ab150SChristoph Lameter return busy; 1170b20a3503SChristoph Lameter 117139743889SChristoph Lameter } 117239743889SChristoph Lameter 11733ad33b24SLee Schermerhorn /* 11743ad33b24SLee Schermerhorn * Allocate a new page for page migration based on vma policy. 1175d05f0cdcSHugh Dickins * Start by assuming the page is mapped by the same vma as contains @start. 11763ad33b24SLee Schermerhorn * Search forward from there, if not. N.B., this assumes that the 11773ad33b24SLee Schermerhorn * list of pages handed to migrate_pages()--which is how we get here-- 11783ad33b24SLee Schermerhorn * is in virtual address order. 
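 *
 * The replacement mirrors the type of the original page: hugetlb pages
 * go through alloc_huge_page_vma(), transparent huge pages are
 * allocated at HPAGE_PMD_ORDER, and base pages use alloc_page_vma()
 * with GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL.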
11793ad33b24SLee Schermerhorn */ 1180666feb21SMichal Hocko static struct page *new_page(struct page *page, unsigned long start) 118195a402c3SChristoph Lameter { 1182d05f0cdcSHugh Dickins struct vm_area_struct *vma; 11833ad33b24SLee Schermerhorn unsigned long uninitialized_var(address); 118495a402c3SChristoph Lameter 1185d05f0cdcSHugh Dickins vma = find_vma(current->mm, start); 11863ad33b24SLee Schermerhorn while (vma) { 11873ad33b24SLee Schermerhorn address = page_address_in_vma(page, vma); 11883ad33b24SLee Schermerhorn if (address != -EFAULT) 11893ad33b24SLee Schermerhorn break; 11903ad33b24SLee Schermerhorn vma = vma->vm_next; 11913ad33b24SLee Schermerhorn } 11923ad33b24SLee Schermerhorn 119311c731e8SWanpeng Li if (PageHuge(page)) { 1194389c8178SMichal Hocko return alloc_huge_page_vma(page_hstate(compound_head(page)), 1195389c8178SMichal Hocko vma, address); 119694723aafSMichal Hocko } else if (PageTransHuge(page)) { 1197c8633798SNaoya Horiguchi struct page *thp; 1198c8633798SNaoya Horiguchi 119919deb769SDavid Rientjes thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address, 120019deb769SDavid Rientjes HPAGE_PMD_ORDER); 1201c8633798SNaoya Horiguchi if (!thp) 1202c8633798SNaoya Horiguchi return NULL; 1203c8633798SNaoya Horiguchi prep_transhuge_page(thp); 1204c8633798SNaoya Horiguchi return thp; 120511c731e8SWanpeng Li } 120611c731e8SWanpeng Li /* 120711c731e8SWanpeng Li * if !vma, alloc_page_vma() will use task or system default policy 120811c731e8SWanpeng Li */ 12090f556856SMichal Hocko return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL, 12100f556856SMichal Hocko vma, address); 121195a402c3SChristoph Lameter } 1212b20a3503SChristoph Lameter #else 1213b20a3503SChristoph Lameter 1214a53190a4SYang Shi static int migrate_page_add(struct page *page, struct list_head *pagelist, 1215b20a3503SChristoph Lameter unsigned long flags) 1216b20a3503SChristoph Lameter { 1217a53190a4SYang Shi return -EIO; 1218b20a3503SChristoph Lameter } 1219b20a3503SChristoph Lameter 12200ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, 12210ce72d4fSAndrew Morton const nodemask_t *to, int flags) 1222b20a3503SChristoph Lameter { 1223b20a3503SChristoph Lameter return -ENOSYS; 1224b20a3503SChristoph Lameter } 122595a402c3SChristoph Lameter 1226666feb21SMichal Hocko static struct page *new_page(struct page *page, unsigned long start) 122795a402c3SChristoph Lameter { 122895a402c3SChristoph Lameter return NULL; 122995a402c3SChristoph Lameter } 1230b20a3503SChristoph Lameter #endif 1231b20a3503SChristoph Lameter 1232dbcb0f19SAdrian Bunk static long do_mbind(unsigned long start, unsigned long len, 1233028fec41SDavid Rientjes unsigned short mode, unsigned short mode_flags, 1234028fec41SDavid Rientjes nodemask_t *nmask, unsigned long flags) 12356ce3c4c0SChristoph Lameter { 12366ce3c4c0SChristoph Lameter struct mm_struct *mm = current->mm; 12376ce3c4c0SChristoph Lameter struct mempolicy *new; 12386ce3c4c0SChristoph Lameter unsigned long end; 12396ce3c4c0SChristoph Lameter int err; 1240d8835445SYang Shi int ret; 12416ce3c4c0SChristoph Lameter LIST_HEAD(pagelist); 12426ce3c4c0SChristoph Lameter 1243b24f53a0SLee Schermerhorn if (flags & ~(unsigned long)MPOL_MF_VALID) 12446ce3c4c0SChristoph Lameter return -EINVAL; 124574c00241SChristoph Lameter if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE)) 12466ce3c4c0SChristoph Lameter return -EPERM; 12476ce3c4c0SChristoph Lameter 12486ce3c4c0SChristoph Lameter if (start & ~PAGE_MASK) 12496ce3c4c0SChristoph Lameter return -EINVAL; 
12506ce3c4c0SChristoph Lameter 12516ce3c4c0SChristoph Lameter if (mode == MPOL_DEFAULT) 12526ce3c4c0SChristoph Lameter flags &= ~MPOL_MF_STRICT; 12536ce3c4c0SChristoph Lameter 12546ce3c4c0SChristoph Lameter len = (len + PAGE_SIZE - 1) & PAGE_MASK; 12556ce3c4c0SChristoph Lameter end = start + len; 12566ce3c4c0SChristoph Lameter 12576ce3c4c0SChristoph Lameter if (end < start) 12586ce3c4c0SChristoph Lameter return -EINVAL; 12596ce3c4c0SChristoph Lameter if (end == start) 12606ce3c4c0SChristoph Lameter return 0; 12616ce3c4c0SChristoph Lameter 1262028fec41SDavid Rientjes new = mpol_new(mode, mode_flags, nmask); 12636ce3c4c0SChristoph Lameter if (IS_ERR(new)) 12646ce3c4c0SChristoph Lameter return PTR_ERR(new); 12656ce3c4c0SChristoph Lameter 1266b24f53a0SLee Schermerhorn if (flags & MPOL_MF_LAZY) 1267b24f53a0SLee Schermerhorn new->flags |= MPOL_F_MOF; 1268b24f53a0SLee Schermerhorn 12696ce3c4c0SChristoph Lameter /* 12706ce3c4c0SChristoph Lameter * If we are using the default policy then operation 12716ce3c4c0SChristoph Lameter * on discontinuous address spaces is okay after all 12726ce3c4c0SChristoph Lameter */ 12736ce3c4c0SChristoph Lameter if (!new) 12746ce3c4c0SChristoph Lameter flags |= MPOL_MF_DISCONTIG_OK; 12756ce3c4c0SChristoph Lameter 1276028fec41SDavid Rientjes pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n", 1277028fec41SDavid Rientjes start, start + len, mode, mode_flags, 127800ef2d2fSDavid Rientjes nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE); 12796ce3c4c0SChristoph Lameter 12800aedadf9SChristoph Lameter if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 12810aedadf9SChristoph Lameter 12820aedadf9SChristoph Lameter err = migrate_prep(); 12830aedadf9SChristoph Lameter if (err) 1284b05ca738SKOSAKI Motohiro goto mpol_out; 12850aedadf9SChristoph Lameter } 12864bfc4495SKAMEZAWA Hiroyuki { 12874bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 12884bfc4495SKAMEZAWA Hiroyuki if (scratch) { 12896ce3c4c0SChristoph Lameter down_write(&mm->mmap_sem); 129058568d2aSMiao Xie task_lock(current); 12914bfc4495SKAMEZAWA Hiroyuki err = mpol_set_nodemask(new, nmask, scratch); 129258568d2aSMiao Xie task_unlock(current); 12934bfc4495SKAMEZAWA Hiroyuki if (err) 129458568d2aSMiao Xie up_write(&mm->mmap_sem); 12954bfc4495SKAMEZAWA Hiroyuki } else 12964bfc4495SKAMEZAWA Hiroyuki err = -ENOMEM; 12974bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 12984bfc4495SKAMEZAWA Hiroyuki } 1299b05ca738SKOSAKI Motohiro if (err) 1300b05ca738SKOSAKI Motohiro goto mpol_out; 1301b05ca738SKOSAKI Motohiro 1302d8835445SYang Shi ret = queue_pages_range(mm, start, end, nmask, 13036ce3c4c0SChristoph Lameter flags | MPOL_MF_INVERT, &pagelist); 1304d8835445SYang Shi 1305d8835445SYang Shi if (ret < 0) { 1306a85dfc30SYang Shi err = ret; 1307d8835445SYang Shi goto up_out; 1308d8835445SYang Shi } 1309d8835445SYang Shi 13109d8cebd4SKOSAKI Motohiro err = mbind_range(mm, start, end, new); 13117e2ab150SChristoph Lameter 1312b24f53a0SLee Schermerhorn if (!err) { 1313b24f53a0SLee Schermerhorn int nr_failed = 0; 1314b24f53a0SLee Schermerhorn 1315cf608ac1SMinchan Kim if (!list_empty(&pagelist)) { 1316b24f53a0SLee Schermerhorn WARN_ON_ONCE(flags & MPOL_MF_LAZY); 1317d05f0cdcSHugh Dickins nr_failed = migrate_pages(&pagelist, new_page, NULL, 1318d05f0cdcSHugh Dickins start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND); 1319cf608ac1SMinchan Kim if (nr_failed) 132074060e4dSNaoya Horiguchi putback_movable_pages(&pagelist); 1321cf608ac1SMinchan Kim } 13226ce3c4c0SChristoph Lameter 1323d8835445SYang Shi if ((ret > 0) || (nr_failed && (flags & 
MPOL_MF_STRICT))) 13246ce3c4c0SChristoph Lameter err = -EIO; 1325a85dfc30SYang Shi } else { 1326d8835445SYang Shi up_out: 1327a85dfc30SYang Shi if (!list_empty(&pagelist)) 1328a85dfc30SYang Shi putback_movable_pages(&pagelist); 1329a85dfc30SYang Shi } 1330a85dfc30SYang Shi 13316ce3c4c0SChristoph Lameter up_write(&mm->mmap_sem); 1332b05ca738SKOSAKI Motohiro mpol_out: 1333f0be3d32SLee Schermerhorn mpol_put(new); 13346ce3c4c0SChristoph Lameter return err; 13356ce3c4c0SChristoph Lameter } 13366ce3c4c0SChristoph Lameter 133739743889SChristoph Lameter /* 13388bccd85fSChristoph Lameter * User space interface with variable sized bitmaps for nodelists. 13398bccd85fSChristoph Lameter */ 13408bccd85fSChristoph Lameter 13418bccd85fSChristoph Lameter /* Copy a node mask from user space. */ 134239743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask, 13438bccd85fSChristoph Lameter unsigned long maxnode) 13448bccd85fSChristoph Lameter { 13458bccd85fSChristoph Lameter unsigned long k; 134656521e7aSYisheng Xie unsigned long t; 13478bccd85fSChristoph Lameter unsigned long nlongs; 13488bccd85fSChristoph Lameter unsigned long endmask; 13498bccd85fSChristoph Lameter 13508bccd85fSChristoph Lameter --maxnode; 13518bccd85fSChristoph Lameter nodes_clear(*nodes); 13528bccd85fSChristoph Lameter if (maxnode == 0 || !nmask) 13538bccd85fSChristoph Lameter return 0; 1354a9c930baSAndi Kleen if (maxnode > PAGE_SIZE*BITS_PER_BYTE) 1355636f13c1SChris Wright return -EINVAL; 13568bccd85fSChristoph Lameter 13578bccd85fSChristoph Lameter nlongs = BITS_TO_LONGS(maxnode); 13588bccd85fSChristoph Lameter if ((maxnode % BITS_PER_LONG) == 0) 13598bccd85fSChristoph Lameter endmask = ~0UL; 13608bccd85fSChristoph Lameter else 13618bccd85fSChristoph Lameter endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1; 13628bccd85fSChristoph Lameter 136356521e7aSYisheng Xie /* 136456521e7aSYisheng Xie * When the user specified more nodes than supported just check 136556521e7aSYisheng Xie * if the non supported part is all zero. 136656521e7aSYisheng Xie * 136756521e7aSYisheng Xie * If maxnode have more longs than MAX_NUMNODES, check 136856521e7aSYisheng Xie * the bits in that area first. And then go through to 136956521e7aSYisheng Xie * check the rest bits which equal or bigger than MAX_NUMNODES. 137056521e7aSYisheng Xie * Otherwise, just check bits [MAX_NUMNODES, maxnode). 
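 *
 * Example: on a 64-bit kernel built with MAX_NUMNODES == 64, a caller
 * passing maxnode == 129 makes nlongs == 2, so the loop below reads
 * the caller's second long and returns -EINVAL unless bits 64..127
 * are all clear; only the first long is then copied into *nodes.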
137156521e7aSYisheng Xie */ 13728bccd85fSChristoph Lameter if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) { 13738bccd85fSChristoph Lameter for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) { 13748bccd85fSChristoph Lameter if (get_user(t, nmask + k)) 13758bccd85fSChristoph Lameter return -EFAULT; 13768bccd85fSChristoph Lameter if (k == nlongs - 1) { 13778bccd85fSChristoph Lameter if (t & endmask) 13788bccd85fSChristoph Lameter return -EINVAL; 13798bccd85fSChristoph Lameter } else if (t) 13808bccd85fSChristoph Lameter return -EINVAL; 13818bccd85fSChristoph Lameter } 13828bccd85fSChristoph Lameter nlongs = BITS_TO_LONGS(MAX_NUMNODES); 13838bccd85fSChristoph Lameter endmask = ~0UL; 13848bccd85fSChristoph Lameter } 13858bccd85fSChristoph Lameter 138656521e7aSYisheng Xie if (maxnode > MAX_NUMNODES && MAX_NUMNODES % BITS_PER_LONG != 0) { 138756521e7aSYisheng Xie unsigned long valid_mask = endmask; 138856521e7aSYisheng Xie 138956521e7aSYisheng Xie valid_mask &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1); 139056521e7aSYisheng Xie if (get_user(t, nmask + nlongs - 1)) 139156521e7aSYisheng Xie return -EFAULT; 139256521e7aSYisheng Xie if (t & valid_mask) 139356521e7aSYisheng Xie return -EINVAL; 139456521e7aSYisheng Xie } 139556521e7aSYisheng Xie 13968bccd85fSChristoph Lameter if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long))) 13978bccd85fSChristoph Lameter return -EFAULT; 13988bccd85fSChristoph Lameter nodes_addr(*nodes)[nlongs-1] &= endmask; 13998bccd85fSChristoph Lameter return 0; 14008bccd85fSChristoph Lameter } 14018bccd85fSChristoph Lameter 14028bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */ 14038bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode, 14048bccd85fSChristoph Lameter nodemask_t *nodes) 14058bccd85fSChristoph Lameter { 14068bccd85fSChristoph Lameter unsigned long copy = ALIGN(maxnode-1, 64) / 8; 1407050c17f2SRalph Campbell unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long); 14088bccd85fSChristoph Lameter 14098bccd85fSChristoph Lameter if (copy > nbytes) { 14108bccd85fSChristoph Lameter if (copy > PAGE_SIZE) 14118bccd85fSChristoph Lameter return -EINVAL; 14128bccd85fSChristoph Lameter if (clear_user((char __user *)mask + nbytes, copy - nbytes)) 14138bccd85fSChristoph Lameter return -EFAULT; 14148bccd85fSChristoph Lameter copy = nbytes; 14158bccd85fSChristoph Lameter } 14168bccd85fSChristoph Lameter return copy_to_user(mask, nodes_addr(*nodes), copy) ? 
-EFAULT : 0; 14178bccd85fSChristoph Lameter } 14188bccd85fSChristoph Lameter 1419e7dc9ad6SDominik Brodowski static long kernel_mbind(unsigned long start, unsigned long len, 1420e7dc9ad6SDominik Brodowski unsigned long mode, const unsigned long __user *nmask, 1421e7dc9ad6SDominik Brodowski unsigned long maxnode, unsigned int flags) 14228bccd85fSChristoph Lameter { 14238bccd85fSChristoph Lameter nodemask_t nodes; 14248bccd85fSChristoph Lameter int err; 1425028fec41SDavid Rientjes unsigned short mode_flags; 14268bccd85fSChristoph Lameter 1427057d3389SAndrey Konovalov start = untagged_addr(start); 1428028fec41SDavid Rientjes mode_flags = mode & MPOL_MODE_FLAGS; 1429028fec41SDavid Rientjes mode &= ~MPOL_MODE_FLAGS; 1430a3b51e01SDavid Rientjes if (mode >= MPOL_MAX) 1431a3b51e01SDavid Rientjes return -EINVAL; 14324c50bc01SDavid Rientjes if ((mode_flags & MPOL_F_STATIC_NODES) && 14334c50bc01SDavid Rientjes (mode_flags & MPOL_F_RELATIVE_NODES)) 14344c50bc01SDavid Rientjes return -EINVAL; 14358bccd85fSChristoph Lameter err = get_nodes(&nodes, nmask, maxnode); 14368bccd85fSChristoph Lameter if (err) 14378bccd85fSChristoph Lameter return err; 1438028fec41SDavid Rientjes return do_mbind(start, len, mode, mode_flags, &nodes, flags); 14398bccd85fSChristoph Lameter } 14408bccd85fSChristoph Lameter 1441e7dc9ad6SDominik Brodowski SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len, 1442e7dc9ad6SDominik Brodowski unsigned long, mode, const unsigned long __user *, nmask, 1443e7dc9ad6SDominik Brodowski unsigned long, maxnode, unsigned int, flags) 1444e7dc9ad6SDominik Brodowski { 1445e7dc9ad6SDominik Brodowski return kernel_mbind(start, len, mode, nmask, maxnode, flags); 1446e7dc9ad6SDominik Brodowski } 1447e7dc9ad6SDominik Brodowski 14488bccd85fSChristoph Lameter /* Set the process memory policy */ 1449af03c4acSDominik Brodowski static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask, 1450af03c4acSDominik Brodowski unsigned long maxnode) 14518bccd85fSChristoph Lameter { 14528bccd85fSChristoph Lameter int err; 14538bccd85fSChristoph Lameter nodemask_t nodes; 1454028fec41SDavid Rientjes unsigned short flags; 14558bccd85fSChristoph Lameter 1456028fec41SDavid Rientjes flags = mode & MPOL_MODE_FLAGS; 1457028fec41SDavid Rientjes mode &= ~MPOL_MODE_FLAGS; 1458028fec41SDavid Rientjes if ((unsigned int)mode >= MPOL_MAX) 14598bccd85fSChristoph Lameter return -EINVAL; 14604c50bc01SDavid Rientjes if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES)) 14614c50bc01SDavid Rientjes return -EINVAL; 14628bccd85fSChristoph Lameter err = get_nodes(&nodes, nmask, maxnode); 14638bccd85fSChristoph Lameter if (err) 14648bccd85fSChristoph Lameter return err; 1465028fec41SDavid Rientjes return do_set_mempolicy(mode, flags, &nodes); 14668bccd85fSChristoph Lameter } 14678bccd85fSChristoph Lameter 1468af03c4acSDominik Brodowski SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask, 1469af03c4acSDominik Brodowski unsigned long, maxnode) 1470af03c4acSDominik Brodowski { 1471af03c4acSDominik Brodowski return kernel_set_mempolicy(mode, nmask, maxnode); 1472af03c4acSDominik Brodowski } 1473af03c4acSDominik Brodowski 1474b6e9b0baSDominik Brodowski static int kernel_migrate_pages(pid_t pid, unsigned long maxnode, 1475b6e9b0baSDominik Brodowski const unsigned long __user *old_nodes, 1476b6e9b0baSDominik Brodowski const unsigned long __user *new_nodes) 147739743889SChristoph Lameter { 1478596d7cfaSKOSAKI Motohiro struct mm_struct *mm = NULL; 147939743889SChristoph Lameter 
struct task_struct *task; 148039743889SChristoph Lameter nodemask_t task_nodes; 148139743889SChristoph Lameter int err; 1482596d7cfaSKOSAKI Motohiro nodemask_t *old; 1483596d7cfaSKOSAKI Motohiro nodemask_t *new; 1484596d7cfaSKOSAKI Motohiro NODEMASK_SCRATCH(scratch); 148539743889SChristoph Lameter 1486596d7cfaSKOSAKI Motohiro if (!scratch) 1487596d7cfaSKOSAKI Motohiro return -ENOMEM; 148839743889SChristoph Lameter 1489596d7cfaSKOSAKI Motohiro old = &scratch->mask1; 1490596d7cfaSKOSAKI Motohiro new = &scratch->mask2; 1491596d7cfaSKOSAKI Motohiro 1492596d7cfaSKOSAKI Motohiro err = get_nodes(old, old_nodes, maxnode); 149339743889SChristoph Lameter if (err) 1494596d7cfaSKOSAKI Motohiro goto out; 1495596d7cfaSKOSAKI Motohiro 1496596d7cfaSKOSAKI Motohiro err = get_nodes(new, new_nodes, maxnode); 1497596d7cfaSKOSAKI Motohiro if (err) 1498596d7cfaSKOSAKI Motohiro goto out; 149939743889SChristoph Lameter 150039743889SChristoph Lameter /* Find the mm_struct */ 150155cfaa3cSZeng Zhaoming rcu_read_lock(); 1502228ebcbeSPavel Emelyanov task = pid ? find_task_by_vpid(pid) : current; 150339743889SChristoph Lameter if (!task) { 150455cfaa3cSZeng Zhaoming rcu_read_unlock(); 1505596d7cfaSKOSAKI Motohiro err = -ESRCH; 1506596d7cfaSKOSAKI Motohiro goto out; 150739743889SChristoph Lameter } 15083268c63eSChristoph Lameter get_task_struct(task); 150939743889SChristoph Lameter 1510596d7cfaSKOSAKI Motohiro err = -EINVAL; 151139743889SChristoph Lameter 151239743889SChristoph Lameter /* 151331367466SOtto Ebeling * Check if this process has the right to modify the specified process. 151431367466SOtto Ebeling * Use the regular "ptrace_may_access()" checks. 151539743889SChristoph Lameter */ 151631367466SOtto Ebeling if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) { 1517c69e8d9cSDavid Howells rcu_read_unlock(); 151839743889SChristoph Lameter err = -EPERM; 15193268c63eSChristoph Lameter goto out_put; 152039743889SChristoph Lameter } 1521c69e8d9cSDavid Howells rcu_read_unlock(); 152239743889SChristoph Lameter 152339743889SChristoph Lameter task_nodes = cpuset_mems_allowed(task); 152439743889SChristoph Lameter /* Is the user allowed to access the target nodes? */ 1525596d7cfaSKOSAKI Motohiro if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) { 152639743889SChristoph Lameter err = -EPERM; 15273268c63eSChristoph Lameter goto out_put; 152839743889SChristoph Lameter } 152939743889SChristoph Lameter 15300486a38bSYisheng Xie task_nodes = cpuset_mems_allowed(current); 15310486a38bSYisheng Xie nodes_and(*new, *new, task_nodes); 15320486a38bSYisheng Xie if (nodes_empty(*new)) 15333268c63eSChristoph Lameter goto out_put; 15340486a38bSYisheng Xie 153586c3a764SDavid Quigley err = security_task_movememory(task); 153686c3a764SDavid Quigley if (err) 15373268c63eSChristoph Lameter goto out_put; 153886c3a764SDavid Quigley 15393268c63eSChristoph Lameter mm = get_task_mm(task); 15403268c63eSChristoph Lameter put_task_struct(task); 1541f2a9ef88SSasha Levin 1542f2a9ef88SSasha Levin if (!mm) { 1543f2a9ef88SSasha Levin err = -EINVAL; 1544f2a9ef88SSasha Levin goto out; 1545f2a9ef88SSasha Levin } 1546f2a9ef88SSasha Levin 1547596d7cfaSKOSAKI Motohiro err = do_migrate_pages(mm, old, new, 154874c00241SChristoph Lameter capable(CAP_SYS_NICE) ? 
MPOL_MF_MOVE_ALL : MPOL_MF_MOVE); 15493268c63eSChristoph Lameter 155039743889SChristoph Lameter mmput(mm); 15513268c63eSChristoph Lameter out: 1552596d7cfaSKOSAKI Motohiro NODEMASK_SCRATCH_FREE(scratch); 1553596d7cfaSKOSAKI Motohiro 155439743889SChristoph Lameter return err; 15553268c63eSChristoph Lameter 15563268c63eSChristoph Lameter out_put: 15573268c63eSChristoph Lameter put_task_struct(task); 15583268c63eSChristoph Lameter goto out; 15593268c63eSChristoph Lameter 156039743889SChristoph Lameter } 156139743889SChristoph Lameter 1562b6e9b0baSDominik Brodowski SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode, 1563b6e9b0baSDominik Brodowski const unsigned long __user *, old_nodes, 1564b6e9b0baSDominik Brodowski const unsigned long __user *, new_nodes) 1565b6e9b0baSDominik Brodowski { 1566b6e9b0baSDominik Brodowski return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes); 1567b6e9b0baSDominik Brodowski } 1568b6e9b0baSDominik Brodowski 156939743889SChristoph Lameter 15708bccd85fSChristoph Lameter /* Retrieve NUMA policy */ 1571af03c4acSDominik Brodowski static int kernel_get_mempolicy(int __user *policy, 1572af03c4acSDominik Brodowski unsigned long __user *nmask, 1573af03c4acSDominik Brodowski unsigned long maxnode, 1574af03c4acSDominik Brodowski unsigned long addr, 1575af03c4acSDominik Brodowski unsigned long flags) 15768bccd85fSChristoph Lameter { 1577dbcb0f19SAdrian Bunk int err; 1578dbcb0f19SAdrian Bunk int uninitialized_var(pval); 15798bccd85fSChristoph Lameter nodemask_t nodes; 15808bccd85fSChristoph Lameter 1581057d3389SAndrey Konovalov addr = untagged_addr(addr); 1582057d3389SAndrey Konovalov 1583050c17f2SRalph Campbell if (nmask != NULL && maxnode < nr_node_ids) 15848bccd85fSChristoph Lameter return -EINVAL; 15858bccd85fSChristoph Lameter 15868bccd85fSChristoph Lameter err = do_get_mempolicy(&pval, &nodes, addr, flags); 15878bccd85fSChristoph Lameter 15888bccd85fSChristoph Lameter if (err) 15898bccd85fSChristoph Lameter return err; 15908bccd85fSChristoph Lameter 15918bccd85fSChristoph Lameter if (policy && put_user(pval, policy)) 15928bccd85fSChristoph Lameter return -EFAULT; 15938bccd85fSChristoph Lameter 15948bccd85fSChristoph Lameter if (nmask) 15958bccd85fSChristoph Lameter err = copy_nodes_to_user(nmask, maxnode, &nodes); 15968bccd85fSChristoph Lameter 15978bccd85fSChristoph Lameter return err; 15988bccd85fSChristoph Lameter } 15998bccd85fSChristoph Lameter 1600af03c4acSDominik Brodowski SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, 1601af03c4acSDominik Brodowski unsigned long __user *, nmask, unsigned long, maxnode, 1602af03c4acSDominik Brodowski unsigned long, addr, unsigned long, flags) 1603af03c4acSDominik Brodowski { 1604af03c4acSDominik Brodowski return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags); 1605af03c4acSDominik Brodowski } 1606af03c4acSDominik Brodowski 16071da177e4SLinus Torvalds #ifdef CONFIG_COMPAT 16081da177e4SLinus Torvalds 1609c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, 1610c93e0f6cSHeiko Carstens compat_ulong_t __user *, nmask, 1611c93e0f6cSHeiko Carstens compat_ulong_t, maxnode, 1612c93e0f6cSHeiko Carstens compat_ulong_t, addr, compat_ulong_t, flags) 16131da177e4SLinus Torvalds { 16141da177e4SLinus Torvalds long err; 16151da177e4SLinus Torvalds unsigned long __user *nm = NULL; 16161da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 16171da177e4SLinus Torvalds DECLARE_BITMAP(bm, MAX_NUMNODES); 16181da177e4SLinus Torvalds 1619050c17f2SRalph Campbell nr_bits = 
min_t(unsigned long, maxnode-1, nr_node_ids); 16201da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 16211da177e4SLinus Torvalds 16221da177e4SLinus Torvalds if (nmask) 16231da177e4SLinus Torvalds nm = compat_alloc_user_space(alloc_size); 16241da177e4SLinus Torvalds 1625af03c4acSDominik Brodowski err = kernel_get_mempolicy(policy, nm, nr_bits+1, addr, flags); 16261da177e4SLinus Torvalds 16271da177e4SLinus Torvalds if (!err && nmask) { 16282bbff6c7SKAMEZAWA Hiroyuki unsigned long copy_size; 16292bbff6c7SKAMEZAWA Hiroyuki copy_size = min_t(unsigned long, sizeof(bm), alloc_size); 16302bbff6c7SKAMEZAWA Hiroyuki err = copy_from_user(bm, nm, copy_size); 16311da177e4SLinus Torvalds /* ensure entire bitmap is zeroed */ 16321da177e4SLinus Torvalds err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8); 16331da177e4SLinus Torvalds err |= compat_put_bitmap(nmask, bm, nr_bits); 16341da177e4SLinus Torvalds } 16351da177e4SLinus Torvalds 16361da177e4SLinus Torvalds return err; 16371da177e4SLinus Torvalds } 16381da177e4SLinus Torvalds 1639c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask, 1640c93e0f6cSHeiko Carstens compat_ulong_t, maxnode) 16411da177e4SLinus Torvalds { 16421da177e4SLinus Torvalds unsigned long __user *nm = NULL; 16431da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 16441da177e4SLinus Torvalds DECLARE_BITMAP(bm, MAX_NUMNODES); 16451da177e4SLinus Torvalds 16461da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 16471da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 16481da177e4SLinus Torvalds 16491da177e4SLinus Torvalds if (nmask) { 1650cf01fb99SChris Salls if (compat_get_bitmap(bm, nmask, nr_bits)) 16511da177e4SLinus Torvalds return -EFAULT; 1652cf01fb99SChris Salls nm = compat_alloc_user_space(alloc_size); 1653cf01fb99SChris Salls if (copy_to_user(nm, bm, alloc_size)) 1654cf01fb99SChris Salls return -EFAULT; 1655cf01fb99SChris Salls } 16561da177e4SLinus Torvalds 1657af03c4acSDominik Brodowski return kernel_set_mempolicy(mode, nm, nr_bits+1); 16581da177e4SLinus Torvalds } 16591da177e4SLinus Torvalds 1660c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len, 1661c93e0f6cSHeiko Carstens compat_ulong_t, mode, compat_ulong_t __user *, nmask, 1662c93e0f6cSHeiko Carstens compat_ulong_t, maxnode, compat_ulong_t, flags) 16631da177e4SLinus Torvalds { 16641da177e4SLinus Torvalds unsigned long __user *nm = NULL; 16651da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 1666dfcd3c0dSAndi Kleen nodemask_t bm; 16671da177e4SLinus Torvalds 16681da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 16691da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 16701da177e4SLinus Torvalds 16711da177e4SLinus Torvalds if (nmask) { 1672cf01fb99SChris Salls if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits)) 16731da177e4SLinus Torvalds return -EFAULT; 1674cf01fb99SChris Salls nm = compat_alloc_user_space(alloc_size); 1675cf01fb99SChris Salls if (copy_to_user(nm, nodes_addr(bm), alloc_size)) 1676cf01fb99SChris Salls return -EFAULT; 1677cf01fb99SChris Salls } 16781da177e4SLinus Torvalds 1679e7dc9ad6SDominik Brodowski return kernel_mbind(start, len, mode, nm, nr_bits+1, flags); 16801da177e4SLinus Torvalds } 16811da177e4SLinus Torvalds 1682b6e9b0baSDominik Brodowski COMPAT_SYSCALL_DEFINE4(migrate_pages, compat_pid_t, pid, 1683b6e9b0baSDominik Brodowski compat_ulong_t, maxnode, 1684b6e9b0baSDominik 
Brodowski const compat_ulong_t __user *, old_nodes, 1685b6e9b0baSDominik Brodowski const compat_ulong_t __user *, new_nodes) 1686b6e9b0baSDominik Brodowski { 1687b6e9b0baSDominik Brodowski unsigned long __user *old = NULL; 1688b6e9b0baSDominik Brodowski unsigned long __user *new = NULL; 1689b6e9b0baSDominik Brodowski nodemask_t tmp_mask; 1690b6e9b0baSDominik Brodowski unsigned long nr_bits; 1691b6e9b0baSDominik Brodowski unsigned long size; 1692b6e9b0baSDominik Brodowski 1693b6e9b0baSDominik Brodowski nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES); 1694b6e9b0baSDominik Brodowski size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 1695b6e9b0baSDominik Brodowski if (old_nodes) { 1696b6e9b0baSDominik Brodowski if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits)) 1697b6e9b0baSDominik Brodowski return -EFAULT; 1698b6e9b0baSDominik Brodowski old = compat_alloc_user_space(new_nodes ? size * 2 : size); 1699b6e9b0baSDominik Brodowski if (new_nodes) 1700b6e9b0baSDominik Brodowski new = old + size / sizeof(unsigned long); 1701b6e9b0baSDominik Brodowski if (copy_to_user(old, nodes_addr(tmp_mask), size)) 1702b6e9b0baSDominik Brodowski return -EFAULT; 1703b6e9b0baSDominik Brodowski } 1704b6e9b0baSDominik Brodowski if (new_nodes) { 1705b6e9b0baSDominik Brodowski if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits)) 1706b6e9b0baSDominik Brodowski return -EFAULT; 1707b6e9b0baSDominik Brodowski if (new == NULL) 1708b6e9b0baSDominik Brodowski new = compat_alloc_user_space(size); 1709b6e9b0baSDominik Brodowski if (copy_to_user(new, nodes_addr(tmp_mask), size)) 1710b6e9b0baSDominik Brodowski return -EFAULT; 1711b6e9b0baSDominik Brodowski } 1712b6e9b0baSDominik Brodowski return kernel_migrate_pages(pid, nr_bits + 1, old, new); 1713b6e9b0baSDominik Brodowski } 1714b6e9b0baSDominik Brodowski 1715b6e9b0baSDominik Brodowski #endif /* CONFIG_COMPAT */ 17161da177e4SLinus Torvalds 171774d2c3a0SOleg Nesterov struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, 171874d2c3a0SOleg Nesterov unsigned long addr) 17191da177e4SLinus Torvalds { 17208d90274bSOleg Nesterov struct mempolicy *pol = NULL; 17211da177e4SLinus Torvalds 17221da177e4SLinus Torvalds if (vma) { 1723480eccf9SLee Schermerhorn if (vma->vm_ops && vma->vm_ops->get_policy) { 17248d90274bSOleg Nesterov pol = vma->vm_ops->get_policy(vma, addr); 172500442ad0SMel Gorman } else if (vma->vm_policy) { 17261da177e4SLinus Torvalds pol = vma->vm_policy; 172700442ad0SMel Gorman 172800442ad0SMel Gorman /* 172900442ad0SMel Gorman * shmem_alloc_page() passes MPOL_F_SHARED policy with 173000442ad0SMel Gorman * a pseudo vma whose vma->vm_ops=NULL. Take a reference 173100442ad0SMel Gorman * count on these policies which will be dropped by 173200442ad0SMel Gorman * mpol_cond_put() later 173300442ad0SMel Gorman */ 173400442ad0SMel Gorman if (mpol_needs_cond_ref(pol)) 173500442ad0SMel Gorman mpol_get(pol); 173600442ad0SMel Gorman } 17371da177e4SLinus Torvalds } 1738f15ca78eSOleg Nesterov 173974d2c3a0SOleg Nesterov return pol; 174074d2c3a0SOleg Nesterov } 174174d2c3a0SOleg Nesterov 174274d2c3a0SOleg Nesterov /* 1743dd6eecb9SOleg Nesterov * get_vma_policy(@vma, @addr) 174474d2c3a0SOleg Nesterov * @vma: virtual memory area whose policy is sought 174574d2c3a0SOleg Nesterov * @addr: address in @vma for shared policy lookup 174674d2c3a0SOleg Nesterov * 174774d2c3a0SOleg Nesterov * Returns effective policy for a VMA at specified address. 1748dd6eecb9SOleg Nesterov * Falls back to current->mempolicy or system default policy, as necessary. 
174974d2c3a0SOleg Nesterov * Shared policies [those marked as MPOL_F_SHARED] require an extra reference 175074d2c3a0SOleg Nesterov * count--added by the get_policy() vm_op, as appropriate--to protect against 175174d2c3a0SOleg Nesterov * freeing by another task. It is the caller's responsibility to free the 175274d2c3a0SOleg Nesterov * extra reference for shared policies. 175374d2c3a0SOleg Nesterov */ 1754ac79f78dSDavid Rientjes static struct mempolicy *get_vma_policy(struct vm_area_struct *vma, 1755dd6eecb9SOleg Nesterov unsigned long addr) 175674d2c3a0SOleg Nesterov { 175774d2c3a0SOleg Nesterov struct mempolicy *pol = __get_vma_policy(vma, addr); 175874d2c3a0SOleg Nesterov 17598d90274bSOleg Nesterov if (!pol) 1760dd6eecb9SOleg Nesterov pol = get_task_policy(current); 17618d90274bSOleg Nesterov 17621da177e4SLinus Torvalds return pol; 17631da177e4SLinus Torvalds } 17641da177e4SLinus Torvalds 17656b6482bbSOleg Nesterov bool vma_policy_mof(struct vm_area_struct *vma) 1766fc314724SMel Gorman { 17676b6482bbSOleg Nesterov struct mempolicy *pol; 1768f15ca78eSOleg Nesterov 1769fc314724SMel Gorman if (vma->vm_ops && vma->vm_ops->get_policy) { 1770fc314724SMel Gorman bool ret = false; 1771fc314724SMel Gorman 1772fc314724SMel Gorman pol = vma->vm_ops->get_policy(vma, vma->vm_start); 1773fc314724SMel Gorman if (pol && (pol->flags & MPOL_F_MOF)) 1774fc314724SMel Gorman ret = true; 1775fc314724SMel Gorman mpol_cond_put(pol); 1776fc314724SMel Gorman 1777fc314724SMel Gorman return ret; 17788d90274bSOleg Nesterov } 17798d90274bSOleg Nesterov 1780fc314724SMel Gorman pol = vma->vm_policy; 17818d90274bSOleg Nesterov if (!pol) 17826b6482bbSOleg Nesterov pol = get_task_policy(current); 1783fc314724SMel Gorman 1784fc314724SMel Gorman return pol->flags & MPOL_F_MOF; 1785fc314724SMel Gorman } 1786fc314724SMel Gorman 1787d3eb1570SLai Jiangshan static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone) 1788d3eb1570SLai Jiangshan { 1789d3eb1570SLai Jiangshan enum zone_type dynamic_policy_zone = policy_zone; 1790d3eb1570SLai Jiangshan 1791d3eb1570SLai Jiangshan BUG_ON(dynamic_policy_zone == ZONE_MOVABLE); 1792d3eb1570SLai Jiangshan 1793d3eb1570SLai Jiangshan /* 1794d3eb1570SLai Jiangshan * if policy->v.nodes has movable memory only, 1795d3eb1570SLai Jiangshan * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only. 1796d3eb1570SLai Jiangshan * 1797d3eb1570SLai Jiangshan * policy->v.nodes is intersect with node_states[N_MEMORY]. 1798d3eb1570SLai Jiangshan * so if the following test faile, it implies 1799d3eb1570SLai Jiangshan * policy->v.nodes has movable memory only. 
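 *
 * In that case dynamic_policy_zone is raised to ZONE_MOVABLE below, so
 * the policy takes effect only for allocations whose gfp zone is
 * ZONE_MOVABLE.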
1800d3eb1570SLai Jiangshan */ 1801d3eb1570SLai Jiangshan if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY])) 1802d3eb1570SLai Jiangshan dynamic_policy_zone = ZONE_MOVABLE; 1803d3eb1570SLai Jiangshan 1804d3eb1570SLai Jiangshan return zone >= dynamic_policy_zone; 1805d3eb1570SLai Jiangshan } 1806d3eb1570SLai Jiangshan 180752cd3b07SLee Schermerhorn /* 180852cd3b07SLee Schermerhorn * Return a nodemask representing a mempolicy for filtering nodes for 180952cd3b07SLee Schermerhorn * page allocation 181052cd3b07SLee Schermerhorn */ 181152cd3b07SLee Schermerhorn static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy) 181219770b32SMel Gorman { 181319770b32SMel Gorman /* Lower zones don't get a nodemask applied for MPOL_BIND */ 181445c4745aSLee Schermerhorn if (unlikely(policy->mode == MPOL_BIND) && 1815d3eb1570SLai Jiangshan apply_policy_zone(policy, gfp_zone(gfp)) && 181619770b32SMel Gorman cpuset_nodemask_valid_mems_allowed(&policy->v.nodes)) 181719770b32SMel Gorman return &policy->v.nodes; 181819770b32SMel Gorman 181919770b32SMel Gorman return NULL; 182019770b32SMel Gorman } 182119770b32SMel Gorman 182204ec6264SVlastimil Babka /* Return the node id preferred by the given mempolicy, or the given id */ 182304ec6264SVlastimil Babka static int policy_node(gfp_t gfp, struct mempolicy *policy, 18242f5f9486SAndi Kleen int nd) 18251da177e4SLinus Torvalds { 18266d840958SMichal Hocko if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL)) 18271da177e4SLinus Torvalds nd = policy->v.preferred_node; 18286d840958SMichal Hocko else { 182919770b32SMel Gorman /* 18306d840958SMichal Hocko * __GFP_THISNODE shouldn't even be used with the bind policy 18316d840958SMichal Hocko * because we might easily break the expectation to stay on the 18326d840958SMichal Hocko * requested node and not break the policy. 183319770b32SMel Gorman */ 18346d840958SMichal Hocko WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE)); 18351da177e4SLinus Torvalds } 18366d840958SMichal Hocko 183704ec6264SVlastimil Babka return nd; 18381da177e4SLinus Torvalds } 18391da177e4SLinus Torvalds 18401da177e4SLinus Torvalds /* Do dynamic interleaving for a process */ 18411da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy) 18421da177e4SLinus Torvalds { 184345816682SVlastimil Babka unsigned next; 18441da177e4SLinus Torvalds struct task_struct *me = current; 18451da177e4SLinus Torvalds 184645816682SVlastimil Babka next = next_node_in(me->il_prev, policy->v.nodes); 1847f5b087b5SDavid Rientjes if (next < MAX_NUMNODES) 184845816682SVlastimil Babka me->il_prev = next; 184945816682SVlastimil Babka return next; 18501da177e4SLinus Torvalds } 18511da177e4SLinus Torvalds 1852dc85da15SChristoph Lameter /* 1853dc85da15SChristoph Lameter * Depending on the memory policy provide a node from which to allocate the 1854dc85da15SChristoph Lameter * next slab entry. 
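 *
 * Returns numa_mem_id() when called in interrupt context or when the
 * current task's policy is default or marked MPOL_F_LOCAL; otherwise
 * the result depends on the policy mode handled below.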
1855dc85da15SChristoph Lameter */ 18562a389610SDavid Rientjes unsigned int mempolicy_slab_node(void) 1857dc85da15SChristoph Lameter { 1858e7b691b0SAndi Kleen struct mempolicy *policy; 18592a389610SDavid Rientjes int node = numa_mem_id(); 1860e7b691b0SAndi Kleen 1861e7b691b0SAndi Kleen if (in_interrupt()) 18622a389610SDavid Rientjes return node; 1863e7b691b0SAndi Kleen 1864e7b691b0SAndi Kleen policy = current->mempolicy; 1865fc36b8d3SLee Schermerhorn if (!policy || policy->flags & MPOL_F_LOCAL) 18662a389610SDavid Rientjes return node; 1867765c4507SChristoph Lameter 1868bea904d5SLee Schermerhorn switch (policy->mode) { 1869bea904d5SLee Schermerhorn case MPOL_PREFERRED: 1870fc36b8d3SLee Schermerhorn /* 1871fc36b8d3SLee Schermerhorn * handled MPOL_F_LOCAL above 1872fc36b8d3SLee Schermerhorn */ 1873bea904d5SLee Schermerhorn return policy->v.preferred_node; 1874bea904d5SLee Schermerhorn 1875dc85da15SChristoph Lameter case MPOL_INTERLEAVE: 1876dc85da15SChristoph Lameter return interleave_nodes(policy); 1877dc85da15SChristoph Lameter 1878dd1a239fSMel Gorman case MPOL_BIND: { 1879c33d6c06SMel Gorman struct zoneref *z; 1880c33d6c06SMel Gorman 1881dc85da15SChristoph Lameter /* 1882dc85da15SChristoph Lameter * Follow bind policy behavior and start allocation at the 1883dc85da15SChristoph Lameter * first node. 1884dc85da15SChristoph Lameter */ 188519770b32SMel Gorman struct zonelist *zonelist; 188619770b32SMel Gorman enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL); 1887c9634cf0SAneesh Kumar K.V zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK]; 1888c33d6c06SMel Gorman z = first_zones_zonelist(zonelist, highest_zoneidx, 1889c33d6c06SMel Gorman &policy->v.nodes); 1890c1093b74SPavel Tatashin return z->zone ? zone_to_nid(z->zone) : node; 1891dd1a239fSMel Gorman } 1892dc85da15SChristoph Lameter 1893dc85da15SChristoph Lameter default: 1894bea904d5SLee Schermerhorn BUG(); 1895dc85da15SChristoph Lameter } 1896dc85da15SChristoph Lameter } 1897dc85da15SChristoph Lameter 1898fee83b3aSAndrew Morton /* 1899fee83b3aSAndrew Morton * Do static interleaving for a VMA with known offset @n. Returns the n'th 1900fee83b3aSAndrew Morton * node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the 1901fee83b3aSAndrew Morton * number of present nodes. 
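 *
 * Example: with pol->v.nodes = {0,2,5} (three nodes), n == 7 gives
 * target == 7 % 3 == 1, so the walk below returns node 2.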
1902fee83b3aSAndrew Morton */ 190398c70baaSLaurent Dufour static unsigned offset_il_node(struct mempolicy *pol, unsigned long n) 19041da177e4SLinus Torvalds { 1905dfcd3c0dSAndi Kleen unsigned nnodes = nodes_weight(pol->v.nodes); 1906f5b087b5SDavid Rientjes unsigned target; 1907fee83b3aSAndrew Morton int i; 1908fee83b3aSAndrew Morton int nid; 19091da177e4SLinus Torvalds 1910f5b087b5SDavid Rientjes if (!nnodes) 1911f5b087b5SDavid Rientjes return numa_node_id(); 1912fee83b3aSAndrew Morton target = (unsigned int)n % nnodes; 1913fee83b3aSAndrew Morton nid = first_node(pol->v.nodes); 1914fee83b3aSAndrew Morton for (i = 0; i < target; i++) 1915dfcd3c0dSAndi Kleen nid = next_node(nid, pol->v.nodes); 19161da177e4SLinus Torvalds return nid; 19171da177e4SLinus Torvalds } 19181da177e4SLinus Torvalds 19195da7ca86SChristoph Lameter /* Determine a node number for interleave */ 19205da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol, 19215da7ca86SChristoph Lameter struct vm_area_struct *vma, unsigned long addr, int shift) 19225da7ca86SChristoph Lameter { 19235da7ca86SChristoph Lameter if (vma) { 19245da7ca86SChristoph Lameter unsigned long off; 19255da7ca86SChristoph Lameter 19263b98b087SNishanth Aravamudan /* 19273b98b087SNishanth Aravamudan * for small pages, there is no difference between 19283b98b087SNishanth Aravamudan * shift and PAGE_SHIFT, so the bit-shift is safe. 19293b98b087SNishanth Aravamudan * for huge pages, since vm_pgoff is in units of small 19303b98b087SNishanth Aravamudan * pages, we need to shift off the always 0 bits to get 19313b98b087SNishanth Aravamudan * a useful offset. 19323b98b087SNishanth Aravamudan */ 19333b98b087SNishanth Aravamudan BUG_ON(shift < PAGE_SHIFT); 19343b98b087SNishanth Aravamudan off = vma->vm_pgoff >> (shift - PAGE_SHIFT); 19355da7ca86SChristoph Lameter off += (addr - vma->vm_start) >> shift; 193698c70baaSLaurent Dufour return offset_il_node(pol, off); 19375da7ca86SChristoph Lameter } else 19385da7ca86SChristoph Lameter return interleave_nodes(pol); 19395da7ca86SChristoph Lameter } 19405da7ca86SChristoph Lameter 194100ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS 1942480eccf9SLee Schermerhorn /* 194304ec6264SVlastimil Babka * huge_node(@vma, @addr, @gfp_flags, @mpol) 1944b46e14acSFabian Frederick * @vma: virtual memory area whose policy is sought 1945b46e14acSFabian Frederick * @addr: address in @vma for shared policy lookup and interleave policy 1946b46e14acSFabian Frederick * @gfp_flags: for requested zone 1947b46e14acSFabian Frederick * @mpol: pointer to mempolicy pointer for reference counted mempolicy 1948b46e14acSFabian Frederick * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask 1949480eccf9SLee Schermerhorn * 195004ec6264SVlastimil Babka * Returns a nid suitable for a huge page allocation and a pointer 195152cd3b07SLee Schermerhorn * to the struct mempolicy for conditional unref after allocation. 195252cd3b07SLee Schermerhorn * If the effective policy is 'BIND, returns a pointer to the mempolicy's 195352cd3b07SLee Schermerhorn * @nodemask for filtering the zonelist. 
1954c0ff7453SMiao Xie * 1955d26914d1SMel Gorman * Must be protected by read_mems_allowed_begin() 1956480eccf9SLee Schermerhorn */ 195704ec6264SVlastimil Babka int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags, 195804ec6264SVlastimil Babka struct mempolicy **mpol, nodemask_t **nodemask) 19595da7ca86SChristoph Lameter { 196004ec6264SVlastimil Babka int nid; 19615da7ca86SChristoph Lameter 1962dd6eecb9SOleg Nesterov *mpol = get_vma_policy(vma, addr); 196319770b32SMel Gorman *nodemask = NULL; /* assume !MPOL_BIND */ 19645da7ca86SChristoph Lameter 196552cd3b07SLee Schermerhorn if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) { 196604ec6264SVlastimil Babka nid = interleave_nid(*mpol, vma, addr, 196704ec6264SVlastimil Babka huge_page_shift(hstate_vma(vma))); 196852cd3b07SLee Schermerhorn } else { 196904ec6264SVlastimil Babka nid = policy_node(gfp_flags, *mpol, numa_node_id()); 197052cd3b07SLee Schermerhorn if ((*mpol)->mode == MPOL_BIND) 197152cd3b07SLee Schermerhorn *nodemask = &(*mpol)->v.nodes; 1972480eccf9SLee Schermerhorn } 197304ec6264SVlastimil Babka return nid; 19745da7ca86SChristoph Lameter } 197506808b08SLee Schermerhorn 197606808b08SLee Schermerhorn /* 197706808b08SLee Schermerhorn * init_nodemask_of_mempolicy 197806808b08SLee Schermerhorn * 197906808b08SLee Schermerhorn * If the current task's mempolicy is "default" [NULL], return 'false' 198006808b08SLee Schermerhorn * to indicate default policy. Otherwise, extract the policy nodemask 198106808b08SLee Schermerhorn * for 'bind' or 'interleave' policy into the argument nodemask, or 198206808b08SLee Schermerhorn * initialize the argument nodemask to contain the single node for 198306808b08SLee Schermerhorn * 'preferred' or 'local' policy and return 'true' to indicate presence 198406808b08SLee Schermerhorn * of non-default mempolicy. 198506808b08SLee Schermerhorn * 198606808b08SLee Schermerhorn * We don't bother with reference counting the mempolicy [mpol_get/put] 198706808b08SLee Schermerhorn * because the current task is examining it's own mempolicy and a task's 198806808b08SLee Schermerhorn * mempolicy is only ever changed by the task itself. 198906808b08SLee Schermerhorn * 199006808b08SLee Schermerhorn * N.B., it is the caller's responsibility to free a returned nodemask. 
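 *
 * Example: a task interleaving over nodes 0-1 gets *mask = {0,1},
 * while an MPOL_PREFERRED policy for node 3 yields the single-node
 * mask {3}.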
199106808b08SLee Schermerhorn */ 199206808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask) 199306808b08SLee Schermerhorn { 199406808b08SLee Schermerhorn struct mempolicy *mempolicy; 199506808b08SLee Schermerhorn int nid; 199606808b08SLee Schermerhorn 199706808b08SLee Schermerhorn if (!(mask && current->mempolicy)) 199806808b08SLee Schermerhorn return false; 199906808b08SLee Schermerhorn 2000c0ff7453SMiao Xie task_lock(current); 200106808b08SLee Schermerhorn mempolicy = current->mempolicy; 200206808b08SLee Schermerhorn switch (mempolicy->mode) { 200306808b08SLee Schermerhorn case MPOL_PREFERRED: 200406808b08SLee Schermerhorn if (mempolicy->flags & MPOL_F_LOCAL) 200506808b08SLee Schermerhorn nid = numa_node_id(); 200606808b08SLee Schermerhorn else 200706808b08SLee Schermerhorn nid = mempolicy->v.preferred_node; 200806808b08SLee Schermerhorn init_nodemask_of_node(mask, nid); 200906808b08SLee Schermerhorn break; 201006808b08SLee Schermerhorn 201106808b08SLee Schermerhorn case MPOL_BIND: 201206808b08SLee Schermerhorn /* Fall through */ 201306808b08SLee Schermerhorn case MPOL_INTERLEAVE: 201406808b08SLee Schermerhorn *mask = mempolicy->v.nodes; 201506808b08SLee Schermerhorn break; 201606808b08SLee Schermerhorn 201706808b08SLee Schermerhorn default: 201806808b08SLee Schermerhorn BUG(); 201906808b08SLee Schermerhorn } 2020c0ff7453SMiao Xie task_unlock(current); 202106808b08SLee Schermerhorn 202206808b08SLee Schermerhorn return true; 202306808b08SLee Schermerhorn } 202400ac59adSChen, Kenneth W #endif 20255da7ca86SChristoph Lameter 20266f48d0ebSDavid Rientjes /* 20276f48d0ebSDavid Rientjes * mempolicy_nodemask_intersects 20286f48d0ebSDavid Rientjes * 20296f48d0ebSDavid Rientjes * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default 20306f48d0ebSDavid Rientjes * policy. Otherwise, check for intersection between mask and the policy 20316f48d0ebSDavid Rientjes * nodemask for 'bind' or 'interleave' policy. For 'perferred' or 'local' 20326f48d0ebSDavid Rientjes * policy, always return true since it may allocate elsewhere on fallback. 20336f48d0ebSDavid Rientjes * 20346f48d0ebSDavid Rientjes * Takes task_lock(tsk) to prevent freeing of its mempolicy. 20356f48d0ebSDavid Rientjes */ 20366f48d0ebSDavid Rientjes bool mempolicy_nodemask_intersects(struct task_struct *tsk, 20376f48d0ebSDavid Rientjes const nodemask_t *mask) 20386f48d0ebSDavid Rientjes { 20396f48d0ebSDavid Rientjes struct mempolicy *mempolicy; 20406f48d0ebSDavid Rientjes bool ret = true; 20416f48d0ebSDavid Rientjes 20426f48d0ebSDavid Rientjes if (!mask) 20436f48d0ebSDavid Rientjes return ret; 20446f48d0ebSDavid Rientjes task_lock(tsk); 20456f48d0ebSDavid Rientjes mempolicy = tsk->mempolicy; 20466f48d0ebSDavid Rientjes if (!mempolicy) 20476f48d0ebSDavid Rientjes goto out; 20486f48d0ebSDavid Rientjes 20496f48d0ebSDavid Rientjes switch (mempolicy->mode) { 20506f48d0ebSDavid Rientjes case MPOL_PREFERRED: 20516f48d0ebSDavid Rientjes /* 20526f48d0ebSDavid Rientjes * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to 20536f48d0ebSDavid Rientjes * allocate from, they may fallback to other nodes when oom. 20546f48d0ebSDavid Rientjes * Thus, it's possible for tsk to have allocated memory from 20556f48d0ebSDavid Rientjes * nodes in mask. 
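		 * ret is therefore left as true here instead of being
		 * narrowed to the preferred node.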
20566f48d0ebSDavid Rientjes */ 20576f48d0ebSDavid Rientjes break; 20586f48d0ebSDavid Rientjes case MPOL_BIND: 20596f48d0ebSDavid Rientjes case MPOL_INTERLEAVE: 20606f48d0ebSDavid Rientjes ret = nodes_intersects(mempolicy->v.nodes, *mask); 20616f48d0ebSDavid Rientjes break; 20626f48d0ebSDavid Rientjes default: 20636f48d0ebSDavid Rientjes BUG(); 20646f48d0ebSDavid Rientjes } 20656f48d0ebSDavid Rientjes out: 20666f48d0ebSDavid Rientjes task_unlock(tsk); 20676f48d0ebSDavid Rientjes return ret; 20686f48d0ebSDavid Rientjes } 20696f48d0ebSDavid Rientjes 20701da177e4SLinus Torvalds /* Allocate a page in interleaved policy. 20711da177e4SLinus Torvalds Own path because it needs to do special accounting. */ 2072662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, 2073662f3a0bSAndi Kleen unsigned nid) 20741da177e4SLinus Torvalds { 20751da177e4SLinus Torvalds struct page *page; 20761da177e4SLinus Torvalds 207704ec6264SVlastimil Babka page = __alloc_pages(gfp, order, nid); 20784518085eSKemi Wang /* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */ 20794518085eSKemi Wang if (!static_branch_likely(&vm_numa_stat_key)) 20804518085eSKemi Wang return page; 2081de55c8b2SAndrey Ryabinin if (page && page_to_nid(page) == nid) { 2082de55c8b2SAndrey Ryabinin preempt_disable(); 2083de55c8b2SAndrey Ryabinin __inc_numa_state(page_zone(page), NUMA_INTERLEAVE_HIT); 2084de55c8b2SAndrey Ryabinin preempt_enable(); 2085de55c8b2SAndrey Ryabinin } 20861da177e4SLinus Torvalds return page; 20871da177e4SLinus Torvalds } 20881da177e4SLinus Torvalds 20891da177e4SLinus Torvalds /** 20900bbbc0b3SAndrea Arcangeli * alloc_pages_vma - Allocate a page for a VMA. 20911da177e4SLinus Torvalds * 20921da177e4SLinus Torvalds * @gfp: 20931da177e4SLinus Torvalds * %GFP_USER user allocation. 20941da177e4SLinus Torvalds * %GFP_KERNEL kernel allocations, 20951da177e4SLinus Torvalds * %GFP_HIGHMEM highmem/user allocations, 20961da177e4SLinus Torvalds * %GFP_FS allocation should not call back into a file system. 20971da177e4SLinus Torvalds * %GFP_ATOMIC don't sleep. 20981da177e4SLinus Torvalds * 20990bbbc0b3SAndrea Arcangeli * @order:Order of the GFP allocation. 21001da177e4SLinus Torvalds * @vma: Pointer to VMA or NULL if not available. 21011da177e4SLinus Torvalds * @addr: Virtual Address of the allocation. Must be inside the VMA. 2102be97a41bSVlastimil Babka * @node: Which node to prefer for allocation (modulo policy). 210319deb769SDavid Rientjes * @hugepage: for hugepages try only the preferred node if possible 21041da177e4SLinus Torvalds * 21051da177e4SLinus Torvalds * This function allocates a page from the kernel page pool and applies 21061da177e4SLinus Torvalds * a NUMA policy associated with the VMA or the current process. 21071da177e4SLinus Torvalds * When VMA is not NULL caller must hold down_read on the mmap_sem of the 21081da177e4SLinus Torvalds * mm_struct of the VMA to prevent it from going away. Should be used for 2109be97a41bSVlastimil Babka * all allocations for pages that will be mapped into user space. Returns 2110be97a41bSVlastimil Babka * NULL when no page can be allocated. 
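 *
 * Illustrative call, roughly what the alloc_page_vma() wrapper expands
 * to for an order-0 fault on the current node with no THP hint:
 *
 *	page = alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, addr,
 *			       numa_node_id(), false);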
21111da177e4SLinus Torvalds */ 21121da177e4SLinus Torvalds struct page * 21130bbbc0b3SAndrea Arcangeli alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, 211419deb769SDavid Rientjes unsigned long addr, int node, bool hugepage) 21151da177e4SLinus Torvalds { 2116cc9a6c87SMel Gorman struct mempolicy *pol; 2117c0ff7453SMiao Xie struct page *page; 211804ec6264SVlastimil Babka int preferred_nid; 2119be97a41bSVlastimil Babka nodemask_t *nmask; 21201da177e4SLinus Torvalds 2121dd6eecb9SOleg Nesterov pol = get_vma_policy(vma, addr); 2122cc9a6c87SMel Gorman 2123be97a41bSVlastimil Babka if (pol->mode == MPOL_INTERLEAVE) { 21241da177e4SLinus Torvalds unsigned nid; 21255da7ca86SChristoph Lameter 21268eac563cSAndi Kleen nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order); 212752cd3b07SLee Schermerhorn mpol_cond_put(pol); 21280bbbc0b3SAndrea Arcangeli page = alloc_page_interleave(gfp, order, nid); 2129be97a41bSVlastimil Babka goto out; 21301da177e4SLinus Torvalds } 21311da177e4SLinus Torvalds 213219deb769SDavid Rientjes if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) { 213319deb769SDavid Rientjes int hpage_node = node; 213419deb769SDavid Rientjes 213519deb769SDavid Rientjes /* 213619deb769SDavid Rientjes * For hugepage allocation and non-interleave policy which 213719deb769SDavid Rientjes * allows the current node (or other explicitly preferred 213819deb769SDavid Rientjes * node) we only try to allocate from the current/preferred 213919deb769SDavid Rientjes * node and don't fall back to other nodes, as the cost of 214019deb769SDavid Rientjes * remote accesses would likely offset THP benefits. 214119deb769SDavid Rientjes * 214219deb769SDavid Rientjes * If the policy is interleave, or does not allow the current 214319deb769SDavid Rientjes * node in its nodemask, we allocate the standard way. 214419deb769SDavid Rientjes */ 214519deb769SDavid Rientjes if (pol->mode == MPOL_PREFERRED && !(pol->flags & MPOL_F_LOCAL)) 214619deb769SDavid Rientjes hpage_node = pol->v.preferred_node; 214719deb769SDavid Rientjes 214819deb769SDavid Rientjes nmask = policy_nodemask(gfp, pol); 214919deb769SDavid Rientjes if (!nmask || node_isset(hpage_node, *nmask)) { 215019deb769SDavid Rientjes mpol_cond_put(pol); 215119deb769SDavid Rientjes page = __alloc_pages_node(hpage_node, 215219deb769SDavid Rientjes gfp | __GFP_THISNODE, order); 215376e654ccSDavid Rientjes 215476e654ccSDavid Rientjes /* 215576e654ccSDavid Rientjes * If hugepage allocations are configured to always 215676e654ccSDavid Rientjes * synchronous compact or the vma has been madvised 215776e654ccSDavid Rientjes * to prefer hugepage backing, retry allowing remote 215876e654ccSDavid Rientjes * memory as well. 
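			 * The attempt above used __GFP_THISNODE to stay
			 * on hpage_node; the retry below drops it and
			 * adds __GFP_NORETRY so the remote fallback
			 * gives up quickly instead of looping in
			 * reclaim or compaction.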
215976e654ccSDavid Rientjes */ 216076e654ccSDavid Rientjes if (!page && (gfp & __GFP_DIRECT_RECLAIM)) 216176e654ccSDavid Rientjes page = __alloc_pages_node(hpage_node, 216276e654ccSDavid Rientjes gfp | __GFP_NORETRY, order); 216376e654ccSDavid Rientjes 216419deb769SDavid Rientjes goto out; 216519deb769SDavid Rientjes } 216619deb769SDavid Rientjes } 216719deb769SDavid Rientjes 2168077fcf11SAneesh Kumar K.V nmask = policy_nodemask(gfp, pol); 216904ec6264SVlastimil Babka preferred_nid = policy_node(gfp, pol, node); 217004ec6264SVlastimil Babka page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask); 2171d51e9894SVlastimil Babka mpol_cond_put(pol); 2172be97a41bSVlastimil Babka out: 2173077fcf11SAneesh Kumar K.V return page; 2174077fcf11SAneesh Kumar K.V } 217569262215SChristoph Hellwig EXPORT_SYMBOL(alloc_pages_vma); 2176077fcf11SAneesh Kumar K.V 21771da177e4SLinus Torvalds /** 21781da177e4SLinus Torvalds * alloc_pages_current - Allocate pages. 21791da177e4SLinus Torvalds * 21801da177e4SLinus Torvalds * @gfp: 21811da177e4SLinus Torvalds * %GFP_USER user allocation, 21821da177e4SLinus Torvalds * %GFP_KERNEL kernel allocation, 21831da177e4SLinus Torvalds * %GFP_HIGHMEM highmem allocation, 21841da177e4SLinus Torvalds * %GFP_FS don't call back into a file system. 21851da177e4SLinus Torvalds * %GFP_ATOMIC don't sleep. 21861da177e4SLinus Torvalds * @order: Power of two of allocation size in pages. 0 is a single page. 21871da177e4SLinus Torvalds * 21881da177e4SLinus Torvalds * Allocate a page from the kernel page pool. When not in 21891da177e4SLinus Torvalds * interrupt context and apply the current process NUMA policy. 21901da177e4SLinus Torvalds * Returns NULL when no page can be allocated. 21911da177e4SLinus Torvalds */ 2192dd0fc66fSAl Viro struct page *alloc_pages_current(gfp_t gfp, unsigned order) 21931da177e4SLinus Torvalds { 21948d90274bSOleg Nesterov struct mempolicy *pol = &default_policy; 2195c0ff7453SMiao Xie struct page *page; 21961da177e4SLinus Torvalds 21978d90274bSOleg Nesterov if (!in_interrupt() && !(gfp & __GFP_THISNODE)) 21988d90274bSOleg Nesterov pol = get_task_policy(current); 219952cd3b07SLee Schermerhorn 220052cd3b07SLee Schermerhorn /* 220152cd3b07SLee Schermerhorn * No reference counting needed for current->mempolicy 220252cd3b07SLee Schermerhorn * nor system default_policy 220352cd3b07SLee Schermerhorn */ 220445c4745aSLee Schermerhorn if (pol->mode == MPOL_INTERLEAVE) 2205c0ff7453SMiao Xie page = alloc_page_interleave(gfp, order, interleave_nodes(pol)); 2206c0ff7453SMiao Xie else 2207c0ff7453SMiao Xie page = __alloc_pages_nodemask(gfp, order, 220804ec6264SVlastimil Babka policy_node(gfp, pol, numa_node_id()), 22095c4b4be3SAndi Kleen policy_nodemask(gfp, pol)); 2210cc9a6c87SMel Gorman 2211c0ff7453SMiao Xie return page; 22121da177e4SLinus Torvalds } 22131da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_pages_current); 22141da177e4SLinus Torvalds 2215ef0855d3SOleg Nesterov int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst) 2216ef0855d3SOleg Nesterov { 2217ef0855d3SOleg Nesterov struct mempolicy *pol = mpol_dup(vma_policy(src)); 2218ef0855d3SOleg Nesterov 2219ef0855d3SOleg Nesterov if (IS_ERR(pol)) 2220ef0855d3SOleg Nesterov return PTR_ERR(pol); 2221ef0855d3SOleg Nesterov dst->vm_policy = pol; 2222ef0855d3SOleg Nesterov return 0; 2223ef0855d3SOleg Nesterov } 2224ef0855d3SOleg Nesterov 22254225399aSPaul Jackson /* 2226846a16bfSLee Schermerhorn * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it 22274225399aSPaul Jackson * rebinds the 
mempolicy its copying by calling mpol_rebind_policy() 22284225399aSPaul Jackson * with the mems_allowed returned by cpuset_mems_allowed(). This 22294225399aSPaul Jackson * keeps mempolicies cpuset relative after its cpuset moves. See 22304225399aSPaul Jackson * further kernel/cpuset.c update_nodemask(). 2231708c1bbcSMiao Xie * 2232708c1bbcSMiao Xie * current's mempolicy may be rebinded by the other task(the task that changes 2233708c1bbcSMiao Xie * cpuset's mems), so we needn't do rebind work for current task. 22344225399aSPaul Jackson */ 22354225399aSPaul Jackson 2236846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */ 2237846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old) 22381da177e4SLinus Torvalds { 22391da177e4SLinus Torvalds struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL); 22401da177e4SLinus Torvalds 22411da177e4SLinus Torvalds if (!new) 22421da177e4SLinus Torvalds return ERR_PTR(-ENOMEM); 2243708c1bbcSMiao Xie 2244708c1bbcSMiao Xie /* task's mempolicy is protected by alloc_lock */ 2245708c1bbcSMiao Xie if (old == current->mempolicy) { 2246708c1bbcSMiao Xie task_lock(current); 2247708c1bbcSMiao Xie *new = *old; 2248708c1bbcSMiao Xie task_unlock(current); 2249708c1bbcSMiao Xie } else 2250708c1bbcSMiao Xie *new = *old; 2251708c1bbcSMiao Xie 22524225399aSPaul Jackson if (current_cpuset_is_being_rebound()) { 22534225399aSPaul Jackson nodemask_t mems = cpuset_mems_allowed(current); 2254213980c0SVlastimil Babka mpol_rebind_policy(new, &mems); 22554225399aSPaul Jackson } 22561da177e4SLinus Torvalds atomic_set(&new->refcnt, 1); 22571da177e4SLinus Torvalds return new; 22581da177e4SLinus Torvalds } 22591da177e4SLinus Torvalds 22601da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */ 2261fcfb4dccSKOSAKI Motohiro bool __mpol_equal(struct mempolicy *a, struct mempolicy *b) 22621da177e4SLinus Torvalds { 22631da177e4SLinus Torvalds if (!a || !b) 2264fcfb4dccSKOSAKI Motohiro return false; 226545c4745aSLee Schermerhorn if (a->mode != b->mode) 2266fcfb4dccSKOSAKI Motohiro return false; 226719800502SBob Liu if (a->flags != b->flags) 2268fcfb4dccSKOSAKI Motohiro return false; 226919800502SBob Liu if (mpol_store_user_nodemask(a)) 227019800502SBob Liu if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask)) 2271fcfb4dccSKOSAKI Motohiro return false; 227219800502SBob Liu 227345c4745aSLee Schermerhorn switch (a->mode) { 227419770b32SMel Gorman case MPOL_BIND: 227519770b32SMel Gorman /* Fall through */ 22761da177e4SLinus Torvalds case MPOL_INTERLEAVE: 2277fcfb4dccSKOSAKI Motohiro return !!nodes_equal(a->v.nodes, b->v.nodes); 22781da177e4SLinus Torvalds case MPOL_PREFERRED: 22798970a63eSYisheng Xie /* a's ->flags is the same as b's */ 22808970a63eSYisheng Xie if (a->flags & MPOL_F_LOCAL) 22818970a63eSYisheng Xie return true; 228275719661SNamhyung Kim return a->v.preferred_node == b->v.preferred_node; 22831da177e4SLinus Torvalds default: 22841da177e4SLinus Torvalds BUG(); 2285fcfb4dccSKOSAKI Motohiro return false; 22861da177e4SLinus Torvalds } 22871da177e4SLinus Torvalds } 22881da177e4SLinus Torvalds 22891da177e4SLinus Torvalds /* 22901da177e4SLinus Torvalds * Shared memory backing store policy support. 22911da177e4SLinus Torvalds * 22921da177e4SLinus Torvalds * Remember policies even when nobody has shared memory mapped. 22931da177e4SLinus Torvalds * The policies are kept in Red-Black tree linked from the inode. 
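 * Each sp_node in the tree covers a [start, end) range of page offsets
 * into the object and carries its own mempolicy.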
22944a8c7bb5SNathan Zimmer * They are protected by the sp->lock rwlock, which should be held 22951da177e4SLinus Torvalds * for any accesses to the tree. 22961da177e4SLinus Torvalds */ 22971da177e4SLinus Torvalds 22984a8c7bb5SNathan Zimmer /* 22994a8c7bb5SNathan Zimmer * lookup first element intersecting start-end. Caller holds sp->lock for 23004a8c7bb5SNathan Zimmer * reading or for writing 23014a8c7bb5SNathan Zimmer */ 23021da177e4SLinus Torvalds static struct sp_node * 23031da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end) 23041da177e4SLinus Torvalds { 23051da177e4SLinus Torvalds struct rb_node *n = sp->root.rb_node; 23061da177e4SLinus Torvalds 23071da177e4SLinus Torvalds while (n) { 23081da177e4SLinus Torvalds struct sp_node *p = rb_entry(n, struct sp_node, nd); 23091da177e4SLinus Torvalds 23101da177e4SLinus Torvalds if (start >= p->end) 23111da177e4SLinus Torvalds n = n->rb_right; 23121da177e4SLinus Torvalds else if (end <= p->start) 23131da177e4SLinus Torvalds n = n->rb_left; 23141da177e4SLinus Torvalds else 23151da177e4SLinus Torvalds break; 23161da177e4SLinus Torvalds } 23171da177e4SLinus Torvalds if (!n) 23181da177e4SLinus Torvalds return NULL; 23191da177e4SLinus Torvalds for (;;) { 23201da177e4SLinus Torvalds struct sp_node *w = NULL; 23211da177e4SLinus Torvalds struct rb_node *prev = rb_prev(n); 23221da177e4SLinus Torvalds if (!prev) 23231da177e4SLinus Torvalds break; 23241da177e4SLinus Torvalds w = rb_entry(prev, struct sp_node, nd); 23251da177e4SLinus Torvalds if (w->end <= start) 23261da177e4SLinus Torvalds break; 23271da177e4SLinus Torvalds n = prev; 23281da177e4SLinus Torvalds } 23291da177e4SLinus Torvalds return rb_entry(n, struct sp_node, nd); 23301da177e4SLinus Torvalds } 23311da177e4SLinus Torvalds 23324a8c7bb5SNathan Zimmer /* 23334a8c7bb5SNathan Zimmer * Insert a new shared policy into the list. Caller holds sp->lock for 23344a8c7bb5SNathan Zimmer * writing. 23354a8c7bb5SNathan Zimmer */ 23361da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new) 23371da177e4SLinus Torvalds { 23381da177e4SLinus Torvalds struct rb_node **p = &sp->root.rb_node; 23391da177e4SLinus Torvalds struct rb_node *parent = NULL; 23401da177e4SLinus Torvalds struct sp_node *nd; 23411da177e4SLinus Torvalds 23421da177e4SLinus Torvalds while (*p) { 23431da177e4SLinus Torvalds parent = *p; 23441da177e4SLinus Torvalds nd = rb_entry(parent, struct sp_node, nd); 23451da177e4SLinus Torvalds if (new->start < nd->start) 23461da177e4SLinus Torvalds p = &(*p)->rb_left; 23471da177e4SLinus Torvalds else if (new->end > nd->end) 23481da177e4SLinus Torvalds p = &(*p)->rb_right; 23491da177e4SLinus Torvalds else 23501da177e4SLinus Torvalds BUG(); 23511da177e4SLinus Torvalds } 23521da177e4SLinus Torvalds rb_link_node(&new->nd, parent, p); 23531da177e4SLinus Torvalds rb_insert_color(&new->nd, &sp->root); 2354140d5a49SPaul Mundt pr_debug("inserting %lx-%lx: %d\n", new->start, new->end, 235545c4745aSLee Schermerhorn new->policy ? 
new->policy->mode : 0); 23561da177e4SLinus Torvalds } 23571da177e4SLinus Torvalds 23581da177e4SLinus Torvalds /* Find shared policy intersecting idx */ 23591da177e4SLinus Torvalds struct mempolicy * 23601da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx) 23611da177e4SLinus Torvalds { 23621da177e4SLinus Torvalds struct mempolicy *pol = NULL; 23631da177e4SLinus Torvalds struct sp_node *sn; 23641da177e4SLinus Torvalds 23651da177e4SLinus Torvalds if (!sp->root.rb_node) 23661da177e4SLinus Torvalds return NULL; 23674a8c7bb5SNathan Zimmer read_lock(&sp->lock); 23681da177e4SLinus Torvalds sn = sp_lookup(sp, idx, idx+1); 23691da177e4SLinus Torvalds if (sn) { 23701da177e4SLinus Torvalds mpol_get(sn->policy); 23711da177e4SLinus Torvalds pol = sn->policy; 23721da177e4SLinus Torvalds } 23734a8c7bb5SNathan Zimmer read_unlock(&sp->lock); 23741da177e4SLinus Torvalds return pol; 23751da177e4SLinus Torvalds } 23761da177e4SLinus Torvalds 237763f74ca2SKOSAKI Motohiro static void sp_free(struct sp_node *n) 237863f74ca2SKOSAKI Motohiro { 237963f74ca2SKOSAKI Motohiro mpol_put(n->policy); 238063f74ca2SKOSAKI Motohiro kmem_cache_free(sn_cache, n); 238163f74ca2SKOSAKI Motohiro } 238263f74ca2SKOSAKI Motohiro 2383771fb4d8SLee Schermerhorn /** 2384771fb4d8SLee Schermerhorn * mpol_misplaced - check whether current page node is valid in policy 2385771fb4d8SLee Schermerhorn * 2386b46e14acSFabian Frederick * @page: page to be checked 2387b46e14acSFabian Frederick * @vma: vm area where page mapped 2388b46e14acSFabian Frederick * @addr: virtual address where page mapped 2389771fb4d8SLee Schermerhorn * 2390771fb4d8SLee Schermerhorn * Lookup current policy node id for vma,addr and "compare to" page's 2391771fb4d8SLee Schermerhorn * node id. 2392771fb4d8SLee Schermerhorn * 2393771fb4d8SLee Schermerhorn * Returns: 2394771fb4d8SLee Schermerhorn * -1 - not misplaced, page is in the right node 2395771fb4d8SLee Schermerhorn * node - node id where the page should be 2396771fb4d8SLee Schermerhorn * 2397771fb4d8SLee Schermerhorn * Policy determination "mimics" alloc_page_vma(). 2398771fb4d8SLee Schermerhorn * Called from fault path where we know the vma and faulting address. 
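 *
 * Illustrative example: for an MPOL_BIND policy over nodes 0-1 (with
 * MPOL_F_MOF set), a page resident on node 2 returns the nearest allowed
 * node, while a page already on node 0 or 1 returns -1 (not misplaced).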
2399771fb4d8SLee Schermerhorn */ 2400771fb4d8SLee Schermerhorn int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr) 2401771fb4d8SLee Schermerhorn { 2402771fb4d8SLee Schermerhorn struct mempolicy *pol; 2403c33d6c06SMel Gorman struct zoneref *z; 2404771fb4d8SLee Schermerhorn int curnid = page_to_nid(page); 2405771fb4d8SLee Schermerhorn unsigned long pgoff; 240690572890SPeter Zijlstra int thiscpu = raw_smp_processor_id(); 240790572890SPeter Zijlstra int thisnid = cpu_to_node(thiscpu); 240898fa15f3SAnshuman Khandual int polnid = NUMA_NO_NODE; 2409771fb4d8SLee Schermerhorn int ret = -1; 2410771fb4d8SLee Schermerhorn 2411dd6eecb9SOleg Nesterov pol = get_vma_policy(vma, addr); 2412771fb4d8SLee Schermerhorn if (!(pol->flags & MPOL_F_MOF)) 2413771fb4d8SLee Schermerhorn goto out; 2414771fb4d8SLee Schermerhorn 2415771fb4d8SLee Schermerhorn switch (pol->mode) { 2416771fb4d8SLee Schermerhorn case MPOL_INTERLEAVE: 2417771fb4d8SLee Schermerhorn pgoff = vma->vm_pgoff; 2418771fb4d8SLee Schermerhorn pgoff += (addr - vma->vm_start) >> PAGE_SHIFT; 241998c70baaSLaurent Dufour polnid = offset_il_node(pol, pgoff); 2420771fb4d8SLee Schermerhorn break; 2421771fb4d8SLee Schermerhorn 2422771fb4d8SLee Schermerhorn case MPOL_PREFERRED: 2423771fb4d8SLee Schermerhorn if (pol->flags & MPOL_F_LOCAL) 2424771fb4d8SLee Schermerhorn polnid = numa_node_id(); 2425771fb4d8SLee Schermerhorn else 2426771fb4d8SLee Schermerhorn polnid = pol->v.preferred_node; 2427771fb4d8SLee Schermerhorn break; 2428771fb4d8SLee Schermerhorn 2429771fb4d8SLee Schermerhorn case MPOL_BIND: 2430c33d6c06SMel Gorman 2431771fb4d8SLee Schermerhorn /* 2432771fb4d8SLee Schermerhorn * allows binding to multiple nodes. 2433771fb4d8SLee Schermerhorn * use current page if in policy nodemask, 2434771fb4d8SLee Schermerhorn * else select nearest allowed node, if any. 2435771fb4d8SLee Schermerhorn * If no allowed nodes, use current [!misplaced]. 2436771fb4d8SLee Schermerhorn */ 2437771fb4d8SLee Schermerhorn if (node_isset(curnid, pol->v.nodes)) 2438771fb4d8SLee Schermerhorn goto out; 2439c33d6c06SMel Gorman z = first_zones_zonelist( 2440771fb4d8SLee Schermerhorn node_zonelist(numa_node_id(), GFP_HIGHUSER), 2441771fb4d8SLee Schermerhorn gfp_zone(GFP_HIGHUSER), 2442c33d6c06SMel Gorman &pol->v.nodes); 2443c1093b74SPavel Tatashin polnid = zone_to_nid(z->zone); 2444771fb4d8SLee Schermerhorn break; 2445771fb4d8SLee Schermerhorn 2446771fb4d8SLee Schermerhorn default: 2447771fb4d8SLee Schermerhorn BUG(); 2448771fb4d8SLee Schermerhorn } 24495606e387SMel Gorman 24505606e387SMel Gorman /* Migrate the page towards the node whose CPU is referencing it */ 2451e42c8ff2SMel Gorman if (pol->flags & MPOL_F_MORON) { 245290572890SPeter Zijlstra polnid = thisnid; 24535606e387SMel Gorman 245410f39042SRik van Riel if (!should_numa_migrate_memory(current, page, curnid, thiscpu)) 2455de1c9ce6SRik van Riel goto out; 2456de1c9ce6SRik van Riel } 2457e42c8ff2SMel Gorman 2458771fb4d8SLee Schermerhorn if (curnid != polnid) 2459771fb4d8SLee Schermerhorn ret = polnid; 2460771fb4d8SLee Schermerhorn out: 2461771fb4d8SLee Schermerhorn mpol_cond_put(pol); 2462771fb4d8SLee Schermerhorn 2463771fb4d8SLee Schermerhorn return ret; 2464771fb4d8SLee Schermerhorn } 2465771fb4d8SLee Schermerhorn 2466c11600e4SDavid Rientjes /* 2467c11600e4SDavid Rientjes * Drop the (possibly final) reference to task->mempolicy. 
It needs to be 2468c11600e4SDavid Rientjes * dropped after task->mempolicy is set to NULL so that any allocation done as 2469c11600e4SDavid Rientjes * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed 2470c11600e4SDavid Rientjes * policy. 2471c11600e4SDavid Rientjes */ 2472c11600e4SDavid Rientjes void mpol_put_task_policy(struct task_struct *task) 2473c11600e4SDavid Rientjes { 2474c11600e4SDavid Rientjes struct mempolicy *pol; 2475c11600e4SDavid Rientjes 2476c11600e4SDavid Rientjes task_lock(task); 2477c11600e4SDavid Rientjes pol = task->mempolicy; 2478c11600e4SDavid Rientjes task->mempolicy = NULL; 2479c11600e4SDavid Rientjes task_unlock(task); 2480c11600e4SDavid Rientjes mpol_put(pol); 2481c11600e4SDavid Rientjes } 2482c11600e4SDavid Rientjes 24831da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n) 24841da177e4SLinus Torvalds { 2485140d5a49SPaul Mundt pr_debug("deleting %lx-%lx\n", n->start, n->end); 24861da177e4SLinus Torvalds rb_erase(&n->nd, &sp->root); 248763f74ca2SKOSAKI Motohiro sp_free(n); 24881da177e4SLinus Torvalds } 24891da177e4SLinus Torvalds 249042288fe3SMel Gorman static void sp_node_init(struct sp_node *node, unsigned long start, 249142288fe3SMel Gorman unsigned long end, struct mempolicy *pol) 249242288fe3SMel Gorman { 249342288fe3SMel Gorman node->start = start; 249442288fe3SMel Gorman node->end = end; 249542288fe3SMel Gorman node->policy = pol; 249642288fe3SMel Gorman } 249742288fe3SMel Gorman 2498dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end, 2499dbcb0f19SAdrian Bunk struct mempolicy *pol) 25001da177e4SLinus Torvalds { 2501869833f2SKOSAKI Motohiro struct sp_node *n; 2502869833f2SKOSAKI Motohiro struct mempolicy *newpol; 25031da177e4SLinus Torvalds 2504869833f2SKOSAKI Motohiro n = kmem_cache_alloc(sn_cache, GFP_KERNEL); 25051da177e4SLinus Torvalds if (!n) 25061da177e4SLinus Torvalds return NULL; 2507869833f2SKOSAKI Motohiro 2508869833f2SKOSAKI Motohiro newpol = mpol_dup(pol); 2509869833f2SKOSAKI Motohiro if (IS_ERR(newpol)) { 2510869833f2SKOSAKI Motohiro kmem_cache_free(sn_cache, n); 2511869833f2SKOSAKI Motohiro return NULL; 2512869833f2SKOSAKI Motohiro } 2513869833f2SKOSAKI Motohiro newpol->flags |= MPOL_F_SHARED; 251442288fe3SMel Gorman sp_node_init(n, start, end, newpol); 2515869833f2SKOSAKI Motohiro 25161da177e4SLinus Torvalds return n; 25171da177e4SLinus Torvalds } 25181da177e4SLinus Torvalds 25191da177e4SLinus Torvalds /* Replace a policy range. */ 25201da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start, 25211da177e4SLinus Torvalds unsigned long end, struct sp_node *new) 25221da177e4SLinus Torvalds { 2523b22d127aSMel Gorman struct sp_node *n; 252442288fe3SMel Gorman struct sp_node *n_new = NULL; 252542288fe3SMel Gorman struct mempolicy *mpol_new = NULL; 2526b22d127aSMel Gorman int ret = 0; 25271da177e4SLinus Torvalds 252842288fe3SMel Gorman restart: 25294a8c7bb5SNathan Zimmer write_lock(&sp->lock); 25301da177e4SLinus Torvalds n = sp_lookup(sp, start, end); 25311da177e4SLinus Torvalds /* Take care of old policies in the same range.
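	 * A node fully inside [start, end) is deleted, a partially
	 * overlapping node is truncated, and a single node spanning the
	 * whole new range is split in two around it.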
*/ 25321da177e4SLinus Torvalds while (n && n->start < end) { 25331da177e4SLinus Torvalds struct rb_node *next = rb_next(&n->nd); 25341da177e4SLinus Torvalds if (n->start >= start) { 25351da177e4SLinus Torvalds if (n->end <= end) 25361da177e4SLinus Torvalds sp_delete(sp, n); 25371da177e4SLinus Torvalds else 25381da177e4SLinus Torvalds n->start = end; 25391da177e4SLinus Torvalds } else { 25401da177e4SLinus Torvalds /* Old policy spanning whole new range. */ 25411da177e4SLinus Torvalds if (n->end > end) { 254242288fe3SMel Gorman if (!n_new) 254342288fe3SMel Gorman goto alloc_new; 254442288fe3SMel Gorman 254542288fe3SMel Gorman *mpol_new = *n->policy; 254642288fe3SMel Gorman atomic_set(&mpol_new->refcnt, 1); 25477880639cSKOSAKI Motohiro sp_node_init(n_new, end, n->end, mpol_new); 25481da177e4SLinus Torvalds n->end = start; 25495ca39575SHillf Danton sp_insert(sp, n_new); 255042288fe3SMel Gorman n_new = NULL; 255142288fe3SMel Gorman mpol_new = NULL; 25521da177e4SLinus Torvalds break; 25531da177e4SLinus Torvalds } else 25541da177e4SLinus Torvalds n->end = start; 25551da177e4SLinus Torvalds } 25561da177e4SLinus Torvalds if (!next) 25571da177e4SLinus Torvalds break; 25581da177e4SLinus Torvalds n = rb_entry(next, struct sp_node, nd); 25591da177e4SLinus Torvalds } 25601da177e4SLinus Torvalds if (new) 25611da177e4SLinus Torvalds sp_insert(sp, new); 25624a8c7bb5SNathan Zimmer write_unlock(&sp->lock); 256342288fe3SMel Gorman ret = 0; 256442288fe3SMel Gorman 256542288fe3SMel Gorman err_out: 256642288fe3SMel Gorman if (mpol_new) 256742288fe3SMel Gorman mpol_put(mpol_new); 256842288fe3SMel Gorman if (n_new) 256942288fe3SMel Gorman kmem_cache_free(sn_cache, n_new); 257042288fe3SMel Gorman 2571b22d127aSMel Gorman return ret; 257242288fe3SMel Gorman 257342288fe3SMel Gorman alloc_new: 25744a8c7bb5SNathan Zimmer write_unlock(&sp->lock); 257542288fe3SMel Gorman ret = -ENOMEM; 257642288fe3SMel Gorman n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL); 257742288fe3SMel Gorman if (!n_new) 257842288fe3SMel Gorman goto err_out; 257942288fe3SMel Gorman mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL); 258042288fe3SMel Gorman if (!mpol_new) 258142288fe3SMel Gorman goto err_out; 258242288fe3SMel Gorman goto restart; 25831da177e4SLinus Torvalds } 25841da177e4SLinus Torvalds 258571fe804bSLee Schermerhorn /** 258671fe804bSLee Schermerhorn * mpol_shared_policy_init - initialize shared policy for inode 258771fe804bSLee Schermerhorn * @sp: pointer to inode shared policy 258871fe804bSLee Schermerhorn * @mpol: struct mempolicy to install 258971fe804bSLee Schermerhorn * 259071fe804bSLee Schermerhorn * Install non-NULL @mpol in inode's shared policy rb-tree. 259171fe804bSLee Schermerhorn * On entry, the current task has a reference on a non-NULL @mpol. 259271fe804bSLee Schermerhorn * This must be released on exit. 25934bfc4495SKAMEZAWA Hiroyuki * This is called at get_inode() calls and we can use GFP_KERNEL. 
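 *
 * For example, a tmpfs mount with "-o mpol=interleave:0-3" (illustrative
 * values) has its parsed mempolicy installed here, so later shmem
 * allocations for that mount's inodes interleave across nodes 0-3.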
259471fe804bSLee Schermerhorn */ 259571fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) 25967339ff83SRobin Holt { 259758568d2aSMiao Xie int ret; 259858568d2aSMiao Xie 259971fe804bSLee Schermerhorn sp->root = RB_ROOT; /* empty tree == default mempolicy */ 26004a8c7bb5SNathan Zimmer rwlock_init(&sp->lock); 26017339ff83SRobin Holt 260271fe804bSLee Schermerhorn if (mpol) { 26037339ff83SRobin Holt struct vm_area_struct pvma; 260471fe804bSLee Schermerhorn struct mempolicy *new; 26054bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 26067339ff83SRobin Holt 26074bfc4495SKAMEZAWA Hiroyuki if (!scratch) 26085c0c1654SLee Schermerhorn goto put_mpol; 260971fe804bSLee Schermerhorn /* contextualize the tmpfs mount point mempolicy */ 261071fe804bSLee Schermerhorn new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask); 261115d77835SLee Schermerhorn if (IS_ERR(new)) 26120cae3457SDan Carpenter goto free_scratch; /* no valid nodemask intersection */ 261358568d2aSMiao Xie 261458568d2aSMiao Xie task_lock(current); 26154bfc4495SKAMEZAWA Hiroyuki ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch); 261658568d2aSMiao Xie task_unlock(current); 261715d77835SLee Schermerhorn if (ret) 26185c0c1654SLee Schermerhorn goto put_new; 261971fe804bSLee Schermerhorn 262071fe804bSLee Schermerhorn /* Create pseudo-vma that contains just the policy */ 26212c4541e2SKirill A. Shutemov vma_init(&pvma, NULL); 262271fe804bSLee Schermerhorn pvma.vm_end = TASK_SIZE; /* policy covers entire file */ 262371fe804bSLee Schermerhorn mpol_set_shared_policy(sp, &pvma, new); /* adds ref */ 262415d77835SLee Schermerhorn 26255c0c1654SLee Schermerhorn put_new: 262671fe804bSLee Schermerhorn mpol_put(new); /* drop initial ref */ 26270cae3457SDan Carpenter free_scratch: 26284bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 26295c0c1654SLee Schermerhorn put_mpol: 26305c0c1654SLee Schermerhorn mpol_put(mpol); /* drop our incoming ref on sb mpol */ 26317339ff83SRobin Holt } 26327339ff83SRobin Holt } 26337339ff83SRobin Holt 26341da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info, 26351da177e4SLinus Torvalds struct vm_area_struct *vma, struct mempolicy *npol) 26361da177e4SLinus Torvalds { 26371da177e4SLinus Torvalds int err; 26381da177e4SLinus Torvalds struct sp_node *new = NULL; 26391da177e4SLinus Torvalds unsigned long sz = vma_pages(vma); 26401da177e4SLinus Torvalds 2641028fec41SDavid Rientjes pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n", 26421da177e4SLinus Torvalds vma->vm_pgoff, 264345c4745aSLee Schermerhorn sz, npol ? npol->mode : -1, 2644028fec41SDavid Rientjes npol ? npol->flags : -1, 264500ef2d2fSDavid Rientjes npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE); 26461da177e4SLinus Torvalds 26471da177e4SLinus Torvalds if (npol) { 26481da177e4SLinus Torvalds new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol); 26491da177e4SLinus Torvalds if (!new) 26501da177e4SLinus Torvalds return -ENOMEM; 26511da177e4SLinus Torvalds } 26521da177e4SLinus Torvalds err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new); 26531da177e4SLinus Torvalds if (err && new) 265463f74ca2SKOSAKI Motohiro sp_free(new); 26551da177e4SLinus Torvalds return err; 26561da177e4SLinus Torvalds } 26571da177e4SLinus Torvalds 26581da177e4SLinus Torvalds /* Free a backing policy store on inode delete. 
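 * Walks the whole rb-tree and drops each sp_node along with its
 * mempolicy reference.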
*/ 26591da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p) 26601da177e4SLinus Torvalds { 26611da177e4SLinus Torvalds struct sp_node *n; 26621da177e4SLinus Torvalds struct rb_node *next; 26631da177e4SLinus Torvalds 26641da177e4SLinus Torvalds if (!p->root.rb_node) 26651da177e4SLinus Torvalds return; 26664a8c7bb5SNathan Zimmer write_lock(&p->lock); 26671da177e4SLinus Torvalds next = rb_first(&p->root); 26681da177e4SLinus Torvalds while (next) { 26691da177e4SLinus Torvalds n = rb_entry(next, struct sp_node, nd); 26701da177e4SLinus Torvalds next = rb_next(&n->nd); 267163f74ca2SKOSAKI Motohiro sp_delete(p, n); 26721da177e4SLinus Torvalds } 26734a8c7bb5SNathan Zimmer write_unlock(&p->lock); 26741da177e4SLinus Torvalds } 26751da177e4SLinus Torvalds 26761a687c2eSMel Gorman #ifdef CONFIG_NUMA_BALANCING 2677c297663cSMel Gorman static int __initdata numabalancing_override; 26781a687c2eSMel Gorman 26791a687c2eSMel Gorman static void __init check_numabalancing_enable(void) 26801a687c2eSMel Gorman { 26811a687c2eSMel Gorman bool numabalancing_default = false; 26821a687c2eSMel Gorman 26831a687c2eSMel Gorman if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED)) 26841a687c2eSMel Gorman numabalancing_default = true; 26851a687c2eSMel Gorman 2686c297663cSMel Gorman /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */ 2687c297663cSMel Gorman if (numabalancing_override) 2688c297663cSMel Gorman set_numabalancing_state(numabalancing_override == 1); 2689c297663cSMel Gorman 2690b0dc2b9bSMel Gorman if (num_online_nodes() > 1 && !numabalancing_override) { 2691756a025fSJoe Perches pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n", 2692c297663cSMel Gorman numabalancing_default ? 
"Enabling" : "Disabling"); 26931a687c2eSMel Gorman set_numabalancing_state(numabalancing_default); 26941a687c2eSMel Gorman } 26951a687c2eSMel Gorman } 26961a687c2eSMel Gorman 26971a687c2eSMel Gorman static int __init setup_numabalancing(char *str) 26981a687c2eSMel Gorman { 26991a687c2eSMel Gorman int ret = 0; 27001a687c2eSMel Gorman if (!str) 27011a687c2eSMel Gorman goto out; 27021a687c2eSMel Gorman 27031a687c2eSMel Gorman if (!strcmp(str, "enable")) { 2704c297663cSMel Gorman numabalancing_override = 1; 27051a687c2eSMel Gorman ret = 1; 27061a687c2eSMel Gorman } else if (!strcmp(str, "disable")) { 2707c297663cSMel Gorman numabalancing_override = -1; 27081a687c2eSMel Gorman ret = 1; 27091a687c2eSMel Gorman } 27101a687c2eSMel Gorman out: 27111a687c2eSMel Gorman if (!ret) 27124a404beaSAndrew Morton pr_warn("Unable to parse numa_balancing=\n"); 27131a687c2eSMel Gorman 27141a687c2eSMel Gorman return ret; 27151a687c2eSMel Gorman } 27161a687c2eSMel Gorman __setup("numa_balancing=", setup_numabalancing); 27171a687c2eSMel Gorman #else 27181a687c2eSMel Gorman static inline void __init check_numabalancing_enable(void) 27191a687c2eSMel Gorman { 27201a687c2eSMel Gorman } 27211a687c2eSMel Gorman #endif /* CONFIG_NUMA_BALANCING */ 27221a687c2eSMel Gorman 27231da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */ 27241da177e4SLinus Torvalds void __init numa_policy_init(void) 27251da177e4SLinus Torvalds { 2726b71636e2SPaul Mundt nodemask_t interleave_nodes; 2727b71636e2SPaul Mundt unsigned long largest = 0; 2728b71636e2SPaul Mundt int nid, prefer = 0; 2729b71636e2SPaul Mundt 27301da177e4SLinus Torvalds policy_cache = kmem_cache_create("numa_policy", 27311da177e4SLinus Torvalds sizeof(struct mempolicy), 273220c2df83SPaul Mundt 0, SLAB_PANIC, NULL); 27331da177e4SLinus Torvalds 27341da177e4SLinus Torvalds sn_cache = kmem_cache_create("shared_policy_node", 27351da177e4SLinus Torvalds sizeof(struct sp_node), 273620c2df83SPaul Mundt 0, SLAB_PANIC, NULL); 27371da177e4SLinus Torvalds 27385606e387SMel Gorman for_each_node(nid) { 27395606e387SMel Gorman preferred_node_policy[nid] = (struct mempolicy) { 27405606e387SMel Gorman .refcnt = ATOMIC_INIT(1), 27415606e387SMel Gorman .mode = MPOL_PREFERRED, 27425606e387SMel Gorman .flags = MPOL_F_MOF | MPOL_F_MORON, 27435606e387SMel Gorman .v = { .preferred_node = nid, }, 27445606e387SMel Gorman }; 27455606e387SMel Gorman } 27465606e387SMel Gorman 2747b71636e2SPaul Mundt /* 2748b71636e2SPaul Mundt * Set interleaving policy for system init. Interleaving is only 2749b71636e2SPaul Mundt * enabled across suitably sized nodes (default is >= 16MB), or 2750b71636e2SPaul Mundt * fall back to the largest node if they're all smaller. 2751b71636e2SPaul Mundt */ 2752b71636e2SPaul Mundt nodes_clear(interleave_nodes); 275301f13bd6SLai Jiangshan for_each_node_state(nid, N_MEMORY) { 2754b71636e2SPaul Mundt unsigned long total_pages = node_present_pages(nid); 27551da177e4SLinus Torvalds 2756b71636e2SPaul Mundt /* Preserve the largest node */ 2757b71636e2SPaul Mundt if (largest < total_pages) { 2758b71636e2SPaul Mundt largest = total_pages; 2759b71636e2SPaul Mundt prefer = nid; 2760b71636e2SPaul Mundt } 2761b71636e2SPaul Mundt 2762b71636e2SPaul Mundt /* Interleave this node? 
*/ 2763b71636e2SPaul Mundt if ((total_pages << PAGE_SHIFT) >= (16 << 20)) 2764b71636e2SPaul Mundt node_set(nid, interleave_nodes); 2765b71636e2SPaul Mundt } 2766b71636e2SPaul Mundt 2767b71636e2SPaul Mundt /* All too small, use the largest */ 2768b71636e2SPaul Mundt if (unlikely(nodes_empty(interleave_nodes))) 2769b71636e2SPaul Mundt node_set(prefer, interleave_nodes); 2770b71636e2SPaul Mundt 2771028fec41SDavid Rientjes if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes)) 2772b1de0d13SMitchel Humpherys pr_err("%s: interleaving failed\n", __func__); 27731a687c2eSMel Gorman 27741a687c2eSMel Gorman check_numabalancing_enable(); 27751da177e4SLinus Torvalds } 27761da177e4SLinus Torvalds 27778bccd85fSChristoph Lameter /* Reset policy of current process to default */ 27781da177e4SLinus Torvalds void numa_default_policy(void) 27791da177e4SLinus Torvalds { 2780028fec41SDavid Rientjes do_set_mempolicy(MPOL_DEFAULT, 0, NULL); 27811da177e4SLinus Torvalds } 278268860ec1SPaul Jackson 27834225399aSPaul Jackson /* 2784095f1fc4SLee Schermerhorn * Parse and format mempolicy from/to strings 2785095f1fc4SLee Schermerhorn */ 2786095f1fc4SLee Schermerhorn 2787095f1fc4SLee Schermerhorn /* 2788f2a07f40SHugh Dickins * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag. 27891a75a6c8SChristoph Lameter */ 2790345ace9cSLee Schermerhorn static const char * const policy_modes[] = 2791345ace9cSLee Schermerhorn { 2792345ace9cSLee Schermerhorn [MPOL_DEFAULT] = "default", 2793345ace9cSLee Schermerhorn [MPOL_PREFERRED] = "prefer", 2794345ace9cSLee Schermerhorn [MPOL_BIND] = "bind", 2795345ace9cSLee Schermerhorn [MPOL_INTERLEAVE] = "interleave", 2796d3a71033SLee Schermerhorn [MPOL_LOCAL] = "local", 2797345ace9cSLee Schermerhorn }; 27981a75a6c8SChristoph Lameter 2799095f1fc4SLee Schermerhorn 2800095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS 2801095f1fc4SLee Schermerhorn /** 2802f2a07f40SHugh Dickins * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option. 2803095f1fc4SLee Schermerhorn * @str: string containing mempolicy to parse 280471fe804bSLee Schermerhorn * @mpol: pointer to struct mempolicy pointer, returned on success. 
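 *	(For example, parsing the illustrative string "bind=static:1-3"
 *	yields MPOL_BIND with MPOL_F_STATIC_NODES over nodes 1-3; see the
 *	format description below.)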
2805095f1fc4SLee Schermerhorn * 2806095f1fc4SLee Schermerhorn * Format of input: 2807095f1fc4SLee Schermerhorn * <mode>[=<flags>][:<nodelist>] 2808095f1fc4SLee Schermerhorn * 280971fe804bSLee Schermerhorn * On success, returns 0, else 1 2810095f1fc4SLee Schermerhorn */ 2811a7a88b23SHugh Dickins int mpol_parse_str(char *str, struct mempolicy **mpol) 2812095f1fc4SLee Schermerhorn { 281371fe804bSLee Schermerhorn struct mempolicy *new = NULL; 2814f2a07f40SHugh Dickins unsigned short mode_flags; 281571fe804bSLee Schermerhorn nodemask_t nodes; 2816095f1fc4SLee Schermerhorn char *nodelist = strchr(str, ':'); 2817095f1fc4SLee Schermerhorn char *flags = strchr(str, '='); 2818dedf2c73Szhong jiang int err = 1, mode; 2819095f1fc4SLee Schermerhorn 2820095f1fc4SLee Schermerhorn if (nodelist) { 2821095f1fc4SLee Schermerhorn /* NUL-terminate mode or flags string */ 2822095f1fc4SLee Schermerhorn *nodelist++ = '\0'; 282371fe804bSLee Schermerhorn if (nodelist_parse(nodelist, nodes)) 2824095f1fc4SLee Schermerhorn goto out; 282501f13bd6SLai Jiangshan if (!nodes_subset(nodes, node_states[N_MEMORY])) 2826095f1fc4SLee Schermerhorn goto out; 282771fe804bSLee Schermerhorn } else 282871fe804bSLee Schermerhorn nodes_clear(nodes); 282971fe804bSLee Schermerhorn 2830095f1fc4SLee Schermerhorn if (flags) 2831095f1fc4SLee Schermerhorn *flags++ = '\0'; /* terminate mode string */ 2832095f1fc4SLee Schermerhorn 2833dedf2c73Szhong jiang mode = match_string(policy_modes, MPOL_MAX, str); 2834dedf2c73Szhong jiang if (mode < 0) 2835095f1fc4SLee Schermerhorn goto out; 2836095f1fc4SLee Schermerhorn 283771fe804bSLee Schermerhorn switch (mode) { 2838095f1fc4SLee Schermerhorn case MPOL_PREFERRED: 283971fe804bSLee Schermerhorn /* 284071fe804bSLee Schermerhorn * Insist on a nodelist of one node only 284171fe804bSLee Schermerhorn */ 2842095f1fc4SLee Schermerhorn if (nodelist) { 2843095f1fc4SLee Schermerhorn char *rest = nodelist; 2844095f1fc4SLee Schermerhorn while (isdigit(*rest)) 2845095f1fc4SLee Schermerhorn rest++; 2846926f2ae0SKOSAKI Motohiro if (*rest) 2847926f2ae0SKOSAKI Motohiro goto out; 2848095f1fc4SLee Schermerhorn } 2849095f1fc4SLee Schermerhorn break; 2850095f1fc4SLee Schermerhorn case MPOL_INTERLEAVE: 2851095f1fc4SLee Schermerhorn /* 2852095f1fc4SLee Schermerhorn * Default to online nodes with memory if no nodelist 2853095f1fc4SLee Schermerhorn */ 2854095f1fc4SLee Schermerhorn if (!nodelist) 285501f13bd6SLai Jiangshan nodes = node_states[N_MEMORY]; 28563f226aa1SLee Schermerhorn break; 285771fe804bSLee Schermerhorn case MPOL_LOCAL: 28583f226aa1SLee Schermerhorn /* 285971fe804bSLee Schermerhorn * Don't allow a nodelist; mpol_new() checks flags 28603f226aa1SLee Schermerhorn */ 286171fe804bSLee Schermerhorn if (nodelist) 28623f226aa1SLee Schermerhorn goto out; 286371fe804bSLee Schermerhorn mode = MPOL_PREFERRED; 28643f226aa1SLee Schermerhorn break; 2865413b43deSRavikiran G Thirumalai case MPOL_DEFAULT: 2866413b43deSRavikiran G Thirumalai /* 2867413b43deSRavikiran G Thirumalai * Insist on a empty nodelist 2868413b43deSRavikiran G Thirumalai */ 2869413b43deSRavikiran G Thirumalai if (!nodelist) 2870413b43deSRavikiran G Thirumalai err = 0; 2871413b43deSRavikiran G Thirumalai goto out; 2872d69b2e63SKOSAKI Motohiro case MPOL_BIND: 287371fe804bSLee Schermerhorn /* 2874d69b2e63SKOSAKI Motohiro * Insist on a nodelist 287571fe804bSLee Schermerhorn */ 2876d69b2e63SKOSAKI Motohiro if (!nodelist) 2877d69b2e63SKOSAKI Motohiro goto out; 2878095f1fc4SLee Schermerhorn } 2879095f1fc4SLee Schermerhorn 288071fe804bSLee Schermerhorn mode_flags = 0; 
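	/*
	 * An optional "=static" / "=relative" suffix selects how the user
	 * nodemask is remapped when the task's set of allowed nodes later
	 * changes (see MPOL_F_STATIC_NODES / MPOL_F_RELATIVE_NODES).
	 */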
2881095f1fc4SLee Schermerhorn if (flags) { 2882095f1fc4SLee Schermerhorn /* 2883095f1fc4SLee Schermerhorn * Currently, we only support two mutually exclusive 2884095f1fc4SLee Schermerhorn * mode flags. 2885095f1fc4SLee Schermerhorn */ 2886095f1fc4SLee Schermerhorn if (!strcmp(flags, "static")) 288771fe804bSLee Schermerhorn mode_flags |= MPOL_F_STATIC_NODES; 2888095f1fc4SLee Schermerhorn else if (!strcmp(flags, "relative")) 288971fe804bSLee Schermerhorn mode_flags |= MPOL_F_RELATIVE_NODES; 2890095f1fc4SLee Schermerhorn else 2891926f2ae0SKOSAKI Motohiro goto out; 2892095f1fc4SLee Schermerhorn } 289371fe804bSLee Schermerhorn 289471fe804bSLee Schermerhorn new = mpol_new(mode, mode_flags, &nodes); 289571fe804bSLee Schermerhorn if (IS_ERR(new)) 2896926f2ae0SKOSAKI Motohiro goto out; 2897926f2ae0SKOSAKI Motohiro 2898f2a07f40SHugh Dickins /* 2899f2a07f40SHugh Dickins * Save nodes for mpol_to_str() to show the tmpfs mount options 2900f2a07f40SHugh Dickins * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo. 2901f2a07f40SHugh Dickins */ 2902f2a07f40SHugh Dickins if (mode != MPOL_PREFERRED) 2903f2a07f40SHugh Dickins new->v.nodes = nodes; 2904f2a07f40SHugh Dickins else if (nodelist) 2905f2a07f40SHugh Dickins new->v.preferred_node = first_node(nodes); 2906f2a07f40SHugh Dickins else 2907f2a07f40SHugh Dickins new->flags |= MPOL_F_LOCAL; 2908f2a07f40SHugh Dickins 2909f2a07f40SHugh Dickins /* 2910f2a07f40SHugh Dickins * Save nodes for contextualization: this will be used to "clone" 2911f2a07f40SHugh Dickins * the mempolicy in a specific context [cpuset] at a later time. 2912f2a07f40SHugh Dickins */ 2913e17f74afSLee Schermerhorn new->w.user_nodemask = nodes; 2914f2a07f40SHugh Dickins 2915926f2ae0SKOSAKI Motohiro err = 0; 291671fe804bSLee Schermerhorn 2917095f1fc4SLee Schermerhorn out: 2918095f1fc4SLee Schermerhorn /* Restore string for error message */ 2919095f1fc4SLee Schermerhorn if (nodelist) 2920095f1fc4SLee Schermerhorn *--nodelist = ':'; 2921095f1fc4SLee Schermerhorn if (flags) 2922095f1fc4SLee Schermerhorn *--flags = '='; 292371fe804bSLee Schermerhorn if (!err) 292471fe804bSLee Schermerhorn *mpol = new; 2925095f1fc4SLee Schermerhorn return err; 2926095f1fc4SLee Schermerhorn } 2927095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */ 2928095f1fc4SLee Schermerhorn 292971fe804bSLee Schermerhorn /** 293071fe804bSLee Schermerhorn * mpol_to_str - format a mempolicy structure for printing 293171fe804bSLee Schermerhorn * @buffer: to contain formatted mempolicy string 293271fe804bSLee Schermerhorn * @maxlen: length of @buffer 293371fe804bSLee Schermerhorn * @pol: pointer to mempolicy to be formatted 293471fe804bSLee Schermerhorn * 2935948927eeSDavid Rientjes * Convert @pol into a string. If @buffer is too short, truncate the string. 2936948927eeSDavid Rientjes * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the 2937948927eeSDavid Rientjes * longest flag, "relative", and to display at least a few node ids. 
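 *
 * Example output (illustrative): "interleave=relative:0-3" or "prefer:1".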
29381a75a6c8SChristoph Lameter */ 2939948927eeSDavid Rientjes void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol) 29401a75a6c8SChristoph Lameter { 29411a75a6c8SChristoph Lameter char *p = buffer; 2942948927eeSDavid Rientjes nodemask_t nodes = NODE_MASK_NONE; 2943948927eeSDavid Rientjes unsigned short mode = MPOL_DEFAULT; 2944948927eeSDavid Rientjes unsigned short flags = 0; 29451a75a6c8SChristoph Lameter 29468790c71aSDavid Rientjes if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) { 2947bea904d5SLee Schermerhorn mode = pol->mode; 2948948927eeSDavid Rientjes flags = pol->flags; 2949948927eeSDavid Rientjes } 2950bea904d5SLee Schermerhorn 29511a75a6c8SChristoph Lameter switch (mode) { 29521a75a6c8SChristoph Lameter case MPOL_DEFAULT: 29531a75a6c8SChristoph Lameter break; 29541a75a6c8SChristoph Lameter case MPOL_PREFERRED: 2955fc36b8d3SLee Schermerhorn if (flags & MPOL_F_LOCAL) 2956f2a07f40SHugh Dickins mode = MPOL_LOCAL; 295753f2556bSLee Schermerhorn else 2958fc36b8d3SLee Schermerhorn node_set(pol->v.preferred_node, nodes); 29591a75a6c8SChristoph Lameter break; 29601a75a6c8SChristoph Lameter case MPOL_BIND: 29611a75a6c8SChristoph Lameter case MPOL_INTERLEAVE: 29621a75a6c8SChristoph Lameter nodes = pol->v.nodes; 29631a75a6c8SChristoph Lameter break; 29641a75a6c8SChristoph Lameter default: 2965948927eeSDavid Rientjes WARN_ON_ONCE(1); 2966948927eeSDavid Rientjes snprintf(p, maxlen, "unknown"); 2967948927eeSDavid Rientjes return; 29681a75a6c8SChristoph Lameter } 29691a75a6c8SChristoph Lameter 2970b7a9f420SDavid Rientjes p += snprintf(p, maxlen, "%s", policy_modes[mode]); 29711a75a6c8SChristoph Lameter 2972fc36b8d3SLee Schermerhorn if (flags & MPOL_MODE_FLAGS) { 2973948927eeSDavid Rientjes p += snprintf(p, buffer + maxlen - p, "="); 2974f5b087b5SDavid Rientjes 29752291990aSLee Schermerhorn /* 29762291990aSLee Schermerhorn * Currently, the only defined flags are mutually exclusive 29772291990aSLee Schermerhorn */ 2978f5b087b5SDavid Rientjes if (flags & MPOL_F_STATIC_NODES) 29792291990aSLee Schermerhorn p += snprintf(p, buffer + maxlen - p, "static"); 29802291990aSLee Schermerhorn else if (flags & MPOL_F_RELATIVE_NODES) 29812291990aSLee Schermerhorn p += snprintf(p, buffer + maxlen - p, "relative"); 2982f5b087b5SDavid Rientjes } 2983f5b087b5SDavid Rientjes 29849e763e0fSTejun Heo if (!nodes_empty(nodes)) 29859e763e0fSTejun Heo p += scnprintf(p, buffer + maxlen - p, ":%*pbl", 29869e763e0fSTejun Heo nodemask_pr_args(&nodes)); 29871a75a6c8SChristoph Lameter } 2988