// SPDX-License-Identifier: GPL-2.0-only
/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 *
 * NUMA policy allows the user to give hints about which node(s) memory
 * should be allocated on.
 *
 * Support multiple policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave      Allocate memory interleaved over a set of nodes,
 *                 with normal fallback if it fails.
 *                 For VMA based allocations this interleaves based on the
 *                 offset into the backing object or offset into the mapping
 *                 for anonymous memory. For process policy a per-process
 *                 counter is used.
 *
 * bind            Only allocate memory on a specific set of nodes,
 *                 no fallback.
 *                 FIXME: memory is allocated starting with the first node
 *                 to the last. It would be better if bind would truly restrict
 *                 the allocation to memory nodes instead
 *
 * preferred       Try a specific node first before normal fallback.
 *                 As a special case NUMA_NO_NODE here means do the allocation
 *                 on the local CPU. This is normally identical to default,
 *                 but useful to set in a VMA when you have a non-default
 *                 process policy.
 *
 * preferred many  Try a set of nodes first before normal fallback. This is
 *                 similar to preferred without the special case.
 *
 * default         Allocate on the local node first, or when on a VMA
 *                 use the process policy. This is what Linux always did
 *                 in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non-interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/pagewalk.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/ptrace.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>
#include <linux/swapops.h>

#include <asm/tlbflush.h>
#include <linux/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;
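/*
 * Illustrative userspace sketch (not part of the kernel sources): how a
 * process might request the policies described in the header comment above
 * through set_mempolicy(2) and mbind(2).  The prototypes and MPOL_* constants
 * come from <numaif.h> (libnuma); the node numbers, "addr" and "length" are
 * made-up placeholders.
 *
 *	#include <numaif.h>
 *
 *	unsigned long both = (1UL << 0) | (1UL << 1);	// nodes 0 and 1
 *	unsigned long node1 = 1UL << 1;			// node 1 only
 *
 *	// Interleave all future allocations of this task across nodes 0-1.
 *	set_mempolicy(MPOL_INTERLEAVE, &both, 8 * sizeof(both));
 *
 *	// Restrict an existing mapping to node 1, with no fallback.
 *	mbind(addr, length, MPOL_BIND, &node1, 8 * sizeof(node1), 0);
 */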
/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

/**
 * numa_map_to_online_node - Find closest online node
 * @node: Node id to start the search
 *
 * Lookup the next closest node by distance if @node is not online.
 */
int numa_map_to_online_node(int node)
{
	int min_dist = INT_MAX, dist, n, min_node;

	if (node == NUMA_NO_NODE || node_online(node))
		return node;

	min_node = node;
	for_each_online_node(n) {
		dist = node_distance(node, n);
		if (dist < min_dist) {
			min_dist = dist;
			min_node = n;
		}
	}

	return min_node;
}
EXPORT_SYMBOL_GPL(numa_map_to_online_node);

struct mempolicy *get_task_policy(struct task_struct *p)
{
	struct mempolicy *pol = p->mempolicy;
	int node;

	if (pol)
		return pol;

	node = numa_node_id();
	if (node != NUMA_NO_NODE) {
		pol = &preferred_node_policy[node];
		/* preferred_node_policy is not initialised early in boot */
		if (pol->mode)
			return pol;
	}

	return &default_policy;
}

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
} mpol_ops[MPOL_MAX];

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig,
nodes_weight(*rel)); 1924c50bc01SDavid Rientjes nodes_onto(*ret, tmp, *rel); 193f5b087b5SDavid Rientjes } 194f5b087b5SDavid Rientjes 195be897d48SFeng Tang static int mpol_new_nodemask(struct mempolicy *pol, const nodemask_t *nodes) 19637012946SDavid Rientjes { 19737012946SDavid Rientjes if (nodes_empty(*nodes)) 19837012946SDavid Rientjes return -EINVAL; 199269fbe72SBen Widawsky pol->nodes = *nodes; 20037012946SDavid Rientjes return 0; 20137012946SDavid Rientjes } 20237012946SDavid Rientjes 20337012946SDavid Rientjes static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes) 20437012946SDavid Rientjes { 2057858d7bcSFeng Tang if (nodes_empty(*nodes)) 2067858d7bcSFeng Tang return -EINVAL; 207269fbe72SBen Widawsky 208269fbe72SBen Widawsky nodes_clear(pol->nodes); 209269fbe72SBen Widawsky node_set(first_node(*nodes), pol->nodes); 21037012946SDavid Rientjes return 0; 21137012946SDavid Rientjes } 21237012946SDavid Rientjes 21358568d2aSMiao Xie /* 21458568d2aSMiao Xie * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if 21558568d2aSMiao Xie * any, for the new policy. mpol_new() has already validated the nodes 2167858d7bcSFeng Tang * parameter with respect to the policy mode and flags. 21758568d2aSMiao Xie * 21858568d2aSMiao Xie * Must be called holding task's alloc_lock to protect task's mems_allowed 219c1e8d7c6SMichel Lespinasse * and mempolicy. May also be called holding the mmap_lock for write. 22058568d2aSMiao Xie */ 2214bfc4495SKAMEZAWA Hiroyuki static int mpol_set_nodemask(struct mempolicy *pol, 2224bfc4495SKAMEZAWA Hiroyuki const nodemask_t *nodes, struct nodemask_scratch *nsc) 22358568d2aSMiao Xie { 22458568d2aSMiao Xie int ret; 22558568d2aSMiao Xie 2267858d7bcSFeng Tang /* 2277858d7bcSFeng Tang * Default (pol==NULL) resp. local memory policies are not a 2287858d7bcSFeng Tang * subject of any remapping. They also do not need any special 2297858d7bcSFeng Tang * constructor. 2307858d7bcSFeng Tang */ 2317858d7bcSFeng Tang if (!pol || pol->mode == MPOL_LOCAL) 23258568d2aSMiao Xie return 0; 2337858d7bcSFeng Tang 23401f13bd6SLai Jiangshan /* Check N_MEMORY */ 2354bfc4495SKAMEZAWA Hiroyuki nodes_and(nsc->mask1, 23601f13bd6SLai Jiangshan cpuset_current_mems_allowed, node_states[N_MEMORY]); 23758568d2aSMiao Xie 23858568d2aSMiao Xie VM_BUG_ON(!nodes); 2397858d7bcSFeng Tang 24058568d2aSMiao Xie if (pol->flags & MPOL_F_RELATIVE_NODES) 2414bfc4495SKAMEZAWA Hiroyuki mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1); 24258568d2aSMiao Xie else 2434bfc4495SKAMEZAWA Hiroyuki nodes_and(nsc->mask2, *nodes, nsc->mask1); 2444bfc4495SKAMEZAWA Hiroyuki 24558568d2aSMiao Xie if (mpol_store_user_nodemask(pol)) 24658568d2aSMiao Xie pol->w.user_nodemask = *nodes; 24758568d2aSMiao Xie else 2487858d7bcSFeng Tang pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed; 24958568d2aSMiao Xie 2504bfc4495SKAMEZAWA Hiroyuki ret = mpol_ops[pol->mode].create(pol, &nsc->mask2); 25158568d2aSMiao Xie return ret; 25258568d2aSMiao Xie } 25358568d2aSMiao Xie 25458568d2aSMiao Xie /* 25558568d2aSMiao Xie * This function just creates a new policy, does some check and simple 25658568d2aSMiao Xie * initialization. You must invoke mpol_set_nodemask() to set nodes. 
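 *
 * A typical (illustrative) call sequence, as do_set_mempolicy() does further
 * down in this file, is roughly:
 *
 *	new = mpol_new(mode, flags, nodes);
 *	if (IS_ERR(new))
 *		return PTR_ERR(new);
 *	ret = mpol_set_nodemask(new, nodes, scratch);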
25758568d2aSMiao Xie */ 258028fec41SDavid Rientjes static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags, 259028fec41SDavid Rientjes nodemask_t *nodes) 2601da177e4SLinus Torvalds { 2611da177e4SLinus Torvalds struct mempolicy *policy; 2621da177e4SLinus Torvalds 263028fec41SDavid Rientjes pr_debug("setting mode %d flags %d nodes[0] %lx\n", 26400ef2d2fSDavid Rientjes mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE); 265140d5a49SPaul Mundt 2663e1f0645SDavid Rientjes if (mode == MPOL_DEFAULT) { 2673e1f0645SDavid Rientjes if (nodes && !nodes_empty(*nodes)) 26837012946SDavid Rientjes return ERR_PTR(-EINVAL); 269d3a71033SLee Schermerhorn return NULL; 27037012946SDavid Rientjes } 2713e1f0645SDavid Rientjes VM_BUG_ON(!nodes); 2723e1f0645SDavid Rientjes 2733e1f0645SDavid Rientjes /* 2743e1f0645SDavid Rientjes * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or 2753e1f0645SDavid Rientjes * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation). 2763e1f0645SDavid Rientjes * All other modes require a valid pointer to a non-empty nodemask. 2773e1f0645SDavid Rientjes */ 2783e1f0645SDavid Rientjes if (mode == MPOL_PREFERRED) { 2793e1f0645SDavid Rientjes if (nodes_empty(*nodes)) { 2803e1f0645SDavid Rientjes if (((flags & MPOL_F_STATIC_NODES) || 2813e1f0645SDavid Rientjes (flags & MPOL_F_RELATIVE_NODES))) 2823e1f0645SDavid Rientjes return ERR_PTR(-EINVAL); 2837858d7bcSFeng Tang 2847858d7bcSFeng Tang mode = MPOL_LOCAL; 2853e1f0645SDavid Rientjes } 286479e2802SPeter Zijlstra } else if (mode == MPOL_LOCAL) { 2878d303e44SPiotr Kwapulinski if (!nodes_empty(*nodes) || 2888d303e44SPiotr Kwapulinski (flags & MPOL_F_STATIC_NODES) || 2898d303e44SPiotr Kwapulinski (flags & MPOL_F_RELATIVE_NODES)) 290479e2802SPeter Zijlstra return ERR_PTR(-EINVAL); 2913e1f0645SDavid Rientjes } else if (nodes_empty(*nodes)) 2923e1f0645SDavid Rientjes return ERR_PTR(-EINVAL); 2931da177e4SLinus Torvalds policy = kmem_cache_alloc(policy_cache, GFP_KERNEL); 2941da177e4SLinus Torvalds if (!policy) 2951da177e4SLinus Torvalds return ERR_PTR(-ENOMEM); 2961da177e4SLinus Torvalds atomic_set(&policy->refcnt, 1); 29745c4745aSLee Schermerhorn policy->mode = mode; 29837012946SDavid Rientjes policy->flags = flags; 2993e1f0645SDavid Rientjes 30037012946SDavid Rientjes return policy; 30137012946SDavid Rientjes } 30237012946SDavid Rientjes 30352cd3b07SLee Schermerhorn /* Slow path of a mpol destructor. 
*/ 30452cd3b07SLee Schermerhorn void __mpol_put(struct mempolicy *p) 30552cd3b07SLee Schermerhorn { 30652cd3b07SLee Schermerhorn if (!atomic_dec_and_test(&p->refcnt)) 30752cd3b07SLee Schermerhorn return; 30852cd3b07SLee Schermerhorn kmem_cache_free(policy_cache, p); 30952cd3b07SLee Schermerhorn } 31052cd3b07SLee Schermerhorn 311213980c0SVlastimil Babka static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes) 31237012946SDavid Rientjes { 31337012946SDavid Rientjes } 31437012946SDavid Rientjes 315213980c0SVlastimil Babka static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes) 3161d0d2680SDavid Rientjes { 3171d0d2680SDavid Rientjes nodemask_t tmp; 3181d0d2680SDavid Rientjes 31937012946SDavid Rientjes if (pol->flags & MPOL_F_STATIC_NODES) 32037012946SDavid Rientjes nodes_and(tmp, pol->w.user_nodemask, *nodes); 32137012946SDavid Rientjes else if (pol->flags & MPOL_F_RELATIVE_NODES) 32237012946SDavid Rientjes mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes); 3231d0d2680SDavid Rientjes else { 324269fbe72SBen Widawsky nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed, 325213980c0SVlastimil Babka *nodes); 32629b190faSzhong jiang pol->w.cpuset_mems_allowed = *nodes; 3271d0d2680SDavid Rientjes } 32837012946SDavid Rientjes 329708c1bbcSMiao Xie if (nodes_empty(tmp)) 330708c1bbcSMiao Xie tmp = *nodes; 331708c1bbcSMiao Xie 332269fbe72SBen Widawsky pol->nodes = tmp; 33337012946SDavid Rientjes } 33437012946SDavid Rientjes 33537012946SDavid Rientjes static void mpol_rebind_preferred(struct mempolicy *pol, 336213980c0SVlastimil Babka const nodemask_t *nodes) 33737012946SDavid Rientjes { 33837012946SDavid Rientjes pol->w.cpuset_mems_allowed = *nodes; 3391d0d2680SDavid Rientjes } 34037012946SDavid Rientjes 341708c1bbcSMiao Xie /* 342708c1bbcSMiao Xie * mpol_rebind_policy - Migrate a policy to a different set of nodes 343708c1bbcSMiao Xie * 344c1e8d7c6SMichel Lespinasse * Per-vma policies are protected by mmap_lock. Allocations using per-task 345213980c0SVlastimil Babka * policies are protected by task->mems_allowed_seq to prevent a premature 346213980c0SVlastimil Babka * OOM/allocation failure due to parallel nodemask modification. 347708c1bbcSMiao Xie */ 348213980c0SVlastimil Babka static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask) 34937012946SDavid Rientjes { 35037012946SDavid Rientjes if (!pol) 35137012946SDavid Rientjes return; 3527858d7bcSFeng Tang if (!mpol_store_user_nodemask(pol) && 35337012946SDavid Rientjes nodes_equal(pol->w.cpuset_mems_allowed, *newmask)) 35437012946SDavid Rientjes return; 355708c1bbcSMiao Xie 356213980c0SVlastimil Babka mpol_ops[pol->mode].rebind(pol, newmask); 3571d0d2680SDavid Rientjes } 3581d0d2680SDavid Rientjes 3591d0d2680SDavid Rientjes /* 3601d0d2680SDavid Rientjes * Wrapper for mpol_rebind_policy() that just requires task 3611d0d2680SDavid Rientjes * pointer, and updates task mempolicy. 36258568d2aSMiao Xie * 36358568d2aSMiao Xie * Called with task's alloc_lock held. 3641d0d2680SDavid Rientjes */ 3651d0d2680SDavid Rientjes 366213980c0SVlastimil Babka void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new) 3671d0d2680SDavid Rientjes { 368213980c0SVlastimil Babka mpol_rebind_policy(tsk->mempolicy, new); 3691d0d2680SDavid Rientjes } 3701d0d2680SDavid Rientjes 3711d0d2680SDavid Rientjes /* 3721d0d2680SDavid Rientjes * Rebind each vma in mm to new nodemask. 3731d0d2680SDavid Rientjes * 374c1e8d7c6SMichel Lespinasse * Call holding a reference to mm. 
Takes mm->mmap_lock during call. 3751d0d2680SDavid Rientjes */ 3761d0d2680SDavid Rientjes 3771d0d2680SDavid Rientjes void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new) 3781d0d2680SDavid Rientjes { 3791d0d2680SDavid Rientjes struct vm_area_struct *vma; 3801d0d2680SDavid Rientjes 381d8ed45c5SMichel Lespinasse mmap_write_lock(mm); 3821d0d2680SDavid Rientjes for (vma = mm->mmap; vma; vma = vma->vm_next) 383213980c0SVlastimil Babka mpol_rebind_policy(vma->vm_policy, new); 384d8ed45c5SMichel Lespinasse mmap_write_unlock(mm); 3851d0d2680SDavid Rientjes } 3861d0d2680SDavid Rientjes 38737012946SDavid Rientjes static const struct mempolicy_operations mpol_ops[MPOL_MAX] = { 38837012946SDavid Rientjes [MPOL_DEFAULT] = { 38937012946SDavid Rientjes .rebind = mpol_rebind_default, 39037012946SDavid Rientjes }, 39137012946SDavid Rientjes [MPOL_INTERLEAVE] = { 392be897d48SFeng Tang .create = mpol_new_nodemask, 39337012946SDavid Rientjes .rebind = mpol_rebind_nodemask, 39437012946SDavid Rientjes }, 39537012946SDavid Rientjes [MPOL_PREFERRED] = { 39637012946SDavid Rientjes .create = mpol_new_preferred, 39737012946SDavid Rientjes .rebind = mpol_rebind_preferred, 39837012946SDavid Rientjes }, 39937012946SDavid Rientjes [MPOL_BIND] = { 400be897d48SFeng Tang .create = mpol_new_nodemask, 40137012946SDavid Rientjes .rebind = mpol_rebind_nodemask, 40237012946SDavid Rientjes }, 4037858d7bcSFeng Tang [MPOL_LOCAL] = { 4047858d7bcSFeng Tang .rebind = mpol_rebind_default, 4057858d7bcSFeng Tang }, 406b27abaccSDave Hansen [MPOL_PREFERRED_MANY] = { 407be897d48SFeng Tang .create = mpol_new_nodemask, 408b27abaccSDave Hansen .rebind = mpol_rebind_preferred, 409b27abaccSDave Hansen }, 41037012946SDavid Rientjes }; 41137012946SDavid Rientjes 412a53190a4SYang Shi static int migrate_page_add(struct page *page, struct list_head *pagelist, 413fc301289SChristoph Lameter unsigned long flags); 4141a75a6c8SChristoph Lameter 4156f4576e3SNaoya Horiguchi struct queue_pages { 4166f4576e3SNaoya Horiguchi struct list_head *pagelist; 4176f4576e3SNaoya Horiguchi unsigned long flags; 4186f4576e3SNaoya Horiguchi nodemask_t *nmask; 419f18da660SLi Xinhai unsigned long start; 420f18da660SLi Xinhai unsigned long end; 421f18da660SLi Xinhai struct vm_area_struct *first; 4226f4576e3SNaoya Horiguchi }; 4236f4576e3SNaoya Horiguchi 42498094945SNaoya Horiguchi /* 42588aaa2a1SNaoya Horiguchi * Check if the page's nid is in qp->nmask. 42688aaa2a1SNaoya Horiguchi * 42788aaa2a1SNaoya Horiguchi * If MPOL_MF_INVERT is set in qp->flags, check if the nid is 42888aaa2a1SNaoya Horiguchi * in the invert of qp->nmask. 42988aaa2a1SNaoya Horiguchi */ 43088aaa2a1SNaoya Horiguchi static inline bool queue_pages_required(struct page *page, 43188aaa2a1SNaoya Horiguchi struct queue_pages *qp) 43288aaa2a1SNaoya Horiguchi { 43388aaa2a1SNaoya Horiguchi int nid = page_to_nid(page); 43488aaa2a1SNaoya Horiguchi unsigned long flags = qp->flags; 43588aaa2a1SNaoya Horiguchi 43688aaa2a1SNaoya Horiguchi return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT); 43788aaa2a1SNaoya Horiguchi } 43888aaa2a1SNaoya Horiguchi 439a7f40cfeSYang Shi /* 440d8835445SYang Shi * queue_pages_pmd() has four possible return values: 441e5947d23SYang Shi * 0 - pages are placed on the right node or queued successfully, or 442e5947d23SYang Shi * special page is met, i.e. huge zero page. 443d8835445SYang Shi * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were 444d8835445SYang Shi * specified. 445d8835445SYang Shi * 2 - THP was split. 
446d8835445SYang Shi * -EIO - is migration entry or only MPOL_MF_STRICT was specified and an 447d8835445SYang Shi * existing page was already on a node that does not follow the 448d8835445SYang Shi * policy. 449a7f40cfeSYang Shi */ 450c8633798SNaoya Horiguchi static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr, 451c8633798SNaoya Horiguchi unsigned long end, struct mm_walk *walk) 452959a7e13SJules Irenge __releases(ptl) 453c8633798SNaoya Horiguchi { 454c8633798SNaoya Horiguchi int ret = 0; 455c8633798SNaoya Horiguchi struct page *page; 456c8633798SNaoya Horiguchi struct queue_pages *qp = walk->private; 457c8633798SNaoya Horiguchi unsigned long flags; 458c8633798SNaoya Horiguchi 459c8633798SNaoya Horiguchi if (unlikely(is_pmd_migration_entry(*pmd))) { 460a7f40cfeSYang Shi ret = -EIO; 461c8633798SNaoya Horiguchi goto unlock; 462c8633798SNaoya Horiguchi } 463c8633798SNaoya Horiguchi page = pmd_page(*pmd); 464c8633798SNaoya Horiguchi if (is_huge_zero_page(page)) { 465c8633798SNaoya Horiguchi spin_unlock(ptl); 466e5947d23SYang Shi walk->action = ACTION_CONTINUE; 467c8633798SNaoya Horiguchi goto out; 468c8633798SNaoya Horiguchi } 469d8835445SYang Shi if (!queue_pages_required(page, qp)) 470c8633798SNaoya Horiguchi goto unlock; 471c8633798SNaoya Horiguchi 472c8633798SNaoya Horiguchi flags = qp->flags; 473c8633798SNaoya Horiguchi /* go to thp migration */ 474a7f40cfeSYang Shi if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 475a53190a4SYang Shi if (!vma_migratable(walk->vma) || 476a53190a4SYang Shi migrate_page_add(page, qp->pagelist, flags)) { 477d8835445SYang Shi ret = 1; 478a7f40cfeSYang Shi goto unlock; 479a7f40cfeSYang Shi } 480a7f40cfeSYang Shi } else 481a7f40cfeSYang Shi ret = -EIO; 482c8633798SNaoya Horiguchi unlock: 483c8633798SNaoya Horiguchi spin_unlock(ptl); 484c8633798SNaoya Horiguchi out: 485c8633798SNaoya Horiguchi return ret; 486c8633798SNaoya Horiguchi } 487c8633798SNaoya Horiguchi 48888aaa2a1SNaoya Horiguchi /* 48998094945SNaoya Horiguchi * Scan through pages checking if pages follow certain conditions, 49098094945SNaoya Horiguchi * and move them to the pagelist if they do. 491d8835445SYang Shi * 492d8835445SYang Shi * queue_pages_pte_range() has three possible return values: 493e5947d23SYang Shi * 0 - pages are placed on the right node or queued successfully, or 494e5947d23SYang Shi * special page is met, i.e. zero page. 495d8835445SYang Shi * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were 496d8835445SYang Shi * specified. 497d8835445SYang Shi * -EIO - only MPOL_MF_STRICT was specified and an existing page was already 498d8835445SYang Shi * on a node that does not follow the policy. 
49998094945SNaoya Horiguchi */ 5006f4576e3SNaoya Horiguchi static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr, 5016f4576e3SNaoya Horiguchi unsigned long end, struct mm_walk *walk) 5021da177e4SLinus Torvalds { 5036f4576e3SNaoya Horiguchi struct vm_area_struct *vma = walk->vma; 5046f4576e3SNaoya Horiguchi struct page *page; 5056f4576e3SNaoya Horiguchi struct queue_pages *qp = walk->private; 5066f4576e3SNaoya Horiguchi unsigned long flags = qp->flags; 507c8633798SNaoya Horiguchi int ret; 508d8835445SYang Shi bool has_unmovable = false; 5093f088420SShijie Luo pte_t *pte, *mapped_pte; 510705e87c0SHugh Dickins spinlock_t *ptl; 511941150a3SHugh Dickins 512c8633798SNaoya Horiguchi ptl = pmd_trans_huge_lock(pmd, vma); 513c8633798SNaoya Horiguchi if (ptl) { 514c8633798SNaoya Horiguchi ret = queue_pages_pmd(pmd, ptl, addr, end, walk); 515d8835445SYang Shi if (ret != 2) 516a7f40cfeSYang Shi return ret; 517248db92dSKirill A. Shutemov } 518d8835445SYang Shi /* THP was split, fall through to pte walk */ 51991612e0dSHugh Dickins 520337d9abfSNaoya Horiguchi if (pmd_trans_unstable(pmd)) 521337d9abfSNaoya Horiguchi return 0; 52294723aafSMichal Hocko 5233f088420SShijie Luo mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); 5246f4576e3SNaoya Horiguchi for (; addr != end; pte++, addr += PAGE_SIZE) { 52591612e0dSHugh Dickins if (!pte_present(*pte)) 52691612e0dSHugh Dickins continue; 5276aab341eSLinus Torvalds page = vm_normal_page(vma, addr, *pte); 5286aab341eSLinus Torvalds if (!page) 52991612e0dSHugh Dickins continue; 530053837fcSNick Piggin /* 53162b61f61SHugh Dickins * vm_normal_page() filters out zero pages, but there might 53262b61f61SHugh Dickins * still be PageReserved pages to skip, perhaps in a VDSO. 533053837fcSNick Piggin */ 534b79bc0a0SHugh Dickins if (PageReserved(page)) 535f4598c8bSChristoph Lameter continue; 53688aaa2a1SNaoya Horiguchi if (!queue_pages_required(page, qp)) 53738e35860SChristoph Lameter continue; 538a7f40cfeSYang Shi if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 539d8835445SYang Shi /* MPOL_MF_STRICT must be specified if we get here */ 540d8835445SYang Shi if (!vma_migratable(vma)) { 541d8835445SYang Shi has_unmovable = true; 542a7f40cfeSYang Shi break; 543d8835445SYang Shi } 544a53190a4SYang Shi 545a53190a4SYang Shi /* 546a53190a4SYang Shi * Do not abort immediately since there may be 547a53190a4SYang Shi * temporary off LRU pages in the range. Still 548a53190a4SYang Shi * need migrate other LRU pages. 549a53190a4SYang Shi */ 550a53190a4SYang Shi if (migrate_page_add(page, qp->pagelist, flags)) 551a53190a4SYang Shi has_unmovable = true; 552a7f40cfeSYang Shi } else 553a7f40cfeSYang Shi break; 5546f4576e3SNaoya Horiguchi } 5553f088420SShijie Luo pte_unmap_unlock(mapped_pte, ptl); 5566f4576e3SNaoya Horiguchi cond_resched(); 557d8835445SYang Shi 558d8835445SYang Shi if (has_unmovable) 559d8835445SYang Shi return 1; 560d8835445SYang Shi 561a7f40cfeSYang Shi return addr != end ? 
-EIO : 0; 56291612e0dSHugh Dickins } 56391612e0dSHugh Dickins 5646f4576e3SNaoya Horiguchi static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask, 5656f4576e3SNaoya Horiguchi unsigned long addr, unsigned long end, 5666f4576e3SNaoya Horiguchi struct mm_walk *walk) 567e2d8cf40SNaoya Horiguchi { 568dcf17635SLi Xinhai int ret = 0; 569e2d8cf40SNaoya Horiguchi #ifdef CONFIG_HUGETLB_PAGE 5706f4576e3SNaoya Horiguchi struct queue_pages *qp = walk->private; 571dcf17635SLi Xinhai unsigned long flags = (qp->flags & MPOL_MF_VALID); 572e2d8cf40SNaoya Horiguchi struct page *page; 573cb900f41SKirill A. Shutemov spinlock_t *ptl; 574d4c54919SNaoya Horiguchi pte_t entry; 575e2d8cf40SNaoya Horiguchi 5766f4576e3SNaoya Horiguchi ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte); 5776f4576e3SNaoya Horiguchi entry = huge_ptep_get(pte); 578d4c54919SNaoya Horiguchi if (!pte_present(entry)) 579d4c54919SNaoya Horiguchi goto unlock; 580d4c54919SNaoya Horiguchi page = pte_page(entry); 58188aaa2a1SNaoya Horiguchi if (!queue_pages_required(page, qp)) 582e2d8cf40SNaoya Horiguchi goto unlock; 583dcf17635SLi Xinhai 584dcf17635SLi Xinhai if (flags == MPOL_MF_STRICT) { 585dcf17635SLi Xinhai /* 586dcf17635SLi Xinhai * STRICT alone means only detecting misplaced page and no 587dcf17635SLi Xinhai * need to further check other vma. 588dcf17635SLi Xinhai */ 589dcf17635SLi Xinhai ret = -EIO; 590dcf17635SLi Xinhai goto unlock; 591dcf17635SLi Xinhai } 592dcf17635SLi Xinhai 593dcf17635SLi Xinhai if (!vma_migratable(walk->vma)) { 594dcf17635SLi Xinhai /* 595dcf17635SLi Xinhai * Must be STRICT with MOVE*, otherwise .test_walk() have 596dcf17635SLi Xinhai * stopped walking current vma. 597dcf17635SLi Xinhai * Detecting misplaced page but allow migrating pages which 598dcf17635SLi Xinhai * have been queued. 599dcf17635SLi Xinhai */ 600dcf17635SLi Xinhai ret = 1; 601dcf17635SLi Xinhai goto unlock; 602dcf17635SLi Xinhai } 603dcf17635SLi Xinhai 604e2d8cf40SNaoya Horiguchi /* With MPOL_MF_MOVE, we migrate only unshared hugepage. */ 605e2d8cf40SNaoya Horiguchi if (flags & (MPOL_MF_MOVE_ALL) || 606dcf17635SLi Xinhai (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) { 607dcf17635SLi Xinhai if (!isolate_huge_page(page, qp->pagelist) && 608dcf17635SLi Xinhai (flags & MPOL_MF_STRICT)) 609dcf17635SLi Xinhai /* 610dcf17635SLi Xinhai * Failed to isolate page but allow migrating pages 611dcf17635SLi Xinhai * which have been queued. 612dcf17635SLi Xinhai */ 613dcf17635SLi Xinhai ret = 1; 614dcf17635SLi Xinhai } 615e2d8cf40SNaoya Horiguchi unlock: 616cb900f41SKirill A. Shutemov spin_unlock(ptl); 617e2d8cf40SNaoya Horiguchi #else 618e2d8cf40SNaoya Horiguchi BUG(); 619e2d8cf40SNaoya Horiguchi #endif 620dcf17635SLi Xinhai return ret; 6211da177e4SLinus Torvalds } 6221da177e4SLinus Torvalds 6235877231fSAneesh Kumar K.V #ifdef CONFIG_NUMA_BALANCING 624b24f53a0SLee Schermerhorn /* 6254b10e7d5SMel Gorman * This is used to mark a range of virtual addresses to be inaccessible. 6264b10e7d5SMel Gorman * These are later cleared by a NUMA hinting fault. Depending on these 6274b10e7d5SMel Gorman * faults, pages may be migrated for better NUMA placement. 6284b10e7d5SMel Gorman * 6294b10e7d5SMel Gorman * This is assuming that NUMA faults are handled using PROT_NONE. If 6304b10e7d5SMel Gorman * an architecture makes a different choice, it will need further 6314b10e7d5SMel Gorman * changes to the core. 
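 *
 * Returns the number of pages whose protection was actually updated; this is
 * also the count accounted as NUMA_PTE_UPDATES below.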
632b24f53a0SLee Schermerhorn */ 6334b10e7d5SMel Gorman unsigned long change_prot_numa(struct vm_area_struct *vma, 6344b10e7d5SMel Gorman unsigned long addr, unsigned long end) 635b24f53a0SLee Schermerhorn { 6364b10e7d5SMel Gorman int nr_updated; 637b24f53a0SLee Schermerhorn 63858705444SPeter Xu nr_updated = change_protection(vma, addr, end, PAGE_NONE, MM_CP_PROT_NUMA); 63903c5a6e1SMel Gorman if (nr_updated) 64003c5a6e1SMel Gorman count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated); 641b24f53a0SLee Schermerhorn 6424b10e7d5SMel Gorman return nr_updated; 643b24f53a0SLee Schermerhorn } 644b24f53a0SLee Schermerhorn #else 645b24f53a0SLee Schermerhorn static unsigned long change_prot_numa(struct vm_area_struct *vma, 646b24f53a0SLee Schermerhorn unsigned long addr, unsigned long end) 647b24f53a0SLee Schermerhorn { 648b24f53a0SLee Schermerhorn return 0; 649b24f53a0SLee Schermerhorn } 6505877231fSAneesh Kumar K.V #endif /* CONFIG_NUMA_BALANCING */ 651b24f53a0SLee Schermerhorn 6526f4576e3SNaoya Horiguchi static int queue_pages_test_walk(unsigned long start, unsigned long end, 6536f4576e3SNaoya Horiguchi struct mm_walk *walk) 6541da177e4SLinus Torvalds { 6556f4576e3SNaoya Horiguchi struct vm_area_struct *vma = walk->vma; 6566f4576e3SNaoya Horiguchi struct queue_pages *qp = walk->private; 6575b952b3cSAndi Kleen unsigned long endvma = vma->vm_end; 6586f4576e3SNaoya Horiguchi unsigned long flags = qp->flags; 659dc9aa5b9SChristoph Lameter 660a18b3ac2SLi Xinhai /* range check first */ 661ce33135cSMiaohe Lin VM_BUG_ON_VMA(!range_in_vma(vma, start, end), vma); 662f18da660SLi Xinhai 663f18da660SLi Xinhai if (!qp->first) { 664f18da660SLi Xinhai qp->first = vma; 665f18da660SLi Xinhai if (!(flags & MPOL_MF_DISCONTIG_OK) && 666f18da660SLi Xinhai (qp->start < vma->vm_start)) 667f18da660SLi Xinhai /* hole at head side of range */ 668a18b3ac2SLi Xinhai return -EFAULT; 669a18b3ac2SLi Xinhai } 670f18da660SLi Xinhai if (!(flags & MPOL_MF_DISCONTIG_OK) && 671f18da660SLi Xinhai ((vma->vm_end < qp->end) && 672f18da660SLi Xinhai (!vma->vm_next || vma->vm_end < vma->vm_next->vm_start))) 673f18da660SLi Xinhai /* hole at middle or tail of range */ 674f18da660SLi Xinhai return -EFAULT; 675a18b3ac2SLi Xinhai 676a7f40cfeSYang Shi /* 677a7f40cfeSYang Shi * Need check MPOL_MF_STRICT to return -EIO if possible 678a7f40cfeSYang Shi * regardless of vma_migratable 679a7f40cfeSYang Shi */ 680a7f40cfeSYang Shi if (!vma_migratable(vma) && 681a7f40cfeSYang Shi !(flags & MPOL_MF_STRICT)) 68248684a65SNaoya Horiguchi return 1; 68348684a65SNaoya Horiguchi 6845b952b3cSAndi Kleen if (endvma > end) 6855b952b3cSAndi Kleen endvma = end; 686b24f53a0SLee Schermerhorn 687b24f53a0SLee Schermerhorn if (flags & MPOL_MF_LAZY) { 6882c0346a3SMel Gorman /* Similar to task_numa_work, skip inaccessible VMAs */ 6893122e80eSAnshuman Khandual if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) && 6904355c018SLiang Chen !(vma->vm_flags & VM_MIXEDMAP)) 691b24f53a0SLee Schermerhorn change_prot_numa(vma, start, endvma); 6926f4576e3SNaoya Horiguchi return 1; 693b24f53a0SLee Schermerhorn } 694b24f53a0SLee Schermerhorn 6956f4576e3SNaoya Horiguchi /* queue pages from current vma */ 696a7f40cfeSYang Shi if (flags & MPOL_MF_VALID) 6976f4576e3SNaoya Horiguchi return 0; 6986f4576e3SNaoya Horiguchi return 1; 6996f4576e3SNaoya Horiguchi } 700b24f53a0SLee Schermerhorn 7017b86ac33SChristoph Hellwig static const struct mm_walk_ops queue_pages_walk_ops = { 7027b86ac33SChristoph Hellwig .hugetlb_entry = queue_pages_hugetlb, 7037b86ac33SChristoph Hellwig .pmd_entry = 
queue_pages_pte_range, 7047b86ac33SChristoph Hellwig .test_walk = queue_pages_test_walk, 7057b86ac33SChristoph Hellwig }; 7067b86ac33SChristoph Hellwig 7076f4576e3SNaoya Horiguchi /* 7086f4576e3SNaoya Horiguchi * Walk through page tables and collect pages to be migrated. 7096f4576e3SNaoya Horiguchi * 7106f4576e3SNaoya Horiguchi * If pages found in a given range are on a set of nodes (determined by 7116f4576e3SNaoya Horiguchi * @nodes and @flags,) it's isolated and queued to the pagelist which is 712d8835445SYang Shi * passed via @private. 713d8835445SYang Shi * 714d8835445SYang Shi * queue_pages_range() has three possible return values: 715d8835445SYang Shi * 1 - there is unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were 716d8835445SYang Shi * specified. 717d8835445SYang Shi * 0 - queue pages successfully or no misplaced page. 718a85dfc30SYang Shi * errno - i.e. misplaced pages with MPOL_MF_STRICT specified (-EIO) or 719a85dfc30SYang Shi * memory range specified by nodemask and maxnode points outside 720a85dfc30SYang Shi * your accessible address space (-EFAULT) 7216f4576e3SNaoya Horiguchi */ 7226f4576e3SNaoya Horiguchi static int 7236f4576e3SNaoya Horiguchi queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end, 7246f4576e3SNaoya Horiguchi nodemask_t *nodes, unsigned long flags, 7256f4576e3SNaoya Horiguchi struct list_head *pagelist) 7266f4576e3SNaoya Horiguchi { 727f18da660SLi Xinhai int err; 7286f4576e3SNaoya Horiguchi struct queue_pages qp = { 7296f4576e3SNaoya Horiguchi .pagelist = pagelist, 7306f4576e3SNaoya Horiguchi .flags = flags, 7316f4576e3SNaoya Horiguchi .nmask = nodes, 732f18da660SLi Xinhai .start = start, 733f18da660SLi Xinhai .end = end, 734f18da660SLi Xinhai .first = NULL, 7356f4576e3SNaoya Horiguchi }; 7366f4576e3SNaoya Horiguchi 737f18da660SLi Xinhai err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp); 738f18da660SLi Xinhai 739f18da660SLi Xinhai if (!qp.first) 740f18da660SLi Xinhai /* whole range in hole */ 741f18da660SLi Xinhai err = -EFAULT; 742f18da660SLi Xinhai 743f18da660SLi Xinhai return err; 7441da177e4SLinus Torvalds } 7451da177e4SLinus Torvalds 746869833f2SKOSAKI Motohiro /* 747869833f2SKOSAKI Motohiro * Apply policy to a single VMA 748c1e8d7c6SMichel Lespinasse * This must be called with the mmap_lock held for writing. 749869833f2SKOSAKI Motohiro */ 750869833f2SKOSAKI Motohiro static int vma_replace_policy(struct vm_area_struct *vma, 751869833f2SKOSAKI Motohiro struct mempolicy *pol) 7528d34694cSKOSAKI Motohiro { 753869833f2SKOSAKI Motohiro int err; 754869833f2SKOSAKI Motohiro struct mempolicy *old; 755869833f2SKOSAKI Motohiro struct mempolicy *new; 7568d34694cSKOSAKI Motohiro 7578d34694cSKOSAKI Motohiro pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n", 7588d34694cSKOSAKI Motohiro vma->vm_start, vma->vm_end, vma->vm_pgoff, 7598d34694cSKOSAKI Motohiro vma->vm_ops, vma->vm_file, 7608d34694cSKOSAKI Motohiro vma->vm_ops ? 
vma->vm_ops->set_policy : NULL); 7618d34694cSKOSAKI Motohiro 762869833f2SKOSAKI Motohiro new = mpol_dup(pol); 763869833f2SKOSAKI Motohiro if (IS_ERR(new)) 764869833f2SKOSAKI Motohiro return PTR_ERR(new); 765869833f2SKOSAKI Motohiro 766869833f2SKOSAKI Motohiro if (vma->vm_ops && vma->vm_ops->set_policy) { 7678d34694cSKOSAKI Motohiro err = vma->vm_ops->set_policy(vma, new); 768869833f2SKOSAKI Motohiro if (err) 769869833f2SKOSAKI Motohiro goto err_out; 7708d34694cSKOSAKI Motohiro } 771869833f2SKOSAKI Motohiro 772869833f2SKOSAKI Motohiro old = vma->vm_policy; 773c1e8d7c6SMichel Lespinasse vma->vm_policy = new; /* protected by mmap_lock */ 774869833f2SKOSAKI Motohiro mpol_put(old); 775869833f2SKOSAKI Motohiro 776869833f2SKOSAKI Motohiro return 0; 777869833f2SKOSAKI Motohiro err_out: 778869833f2SKOSAKI Motohiro mpol_put(new); 7798d34694cSKOSAKI Motohiro return err; 7808d34694cSKOSAKI Motohiro } 7818d34694cSKOSAKI Motohiro 7821da177e4SLinus Torvalds /* Step 2: apply policy to a range and do splits. */ 7839d8cebd4SKOSAKI Motohiro static int mbind_range(struct mm_struct *mm, unsigned long start, 7849d8cebd4SKOSAKI Motohiro unsigned long end, struct mempolicy *new_pol) 7851da177e4SLinus Torvalds { 7861da177e4SLinus Torvalds struct vm_area_struct *next; 7879d8cebd4SKOSAKI Motohiro struct vm_area_struct *prev; 7889d8cebd4SKOSAKI Motohiro struct vm_area_struct *vma; 7899d8cebd4SKOSAKI Motohiro int err = 0; 790e26a5114SKOSAKI Motohiro pgoff_t pgoff; 7919d8cebd4SKOSAKI Motohiro unsigned long vmstart; 7929d8cebd4SKOSAKI Motohiro unsigned long vmend; 7931da177e4SLinus Torvalds 794097d5910SLinus Torvalds vma = find_vma(mm, start); 795f18da660SLi Xinhai VM_BUG_ON(!vma); 7969d8cebd4SKOSAKI Motohiro 797097d5910SLinus Torvalds prev = vma->vm_prev; 798e26a5114SKOSAKI Motohiro if (start > vma->vm_start) 799e26a5114SKOSAKI Motohiro prev = vma; 800e26a5114SKOSAKI Motohiro 8019d8cebd4SKOSAKI Motohiro for (; vma && vma->vm_start < end; prev = vma, vma = next) { 8021da177e4SLinus Torvalds next = vma->vm_next; 8039d8cebd4SKOSAKI Motohiro vmstart = max(start, vma->vm_start); 8049d8cebd4SKOSAKI Motohiro vmend = min(end, vma->vm_end); 8059d8cebd4SKOSAKI Motohiro 806e26a5114SKOSAKI Motohiro if (mpol_equal(vma_policy(vma), new_pol)) 807e26a5114SKOSAKI Motohiro continue; 808e26a5114SKOSAKI Motohiro 809e26a5114SKOSAKI Motohiro pgoff = vma->vm_pgoff + 810e26a5114SKOSAKI Motohiro ((vmstart - vma->vm_start) >> PAGE_SHIFT); 8119d8cebd4SKOSAKI Motohiro prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags, 812e26a5114SKOSAKI Motohiro vma->anon_vma, vma->vm_file, pgoff, 81319a809afSAndrea Arcangeli new_pol, vma->vm_userfaultfd_ctx); 8149d8cebd4SKOSAKI Motohiro if (prev) { 8159d8cebd4SKOSAKI Motohiro vma = prev; 8169d8cebd4SKOSAKI Motohiro next = vma->vm_next; 8173964acd0SOleg Nesterov if (mpol_equal(vma_policy(vma), new_pol)) 8189d8cebd4SKOSAKI Motohiro continue; 8193964acd0SOleg Nesterov /* vma_merge() joined vma && vma->next, case 8 */ 8203964acd0SOleg Nesterov goto replace; 8211da177e4SLinus Torvalds } 8229d8cebd4SKOSAKI Motohiro if (vma->vm_start != vmstart) { 8239d8cebd4SKOSAKI Motohiro err = split_vma(vma->vm_mm, vma, vmstart, 1); 8249d8cebd4SKOSAKI Motohiro if (err) 8259d8cebd4SKOSAKI Motohiro goto out; 8269d8cebd4SKOSAKI Motohiro } 8279d8cebd4SKOSAKI Motohiro if (vma->vm_end != vmend) { 8289d8cebd4SKOSAKI Motohiro err = split_vma(vma->vm_mm, vma, vmend, 0); 8299d8cebd4SKOSAKI Motohiro if (err) 8309d8cebd4SKOSAKI Motohiro goto out; 8319d8cebd4SKOSAKI Motohiro } 8323964acd0SOleg Nesterov replace: 833869833f2SKOSAKI 
Motohiro err = vma_replace_policy(vma, new_pol); 8349d8cebd4SKOSAKI Motohiro if (err) 8359d8cebd4SKOSAKI Motohiro goto out; 8369d8cebd4SKOSAKI Motohiro } 8379d8cebd4SKOSAKI Motohiro 8389d8cebd4SKOSAKI Motohiro out: 8391da177e4SLinus Torvalds return err; 8401da177e4SLinus Torvalds } 8411da177e4SLinus Torvalds 8421da177e4SLinus Torvalds /* Set the process memory policy */ 843028fec41SDavid Rientjes static long do_set_mempolicy(unsigned short mode, unsigned short flags, 844028fec41SDavid Rientjes nodemask_t *nodes) 8451da177e4SLinus Torvalds { 84658568d2aSMiao Xie struct mempolicy *new, *old; 8474bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 84858568d2aSMiao Xie int ret; 8491da177e4SLinus Torvalds 8504bfc4495SKAMEZAWA Hiroyuki if (!scratch) 8514bfc4495SKAMEZAWA Hiroyuki return -ENOMEM; 852f4e53d91SLee Schermerhorn 8534bfc4495SKAMEZAWA Hiroyuki new = mpol_new(mode, flags, nodes); 8544bfc4495SKAMEZAWA Hiroyuki if (IS_ERR(new)) { 8554bfc4495SKAMEZAWA Hiroyuki ret = PTR_ERR(new); 8564bfc4495SKAMEZAWA Hiroyuki goto out; 8574bfc4495SKAMEZAWA Hiroyuki } 8582c7c3a7dSOleg Nesterov 859bda420b9SHuang Ying if (flags & MPOL_F_NUMA_BALANCING) { 860bda420b9SHuang Ying if (new && new->mode == MPOL_BIND) { 861bda420b9SHuang Ying new->flags |= (MPOL_F_MOF | MPOL_F_MORON); 862bda420b9SHuang Ying } else { 863bda420b9SHuang Ying ret = -EINVAL; 864bda420b9SHuang Ying mpol_put(new); 865bda420b9SHuang Ying goto out; 866bda420b9SHuang Ying } 867bda420b9SHuang Ying } 868bda420b9SHuang Ying 8694bfc4495SKAMEZAWA Hiroyuki ret = mpol_set_nodemask(new, nodes, scratch); 87058568d2aSMiao Xie if (ret) { 87158568d2aSMiao Xie mpol_put(new); 8724bfc4495SKAMEZAWA Hiroyuki goto out; 87358568d2aSMiao Xie } 87478b132e9SWei Yang task_lock(current); 87558568d2aSMiao Xie old = current->mempolicy; 8761da177e4SLinus Torvalds current->mempolicy = new; 87745816682SVlastimil Babka if (new && new->mode == MPOL_INTERLEAVE) 87845816682SVlastimil Babka current->il_prev = MAX_NUMNODES-1; 87958568d2aSMiao Xie task_unlock(current); 88058568d2aSMiao Xie mpol_put(old); 8814bfc4495SKAMEZAWA Hiroyuki ret = 0; 8824bfc4495SKAMEZAWA Hiroyuki out: 8834bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 8844bfc4495SKAMEZAWA Hiroyuki return ret; 8851da177e4SLinus Torvalds } 8861da177e4SLinus Torvalds 887bea904d5SLee Schermerhorn /* 888bea904d5SLee Schermerhorn * Return nodemask for policy for get_mempolicy() query 88958568d2aSMiao Xie * 89058568d2aSMiao Xie * Called with task's alloc_lock held 891bea904d5SLee Schermerhorn */ 892bea904d5SLee Schermerhorn static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes) 8931da177e4SLinus Torvalds { 894dfcd3c0dSAndi Kleen nodes_clear(*nodes); 895bea904d5SLee Schermerhorn if (p == &default_policy) 896bea904d5SLee Schermerhorn return; 897bea904d5SLee Schermerhorn 89845c4745aSLee Schermerhorn switch (p->mode) { 89919770b32SMel Gorman case MPOL_BIND: 9001da177e4SLinus Torvalds case MPOL_INTERLEAVE: 901269fbe72SBen Widawsky case MPOL_PREFERRED: 902b27abaccSDave Hansen case MPOL_PREFERRED_MANY: 903269fbe72SBen Widawsky *nodes = p->nodes; 9041da177e4SLinus Torvalds break; 9057858d7bcSFeng Tang case MPOL_LOCAL: 9067858d7bcSFeng Tang /* return empty node mask for local allocation */ 9077858d7bcSFeng Tang break; 9081da177e4SLinus Torvalds default: 9091da177e4SLinus Torvalds BUG(); 9101da177e4SLinus Torvalds } 9111da177e4SLinus Torvalds } 9121da177e4SLinus Torvalds 9133b9aadf7SAndrea Arcangeli static int lookup_node(struct mm_struct *mm, unsigned long addr) 9141da177e4SLinus Torvalds { 
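	/*
	 * Resolve the page backing @addr and return the node it resides on.
	 * get_user_pages_locked() may drop mmap_lock (clearing @locked), so
	 * the lock is only released below if it is still held.
	 */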
915ba841078SPeter Xu struct page *p = NULL; 9161da177e4SLinus Torvalds int err; 9171da177e4SLinus Torvalds 9183b9aadf7SAndrea Arcangeli int locked = 1; 9193b9aadf7SAndrea Arcangeli err = get_user_pages_locked(addr & PAGE_MASK, 1, 0, &p, &locked); 9202d3a36a4SMichal Hocko if (err > 0) { 9211da177e4SLinus Torvalds err = page_to_nid(p); 9221da177e4SLinus Torvalds put_page(p); 9231da177e4SLinus Torvalds } 9243b9aadf7SAndrea Arcangeli if (locked) 925d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); 9261da177e4SLinus Torvalds return err; 9271da177e4SLinus Torvalds } 9281da177e4SLinus Torvalds 9291da177e4SLinus Torvalds /* Retrieve NUMA policy */ 930dbcb0f19SAdrian Bunk static long do_get_mempolicy(int *policy, nodemask_t *nmask, 9311da177e4SLinus Torvalds unsigned long addr, unsigned long flags) 9321da177e4SLinus Torvalds { 9338bccd85fSChristoph Lameter int err; 9341da177e4SLinus Torvalds struct mm_struct *mm = current->mm; 9351da177e4SLinus Torvalds struct vm_area_struct *vma = NULL; 9363b9aadf7SAndrea Arcangeli struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL; 9371da177e4SLinus Torvalds 938754af6f5SLee Schermerhorn if (flags & 939754af6f5SLee Schermerhorn ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED)) 9401da177e4SLinus Torvalds return -EINVAL; 941754af6f5SLee Schermerhorn 942754af6f5SLee Schermerhorn if (flags & MPOL_F_MEMS_ALLOWED) { 943754af6f5SLee Schermerhorn if (flags & (MPOL_F_NODE|MPOL_F_ADDR)) 944754af6f5SLee Schermerhorn return -EINVAL; 945754af6f5SLee Schermerhorn *policy = 0; /* just so it's initialized */ 94658568d2aSMiao Xie task_lock(current); 947754af6f5SLee Schermerhorn *nmask = cpuset_current_mems_allowed; 94858568d2aSMiao Xie task_unlock(current); 949754af6f5SLee Schermerhorn return 0; 950754af6f5SLee Schermerhorn } 951754af6f5SLee Schermerhorn 9521da177e4SLinus Torvalds if (flags & MPOL_F_ADDR) { 953bea904d5SLee Schermerhorn /* 954bea904d5SLee Schermerhorn * Do NOT fall back to task policy if the 955bea904d5SLee Schermerhorn * vma/shared policy at addr is NULL. We 956bea904d5SLee Schermerhorn * want to return MPOL_DEFAULT in this case. 957bea904d5SLee Schermerhorn */ 958d8ed45c5SMichel Lespinasse mmap_read_lock(mm); 95933e3575cSLiam Howlett vma = vma_lookup(mm, addr); 9601da177e4SLinus Torvalds if (!vma) { 961d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); 9621da177e4SLinus Torvalds return -EFAULT; 9631da177e4SLinus Torvalds } 9641da177e4SLinus Torvalds if (vma->vm_ops && vma->vm_ops->get_policy) 9651da177e4SLinus Torvalds pol = vma->vm_ops->get_policy(vma, addr); 9661da177e4SLinus Torvalds else 9671da177e4SLinus Torvalds pol = vma->vm_policy; 9681da177e4SLinus Torvalds } else if (addr) 9691da177e4SLinus Torvalds return -EINVAL; 9701da177e4SLinus Torvalds 9711da177e4SLinus Torvalds if (!pol) 972bea904d5SLee Schermerhorn pol = &default_policy; /* indicates default behavior */ 9731da177e4SLinus Torvalds 9741da177e4SLinus Torvalds if (flags & MPOL_F_NODE) { 9751da177e4SLinus Torvalds if (flags & MPOL_F_ADDR) { 9763b9aadf7SAndrea Arcangeli /* 9773b9aadf7SAndrea Arcangeli * Take a refcount on the mpol, lookup_node() 978baf2f90bSLu Jialin * will drop the mmap_lock, so after calling 9793b9aadf7SAndrea Arcangeli * lookup_node() only "pol" remains valid, "vma" 9803b9aadf7SAndrea Arcangeli * is stale. 
9813b9aadf7SAndrea Arcangeli */ 9823b9aadf7SAndrea Arcangeli pol_refcount = pol; 9833b9aadf7SAndrea Arcangeli vma = NULL; 9843b9aadf7SAndrea Arcangeli mpol_get(pol); 9853b9aadf7SAndrea Arcangeli err = lookup_node(mm, addr); 9861da177e4SLinus Torvalds if (err < 0) 9871da177e4SLinus Torvalds goto out; 9888bccd85fSChristoph Lameter *policy = err; 9891da177e4SLinus Torvalds } else if (pol == current->mempolicy && 99045c4745aSLee Schermerhorn pol->mode == MPOL_INTERLEAVE) { 991269fbe72SBen Widawsky *policy = next_node_in(current->il_prev, pol->nodes); 9921da177e4SLinus Torvalds } else { 9931da177e4SLinus Torvalds err = -EINVAL; 9941da177e4SLinus Torvalds goto out; 9951da177e4SLinus Torvalds } 996bea904d5SLee Schermerhorn } else { 997bea904d5SLee Schermerhorn *policy = pol == &default_policy ? MPOL_DEFAULT : 998bea904d5SLee Schermerhorn pol->mode; 999d79df630SDavid Rientjes /* 1000d79df630SDavid Rientjes * Internal mempolicy flags must be masked off before exposing 1001d79df630SDavid Rientjes * the policy to userspace. 1002d79df630SDavid Rientjes */ 1003d79df630SDavid Rientjes *policy |= (pol->flags & MPOL_MODE_FLAGS); 1004bea904d5SLee Schermerhorn } 10051da177e4SLinus Torvalds 10061da177e4SLinus Torvalds err = 0; 100758568d2aSMiao Xie if (nmask) { 1008c6b6ef8bSLee Schermerhorn if (mpol_store_user_nodemask(pol)) { 1009c6b6ef8bSLee Schermerhorn *nmask = pol->w.user_nodemask; 1010c6b6ef8bSLee Schermerhorn } else { 101158568d2aSMiao Xie task_lock(current); 1012bea904d5SLee Schermerhorn get_policy_nodemask(pol, nmask); 101358568d2aSMiao Xie task_unlock(current); 101458568d2aSMiao Xie } 1015c6b6ef8bSLee Schermerhorn } 10161da177e4SLinus Torvalds 10171da177e4SLinus Torvalds out: 101852cd3b07SLee Schermerhorn mpol_cond_put(pol); 10191da177e4SLinus Torvalds if (vma) 1020d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); 10213b9aadf7SAndrea Arcangeli if (pol_refcount) 10223b9aadf7SAndrea Arcangeli mpol_put(pol_refcount); 10231da177e4SLinus Torvalds return err; 10241da177e4SLinus Torvalds } 10251da177e4SLinus Torvalds 1026b20a3503SChristoph Lameter #ifdef CONFIG_MIGRATION 10278bccd85fSChristoph Lameter /* 1028c8633798SNaoya Horiguchi * page migration, thp tail pages can be passed. 10296ce3c4c0SChristoph Lameter */ 1030a53190a4SYang Shi static int migrate_page_add(struct page *page, struct list_head *pagelist, 1031fc301289SChristoph Lameter unsigned long flags) 10326ce3c4c0SChristoph Lameter { 1033c8633798SNaoya Horiguchi struct page *head = compound_head(page); 10346ce3c4c0SChristoph Lameter /* 1035fc301289SChristoph Lameter * Avoid migrating a page that is shared with others. 10366ce3c4c0SChristoph Lameter */ 1037c8633798SNaoya Horiguchi if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) { 1038c8633798SNaoya Horiguchi if (!isolate_lru_page(head)) { 1039c8633798SNaoya Horiguchi list_add_tail(&head->lru, pagelist); 1040c8633798SNaoya Horiguchi mod_node_page_state(page_pgdat(head), 10419de4f22aSHuang Ying NR_ISOLATED_ANON + page_is_file_lru(head), 10426c357848SMatthew Wilcox (Oracle) thp_nr_pages(head)); 1043a53190a4SYang Shi } else if (flags & MPOL_MF_STRICT) { 1044a53190a4SYang Shi /* 1045a53190a4SYang Shi * Non-movable page may reach here. And, there may be 1046a53190a4SYang Shi * temporary off LRU pages or non-LRU movable pages. 1047a53190a4SYang Shi * Treat them as unmovable pages since they can't be 1048a53190a4SYang Shi * isolated, so they can't be moved at the moment. It 1049a53190a4SYang Shi * should return -EIO for this case too. 
1050a53190a4SYang Shi */ 1051a53190a4SYang Shi return -EIO; 105262695a84SNick Piggin } 105362695a84SNick Piggin } 1054a53190a4SYang Shi 1055a53190a4SYang Shi return 0; 10566ce3c4c0SChristoph Lameter } 10576ce3c4c0SChristoph Lameter 10586ce3c4c0SChristoph Lameter /* 10597e2ab150SChristoph Lameter * Migrate pages from one node to a target node. 10607e2ab150SChristoph Lameter * Returns error or the number of pages not migrated. 10617e2ab150SChristoph Lameter */ 1062dbcb0f19SAdrian Bunk static int migrate_to_node(struct mm_struct *mm, int source, int dest, 1063dbcb0f19SAdrian Bunk int flags) 10647e2ab150SChristoph Lameter { 10657e2ab150SChristoph Lameter nodemask_t nmask; 10667e2ab150SChristoph Lameter LIST_HEAD(pagelist); 10677e2ab150SChristoph Lameter int err = 0; 1068a0976311SJoonsoo Kim struct migration_target_control mtc = { 1069a0976311SJoonsoo Kim .nid = dest, 1070a0976311SJoonsoo Kim .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 1071a0976311SJoonsoo Kim }; 10727e2ab150SChristoph Lameter 10737e2ab150SChristoph Lameter nodes_clear(nmask); 10747e2ab150SChristoph Lameter node_set(source, nmask); 10757e2ab150SChristoph Lameter 107608270807SMinchan Kim /* 107708270807SMinchan Kim * This does not "check" the range but isolates all pages that 107808270807SMinchan Kim * need migration. Between passing in the full user address 107908270807SMinchan Kim * space range and MPOL_MF_DISCONTIG_OK, this call can not fail. 108008270807SMinchan Kim */ 108108270807SMinchan Kim VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))); 108298094945SNaoya Horiguchi queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask, 10837e2ab150SChristoph Lameter flags | MPOL_MF_DISCONTIG_OK, &pagelist); 10847e2ab150SChristoph Lameter 1085cf608ac1SMinchan Kim if (!list_empty(&pagelist)) { 1086a0976311SJoonsoo Kim err = migrate_pages(&pagelist, alloc_migration_target, NULL, 10875ac95884SYang Shi (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL); 1088cf608ac1SMinchan Kim if (err) 1089e2d8cf40SNaoya Horiguchi putback_movable_pages(&pagelist); 1090cf608ac1SMinchan Kim } 109195a402c3SChristoph Lameter 10927e2ab150SChristoph Lameter return err; 10937e2ab150SChristoph Lameter } 10947e2ab150SChristoph Lameter 10957e2ab150SChristoph Lameter /* 10967e2ab150SChristoph Lameter * Move pages between the two nodesets so as to preserve the physical 10977e2ab150SChristoph Lameter * layout as much as possible. 109839743889SChristoph Lameter * 109939743889SChristoph Lameter * Returns the number of page that could not be moved. 110039743889SChristoph Lameter */ 11010ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, 11020ce72d4fSAndrew Morton const nodemask_t *to, int flags) 110339743889SChristoph Lameter { 11047e2ab150SChristoph Lameter int busy = 0; 1105f555befdSJan Stancek int err = 0; 11067e2ab150SChristoph Lameter nodemask_t tmp; 110739743889SChristoph Lameter 1108361a2a22SMinchan Kim lru_cache_disable(); 11090aedadf9SChristoph Lameter 1110d8ed45c5SMichel Lespinasse mmap_read_lock(mm); 1111d4984711SChristoph Lameter 11127e2ab150SChristoph Lameter /* 11137e2ab150SChristoph Lameter * Find a 'source' bit set in 'tmp' whose corresponding 'dest' 11147e2ab150SChristoph Lameter * bit in 'to' is not also set in 'tmp'. Clear the found 'source' 11157e2ab150SChristoph Lameter * bit in 'tmp', and return that <source, dest> pair for migration. 11167e2ab150SChristoph Lameter * The pair of nodemasks 'to' and 'from' define the map. 
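 * For example (illustrative): with from = {0,1} and to = {2,3},
 * node_remap(0, from, to) == 2 and node_remap(1, from, to) == 3, so pages
 * are moved from node 0 to node 2 and from node 1 to node 3.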
11177e2ab150SChristoph Lameter * 11187e2ab150SChristoph Lameter * If no pair of bits is found that way, fallback to picking some 11197e2ab150SChristoph Lameter * pair of 'source' and 'dest' bits that are not the same. If the 11207e2ab150SChristoph Lameter * 'source' and 'dest' bits are the same, this represents a node 11217e2ab150SChristoph Lameter * that will be migrating to itself, so no pages need move. 11227e2ab150SChristoph Lameter * 11237e2ab150SChristoph Lameter * If no bits are left in 'tmp', or if all remaining bits left 11247e2ab150SChristoph Lameter * in 'tmp' correspond to the same bit in 'to', return false 11257e2ab150SChristoph Lameter * (nothing left to migrate). 11267e2ab150SChristoph Lameter * 11277e2ab150SChristoph Lameter * This lets us pick a pair of nodes to migrate between, such that 11287e2ab150SChristoph Lameter * if possible the dest node is not already occupied by some other 11297e2ab150SChristoph Lameter * source node, minimizing the risk of overloading the memory on a 11307e2ab150SChristoph Lameter * node that would happen if we migrated incoming memory to a node 11317e2ab150SChristoph Lameter * before migrating outgoing memory source that same node. 11327e2ab150SChristoph Lameter * 11337e2ab150SChristoph Lameter * A single scan of tmp is sufficient. As we go, we remember the 11347e2ab150SChristoph Lameter * most recent <s, d> pair that moved (s != d). If we find a pair 11357e2ab150SChristoph Lameter * that not only moved, but what's better, moved to an empty slot 11367e2ab150SChristoph Lameter * (d is not set in tmp), then we break out then, with that pair. 1137ae0e47f0SJustin P. Mattock * Otherwise when we finish scanning from_tmp, we at least have the 11387e2ab150SChristoph Lameter * most recent <s, d> pair that moved. If we get all the way through 11397e2ab150SChristoph Lameter * the scan of tmp without finding any node that moved, much less 11407e2ab150SChristoph Lameter * moved to an empty node, then there is nothing left worth migrating. 11417e2ab150SChristoph Lameter */ 11427e2ab150SChristoph Lameter 11430ce72d4fSAndrew Morton tmp = *from; 11447e2ab150SChristoph Lameter while (!nodes_empty(tmp)) { 11457e2ab150SChristoph Lameter int s, d; 1146b76ac7e7SJianguo Wu int source = NUMA_NO_NODE; 11477e2ab150SChristoph Lameter int dest = 0; 11487e2ab150SChristoph Lameter 11497e2ab150SChristoph Lameter for_each_node_mask(s, tmp) { 11504a5b18ccSLarry Woodman 11514a5b18ccSLarry Woodman /* 11524a5b18ccSLarry Woodman * do_migrate_pages() tries to maintain the relative 11534a5b18ccSLarry Woodman * node relationship of the pages established between 11544a5b18ccSLarry Woodman * threads and memory areas. 11554a5b18ccSLarry Woodman * 11564a5b18ccSLarry Woodman * However if the number of source nodes is not equal to 11574a5b18ccSLarry Woodman * the number of destination nodes we can not preserve 11584a5b18ccSLarry Woodman * this node relative relationship. In that case, skip 11594a5b18ccSLarry Woodman * copying memory from a node that is in the destination 11604a5b18ccSLarry Woodman * mask. 11614a5b18ccSLarry Woodman * 11624a5b18ccSLarry Woodman * Example: [2,3,4] -> [3,4,5] moves everything. 11634a5b18ccSLarry Woodman * [0-7] - > [3,4,5] moves only 0,1,2,6,7. 
11644a5b18ccSLarry Woodman */ 11654a5b18ccSLarry Woodman 11660ce72d4fSAndrew Morton if ((nodes_weight(*from) != nodes_weight(*to)) && 11670ce72d4fSAndrew Morton (node_isset(s, *to))) 11684a5b18ccSLarry Woodman continue; 11694a5b18ccSLarry Woodman 11700ce72d4fSAndrew Morton d = node_remap(s, *from, *to); 11717e2ab150SChristoph Lameter if (s == d) 11727e2ab150SChristoph Lameter continue; 11737e2ab150SChristoph Lameter 11747e2ab150SChristoph Lameter source = s; /* Node moved. Memorize */ 11757e2ab150SChristoph Lameter dest = d; 11767e2ab150SChristoph Lameter 11777e2ab150SChristoph Lameter /* dest not in remaining from nodes? */ 11787e2ab150SChristoph Lameter if (!node_isset(dest, tmp)) 11797e2ab150SChristoph Lameter break; 11807e2ab150SChristoph Lameter } 1181b76ac7e7SJianguo Wu if (source == NUMA_NO_NODE) 11827e2ab150SChristoph Lameter break; 11837e2ab150SChristoph Lameter 11847e2ab150SChristoph Lameter node_clear(source, tmp); 11857e2ab150SChristoph Lameter err = migrate_to_node(mm, source, dest, flags); 11867e2ab150SChristoph Lameter if (err > 0) 11877e2ab150SChristoph Lameter busy += err; 11887e2ab150SChristoph Lameter if (err < 0) 11897e2ab150SChristoph Lameter break; 119039743889SChristoph Lameter } 1191d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); 1192d479960eSMinchan Kim 1193361a2a22SMinchan Kim lru_cache_enable(); 11947e2ab150SChristoph Lameter if (err < 0) 11957e2ab150SChristoph Lameter return err; 11967e2ab150SChristoph Lameter return busy; 1197b20a3503SChristoph Lameter 119839743889SChristoph Lameter } 119939743889SChristoph Lameter 12003ad33b24SLee Schermerhorn /* 12013ad33b24SLee Schermerhorn * Allocate a new page for page migration based on vma policy. 1202d05f0cdcSHugh Dickins * Start by assuming the page is mapped by the same vma as contains @start. 12033ad33b24SLee Schermerhorn * Search forward from there, if not. N.B., this assumes that the 12043ad33b24SLee Schermerhorn * list of pages handed to migrate_pages()--which is how we get here-- 12053ad33b24SLee Schermerhorn * is in virtual address order. 
12063ad33b24SLee Schermerhorn */ 1207666feb21SMichal Hocko static struct page *new_page(struct page *page, unsigned long start) 120895a402c3SChristoph Lameter { 1209d05f0cdcSHugh Dickins struct vm_area_struct *vma; 12103f649ab7SKees Cook unsigned long address; 121195a402c3SChristoph Lameter 1212d05f0cdcSHugh Dickins vma = find_vma(current->mm, start); 12133ad33b24SLee Schermerhorn while (vma) { 12143ad33b24SLee Schermerhorn address = page_address_in_vma(page, vma); 12153ad33b24SLee Schermerhorn if (address != -EFAULT) 12163ad33b24SLee Schermerhorn break; 12173ad33b24SLee Schermerhorn vma = vma->vm_next; 12183ad33b24SLee Schermerhorn } 12193ad33b24SLee Schermerhorn 122011c731e8SWanpeng Li if (PageHuge(page)) { 1221389c8178SMichal Hocko return alloc_huge_page_vma(page_hstate(compound_head(page)), 1222389c8178SMichal Hocko vma, address); 122394723aafSMichal Hocko } else if (PageTransHuge(page)) { 1224c8633798SNaoya Horiguchi struct page *thp; 1225c8633798SNaoya Horiguchi 122619deb769SDavid Rientjes thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address, 122719deb769SDavid Rientjes HPAGE_PMD_ORDER); 1228c8633798SNaoya Horiguchi if (!thp) 1229c8633798SNaoya Horiguchi return NULL; 1230c8633798SNaoya Horiguchi prep_transhuge_page(thp); 1231c8633798SNaoya Horiguchi return thp; 123211c731e8SWanpeng Li } 123311c731e8SWanpeng Li /* 123411c731e8SWanpeng Li * if !vma, alloc_page_vma() will use task or system default policy 123511c731e8SWanpeng Li */ 12360f556856SMichal Hocko return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL, 12370f556856SMichal Hocko vma, address); 123895a402c3SChristoph Lameter } 1239b20a3503SChristoph Lameter #else 1240b20a3503SChristoph Lameter 1241a53190a4SYang Shi static int migrate_page_add(struct page *page, struct list_head *pagelist, 1242b20a3503SChristoph Lameter unsigned long flags) 1243b20a3503SChristoph Lameter { 1244a53190a4SYang Shi return -EIO; 1245b20a3503SChristoph Lameter } 1246b20a3503SChristoph Lameter 12470ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, 12480ce72d4fSAndrew Morton const nodemask_t *to, int flags) 1249b20a3503SChristoph Lameter { 1250b20a3503SChristoph Lameter return -ENOSYS; 1251b20a3503SChristoph Lameter } 125295a402c3SChristoph Lameter 1253666feb21SMichal Hocko static struct page *new_page(struct page *page, unsigned long start) 125495a402c3SChristoph Lameter { 125595a402c3SChristoph Lameter return NULL; 125695a402c3SChristoph Lameter } 1257b20a3503SChristoph Lameter #endif 1258b20a3503SChristoph Lameter 1259dbcb0f19SAdrian Bunk static long do_mbind(unsigned long start, unsigned long len, 1260028fec41SDavid Rientjes unsigned short mode, unsigned short mode_flags, 1261028fec41SDavid Rientjes nodemask_t *nmask, unsigned long flags) 12626ce3c4c0SChristoph Lameter { 12636ce3c4c0SChristoph Lameter struct mm_struct *mm = current->mm; 12646ce3c4c0SChristoph Lameter struct mempolicy *new; 12656ce3c4c0SChristoph Lameter unsigned long end; 12666ce3c4c0SChristoph Lameter int err; 1267d8835445SYang Shi int ret; 12686ce3c4c0SChristoph Lameter LIST_HEAD(pagelist); 12696ce3c4c0SChristoph Lameter 1270b24f53a0SLee Schermerhorn if (flags & ~(unsigned long)MPOL_MF_VALID) 12716ce3c4c0SChristoph Lameter return -EINVAL; 127274c00241SChristoph Lameter if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE)) 12736ce3c4c0SChristoph Lameter return -EPERM; 12746ce3c4c0SChristoph Lameter 12756ce3c4c0SChristoph Lameter if (start & ~PAGE_MASK) 12766ce3c4c0SChristoph Lameter return -EINVAL; 12776ce3c4c0SChristoph Lameter 
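	/*
	 * Only 'start' itself must be page aligned; 'len' is rounded up
	 * to a whole number of pages just below, before 'end' is computed.
	 */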
12786ce3c4c0SChristoph Lameter if (mode == MPOL_DEFAULT) 12796ce3c4c0SChristoph Lameter flags &= ~MPOL_MF_STRICT; 12806ce3c4c0SChristoph Lameter 12816ce3c4c0SChristoph Lameter len = (len + PAGE_SIZE - 1) & PAGE_MASK; 12826ce3c4c0SChristoph Lameter end = start + len; 12836ce3c4c0SChristoph Lameter 12846ce3c4c0SChristoph Lameter if (end < start) 12856ce3c4c0SChristoph Lameter return -EINVAL; 12866ce3c4c0SChristoph Lameter if (end == start) 12876ce3c4c0SChristoph Lameter return 0; 12886ce3c4c0SChristoph Lameter 1289028fec41SDavid Rientjes new = mpol_new(mode, mode_flags, nmask); 12906ce3c4c0SChristoph Lameter if (IS_ERR(new)) 12916ce3c4c0SChristoph Lameter return PTR_ERR(new); 12926ce3c4c0SChristoph Lameter 1293b24f53a0SLee Schermerhorn if (flags & MPOL_MF_LAZY) 1294b24f53a0SLee Schermerhorn new->flags |= MPOL_F_MOF; 1295b24f53a0SLee Schermerhorn 12966ce3c4c0SChristoph Lameter /* 12976ce3c4c0SChristoph Lameter * If we are using the default policy then operation 12986ce3c4c0SChristoph Lameter * on discontinuous address spaces is okay after all 12996ce3c4c0SChristoph Lameter */ 13006ce3c4c0SChristoph Lameter if (!new) 13016ce3c4c0SChristoph Lameter flags |= MPOL_MF_DISCONTIG_OK; 13026ce3c4c0SChristoph Lameter 1303028fec41SDavid Rientjes pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n", 1304028fec41SDavid Rientjes start, start + len, mode, mode_flags, 130500ef2d2fSDavid Rientjes nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE); 13066ce3c4c0SChristoph Lameter 13070aedadf9SChristoph Lameter if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 13080aedadf9SChristoph Lameter 1309361a2a22SMinchan Kim lru_cache_disable(); 13100aedadf9SChristoph Lameter } 13114bfc4495SKAMEZAWA Hiroyuki { 13124bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 13134bfc4495SKAMEZAWA Hiroyuki if (scratch) { 1314d8ed45c5SMichel Lespinasse mmap_write_lock(mm); 13154bfc4495SKAMEZAWA Hiroyuki err = mpol_set_nodemask(new, nmask, scratch); 13164bfc4495SKAMEZAWA Hiroyuki if (err) 1317d8ed45c5SMichel Lespinasse mmap_write_unlock(mm); 13184bfc4495SKAMEZAWA Hiroyuki } else 13194bfc4495SKAMEZAWA Hiroyuki err = -ENOMEM; 13204bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 13214bfc4495SKAMEZAWA Hiroyuki } 1322b05ca738SKOSAKI Motohiro if (err) 1323b05ca738SKOSAKI Motohiro goto mpol_out; 1324b05ca738SKOSAKI Motohiro 1325d8835445SYang Shi ret = queue_pages_range(mm, start, end, nmask, 13266ce3c4c0SChristoph Lameter flags | MPOL_MF_INVERT, &pagelist); 1327d8835445SYang Shi 1328d8835445SYang Shi if (ret < 0) { 1329a85dfc30SYang Shi err = ret; 1330d8835445SYang Shi goto up_out; 1331d8835445SYang Shi } 1332d8835445SYang Shi 13339d8cebd4SKOSAKI Motohiro err = mbind_range(mm, start, end, new); 13347e2ab150SChristoph Lameter 1335b24f53a0SLee Schermerhorn if (!err) { 1336b24f53a0SLee Schermerhorn int nr_failed = 0; 1337b24f53a0SLee Schermerhorn 1338cf608ac1SMinchan Kim if (!list_empty(&pagelist)) { 1339b24f53a0SLee Schermerhorn WARN_ON_ONCE(flags & MPOL_MF_LAZY); 1340d05f0cdcSHugh Dickins nr_failed = migrate_pages(&pagelist, new_page, NULL, 13415ac95884SYang Shi start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND, NULL); 1342cf608ac1SMinchan Kim if (nr_failed) 134374060e4dSNaoya Horiguchi putback_movable_pages(&pagelist); 1344cf608ac1SMinchan Kim } 13456ce3c4c0SChristoph Lameter 1346d8835445SYang Shi if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT))) 13476ce3c4c0SChristoph Lameter err = -EIO; 1348a85dfc30SYang Shi } else { 1349d8835445SYang Shi up_out: 1350a85dfc30SYang Shi if (!list_empty(&pagelist)) 1351a85dfc30SYang Shi 
putback_movable_pages(&pagelist);
1352a85dfc30SYang Shi 	}
1353a85dfc30SYang Shi 
1354d8ed45c5SMichel Lespinasse 	mmap_write_unlock(mm);
1355b05ca738SKOSAKI Motohiro mpol_out:
1356f0be3d32SLee Schermerhorn 	mpol_put(new);
1357d479960eSMinchan Kim 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
1358361a2a22SMinchan Kim 		lru_cache_enable();
13596ce3c4c0SChristoph Lameter 	return err;
13606ce3c4c0SChristoph Lameter }
13616ce3c4c0SChristoph Lameter 
136239743889SChristoph Lameter /*
13638bccd85fSChristoph Lameter  * User space interface with variable sized bitmaps for nodelists.
13648bccd85fSChristoph Lameter  */
13658bccd85fSChristoph Lameter 
13668bccd85fSChristoph Lameter /* Copy a node mask from user space. */
136739743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
13688bccd85fSChristoph Lameter 		     unsigned long maxnode)
13698bccd85fSChristoph Lameter {
13708bccd85fSChristoph Lameter 	unsigned long k;
137156521e7aSYisheng Xie 	unsigned long t;
13728bccd85fSChristoph Lameter 	unsigned long nlongs;
13738bccd85fSChristoph Lameter 	unsigned long endmask;
13748bccd85fSChristoph Lameter 
13758bccd85fSChristoph Lameter 	--maxnode;
13768bccd85fSChristoph Lameter 	nodes_clear(*nodes);
13778bccd85fSChristoph Lameter 	if (maxnode == 0 || !nmask)
13788bccd85fSChristoph Lameter 		return 0;
1379a9c930baSAndi Kleen 	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1380636f13c1SChris Wright 		return -EINVAL;
13818bccd85fSChristoph Lameter 
13828bccd85fSChristoph Lameter 	nlongs = BITS_TO_LONGS(maxnode);
13838bccd85fSChristoph Lameter 	if ((maxnode % BITS_PER_LONG) == 0)
13848bccd85fSChristoph Lameter 		endmask = ~0UL;
13858bccd85fSChristoph Lameter 	else
13868bccd85fSChristoph Lameter 		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
13878bccd85fSChristoph Lameter 
138856521e7aSYisheng Xie 	/*
138956521e7aSYisheng Xie 	 * When the user specifies more nodes than supported, just check
139056521e7aSYisheng Xie 	 * that the unsupported part is all zero.
139156521e7aSYisheng Xie 	 *
139256521e7aSYisheng Xie 	 * If maxnode has more longs than MAX_NUMNODES, check
139356521e7aSYisheng Xie 	 * the bits in that area first, and then go on to check
139456521e7aSYisheng Xie 	 * the remaining bits, which are equal to or bigger than MAX_NUMNODES.
139556521e7aSYisheng Xie 	 * Otherwise, just check bits [MAX_NUMNODES, maxnode).
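	 *
	 * Illustrative example: with MAX_NUMNODES == 1024 and a user-supplied
	 * maxnode of 2048, bits 1024..2046 of the user mask must all be clear
	 * (otherwise -EINVAL), and only bits 0..1023 are copied into *nodes.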
139656521e7aSYisheng Xie */ 13978bccd85fSChristoph Lameter if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) { 13988bccd85fSChristoph Lameter for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) { 13998bccd85fSChristoph Lameter if (get_user(t, nmask + k)) 14008bccd85fSChristoph Lameter return -EFAULT; 14018bccd85fSChristoph Lameter if (k == nlongs - 1) { 14028bccd85fSChristoph Lameter if (t & endmask) 14038bccd85fSChristoph Lameter return -EINVAL; 14048bccd85fSChristoph Lameter } else if (t) 14058bccd85fSChristoph Lameter return -EINVAL; 14068bccd85fSChristoph Lameter } 14078bccd85fSChristoph Lameter nlongs = BITS_TO_LONGS(MAX_NUMNODES); 14088bccd85fSChristoph Lameter endmask = ~0UL; 14098bccd85fSChristoph Lameter } 14108bccd85fSChristoph Lameter 141156521e7aSYisheng Xie if (maxnode > MAX_NUMNODES && MAX_NUMNODES % BITS_PER_LONG != 0) { 141256521e7aSYisheng Xie unsigned long valid_mask = endmask; 141356521e7aSYisheng Xie 141456521e7aSYisheng Xie valid_mask &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1); 141556521e7aSYisheng Xie if (get_user(t, nmask + nlongs - 1)) 141656521e7aSYisheng Xie return -EFAULT; 141756521e7aSYisheng Xie if (t & valid_mask) 141856521e7aSYisheng Xie return -EINVAL; 141956521e7aSYisheng Xie } 142056521e7aSYisheng Xie 14218bccd85fSChristoph Lameter if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long))) 14228bccd85fSChristoph Lameter return -EFAULT; 14238bccd85fSChristoph Lameter nodes_addr(*nodes)[nlongs-1] &= endmask; 14248bccd85fSChristoph Lameter return 0; 14258bccd85fSChristoph Lameter } 14268bccd85fSChristoph Lameter 14278bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */ 14288bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode, 14298bccd85fSChristoph Lameter nodemask_t *nodes) 14308bccd85fSChristoph Lameter { 14318bccd85fSChristoph Lameter unsigned long copy = ALIGN(maxnode-1, 64) / 8; 1432050c17f2SRalph Campbell unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long); 14338bccd85fSChristoph Lameter 14348bccd85fSChristoph Lameter if (copy > nbytes) { 14358bccd85fSChristoph Lameter if (copy > PAGE_SIZE) 14368bccd85fSChristoph Lameter return -EINVAL; 14378bccd85fSChristoph Lameter if (clear_user((char __user *)mask + nbytes, copy - nbytes)) 14388bccd85fSChristoph Lameter return -EFAULT; 14398bccd85fSChristoph Lameter copy = nbytes; 14408bccd85fSChristoph Lameter } 14418bccd85fSChristoph Lameter return copy_to_user(mask, nodes_addr(*nodes), copy) ? 
-EFAULT : 0; 14428bccd85fSChristoph Lameter } 14438bccd85fSChristoph Lameter 144495837924SFeng Tang /* Basic parameter sanity check used by both mbind() and set_mempolicy() */ 144595837924SFeng Tang static inline int sanitize_mpol_flags(int *mode, unsigned short *flags) 144695837924SFeng Tang { 144795837924SFeng Tang *flags = *mode & MPOL_MODE_FLAGS; 144895837924SFeng Tang *mode &= ~MPOL_MODE_FLAGS; 1449b27abaccSDave Hansen 1450a38a59fdSBen Widawsky if ((unsigned int)(*mode) >= MPOL_MAX) 145195837924SFeng Tang return -EINVAL; 145295837924SFeng Tang if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES)) 145395837924SFeng Tang return -EINVAL; 145495837924SFeng Tang 145595837924SFeng Tang return 0; 145695837924SFeng Tang } 145795837924SFeng Tang 1458e7dc9ad6SDominik Brodowski static long kernel_mbind(unsigned long start, unsigned long len, 1459e7dc9ad6SDominik Brodowski unsigned long mode, const unsigned long __user *nmask, 1460e7dc9ad6SDominik Brodowski unsigned long maxnode, unsigned int flags) 14618bccd85fSChristoph Lameter { 1462028fec41SDavid Rientjes unsigned short mode_flags; 146395837924SFeng Tang nodemask_t nodes; 146495837924SFeng Tang int lmode = mode; 146595837924SFeng Tang int err; 14668bccd85fSChristoph Lameter 1467057d3389SAndrey Konovalov start = untagged_addr(start); 146895837924SFeng Tang err = sanitize_mpol_flags(&lmode, &mode_flags); 146995837924SFeng Tang if (err) 147095837924SFeng Tang return err; 147195837924SFeng Tang 14728bccd85fSChristoph Lameter err = get_nodes(&nodes, nmask, maxnode); 14738bccd85fSChristoph Lameter if (err) 14748bccd85fSChristoph Lameter return err; 147595837924SFeng Tang 147695837924SFeng Tang return do_mbind(start, len, lmode, mode_flags, &nodes, flags); 14778bccd85fSChristoph Lameter } 14788bccd85fSChristoph Lameter 1479e7dc9ad6SDominik Brodowski SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len, 1480e7dc9ad6SDominik Brodowski unsigned long, mode, const unsigned long __user *, nmask, 1481e7dc9ad6SDominik Brodowski unsigned long, maxnode, unsigned int, flags) 1482e7dc9ad6SDominik Brodowski { 1483e7dc9ad6SDominik Brodowski return kernel_mbind(start, len, mode, nmask, maxnode, flags); 1484e7dc9ad6SDominik Brodowski } 1485e7dc9ad6SDominik Brodowski 14868bccd85fSChristoph Lameter /* Set the process memory policy */ 1487af03c4acSDominik Brodowski static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask, 1488af03c4acSDominik Brodowski unsigned long maxnode) 14898bccd85fSChristoph Lameter { 149095837924SFeng Tang unsigned short mode_flags; 14918bccd85fSChristoph Lameter nodemask_t nodes; 149295837924SFeng Tang int lmode = mode; 149395837924SFeng Tang int err; 14948bccd85fSChristoph Lameter 149595837924SFeng Tang err = sanitize_mpol_flags(&lmode, &mode_flags); 149695837924SFeng Tang if (err) 149795837924SFeng Tang return err; 149895837924SFeng Tang 14998bccd85fSChristoph Lameter err = get_nodes(&nodes, nmask, maxnode); 15008bccd85fSChristoph Lameter if (err) 15018bccd85fSChristoph Lameter return err; 150295837924SFeng Tang 150395837924SFeng Tang return do_set_mempolicy(lmode, mode_flags, &nodes); 15048bccd85fSChristoph Lameter } 15058bccd85fSChristoph Lameter 1506af03c4acSDominik Brodowski SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask, 1507af03c4acSDominik Brodowski unsigned long, maxnode) 1508af03c4acSDominik Brodowski { 1509af03c4acSDominik Brodowski return kernel_set_mempolicy(mode, nmask, maxnode); 1510af03c4acSDominik Brodowski } 1511af03c4acSDominik Brodowski 
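/*
 * Minimal user-space sketch of the two syscalls above (illustrative only;
 * it assumes the raw wrappers declared in libnuma's <numaif.h>, and the
 * node numbers and sizes are arbitrary examples):
 *
 *	#include <numaif.h>
 *	#include <sys/mman.h>
 *
 *	unsigned long nodes = (1UL << 0) | (1UL << 1);	// nodes 0 and 1
 *
 *	// Interleave this task's future allocations across nodes 0 and 1.
 *	if (set_mempolicy(MPOL_INTERLEAVE, &nodes, sizeof(nodes) * 8))
 *		perror("set_mempolicy");
 *
 *	// Bind a single mapping to node 0 only.
 *	void *p = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	unsigned long node0 = 1UL << 0;
 *	if (mbind(p, 1 << 20, MPOL_BIND, &node0, sizeof(node0) * 8,
 *		  MPOL_MF_STRICT))
 *		perror("mbind");
 */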
1512b6e9b0baSDominik Brodowski static int kernel_migrate_pages(pid_t pid, unsigned long maxnode, 1513b6e9b0baSDominik Brodowski const unsigned long __user *old_nodes, 1514b6e9b0baSDominik Brodowski const unsigned long __user *new_nodes) 151539743889SChristoph Lameter { 1516596d7cfaSKOSAKI Motohiro struct mm_struct *mm = NULL; 151739743889SChristoph Lameter struct task_struct *task; 151839743889SChristoph Lameter nodemask_t task_nodes; 151939743889SChristoph Lameter int err; 1520596d7cfaSKOSAKI Motohiro nodemask_t *old; 1521596d7cfaSKOSAKI Motohiro nodemask_t *new; 1522596d7cfaSKOSAKI Motohiro NODEMASK_SCRATCH(scratch); 152339743889SChristoph Lameter 1524596d7cfaSKOSAKI Motohiro if (!scratch) 1525596d7cfaSKOSAKI Motohiro return -ENOMEM; 152639743889SChristoph Lameter 1527596d7cfaSKOSAKI Motohiro old = &scratch->mask1; 1528596d7cfaSKOSAKI Motohiro new = &scratch->mask2; 1529596d7cfaSKOSAKI Motohiro 1530596d7cfaSKOSAKI Motohiro err = get_nodes(old, old_nodes, maxnode); 153139743889SChristoph Lameter if (err) 1532596d7cfaSKOSAKI Motohiro goto out; 1533596d7cfaSKOSAKI Motohiro 1534596d7cfaSKOSAKI Motohiro err = get_nodes(new, new_nodes, maxnode); 1535596d7cfaSKOSAKI Motohiro if (err) 1536596d7cfaSKOSAKI Motohiro goto out; 153739743889SChristoph Lameter 153839743889SChristoph Lameter /* Find the mm_struct */ 153955cfaa3cSZeng Zhaoming rcu_read_lock(); 1540228ebcbeSPavel Emelyanov task = pid ? find_task_by_vpid(pid) : current; 154139743889SChristoph Lameter if (!task) { 154255cfaa3cSZeng Zhaoming rcu_read_unlock(); 1543596d7cfaSKOSAKI Motohiro err = -ESRCH; 1544596d7cfaSKOSAKI Motohiro goto out; 154539743889SChristoph Lameter } 15463268c63eSChristoph Lameter get_task_struct(task); 154739743889SChristoph Lameter 1548596d7cfaSKOSAKI Motohiro err = -EINVAL; 154939743889SChristoph Lameter 155039743889SChristoph Lameter /* 155131367466SOtto Ebeling * Check if this process has the right to modify the specified process. 155231367466SOtto Ebeling * Use the regular "ptrace_may_access()" checks. 155339743889SChristoph Lameter */ 155431367466SOtto Ebeling if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) { 1555c69e8d9cSDavid Howells rcu_read_unlock(); 155639743889SChristoph Lameter err = -EPERM; 15573268c63eSChristoph Lameter goto out_put; 155839743889SChristoph Lameter } 1559c69e8d9cSDavid Howells rcu_read_unlock(); 156039743889SChristoph Lameter 156139743889SChristoph Lameter task_nodes = cpuset_mems_allowed(task); 156239743889SChristoph Lameter /* Is the user allowed to access the target nodes? 
*/ 1563596d7cfaSKOSAKI Motohiro if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) { 156439743889SChristoph Lameter err = -EPERM; 15653268c63eSChristoph Lameter goto out_put; 156639743889SChristoph Lameter } 156739743889SChristoph Lameter 15680486a38bSYisheng Xie task_nodes = cpuset_mems_allowed(current); 15690486a38bSYisheng Xie nodes_and(*new, *new, task_nodes); 15700486a38bSYisheng Xie if (nodes_empty(*new)) 15713268c63eSChristoph Lameter goto out_put; 15720486a38bSYisheng Xie 157386c3a764SDavid Quigley err = security_task_movememory(task); 157486c3a764SDavid Quigley if (err) 15753268c63eSChristoph Lameter goto out_put; 157686c3a764SDavid Quigley 15773268c63eSChristoph Lameter mm = get_task_mm(task); 15783268c63eSChristoph Lameter put_task_struct(task); 1579f2a9ef88SSasha Levin 1580f2a9ef88SSasha Levin if (!mm) { 1581f2a9ef88SSasha Levin err = -EINVAL; 1582f2a9ef88SSasha Levin goto out; 1583f2a9ef88SSasha Levin } 1584f2a9ef88SSasha Levin 1585596d7cfaSKOSAKI Motohiro err = do_migrate_pages(mm, old, new, 158674c00241SChristoph Lameter capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE); 15873268c63eSChristoph Lameter 158839743889SChristoph Lameter mmput(mm); 15893268c63eSChristoph Lameter out: 1590596d7cfaSKOSAKI Motohiro NODEMASK_SCRATCH_FREE(scratch); 1591596d7cfaSKOSAKI Motohiro 159239743889SChristoph Lameter return err; 15933268c63eSChristoph Lameter 15943268c63eSChristoph Lameter out_put: 15953268c63eSChristoph Lameter put_task_struct(task); 15963268c63eSChristoph Lameter goto out; 15973268c63eSChristoph Lameter 159839743889SChristoph Lameter } 159939743889SChristoph Lameter 1600b6e9b0baSDominik Brodowski SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode, 1601b6e9b0baSDominik Brodowski const unsigned long __user *, old_nodes, 1602b6e9b0baSDominik Brodowski const unsigned long __user *, new_nodes) 1603b6e9b0baSDominik Brodowski { 1604b6e9b0baSDominik Brodowski return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes); 1605b6e9b0baSDominik Brodowski } 1606b6e9b0baSDominik Brodowski 160739743889SChristoph Lameter 16088bccd85fSChristoph Lameter /* Retrieve NUMA policy */ 1609af03c4acSDominik Brodowski static int kernel_get_mempolicy(int __user *policy, 1610af03c4acSDominik Brodowski unsigned long __user *nmask, 1611af03c4acSDominik Brodowski unsigned long maxnode, 1612af03c4acSDominik Brodowski unsigned long addr, 1613af03c4acSDominik Brodowski unsigned long flags) 16148bccd85fSChristoph Lameter { 1615dbcb0f19SAdrian Bunk int err; 16163f649ab7SKees Cook int pval; 16178bccd85fSChristoph Lameter nodemask_t nodes; 16188bccd85fSChristoph Lameter 1619050c17f2SRalph Campbell if (nmask != NULL && maxnode < nr_node_ids) 16208bccd85fSChristoph Lameter return -EINVAL; 16218bccd85fSChristoph Lameter 16224605f057SWenchao Hao addr = untagged_addr(addr); 16234605f057SWenchao Hao 16248bccd85fSChristoph Lameter err = do_get_mempolicy(&pval, &nodes, addr, flags); 16258bccd85fSChristoph Lameter 16268bccd85fSChristoph Lameter if (err) 16278bccd85fSChristoph Lameter return err; 16288bccd85fSChristoph Lameter 16298bccd85fSChristoph Lameter if (policy && put_user(pval, policy)) 16308bccd85fSChristoph Lameter return -EFAULT; 16318bccd85fSChristoph Lameter 16328bccd85fSChristoph Lameter if (nmask) 16338bccd85fSChristoph Lameter err = copy_nodes_to_user(nmask, maxnode, &nodes); 16348bccd85fSChristoph Lameter 16358bccd85fSChristoph Lameter return err; 16368bccd85fSChristoph Lameter } 16378bccd85fSChristoph Lameter 1638af03c4acSDominik Brodowski SYSCALL_DEFINE5(get_mempolicy, 
int __user *, policy, 1639af03c4acSDominik Brodowski unsigned long __user *, nmask, unsigned long, maxnode, 1640af03c4acSDominik Brodowski unsigned long, addr, unsigned long, flags) 1641af03c4acSDominik Brodowski { 1642af03c4acSDominik Brodowski return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags); 1643af03c4acSDominik Brodowski } 1644af03c4acSDominik Brodowski 16451da177e4SLinus Torvalds #ifdef CONFIG_COMPAT 16461da177e4SLinus Torvalds 1647c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, 1648c93e0f6cSHeiko Carstens compat_ulong_t __user *, nmask, 1649c93e0f6cSHeiko Carstens compat_ulong_t, maxnode, 1650c93e0f6cSHeiko Carstens compat_ulong_t, addr, compat_ulong_t, flags) 16511da177e4SLinus Torvalds { 16521da177e4SLinus Torvalds long err; 16531da177e4SLinus Torvalds unsigned long __user *nm = NULL; 16541da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 16551da177e4SLinus Torvalds DECLARE_BITMAP(bm, MAX_NUMNODES); 16561da177e4SLinus Torvalds 1657050c17f2SRalph Campbell nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids); 16581da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 16591da177e4SLinus Torvalds 16601da177e4SLinus Torvalds if (nmask) 16611da177e4SLinus Torvalds nm = compat_alloc_user_space(alloc_size); 16621da177e4SLinus Torvalds 1663af03c4acSDominik Brodowski err = kernel_get_mempolicy(policy, nm, nr_bits+1, addr, flags); 16641da177e4SLinus Torvalds 16651da177e4SLinus Torvalds if (!err && nmask) { 16662bbff6c7SKAMEZAWA Hiroyuki unsigned long copy_size; 16672bbff6c7SKAMEZAWA Hiroyuki copy_size = min_t(unsigned long, sizeof(bm), alloc_size); 16682bbff6c7SKAMEZAWA Hiroyuki err = copy_from_user(bm, nm, copy_size); 16691da177e4SLinus Torvalds /* ensure entire bitmap is zeroed */ 16701da177e4SLinus Torvalds err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8); 16711da177e4SLinus Torvalds err |= compat_put_bitmap(nmask, bm, nr_bits); 16721da177e4SLinus Torvalds } 16731da177e4SLinus Torvalds 16741da177e4SLinus Torvalds return err; 16751da177e4SLinus Torvalds } 16761da177e4SLinus Torvalds 1677c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask, 1678c93e0f6cSHeiko Carstens compat_ulong_t, maxnode) 16791da177e4SLinus Torvalds { 16801da177e4SLinus Torvalds unsigned long __user *nm = NULL; 16811da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 16821da177e4SLinus Torvalds DECLARE_BITMAP(bm, MAX_NUMNODES); 16831da177e4SLinus Torvalds 16841da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 16851da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 16861da177e4SLinus Torvalds 16871da177e4SLinus Torvalds if (nmask) { 1688cf01fb99SChris Salls if (compat_get_bitmap(bm, nmask, nr_bits)) 16891da177e4SLinus Torvalds return -EFAULT; 1690cf01fb99SChris Salls nm = compat_alloc_user_space(alloc_size); 1691cf01fb99SChris Salls if (copy_to_user(nm, bm, alloc_size)) 1692cf01fb99SChris Salls return -EFAULT; 1693cf01fb99SChris Salls } 16941da177e4SLinus Torvalds 1695af03c4acSDominik Brodowski return kernel_set_mempolicy(mode, nm, nr_bits+1); 16961da177e4SLinus Torvalds } 16971da177e4SLinus Torvalds 1698c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len, 1699c93e0f6cSHeiko Carstens compat_ulong_t, mode, compat_ulong_t __user *, nmask, 1700c93e0f6cSHeiko Carstens compat_ulong_t, maxnode, compat_ulong_t, flags) 17011da177e4SLinus Torvalds { 17021da177e4SLinus Torvalds unsigned long __user *nm = 
NULL; 17031da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 1704dfcd3c0dSAndi Kleen nodemask_t bm; 17051da177e4SLinus Torvalds 17061da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 17071da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 17081da177e4SLinus Torvalds 17091da177e4SLinus Torvalds if (nmask) { 1710cf01fb99SChris Salls if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits)) 17111da177e4SLinus Torvalds return -EFAULT; 1712cf01fb99SChris Salls nm = compat_alloc_user_space(alloc_size); 1713cf01fb99SChris Salls if (copy_to_user(nm, nodes_addr(bm), alloc_size)) 1714cf01fb99SChris Salls return -EFAULT; 1715cf01fb99SChris Salls } 17161da177e4SLinus Torvalds 1717e7dc9ad6SDominik Brodowski return kernel_mbind(start, len, mode, nm, nr_bits+1, flags); 17181da177e4SLinus Torvalds } 17191da177e4SLinus Torvalds 1720b6e9b0baSDominik Brodowski COMPAT_SYSCALL_DEFINE4(migrate_pages, compat_pid_t, pid, 1721b6e9b0baSDominik Brodowski compat_ulong_t, maxnode, 1722b6e9b0baSDominik Brodowski const compat_ulong_t __user *, old_nodes, 1723b6e9b0baSDominik Brodowski const compat_ulong_t __user *, new_nodes) 1724b6e9b0baSDominik Brodowski { 1725b6e9b0baSDominik Brodowski unsigned long __user *old = NULL; 1726b6e9b0baSDominik Brodowski unsigned long __user *new = NULL; 1727b6e9b0baSDominik Brodowski nodemask_t tmp_mask; 1728b6e9b0baSDominik Brodowski unsigned long nr_bits; 1729b6e9b0baSDominik Brodowski unsigned long size; 1730b6e9b0baSDominik Brodowski 1731b6e9b0baSDominik Brodowski nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES); 1732b6e9b0baSDominik Brodowski size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 1733b6e9b0baSDominik Brodowski if (old_nodes) { 1734b6e9b0baSDominik Brodowski if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits)) 1735b6e9b0baSDominik Brodowski return -EFAULT; 1736b6e9b0baSDominik Brodowski old = compat_alloc_user_space(new_nodes ? size * 2 : size); 1737b6e9b0baSDominik Brodowski if (new_nodes) 1738b6e9b0baSDominik Brodowski new = old + size / sizeof(unsigned long); 1739b6e9b0baSDominik Brodowski if (copy_to_user(old, nodes_addr(tmp_mask), size)) 1740b6e9b0baSDominik Brodowski return -EFAULT; 1741b6e9b0baSDominik Brodowski } 1742b6e9b0baSDominik Brodowski if (new_nodes) { 1743b6e9b0baSDominik Brodowski if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits)) 1744b6e9b0baSDominik Brodowski return -EFAULT; 1745b6e9b0baSDominik Brodowski if (new == NULL) 1746b6e9b0baSDominik Brodowski new = compat_alloc_user_space(size); 1747b6e9b0baSDominik Brodowski if (copy_to_user(new, nodes_addr(tmp_mask), size)) 1748b6e9b0baSDominik Brodowski return -EFAULT; 1749b6e9b0baSDominik Brodowski } 1750b6e9b0baSDominik Brodowski return kernel_migrate_pages(pid, nr_bits + 1, old, new); 1751b6e9b0baSDominik Brodowski } 1752b6e9b0baSDominik Brodowski 1753b6e9b0baSDominik Brodowski #endif /* CONFIG_COMPAT */ 17541da177e4SLinus Torvalds 175520ca87f2SLi Xinhai bool vma_migratable(struct vm_area_struct *vma) 175620ca87f2SLi Xinhai { 175720ca87f2SLi Xinhai if (vma->vm_flags & (VM_IO | VM_PFNMAP)) 175820ca87f2SLi Xinhai return false; 175920ca87f2SLi Xinhai 176020ca87f2SLi Xinhai /* 176120ca87f2SLi Xinhai * DAX device mappings require predictable access latency, so avoid 176220ca87f2SLi Xinhai * incurring periodic faults. 
176320ca87f2SLi Xinhai */ 176420ca87f2SLi Xinhai if (vma_is_dax(vma)) 176520ca87f2SLi Xinhai return false; 176620ca87f2SLi Xinhai 176720ca87f2SLi Xinhai if (is_vm_hugetlb_page(vma) && 176820ca87f2SLi Xinhai !hugepage_migration_supported(hstate_vma(vma))) 176920ca87f2SLi Xinhai return false; 177020ca87f2SLi Xinhai 177120ca87f2SLi Xinhai /* 177220ca87f2SLi Xinhai * Migration allocates pages in the highest zone. If we cannot 177320ca87f2SLi Xinhai * do so then migration (at least from node to node) is not 177420ca87f2SLi Xinhai * possible. 177520ca87f2SLi Xinhai */ 177620ca87f2SLi Xinhai if (vma->vm_file && 177720ca87f2SLi Xinhai gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping)) 177820ca87f2SLi Xinhai < policy_zone) 177920ca87f2SLi Xinhai return false; 178020ca87f2SLi Xinhai return true; 178120ca87f2SLi Xinhai } 178220ca87f2SLi Xinhai 178374d2c3a0SOleg Nesterov struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, 178474d2c3a0SOleg Nesterov unsigned long addr) 17851da177e4SLinus Torvalds { 17868d90274bSOleg Nesterov struct mempolicy *pol = NULL; 17871da177e4SLinus Torvalds 17881da177e4SLinus Torvalds if (vma) { 1789480eccf9SLee Schermerhorn if (vma->vm_ops && vma->vm_ops->get_policy) { 17908d90274bSOleg Nesterov pol = vma->vm_ops->get_policy(vma, addr); 179100442ad0SMel Gorman } else if (vma->vm_policy) { 17921da177e4SLinus Torvalds pol = vma->vm_policy; 179300442ad0SMel Gorman 179400442ad0SMel Gorman /* 179500442ad0SMel Gorman * shmem_alloc_page() passes MPOL_F_SHARED policy with 179600442ad0SMel Gorman * a pseudo vma whose vma->vm_ops=NULL. Take a reference 179700442ad0SMel Gorman * count on these policies which will be dropped by 179800442ad0SMel Gorman * mpol_cond_put() later 179900442ad0SMel Gorman */ 180000442ad0SMel Gorman if (mpol_needs_cond_ref(pol)) 180100442ad0SMel Gorman mpol_get(pol); 180200442ad0SMel Gorman } 18031da177e4SLinus Torvalds } 1804f15ca78eSOleg Nesterov 180574d2c3a0SOleg Nesterov return pol; 180674d2c3a0SOleg Nesterov } 180774d2c3a0SOleg Nesterov 180874d2c3a0SOleg Nesterov /* 1809dd6eecb9SOleg Nesterov * get_vma_policy(@vma, @addr) 181074d2c3a0SOleg Nesterov * @vma: virtual memory area whose policy is sought 181174d2c3a0SOleg Nesterov * @addr: address in @vma for shared policy lookup 181274d2c3a0SOleg Nesterov * 181374d2c3a0SOleg Nesterov * Returns effective policy for a VMA at specified address. 1814dd6eecb9SOleg Nesterov * Falls back to current->mempolicy or system default policy, as necessary. 181574d2c3a0SOleg Nesterov * Shared policies [those marked as MPOL_F_SHARED] require an extra reference 181674d2c3a0SOleg Nesterov * count--added by the get_policy() vm_op, as appropriate--to protect against 181774d2c3a0SOleg Nesterov * freeing by another task. It is the caller's responsibility to free the 181874d2c3a0SOleg Nesterov * extra reference for shared policies. 
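 *
 * Typical caller pattern (sketch; see alloc_pages_vma() below):
 *
 *	pol = get_vma_policy(vma, addr);
 *	... allocate according to pol ...
 *	mpol_cond_put(pol);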
181974d2c3a0SOleg Nesterov */ 1820ac79f78dSDavid Rientjes static struct mempolicy *get_vma_policy(struct vm_area_struct *vma, 1821dd6eecb9SOleg Nesterov unsigned long addr) 182274d2c3a0SOleg Nesterov { 182374d2c3a0SOleg Nesterov struct mempolicy *pol = __get_vma_policy(vma, addr); 182474d2c3a0SOleg Nesterov 18258d90274bSOleg Nesterov if (!pol) 1826dd6eecb9SOleg Nesterov pol = get_task_policy(current); 18278d90274bSOleg Nesterov 18281da177e4SLinus Torvalds return pol; 18291da177e4SLinus Torvalds } 18301da177e4SLinus Torvalds 18316b6482bbSOleg Nesterov bool vma_policy_mof(struct vm_area_struct *vma) 1832fc314724SMel Gorman { 18336b6482bbSOleg Nesterov struct mempolicy *pol; 1834f15ca78eSOleg Nesterov 1835fc314724SMel Gorman if (vma->vm_ops && vma->vm_ops->get_policy) { 1836fc314724SMel Gorman bool ret = false; 1837fc314724SMel Gorman 1838fc314724SMel Gorman pol = vma->vm_ops->get_policy(vma, vma->vm_start); 1839fc314724SMel Gorman if (pol && (pol->flags & MPOL_F_MOF)) 1840fc314724SMel Gorman ret = true; 1841fc314724SMel Gorman mpol_cond_put(pol); 1842fc314724SMel Gorman 1843fc314724SMel Gorman return ret; 18448d90274bSOleg Nesterov } 18458d90274bSOleg Nesterov 1846fc314724SMel Gorman pol = vma->vm_policy; 18478d90274bSOleg Nesterov if (!pol) 18486b6482bbSOleg Nesterov pol = get_task_policy(current); 1849fc314724SMel Gorman 1850fc314724SMel Gorman return pol->flags & MPOL_F_MOF; 1851fc314724SMel Gorman } 1852fc314724SMel Gorman 1853d3eb1570SLai Jiangshan static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone) 1854d3eb1570SLai Jiangshan { 1855d3eb1570SLai Jiangshan enum zone_type dynamic_policy_zone = policy_zone; 1856d3eb1570SLai Jiangshan 1857d3eb1570SLai Jiangshan BUG_ON(dynamic_policy_zone == ZONE_MOVABLE); 1858d3eb1570SLai Jiangshan 1859d3eb1570SLai Jiangshan /* 1860269fbe72SBen Widawsky * if policy->nodes has movable memory only, 1861d3eb1570SLai Jiangshan * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only. 1862d3eb1570SLai Jiangshan * 1863269fbe72SBen Widawsky * policy->nodes is intersect with node_states[N_MEMORY]. 1864f0953a1bSIngo Molnar * so if the following test fails, it implies 1865269fbe72SBen Widawsky * policy->nodes has movable memory only. 
1866d3eb1570SLai Jiangshan */ 1867269fbe72SBen Widawsky if (!nodes_intersects(policy->nodes, node_states[N_HIGH_MEMORY])) 1868d3eb1570SLai Jiangshan dynamic_policy_zone = ZONE_MOVABLE; 1869d3eb1570SLai Jiangshan 1870d3eb1570SLai Jiangshan return zone >= dynamic_policy_zone; 1871d3eb1570SLai Jiangshan } 1872d3eb1570SLai Jiangshan 187352cd3b07SLee Schermerhorn /* 187452cd3b07SLee Schermerhorn * Return a nodemask representing a mempolicy for filtering nodes for 187552cd3b07SLee Schermerhorn * page allocation 187652cd3b07SLee Schermerhorn */ 18778ca39e68SMuchun Song nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy) 187819770b32SMel Gorman { 1879b27abaccSDave Hansen int mode = policy->mode; 1880b27abaccSDave Hansen 188119770b32SMel Gorman /* Lower zones don't get a nodemask applied for MPOL_BIND */ 1882b27abaccSDave Hansen if (unlikely(mode == MPOL_BIND) && 1883d3eb1570SLai Jiangshan apply_policy_zone(policy, gfp_zone(gfp)) && 1884269fbe72SBen Widawsky cpuset_nodemask_valid_mems_allowed(&policy->nodes)) 1885269fbe72SBen Widawsky return &policy->nodes; 188619770b32SMel Gorman 1887b27abaccSDave Hansen if (mode == MPOL_PREFERRED_MANY) 1888b27abaccSDave Hansen return &policy->nodes; 1889b27abaccSDave Hansen 189019770b32SMel Gorman return NULL; 189119770b32SMel Gorman } 189219770b32SMel Gorman 1893b27abaccSDave Hansen /* 1894b27abaccSDave Hansen * Return the preferred node id for 'prefer' mempolicy, and return 1895b27abaccSDave Hansen * the given id for all other policies. 1896b27abaccSDave Hansen * 1897b27abaccSDave Hansen * policy_node() is always coupled with policy_nodemask(), which 1898b27abaccSDave Hansen * secures the nodemask limit for 'bind' and 'prefer-many' policy. 1899b27abaccSDave Hansen */ 1900f8fd5253SWei Yang static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd) 19011da177e4SLinus Torvalds { 19027858d7bcSFeng Tang if (policy->mode == MPOL_PREFERRED) { 1903269fbe72SBen Widawsky nd = first_node(policy->nodes); 19047858d7bcSFeng Tang } else { 190519770b32SMel Gorman /* 19066d840958SMichal Hocko * __GFP_THISNODE shouldn't even be used with the bind policy 19076d840958SMichal Hocko * because we might easily break the expectation to stay on the 19086d840958SMichal Hocko * requested node and not break the policy. 190919770b32SMel Gorman */ 19106d840958SMichal Hocko WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE)); 19111da177e4SLinus Torvalds } 19126d840958SMichal Hocko 191304ec6264SVlastimil Babka return nd; 19141da177e4SLinus Torvalds } 19151da177e4SLinus Torvalds 19161da177e4SLinus Torvalds /* Do dynamic interleaving for a process */ 19171da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy) 19181da177e4SLinus Torvalds { 191945816682SVlastimil Babka unsigned next; 19201da177e4SLinus Torvalds struct task_struct *me = current; 19211da177e4SLinus Torvalds 1922269fbe72SBen Widawsky next = next_node_in(me->il_prev, policy->nodes); 1923f5b087b5SDavid Rientjes if (next < MAX_NUMNODES) 192445816682SVlastimil Babka me->il_prev = next; 192545816682SVlastimil Babka return next; 19261da177e4SLinus Torvalds } 19271da177e4SLinus Torvalds 1928dc85da15SChristoph Lameter /* 1929dc85da15SChristoph Lameter * Depending on the memory policy provide a node from which to allocate the 1930dc85da15SChristoph Lameter * next slab entry. 
1931dc85da15SChristoph Lameter */ 19322a389610SDavid Rientjes unsigned int mempolicy_slab_node(void) 1933dc85da15SChristoph Lameter { 1934e7b691b0SAndi Kleen struct mempolicy *policy; 19352a389610SDavid Rientjes int node = numa_mem_id(); 1936e7b691b0SAndi Kleen 193738b031ddSVasily Averin if (!in_task()) 19382a389610SDavid Rientjes return node; 1939e7b691b0SAndi Kleen 1940e7b691b0SAndi Kleen policy = current->mempolicy; 19417858d7bcSFeng Tang if (!policy) 19422a389610SDavid Rientjes return node; 1943765c4507SChristoph Lameter 1944bea904d5SLee Schermerhorn switch (policy->mode) { 1945bea904d5SLee Schermerhorn case MPOL_PREFERRED: 1946269fbe72SBen Widawsky return first_node(policy->nodes); 1947bea904d5SLee Schermerhorn 1948dc85da15SChristoph Lameter case MPOL_INTERLEAVE: 1949dc85da15SChristoph Lameter return interleave_nodes(policy); 1950dc85da15SChristoph Lameter 1951b27abaccSDave Hansen case MPOL_BIND: 1952b27abaccSDave Hansen case MPOL_PREFERRED_MANY: 1953b27abaccSDave Hansen { 1954c33d6c06SMel Gorman struct zoneref *z; 1955c33d6c06SMel Gorman 1956dc85da15SChristoph Lameter /* 1957dc85da15SChristoph Lameter * Follow bind policy behavior and start allocation at the 1958dc85da15SChristoph Lameter * first node. 1959dc85da15SChristoph Lameter */ 196019770b32SMel Gorman struct zonelist *zonelist; 196119770b32SMel Gorman enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL); 1962c9634cf0SAneesh Kumar K.V zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK]; 1963c33d6c06SMel Gorman z = first_zones_zonelist(zonelist, highest_zoneidx, 1964269fbe72SBen Widawsky &policy->nodes); 1965c1093b74SPavel Tatashin return z->zone ? zone_to_nid(z->zone) : node; 1966dd1a239fSMel Gorman } 19677858d7bcSFeng Tang case MPOL_LOCAL: 19687858d7bcSFeng Tang return node; 1969dc85da15SChristoph Lameter 1970dc85da15SChristoph Lameter default: 1971bea904d5SLee Schermerhorn BUG(); 1972dc85da15SChristoph Lameter } 1973dc85da15SChristoph Lameter } 1974dc85da15SChristoph Lameter 1975fee83b3aSAndrew Morton /* 1976fee83b3aSAndrew Morton * Do static interleaving for a VMA with known offset @n. Returns the n'th 1977269fbe72SBen Widawsky * node in pol->nodes (starting from n=0), wrapping around if n exceeds the 1978fee83b3aSAndrew Morton * number of present nodes. 1979fee83b3aSAndrew Morton */ 198098c70baaSLaurent Dufour static unsigned offset_il_node(struct mempolicy *pol, unsigned long n) 19811da177e4SLinus Torvalds { 1982*276aeee1Syanghui nodemask_t nodemask = pol->nodes; 1983*276aeee1Syanghui unsigned int target, nnodes; 1984fee83b3aSAndrew Morton int i; 1985fee83b3aSAndrew Morton int nid; 1986*276aeee1Syanghui /* 1987*276aeee1Syanghui * The barrier will stabilize the nodemask in a register or on 1988*276aeee1Syanghui * the stack so that it will stop changing under the code. 1989*276aeee1Syanghui * 1990*276aeee1Syanghui * Between first_node() and next_node(), pol->nodes could be changed 1991*276aeee1Syanghui * by other threads. So we put pol->nodes in a local stack. 
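 *
 * Worked example: with a stable local copy nodemask = {0,2,4} and n = 7,
 * nnodes = 3 and target = 7 % 3 = 1, so node 2 is returned.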
1992*276aeee1Syanghui */ 1993*276aeee1Syanghui barrier(); 19941da177e4SLinus Torvalds 1995*276aeee1Syanghui nnodes = nodes_weight(nodemask); 1996f5b087b5SDavid Rientjes if (!nnodes) 1997f5b087b5SDavid Rientjes return numa_node_id(); 1998fee83b3aSAndrew Morton target = (unsigned int)n % nnodes; 1999*276aeee1Syanghui nid = first_node(nodemask); 2000fee83b3aSAndrew Morton for (i = 0; i < target; i++) 2001*276aeee1Syanghui nid = next_node(nid, nodemask); 20021da177e4SLinus Torvalds return nid; 20031da177e4SLinus Torvalds } 20041da177e4SLinus Torvalds 20055da7ca86SChristoph Lameter /* Determine a node number for interleave */ 20065da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol, 20075da7ca86SChristoph Lameter struct vm_area_struct *vma, unsigned long addr, int shift) 20085da7ca86SChristoph Lameter { 20095da7ca86SChristoph Lameter if (vma) { 20105da7ca86SChristoph Lameter unsigned long off; 20115da7ca86SChristoph Lameter 20123b98b087SNishanth Aravamudan /* 20133b98b087SNishanth Aravamudan * for small pages, there is no difference between 20143b98b087SNishanth Aravamudan * shift and PAGE_SHIFT, so the bit-shift is safe. 20153b98b087SNishanth Aravamudan * for huge pages, since vm_pgoff is in units of small 20163b98b087SNishanth Aravamudan * pages, we need to shift off the always 0 bits to get 20173b98b087SNishanth Aravamudan * a useful offset. 20183b98b087SNishanth Aravamudan */ 20193b98b087SNishanth Aravamudan BUG_ON(shift < PAGE_SHIFT); 20203b98b087SNishanth Aravamudan off = vma->vm_pgoff >> (shift - PAGE_SHIFT); 20215da7ca86SChristoph Lameter off += (addr - vma->vm_start) >> shift; 202298c70baaSLaurent Dufour return offset_il_node(pol, off); 20235da7ca86SChristoph Lameter } else 20245da7ca86SChristoph Lameter return interleave_nodes(pol); 20255da7ca86SChristoph Lameter } 20265da7ca86SChristoph Lameter 202700ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS 2028480eccf9SLee Schermerhorn /* 202904ec6264SVlastimil Babka * huge_node(@vma, @addr, @gfp_flags, @mpol) 2030b46e14acSFabian Frederick * @vma: virtual memory area whose policy is sought 2031b46e14acSFabian Frederick * @addr: address in @vma for shared policy lookup and interleave policy 2032b46e14acSFabian Frederick * @gfp_flags: for requested zone 2033b46e14acSFabian Frederick * @mpol: pointer to mempolicy pointer for reference counted mempolicy 2034b27abaccSDave Hansen * @nodemask: pointer to nodemask pointer for 'bind' and 'prefer-many' policy 2035480eccf9SLee Schermerhorn * 203604ec6264SVlastimil Babka * Returns a nid suitable for a huge page allocation and a pointer 203752cd3b07SLee Schermerhorn * to the struct mempolicy for conditional unref after allocation. 2038b27abaccSDave Hansen * If the effective policy is 'bind' or 'prefer-many', returns a pointer 2039b27abaccSDave Hansen * to the mempolicy's @nodemask for filtering the zonelist. 
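 *
 * Illustrative hugetlb caller sketch (roughly how dequeue_huge_page_vma()
 * uses it; treat the callee names here as an example, not a contract):
 *
 *	nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
 *	page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
 *	mpol_cond_put(mpol);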
2040c0ff7453SMiao Xie * 2041d26914d1SMel Gorman * Must be protected by read_mems_allowed_begin() 2042480eccf9SLee Schermerhorn */ 204304ec6264SVlastimil Babka int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags, 204404ec6264SVlastimil Babka struct mempolicy **mpol, nodemask_t **nodemask) 20455da7ca86SChristoph Lameter { 204604ec6264SVlastimil Babka int nid; 2047b27abaccSDave Hansen int mode; 20485da7ca86SChristoph Lameter 2049dd6eecb9SOleg Nesterov *mpol = get_vma_policy(vma, addr); 2050b27abaccSDave Hansen *nodemask = NULL; 2051b27abaccSDave Hansen mode = (*mpol)->mode; 20525da7ca86SChristoph Lameter 2053b27abaccSDave Hansen if (unlikely(mode == MPOL_INTERLEAVE)) { 205404ec6264SVlastimil Babka nid = interleave_nid(*mpol, vma, addr, 205504ec6264SVlastimil Babka huge_page_shift(hstate_vma(vma))); 205652cd3b07SLee Schermerhorn } else { 205704ec6264SVlastimil Babka nid = policy_node(gfp_flags, *mpol, numa_node_id()); 2058b27abaccSDave Hansen if (mode == MPOL_BIND || mode == MPOL_PREFERRED_MANY) 2059269fbe72SBen Widawsky *nodemask = &(*mpol)->nodes; 2060480eccf9SLee Schermerhorn } 206104ec6264SVlastimil Babka return nid; 20625da7ca86SChristoph Lameter } 206306808b08SLee Schermerhorn 206406808b08SLee Schermerhorn /* 206506808b08SLee Schermerhorn * init_nodemask_of_mempolicy 206606808b08SLee Schermerhorn * 206706808b08SLee Schermerhorn * If the current task's mempolicy is "default" [NULL], return 'false' 206806808b08SLee Schermerhorn * to indicate default policy. Otherwise, extract the policy nodemask 206906808b08SLee Schermerhorn * for 'bind' or 'interleave' policy into the argument nodemask, or 207006808b08SLee Schermerhorn * initialize the argument nodemask to contain the single node for 207106808b08SLee Schermerhorn * 'preferred' or 'local' policy and return 'true' to indicate presence 207206808b08SLee Schermerhorn * of non-default mempolicy. 207306808b08SLee Schermerhorn * 207406808b08SLee Schermerhorn * We don't bother with reference counting the mempolicy [mpol_get/put] 207506808b08SLee Schermerhorn * because the current task is examining it's own mempolicy and a task's 207606808b08SLee Schermerhorn * mempolicy is only ever changed by the task itself. 207706808b08SLee Schermerhorn * 207806808b08SLee Schermerhorn * N.B., it is the caller's responsibility to free a returned nodemask. 
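 *
 * Illustrative caller sketch (the hugetlb sysfs code follows roughly this
 * shape; details are a sketch, not a quote):
 *
 *	NODEMASK_ALLOC(nodemask_t, mask, GFP_KERNEL);
 *	if (mask && init_nodemask_of_mempolicy(mask))
 *		... honour the nodes in *mask ...
 *	else
 *		... fall back to all memory nodes ...
 *	NODEMASK_FREE(mask);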
207906808b08SLee Schermerhorn */ 208006808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask) 208106808b08SLee Schermerhorn { 208206808b08SLee Schermerhorn struct mempolicy *mempolicy; 208306808b08SLee Schermerhorn 208406808b08SLee Schermerhorn if (!(mask && current->mempolicy)) 208506808b08SLee Schermerhorn return false; 208606808b08SLee Schermerhorn 2087c0ff7453SMiao Xie task_lock(current); 208806808b08SLee Schermerhorn mempolicy = current->mempolicy; 208906808b08SLee Schermerhorn switch (mempolicy->mode) { 209006808b08SLee Schermerhorn case MPOL_PREFERRED: 2091b27abaccSDave Hansen case MPOL_PREFERRED_MANY: 209206808b08SLee Schermerhorn case MPOL_BIND: 209306808b08SLee Schermerhorn case MPOL_INTERLEAVE: 2094269fbe72SBen Widawsky *mask = mempolicy->nodes; 209506808b08SLee Schermerhorn break; 209606808b08SLee Schermerhorn 20977858d7bcSFeng Tang case MPOL_LOCAL: 2098269fbe72SBen Widawsky init_nodemask_of_node(mask, numa_node_id()); 20997858d7bcSFeng Tang break; 21007858d7bcSFeng Tang 210106808b08SLee Schermerhorn default: 210206808b08SLee Schermerhorn BUG(); 210306808b08SLee Schermerhorn } 2104c0ff7453SMiao Xie task_unlock(current); 210506808b08SLee Schermerhorn 210606808b08SLee Schermerhorn return true; 210706808b08SLee Schermerhorn } 210800ac59adSChen, Kenneth W #endif 21095da7ca86SChristoph Lameter 21106f48d0ebSDavid Rientjes /* 2111b26e517aSFeng Tang * mempolicy_in_oom_domain 21126f48d0ebSDavid Rientjes * 2113b26e517aSFeng Tang * If tsk's mempolicy is "bind", check for intersection between mask and 2114b26e517aSFeng Tang * the policy nodemask. Otherwise, return true for all other policies 2115b26e517aSFeng Tang * including "interleave", as a tsk with "interleave" policy may have 2116b26e517aSFeng Tang * memory allocated from all nodes in system. 21176f48d0ebSDavid Rientjes * 21186f48d0ebSDavid Rientjes * Takes task_lock(tsk) to prevent freeing of its mempolicy. 21196f48d0ebSDavid Rientjes */ 2120b26e517aSFeng Tang bool mempolicy_in_oom_domain(struct task_struct *tsk, 21216f48d0ebSDavid Rientjes const nodemask_t *mask) 21226f48d0ebSDavid Rientjes { 21236f48d0ebSDavid Rientjes struct mempolicy *mempolicy; 21246f48d0ebSDavid Rientjes bool ret = true; 21256f48d0ebSDavid Rientjes 21266f48d0ebSDavid Rientjes if (!mask) 21276f48d0ebSDavid Rientjes return ret; 2128b26e517aSFeng Tang 21296f48d0ebSDavid Rientjes task_lock(tsk); 21306f48d0ebSDavid Rientjes mempolicy = tsk->mempolicy; 2131b26e517aSFeng Tang if (mempolicy && mempolicy->mode == MPOL_BIND) 2132269fbe72SBen Widawsky ret = nodes_intersects(mempolicy->nodes, *mask); 21336f48d0ebSDavid Rientjes task_unlock(tsk); 2134b26e517aSFeng Tang 21356f48d0ebSDavid Rientjes return ret; 21366f48d0ebSDavid Rientjes } 21376f48d0ebSDavid Rientjes 21381da177e4SLinus Torvalds /* Allocate a page in interleaved policy. 21391da177e4SLinus Torvalds Own path because it needs to do special accounting. 
*/ 2140662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, 2141662f3a0bSAndi Kleen unsigned nid) 21421da177e4SLinus Torvalds { 21431da177e4SLinus Torvalds struct page *page; 21441da177e4SLinus Torvalds 214584172f4bSMatthew Wilcox (Oracle) page = __alloc_pages(gfp, order, nid, NULL); 21464518085eSKemi Wang /* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */ 21474518085eSKemi Wang if (!static_branch_likely(&vm_numa_stat_key)) 21484518085eSKemi Wang return page; 2149de55c8b2SAndrey Ryabinin if (page && page_to_nid(page) == nid) { 2150de55c8b2SAndrey Ryabinin preempt_disable(); 2151f19298b9SMel Gorman __count_numa_event(page_zone(page), NUMA_INTERLEAVE_HIT); 2152de55c8b2SAndrey Ryabinin preempt_enable(); 2153de55c8b2SAndrey Ryabinin } 21541da177e4SLinus Torvalds return page; 21551da177e4SLinus Torvalds } 21561da177e4SLinus Torvalds 21574c54d949SFeng Tang static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order, 21584c54d949SFeng Tang int nid, struct mempolicy *pol) 21594c54d949SFeng Tang { 21604c54d949SFeng Tang struct page *page; 21614c54d949SFeng Tang gfp_t preferred_gfp; 21624c54d949SFeng Tang 21634c54d949SFeng Tang /* 21644c54d949SFeng Tang * This is a two pass approach. The first pass will only try the 21654c54d949SFeng Tang * preferred nodes but skip the direct reclaim and allow the 21664c54d949SFeng Tang * allocation to fail, while the second pass will try all the 21674c54d949SFeng Tang * nodes in system. 21684c54d949SFeng Tang */ 21694c54d949SFeng Tang preferred_gfp = gfp | __GFP_NOWARN; 21704c54d949SFeng Tang preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL); 21714c54d949SFeng Tang page = __alloc_pages(preferred_gfp, order, nid, &pol->nodes); 21724c54d949SFeng Tang if (!page) 21734c54d949SFeng Tang page = __alloc_pages(gfp, order, numa_node_id(), NULL); 21744c54d949SFeng Tang 21754c54d949SFeng Tang return page; 21764c54d949SFeng Tang } 21774c54d949SFeng Tang 21781da177e4SLinus Torvalds /** 21790bbbc0b3SAndrea Arcangeli * alloc_pages_vma - Allocate a page for a VMA. 2180eb350739SMatthew Wilcox (Oracle) * @gfp: GFP flags. 21810bbbc0b3SAndrea Arcangeli * @order: Order of the GFP allocation. 21821da177e4SLinus Torvalds * @vma: Pointer to VMA or NULL if not available. 2183eb350739SMatthew Wilcox (Oracle) * @addr: Virtual address of the allocation. Must be inside @vma. 2184be97a41bSVlastimil Babka * @node: Which node to prefer for allocation (modulo policy). 2185eb350739SMatthew Wilcox (Oracle) * @hugepage: For hugepages try only the preferred node if possible. 21861da177e4SLinus Torvalds * 2187eb350739SMatthew Wilcox (Oracle) * Allocate a page for a specific address in @vma, using the appropriate 2188eb350739SMatthew Wilcox (Oracle) * NUMA policy. When @vma is not NULL the caller must hold the mmap_lock 2189eb350739SMatthew Wilcox (Oracle) * of the mm_struct of the VMA to prevent it from going away. Should be 2190eb350739SMatthew Wilcox (Oracle) * used for all allocations for pages that will be mapped into user space. 2191eb350739SMatthew Wilcox (Oracle) * 2192eb350739SMatthew Wilcox (Oracle) * Return: The page on success or NULL if allocation fails. 
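 *
 * Illustrative call from a fault path (a sketch; the GFP flags depend on
 * the caller):
 *
 *	page = alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, address,
 *			       numa_node_id(), false);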
21931da177e4SLinus Torvalds */ 2194eb350739SMatthew Wilcox (Oracle) struct page *alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, 219519deb769SDavid Rientjes unsigned long addr, int node, bool hugepage) 21961da177e4SLinus Torvalds { 2197cc9a6c87SMel Gorman struct mempolicy *pol; 2198c0ff7453SMiao Xie struct page *page; 219904ec6264SVlastimil Babka int preferred_nid; 2200be97a41bSVlastimil Babka nodemask_t *nmask; 22011da177e4SLinus Torvalds 2202dd6eecb9SOleg Nesterov pol = get_vma_policy(vma, addr); 2203cc9a6c87SMel Gorman 2204be97a41bSVlastimil Babka if (pol->mode == MPOL_INTERLEAVE) { 22051da177e4SLinus Torvalds unsigned nid; 22065da7ca86SChristoph Lameter 22078eac563cSAndi Kleen nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order); 220852cd3b07SLee Schermerhorn mpol_cond_put(pol); 22090bbbc0b3SAndrea Arcangeli page = alloc_page_interleave(gfp, order, nid); 2210be97a41bSVlastimil Babka goto out; 22111da177e4SLinus Torvalds } 22121da177e4SLinus Torvalds 22134c54d949SFeng Tang if (pol->mode == MPOL_PREFERRED_MANY) { 22144c54d949SFeng Tang page = alloc_pages_preferred_many(gfp, order, node, pol); 22154c54d949SFeng Tang mpol_cond_put(pol); 22164c54d949SFeng Tang goto out; 22174c54d949SFeng Tang } 22184c54d949SFeng Tang 221919deb769SDavid Rientjes if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) { 222019deb769SDavid Rientjes int hpage_node = node; 222119deb769SDavid Rientjes 222219deb769SDavid Rientjes /* 222319deb769SDavid Rientjes * For hugepage allocation and non-interleave policy which 222419deb769SDavid Rientjes * allows the current node (or other explicitly preferred 222519deb769SDavid Rientjes * node) we only try to allocate from the current/preferred 222619deb769SDavid Rientjes * node and don't fall back to other nodes, as the cost of 222719deb769SDavid Rientjes * remote accesses would likely offset THP benefits. 222819deb769SDavid Rientjes * 2229b27abaccSDave Hansen * If the policy is interleave or does not allow the current 223019deb769SDavid Rientjes * node in its nodemask, we allocate the standard way. 223119deb769SDavid Rientjes */ 22327858d7bcSFeng Tang if (pol->mode == MPOL_PREFERRED) 2233269fbe72SBen Widawsky hpage_node = first_node(pol->nodes); 223419deb769SDavid Rientjes 223519deb769SDavid Rientjes nmask = policy_nodemask(gfp, pol); 223619deb769SDavid Rientjes if (!nmask || node_isset(hpage_node, *nmask)) { 223719deb769SDavid Rientjes mpol_cond_put(pol); 2238cc638f32SVlastimil Babka /* 2239cc638f32SVlastimil Babka * First, try to allocate THP only on local node, but 2240cc638f32SVlastimil Babka * don't reclaim unnecessarily, just compact. 2241cc638f32SVlastimil Babka */ 224219deb769SDavid Rientjes page = __alloc_pages_node(hpage_node, 2243cc638f32SVlastimil Babka gfp | __GFP_THISNODE | __GFP_NORETRY, order); 224476e654ccSDavid Rientjes 224576e654ccSDavid Rientjes /* 224676e654ccSDavid Rientjes * If hugepage allocations are configured to always 224776e654ccSDavid Rientjes * synchronous compact or the vma has been madvised 224876e654ccSDavid Rientjes * to prefer hugepage backing, retry allowing remote 2249cc638f32SVlastimil Babka * memory with both reclaim and compact as well. 
225076e654ccSDavid Rientjes */ 225176e654ccSDavid Rientjes if (!page && (gfp & __GFP_DIRECT_RECLAIM)) 225276e654ccSDavid Rientjes page = __alloc_pages_node(hpage_node, 2253cc638f32SVlastimil Babka gfp, order); 225476e654ccSDavid Rientjes 225519deb769SDavid Rientjes goto out; 225619deb769SDavid Rientjes } 225719deb769SDavid Rientjes } 225819deb769SDavid Rientjes 2259077fcf11SAneesh Kumar K.V nmask = policy_nodemask(gfp, pol); 226004ec6264SVlastimil Babka preferred_nid = policy_node(gfp, pol, node); 226184172f4bSMatthew Wilcox (Oracle) page = __alloc_pages(gfp, order, preferred_nid, nmask); 2262d51e9894SVlastimil Babka mpol_cond_put(pol); 2263be97a41bSVlastimil Babka out: 2264077fcf11SAneesh Kumar K.V return page; 2265077fcf11SAneesh Kumar K.V } 226669262215SChristoph Hellwig EXPORT_SYMBOL(alloc_pages_vma); 2267077fcf11SAneesh Kumar K.V 22681da177e4SLinus Torvalds /** 2269d7f946d0SMatthew Wilcox (Oracle) * alloc_pages - Allocate pages. 22706421ec76SMatthew Wilcox (Oracle) * @gfp: GFP flags. 22716421ec76SMatthew Wilcox (Oracle) * @order: Power of two of number of pages to allocate. 22721da177e4SLinus Torvalds * 22736421ec76SMatthew Wilcox (Oracle) * Allocate 1 << @order contiguous pages. The physical address of the 22746421ec76SMatthew Wilcox (Oracle) * first page is naturally aligned (eg an order-3 allocation will be aligned 22756421ec76SMatthew Wilcox (Oracle) * to a multiple of 8 * PAGE_SIZE bytes). The NUMA policy of the current 22766421ec76SMatthew Wilcox (Oracle) * process is honoured when in process context. 22771da177e4SLinus Torvalds * 22786421ec76SMatthew Wilcox (Oracle) * Context: Can be called from any context, providing the appropriate GFP 22796421ec76SMatthew Wilcox (Oracle) * flags are used. 22806421ec76SMatthew Wilcox (Oracle) * Return: The page on success or NULL if allocation fails. 
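 *
 * Example:
 *
 *	struct page *p = alloc_pages(GFP_KERNEL, 2);	// 4 contiguous pages
 *	if (p)
 *		__free_pages(p, 2);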
22811da177e4SLinus Torvalds  */
2282d7f946d0SMatthew Wilcox (Oracle) struct page *alloc_pages(gfp_t gfp, unsigned order)
22831da177e4SLinus Torvalds {
22848d90274bSOleg Nesterov 	struct mempolicy *pol = &default_policy;
2285c0ff7453SMiao Xie 	struct page *page;
22861da177e4SLinus Torvalds 
22878d90274bSOleg Nesterov 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
22888d90274bSOleg Nesterov 		pol = get_task_policy(current);
228952cd3b07SLee Schermerhorn 
229052cd3b07SLee Schermerhorn 	/*
229152cd3b07SLee Schermerhorn 	 * No reference counting needed for current->mempolicy
229252cd3b07SLee Schermerhorn 	 * nor system default_policy
229352cd3b07SLee Schermerhorn 	 */
229445c4745aSLee Schermerhorn 	if (pol->mode == MPOL_INTERLEAVE)
2295c0ff7453SMiao Xie 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
22964c54d949SFeng Tang 	else if (pol->mode == MPOL_PREFERRED_MANY)
22974c54d949SFeng Tang 		page = alloc_pages_preferred_many(gfp, order,
22984c54d949SFeng Tang 				numa_node_id(), pol);
2299c0ff7453SMiao Xie 	else
230084172f4bSMatthew Wilcox (Oracle) 		page = __alloc_pages(gfp, order,
230104ec6264SVlastimil Babka 				policy_node(gfp, pol, numa_node_id()),
23025c4b4be3SAndi Kleen 				policy_nodemask(gfp, pol));
2303cc9a6c87SMel Gorman 
2304c0ff7453SMiao Xie 	return page;
23051da177e4SLinus Torvalds }
2306d7f946d0SMatthew Wilcox (Oracle) EXPORT_SYMBOL(alloc_pages);
23071da177e4SLinus Torvalds 
2308ef0855d3SOleg Nesterov int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2309ef0855d3SOleg Nesterov {
2310ef0855d3SOleg Nesterov 	struct mempolicy *pol = mpol_dup(vma_policy(src));
2311ef0855d3SOleg Nesterov 
2312ef0855d3SOleg Nesterov 	if (IS_ERR(pol))
2313ef0855d3SOleg Nesterov 		return PTR_ERR(pol);
2314ef0855d3SOleg Nesterov 	dst->vm_policy = pol;
2315ef0855d3SOleg Nesterov 	return 0;
2316ef0855d3SOleg Nesterov }
2317ef0855d3SOleg Nesterov 
23184225399aSPaul Jackson /*
2319846a16bfSLee Schermerhorn  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
23204225399aSPaul Jackson  * rebinds the mempolicy it's copying by calling mpol_rebind_policy()
23214225399aSPaul Jackson  * with the mems_allowed returned by cpuset_mems_allowed(). This
23224225399aSPaul Jackson  * keeps mempolicies cpuset relative after its cpuset moves. See
23234225399aSPaul Jackson  * further kernel/cpuset.c update_nodemask().
2324708c1bbcSMiao Xie  *
2325708c1bbcSMiao Xie  * current's mempolicy may be rebound by another task (the task that changes
2326708c1bbcSMiao Xie  * cpuset's mems), so we needn't do rebind work for the current task.
23274225399aSPaul Jackson */ 23284225399aSPaul Jackson 2329846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */ 2330846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old) 23311da177e4SLinus Torvalds { 23321da177e4SLinus Torvalds struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL); 23331da177e4SLinus Torvalds 23341da177e4SLinus Torvalds if (!new) 23351da177e4SLinus Torvalds return ERR_PTR(-ENOMEM); 2336708c1bbcSMiao Xie 2337708c1bbcSMiao Xie /* task's mempolicy is protected by alloc_lock */ 2338708c1bbcSMiao Xie if (old == current->mempolicy) { 2339708c1bbcSMiao Xie task_lock(current); 2340708c1bbcSMiao Xie *new = *old; 2341708c1bbcSMiao Xie task_unlock(current); 2342708c1bbcSMiao Xie } else 2343708c1bbcSMiao Xie *new = *old; 2344708c1bbcSMiao Xie 23454225399aSPaul Jackson if (current_cpuset_is_being_rebound()) { 23464225399aSPaul Jackson nodemask_t mems = cpuset_mems_allowed(current); 2347213980c0SVlastimil Babka mpol_rebind_policy(new, &mems); 23484225399aSPaul Jackson } 23491da177e4SLinus Torvalds atomic_set(&new->refcnt, 1); 23501da177e4SLinus Torvalds return new; 23511da177e4SLinus Torvalds } 23521da177e4SLinus Torvalds 23531da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */ 2354fcfb4dccSKOSAKI Motohiro bool __mpol_equal(struct mempolicy *a, struct mempolicy *b) 23551da177e4SLinus Torvalds { 23561da177e4SLinus Torvalds if (!a || !b) 2357fcfb4dccSKOSAKI Motohiro return false; 235845c4745aSLee Schermerhorn if (a->mode != b->mode) 2359fcfb4dccSKOSAKI Motohiro return false; 236019800502SBob Liu if (a->flags != b->flags) 2361fcfb4dccSKOSAKI Motohiro return false; 236219800502SBob Liu if (mpol_store_user_nodemask(a)) 236319800502SBob Liu if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask)) 2364fcfb4dccSKOSAKI Motohiro return false; 236519800502SBob Liu 236645c4745aSLee Schermerhorn switch (a->mode) { 236719770b32SMel Gorman case MPOL_BIND: 23681da177e4SLinus Torvalds case MPOL_INTERLEAVE: 23691da177e4SLinus Torvalds case MPOL_PREFERRED: 2370b27abaccSDave Hansen case MPOL_PREFERRED_MANY: 2371269fbe72SBen Widawsky return !!nodes_equal(a->nodes, b->nodes); 23727858d7bcSFeng Tang case MPOL_LOCAL: 23737858d7bcSFeng Tang return true; 23741da177e4SLinus Torvalds default: 23751da177e4SLinus Torvalds BUG(); 2376fcfb4dccSKOSAKI Motohiro return false; 23771da177e4SLinus Torvalds } 23781da177e4SLinus Torvalds } 23791da177e4SLinus Torvalds 23801da177e4SLinus Torvalds /* 23811da177e4SLinus Torvalds * Shared memory backing store policy support. 23821da177e4SLinus Torvalds * 23831da177e4SLinus Torvalds * Remember policies even when nobody has shared memory mapped. 23841da177e4SLinus Torvalds * The policies are kept in Red-Black tree linked from the inode. 23854a8c7bb5SNathan Zimmer * They are protected by the sp->lock rwlock, which should be held 23861da177e4SLinus Torvalds * for any accesses to the tree. 23871da177e4SLinus Torvalds */ 23881da177e4SLinus Torvalds 23894a8c7bb5SNathan Zimmer /* 23904a8c7bb5SNathan Zimmer * lookup first element intersecting start-end. 
Caller holds sp->lock for 23914a8c7bb5SNathan Zimmer * reading or for writing 23924a8c7bb5SNathan Zimmer */ 23931da177e4SLinus Torvalds static struct sp_node * 23941da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end) 23951da177e4SLinus Torvalds { 23961da177e4SLinus Torvalds struct rb_node *n = sp->root.rb_node; 23971da177e4SLinus Torvalds 23981da177e4SLinus Torvalds while (n) { 23991da177e4SLinus Torvalds struct sp_node *p = rb_entry(n, struct sp_node, nd); 24001da177e4SLinus Torvalds 24011da177e4SLinus Torvalds if (start >= p->end) 24021da177e4SLinus Torvalds n = n->rb_right; 24031da177e4SLinus Torvalds else if (end <= p->start) 24041da177e4SLinus Torvalds n = n->rb_left; 24051da177e4SLinus Torvalds else 24061da177e4SLinus Torvalds break; 24071da177e4SLinus Torvalds } 24081da177e4SLinus Torvalds if (!n) 24091da177e4SLinus Torvalds return NULL; 24101da177e4SLinus Torvalds for (;;) { 24111da177e4SLinus Torvalds struct sp_node *w = NULL; 24121da177e4SLinus Torvalds struct rb_node *prev = rb_prev(n); 24131da177e4SLinus Torvalds if (!prev) 24141da177e4SLinus Torvalds break; 24151da177e4SLinus Torvalds w = rb_entry(prev, struct sp_node, nd); 24161da177e4SLinus Torvalds if (w->end <= start) 24171da177e4SLinus Torvalds break; 24181da177e4SLinus Torvalds n = prev; 24191da177e4SLinus Torvalds } 24201da177e4SLinus Torvalds return rb_entry(n, struct sp_node, nd); 24211da177e4SLinus Torvalds } 24221da177e4SLinus Torvalds 24234a8c7bb5SNathan Zimmer /* 24244a8c7bb5SNathan Zimmer * Insert a new shared policy into the list. Caller holds sp->lock for 24254a8c7bb5SNathan Zimmer * writing. 24264a8c7bb5SNathan Zimmer */ 24271da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new) 24281da177e4SLinus Torvalds { 24291da177e4SLinus Torvalds struct rb_node **p = &sp->root.rb_node; 24301da177e4SLinus Torvalds struct rb_node *parent = NULL; 24311da177e4SLinus Torvalds struct sp_node *nd; 24321da177e4SLinus Torvalds 24331da177e4SLinus Torvalds while (*p) { 24341da177e4SLinus Torvalds parent = *p; 24351da177e4SLinus Torvalds nd = rb_entry(parent, struct sp_node, nd); 24361da177e4SLinus Torvalds if (new->start < nd->start) 24371da177e4SLinus Torvalds p = &(*p)->rb_left; 24381da177e4SLinus Torvalds else if (new->end > nd->end) 24391da177e4SLinus Torvalds p = &(*p)->rb_right; 24401da177e4SLinus Torvalds else 24411da177e4SLinus Torvalds BUG(); 24421da177e4SLinus Torvalds } 24431da177e4SLinus Torvalds rb_link_node(&new->nd, parent, p); 24441da177e4SLinus Torvalds rb_insert_color(&new->nd, &sp->root); 2445140d5a49SPaul Mundt pr_debug("inserting %lx-%lx: %d\n", new->start, new->end, 244645c4745aSLee Schermerhorn new->policy ? 
new->policy->mode : 0); 24471da177e4SLinus Torvalds } 24481da177e4SLinus Torvalds 24491da177e4SLinus Torvalds /* Find shared policy intersecting idx */ 24501da177e4SLinus Torvalds struct mempolicy * 24511da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx) 24521da177e4SLinus Torvalds { 24531da177e4SLinus Torvalds struct mempolicy *pol = NULL; 24541da177e4SLinus Torvalds struct sp_node *sn; 24551da177e4SLinus Torvalds 24561da177e4SLinus Torvalds if (!sp->root.rb_node) 24571da177e4SLinus Torvalds return NULL; 24584a8c7bb5SNathan Zimmer read_lock(&sp->lock); 24591da177e4SLinus Torvalds sn = sp_lookup(sp, idx, idx+1); 24601da177e4SLinus Torvalds if (sn) { 24611da177e4SLinus Torvalds mpol_get(sn->policy); 24621da177e4SLinus Torvalds pol = sn->policy; 24631da177e4SLinus Torvalds } 24644a8c7bb5SNathan Zimmer read_unlock(&sp->lock); 24651da177e4SLinus Torvalds return pol; 24661da177e4SLinus Torvalds } 24671da177e4SLinus Torvalds 246863f74ca2SKOSAKI Motohiro static void sp_free(struct sp_node *n) 246963f74ca2SKOSAKI Motohiro { 247063f74ca2SKOSAKI Motohiro mpol_put(n->policy); 247163f74ca2SKOSAKI Motohiro kmem_cache_free(sn_cache, n); 247263f74ca2SKOSAKI Motohiro } 247363f74ca2SKOSAKI Motohiro 2474771fb4d8SLee Schermerhorn /** 2475771fb4d8SLee Schermerhorn * mpol_misplaced - check whether current page node is valid in policy 2476771fb4d8SLee Schermerhorn * 2477b46e14acSFabian Frederick * @page: page to be checked 2478b46e14acSFabian Frederick * @vma: vm area where page mapped 2479b46e14acSFabian Frederick * @addr: virtual address where page mapped 2480771fb4d8SLee Schermerhorn * 2481771fb4d8SLee Schermerhorn * Lookup current policy node id for vma,addr and "compare to" page's 24825f076944SMatthew Wilcox (Oracle) * node id. Policy determination "mimics" alloc_page_vma(). 2483771fb4d8SLee Schermerhorn * Called from fault path where we know the vma and faulting address. 24845f076944SMatthew Wilcox (Oracle) * 2485062db293SBaolin Wang * Return: NUMA_NO_NODE if the page is in a node that is valid for this 2486062db293SBaolin Wang * policy, or a suitable node ID to allocate a replacement page from. 
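 *
 * Sketch of the expected calling pattern (illustrative only; the real
 * callers are the NUMA hinting fault paths, e.g. do_numa_page()):
 *
 *	int nid = mpol_misplaced(page, vma, addr);
 *
 *	if (nid != NUMA_NO_NODE)
 *		migrate_misplaced_page(page, vma, nid);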
2487771fb4d8SLee Schermerhorn */ 2488771fb4d8SLee Schermerhorn int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr) 2489771fb4d8SLee Schermerhorn { 2490771fb4d8SLee Schermerhorn struct mempolicy *pol; 2491c33d6c06SMel Gorman struct zoneref *z; 2492771fb4d8SLee Schermerhorn int curnid = page_to_nid(page); 2493771fb4d8SLee Schermerhorn unsigned long pgoff; 249490572890SPeter Zijlstra int thiscpu = raw_smp_processor_id(); 249590572890SPeter Zijlstra int thisnid = cpu_to_node(thiscpu); 249698fa15f3SAnshuman Khandual int polnid = NUMA_NO_NODE; 2497062db293SBaolin Wang int ret = NUMA_NO_NODE; 2498771fb4d8SLee Schermerhorn 2499dd6eecb9SOleg Nesterov pol = get_vma_policy(vma, addr); 2500771fb4d8SLee Schermerhorn if (!(pol->flags & MPOL_F_MOF)) 2501771fb4d8SLee Schermerhorn goto out; 2502771fb4d8SLee Schermerhorn 2503771fb4d8SLee Schermerhorn switch (pol->mode) { 2504771fb4d8SLee Schermerhorn case MPOL_INTERLEAVE: 2505771fb4d8SLee Schermerhorn pgoff = vma->vm_pgoff; 2506771fb4d8SLee Schermerhorn pgoff += (addr - vma->vm_start) >> PAGE_SHIFT; 250798c70baaSLaurent Dufour polnid = offset_il_node(pol, pgoff); 2508771fb4d8SLee Schermerhorn break; 2509771fb4d8SLee Schermerhorn 2510771fb4d8SLee Schermerhorn case MPOL_PREFERRED: 2511b27abaccSDave Hansen if (node_isset(curnid, pol->nodes)) 2512b27abaccSDave Hansen goto out; 2513269fbe72SBen Widawsky polnid = first_node(pol->nodes); 2514771fb4d8SLee Schermerhorn break; 2515771fb4d8SLee Schermerhorn 25167858d7bcSFeng Tang case MPOL_LOCAL: 25177858d7bcSFeng Tang polnid = numa_node_id(); 25187858d7bcSFeng Tang break; 25197858d7bcSFeng Tang 2520771fb4d8SLee Schermerhorn case MPOL_BIND: 2521bda420b9SHuang Ying /* Optimize placement among multiple nodes via NUMA balancing */ 2522bda420b9SHuang Ying if (pol->flags & MPOL_F_MORON) { 2523269fbe72SBen Widawsky if (node_isset(thisnid, pol->nodes)) 2524bda420b9SHuang Ying break; 2525bda420b9SHuang Ying goto out; 2526bda420b9SHuang Ying } 2527b27abaccSDave Hansen fallthrough; 2528c33d6c06SMel Gorman 2529b27abaccSDave Hansen case MPOL_PREFERRED_MANY: 2530771fb4d8SLee Schermerhorn /* 2531771fb4d8SLee Schermerhorn * use current page if in policy nodemask, 2532771fb4d8SLee Schermerhorn * else select nearest allowed node, if any. 2533771fb4d8SLee Schermerhorn * If no allowed nodes, use current [!misplaced]. 
2534771fb4d8SLee Schermerhorn 		 */
2535269fbe72SBen Widawsky 		if (node_isset(curnid, pol->nodes))
2536771fb4d8SLee Schermerhorn 			goto out;
2537c33d6c06SMel Gorman 		z = first_zones_zonelist(
2538771fb4d8SLee Schermerhorn 				node_zonelist(numa_node_id(), GFP_HIGHUSER),
2539771fb4d8SLee Schermerhorn 				gfp_zone(GFP_HIGHUSER),
2540269fbe72SBen Widawsky 				&pol->nodes);
2541c1093b74SPavel Tatashin 		polnid = zone_to_nid(z->zone);
2542771fb4d8SLee Schermerhorn 		break;
2543771fb4d8SLee Schermerhorn 
2544771fb4d8SLee Schermerhorn 	default:
2545771fb4d8SLee Schermerhorn 		BUG();
2546771fb4d8SLee Schermerhorn 	}
25475606e387SMel Gorman 
25485606e387SMel Gorman 	/* Migrate the page towards the node whose CPU is referencing it */
2549e42c8ff2SMel Gorman 	if (pol->flags & MPOL_F_MORON) {
255090572890SPeter Zijlstra 		polnid = thisnid;
25515606e387SMel Gorman 
255210f39042SRik van Riel 		if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2553de1c9ce6SRik van Riel 			goto out;
2554de1c9ce6SRik van Riel 	}
2555e42c8ff2SMel Gorman 
2556771fb4d8SLee Schermerhorn 	if (curnid != polnid)
2557771fb4d8SLee Schermerhorn 		ret = polnid;
2558771fb4d8SLee Schermerhorn out:
2559771fb4d8SLee Schermerhorn 	mpol_cond_put(pol);
2560771fb4d8SLee Schermerhorn 
2561771fb4d8SLee Schermerhorn 	return ret;
2562771fb4d8SLee Schermerhorn }
2563771fb4d8SLee Schermerhorn 
2564c11600e4SDavid Rientjes /*
2565c11600e4SDavid Rientjes  * Drop the (possibly final) reference to task->mempolicy. It needs to be
2566c11600e4SDavid Rientjes  * dropped after task->mempolicy is set to NULL so that any allocation done as
2567c11600e4SDavid Rientjes  * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2568c11600e4SDavid Rientjes  * policy.
2569c11600e4SDavid Rientjes  */
2570c11600e4SDavid Rientjes void mpol_put_task_policy(struct task_struct *task)
2571c11600e4SDavid Rientjes {
2572c11600e4SDavid Rientjes 	struct mempolicy *pol;
2573c11600e4SDavid Rientjes 
2574c11600e4SDavid Rientjes 	task_lock(task);
2575c11600e4SDavid Rientjes 	pol = task->mempolicy;
2576c11600e4SDavid Rientjes 	task->mempolicy = NULL;
2577c11600e4SDavid Rientjes 	task_unlock(task);
2578c11600e4SDavid Rientjes 	mpol_put(pol);
2579c11600e4SDavid Rientjes }
2580c11600e4SDavid Rientjes 
25811da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n)
25821da177e4SLinus Torvalds {
2583140d5a49SPaul Mundt 	pr_debug("deleting %lx-%lx\n", n->start, n->end);
25841da177e4SLinus Torvalds 	rb_erase(&n->nd, &sp->root);
258563f74ca2SKOSAKI Motohiro 	sp_free(n);
25861da177e4SLinus Torvalds }
25871da177e4SLinus Torvalds 
258842288fe3SMel Gorman static void sp_node_init(struct sp_node *node, unsigned long start,
258942288fe3SMel Gorman 			unsigned long end, struct mempolicy *pol)
259042288fe3SMel Gorman {
259142288fe3SMel Gorman 	node->start = start;
259242288fe3SMel Gorman 	node->end = end;
259342288fe3SMel Gorman 	node->policy = pol;
259442288fe3SMel Gorman }
259542288fe3SMel Gorman 
2596dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2597dbcb0f19SAdrian Bunk 				struct mempolicy *pol)
25981da177e4SLinus Torvalds {
2599869833f2SKOSAKI Motohiro 	struct sp_node *n;
2600869833f2SKOSAKI Motohiro 	struct mempolicy *newpol;
26011da177e4SLinus Torvalds 
2602869833f2SKOSAKI Motohiro 	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
26031da177e4SLinus Torvalds 	if (!n)
26041da177e4SLinus Torvalds 		return NULL;
2605869833f2SKOSAKI Motohiro 
2606869833f2SKOSAKI Motohiro 	newpol = mpol_dup(pol);
2607869833f2SKOSAKI Motohiro 	if (IS_ERR(newpol)) {
2608869833f2SKOSAKI
Motohiro kmem_cache_free(sn_cache, n); 2609869833f2SKOSAKI Motohiro return NULL; 2610869833f2SKOSAKI Motohiro } 2611869833f2SKOSAKI Motohiro newpol->flags |= MPOL_F_SHARED; 261242288fe3SMel Gorman sp_node_init(n, start, end, newpol); 2613869833f2SKOSAKI Motohiro 26141da177e4SLinus Torvalds return n; 26151da177e4SLinus Torvalds } 26161da177e4SLinus Torvalds 26171da177e4SLinus Torvalds /* Replace a policy range. */ 26181da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start, 26191da177e4SLinus Torvalds unsigned long end, struct sp_node *new) 26201da177e4SLinus Torvalds { 2621b22d127aSMel Gorman struct sp_node *n; 262242288fe3SMel Gorman struct sp_node *n_new = NULL; 262342288fe3SMel Gorman struct mempolicy *mpol_new = NULL; 2624b22d127aSMel Gorman int ret = 0; 26251da177e4SLinus Torvalds 262642288fe3SMel Gorman restart: 26274a8c7bb5SNathan Zimmer write_lock(&sp->lock); 26281da177e4SLinus Torvalds n = sp_lookup(sp, start, end); 26291da177e4SLinus Torvalds /* Take care of old policies in the same range. */ 26301da177e4SLinus Torvalds while (n && n->start < end) { 26311da177e4SLinus Torvalds struct rb_node *next = rb_next(&n->nd); 26321da177e4SLinus Torvalds if (n->start >= start) { 26331da177e4SLinus Torvalds if (n->end <= end) 26341da177e4SLinus Torvalds sp_delete(sp, n); 26351da177e4SLinus Torvalds else 26361da177e4SLinus Torvalds n->start = end; 26371da177e4SLinus Torvalds } else { 26381da177e4SLinus Torvalds /* Old policy spanning whole new range. */ 26391da177e4SLinus Torvalds if (n->end > end) { 264042288fe3SMel Gorman if (!n_new) 264142288fe3SMel Gorman goto alloc_new; 264242288fe3SMel Gorman 264342288fe3SMel Gorman *mpol_new = *n->policy; 264442288fe3SMel Gorman atomic_set(&mpol_new->refcnt, 1); 26457880639cSKOSAKI Motohiro sp_node_init(n_new, end, n->end, mpol_new); 26461da177e4SLinus Torvalds n->end = start; 26475ca39575SHillf Danton sp_insert(sp, n_new); 264842288fe3SMel Gorman n_new = NULL; 264942288fe3SMel Gorman mpol_new = NULL; 26501da177e4SLinus Torvalds break; 26511da177e4SLinus Torvalds } else 26521da177e4SLinus Torvalds n->end = start; 26531da177e4SLinus Torvalds } 26541da177e4SLinus Torvalds if (!next) 26551da177e4SLinus Torvalds break; 26561da177e4SLinus Torvalds n = rb_entry(next, struct sp_node, nd); 26571da177e4SLinus Torvalds } 26581da177e4SLinus Torvalds if (new) 26591da177e4SLinus Torvalds sp_insert(sp, new); 26604a8c7bb5SNathan Zimmer write_unlock(&sp->lock); 266142288fe3SMel Gorman ret = 0; 266242288fe3SMel Gorman 266342288fe3SMel Gorman err_out: 266442288fe3SMel Gorman if (mpol_new) 266542288fe3SMel Gorman mpol_put(mpol_new); 266642288fe3SMel Gorman if (n_new) 266742288fe3SMel Gorman kmem_cache_free(sn_cache, n_new); 266842288fe3SMel Gorman 2669b22d127aSMel Gorman return ret; 267042288fe3SMel Gorman 267142288fe3SMel Gorman alloc_new: 26724a8c7bb5SNathan Zimmer write_unlock(&sp->lock); 267342288fe3SMel Gorman ret = -ENOMEM; 267442288fe3SMel Gorman n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL); 267542288fe3SMel Gorman if (!n_new) 267642288fe3SMel Gorman goto err_out; 267742288fe3SMel Gorman mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL); 267842288fe3SMel Gorman if (!mpol_new) 267942288fe3SMel Gorman goto err_out; 268042288fe3SMel Gorman goto restart; 26811da177e4SLinus Torvalds } 26821da177e4SLinus Torvalds 268371fe804bSLee Schermerhorn /** 268471fe804bSLee Schermerhorn * mpol_shared_policy_init - initialize shared policy for inode 268571fe804bSLee Schermerhorn * @sp: pointer to inode shared policy 
268671fe804bSLee Schermerhorn * @mpol: struct mempolicy to install 268771fe804bSLee Schermerhorn * 268871fe804bSLee Schermerhorn * Install non-NULL @mpol in inode's shared policy rb-tree. 268971fe804bSLee Schermerhorn * On entry, the current task has a reference on a non-NULL @mpol. 269071fe804bSLee Schermerhorn * This must be released on exit. 26914bfc4495SKAMEZAWA Hiroyuki * This is called at get_inode() calls and we can use GFP_KERNEL. 269271fe804bSLee Schermerhorn */ 269371fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) 26947339ff83SRobin Holt { 269558568d2aSMiao Xie int ret; 269658568d2aSMiao Xie 269771fe804bSLee Schermerhorn sp->root = RB_ROOT; /* empty tree == default mempolicy */ 26984a8c7bb5SNathan Zimmer rwlock_init(&sp->lock); 26997339ff83SRobin Holt 270071fe804bSLee Schermerhorn if (mpol) { 27017339ff83SRobin Holt struct vm_area_struct pvma; 270271fe804bSLee Schermerhorn struct mempolicy *new; 27034bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 27047339ff83SRobin Holt 27054bfc4495SKAMEZAWA Hiroyuki if (!scratch) 27065c0c1654SLee Schermerhorn goto put_mpol; 270771fe804bSLee Schermerhorn /* contextualize the tmpfs mount point mempolicy */ 270871fe804bSLee Schermerhorn new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask); 270915d77835SLee Schermerhorn if (IS_ERR(new)) 27100cae3457SDan Carpenter goto free_scratch; /* no valid nodemask intersection */ 271158568d2aSMiao Xie 271258568d2aSMiao Xie task_lock(current); 27134bfc4495SKAMEZAWA Hiroyuki ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch); 271458568d2aSMiao Xie task_unlock(current); 271515d77835SLee Schermerhorn if (ret) 27165c0c1654SLee Schermerhorn goto put_new; 271771fe804bSLee Schermerhorn 271871fe804bSLee Schermerhorn /* Create pseudo-vma that contains just the policy */ 27192c4541e2SKirill A. Shutemov vma_init(&pvma, NULL); 272071fe804bSLee Schermerhorn pvma.vm_end = TASK_SIZE; /* policy covers entire file */ 272171fe804bSLee Schermerhorn mpol_set_shared_policy(sp, &pvma, new); /* adds ref */ 272215d77835SLee Schermerhorn 27235c0c1654SLee Schermerhorn put_new: 272471fe804bSLee Schermerhorn mpol_put(new); /* drop initial ref */ 27250cae3457SDan Carpenter free_scratch: 27264bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 27275c0c1654SLee Schermerhorn put_mpol: 27285c0c1654SLee Schermerhorn mpol_put(mpol); /* drop our incoming ref on sb mpol */ 27297339ff83SRobin Holt } 27307339ff83SRobin Holt } 27317339ff83SRobin Holt 27321da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info, 27331da177e4SLinus Torvalds struct vm_area_struct *vma, struct mempolicy *npol) 27341da177e4SLinus Torvalds { 27351da177e4SLinus Torvalds int err; 27361da177e4SLinus Torvalds struct sp_node *new = NULL; 27371da177e4SLinus Torvalds unsigned long sz = vma_pages(vma); 27381da177e4SLinus Torvalds 2739028fec41SDavid Rientjes pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n", 27401da177e4SLinus Torvalds vma->vm_pgoff, 274145c4745aSLee Schermerhorn sz, npol ? npol->mode : -1, 2742028fec41SDavid Rientjes npol ? npol->flags : -1, 2743269fbe72SBen Widawsky npol ? 
nodes_addr(npol->nodes)[0] : NUMA_NO_NODE); 27441da177e4SLinus Torvalds 27451da177e4SLinus Torvalds if (npol) { 27461da177e4SLinus Torvalds new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol); 27471da177e4SLinus Torvalds if (!new) 27481da177e4SLinus Torvalds return -ENOMEM; 27491da177e4SLinus Torvalds } 27501da177e4SLinus Torvalds err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new); 27511da177e4SLinus Torvalds if (err && new) 275263f74ca2SKOSAKI Motohiro sp_free(new); 27531da177e4SLinus Torvalds return err; 27541da177e4SLinus Torvalds } 27551da177e4SLinus Torvalds 27561da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */ 27571da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p) 27581da177e4SLinus Torvalds { 27591da177e4SLinus Torvalds struct sp_node *n; 27601da177e4SLinus Torvalds struct rb_node *next; 27611da177e4SLinus Torvalds 27621da177e4SLinus Torvalds if (!p->root.rb_node) 27631da177e4SLinus Torvalds return; 27644a8c7bb5SNathan Zimmer write_lock(&p->lock); 27651da177e4SLinus Torvalds next = rb_first(&p->root); 27661da177e4SLinus Torvalds while (next) { 27671da177e4SLinus Torvalds n = rb_entry(next, struct sp_node, nd); 27681da177e4SLinus Torvalds next = rb_next(&n->nd); 276963f74ca2SKOSAKI Motohiro sp_delete(p, n); 27701da177e4SLinus Torvalds } 27714a8c7bb5SNathan Zimmer write_unlock(&p->lock); 27721da177e4SLinus Torvalds } 27731da177e4SLinus Torvalds 27741a687c2eSMel Gorman #ifdef CONFIG_NUMA_BALANCING 2775c297663cSMel Gorman static int __initdata numabalancing_override; 27761a687c2eSMel Gorman 27771a687c2eSMel Gorman static void __init check_numabalancing_enable(void) 27781a687c2eSMel Gorman { 27791a687c2eSMel Gorman bool numabalancing_default = false; 27801a687c2eSMel Gorman 27811a687c2eSMel Gorman if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED)) 27821a687c2eSMel Gorman numabalancing_default = true; 27831a687c2eSMel Gorman 2784c297663cSMel Gorman /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */ 2785c297663cSMel Gorman if (numabalancing_override) 2786c297663cSMel Gorman set_numabalancing_state(numabalancing_override == 1); 2787c297663cSMel Gorman 2788b0dc2b9bSMel Gorman if (num_online_nodes() > 1 && !numabalancing_override) { 2789756a025fSJoe Perches pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n", 2790c297663cSMel Gorman numabalancing_default ? 
"Enabling" : "Disabling"); 27911a687c2eSMel Gorman set_numabalancing_state(numabalancing_default); 27921a687c2eSMel Gorman } 27931a687c2eSMel Gorman } 27941a687c2eSMel Gorman 27951a687c2eSMel Gorman static int __init setup_numabalancing(char *str) 27961a687c2eSMel Gorman { 27971a687c2eSMel Gorman int ret = 0; 27981a687c2eSMel Gorman if (!str) 27991a687c2eSMel Gorman goto out; 28001a687c2eSMel Gorman 28011a687c2eSMel Gorman if (!strcmp(str, "enable")) { 2802c297663cSMel Gorman numabalancing_override = 1; 28031a687c2eSMel Gorman ret = 1; 28041a687c2eSMel Gorman } else if (!strcmp(str, "disable")) { 2805c297663cSMel Gorman numabalancing_override = -1; 28061a687c2eSMel Gorman ret = 1; 28071a687c2eSMel Gorman } 28081a687c2eSMel Gorman out: 28091a687c2eSMel Gorman if (!ret) 28104a404beaSAndrew Morton pr_warn("Unable to parse numa_balancing=\n"); 28111a687c2eSMel Gorman 28121a687c2eSMel Gorman return ret; 28131a687c2eSMel Gorman } 28141a687c2eSMel Gorman __setup("numa_balancing=", setup_numabalancing); 28151a687c2eSMel Gorman #else 28161a687c2eSMel Gorman static inline void __init check_numabalancing_enable(void) 28171a687c2eSMel Gorman { 28181a687c2eSMel Gorman } 28191a687c2eSMel Gorman #endif /* CONFIG_NUMA_BALANCING */ 28201a687c2eSMel Gorman 28211da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */ 28221da177e4SLinus Torvalds void __init numa_policy_init(void) 28231da177e4SLinus Torvalds { 2824b71636e2SPaul Mundt nodemask_t interleave_nodes; 2825b71636e2SPaul Mundt unsigned long largest = 0; 2826b71636e2SPaul Mundt int nid, prefer = 0; 2827b71636e2SPaul Mundt 28281da177e4SLinus Torvalds policy_cache = kmem_cache_create("numa_policy", 28291da177e4SLinus Torvalds sizeof(struct mempolicy), 283020c2df83SPaul Mundt 0, SLAB_PANIC, NULL); 28311da177e4SLinus Torvalds 28321da177e4SLinus Torvalds sn_cache = kmem_cache_create("shared_policy_node", 28331da177e4SLinus Torvalds sizeof(struct sp_node), 283420c2df83SPaul Mundt 0, SLAB_PANIC, NULL); 28351da177e4SLinus Torvalds 28365606e387SMel Gorman for_each_node(nid) { 28375606e387SMel Gorman preferred_node_policy[nid] = (struct mempolicy) { 28385606e387SMel Gorman .refcnt = ATOMIC_INIT(1), 28395606e387SMel Gorman .mode = MPOL_PREFERRED, 28405606e387SMel Gorman .flags = MPOL_F_MOF | MPOL_F_MORON, 2841269fbe72SBen Widawsky .nodes = nodemask_of_node(nid), 28425606e387SMel Gorman }; 28435606e387SMel Gorman } 28445606e387SMel Gorman 2845b71636e2SPaul Mundt /* 2846b71636e2SPaul Mundt * Set interleaving policy for system init. Interleaving is only 2847b71636e2SPaul Mundt * enabled across suitably sized nodes (default is >= 16MB), or 2848b71636e2SPaul Mundt * fall back to the largest node if they're all smaller. 2849b71636e2SPaul Mundt */ 2850b71636e2SPaul Mundt nodes_clear(interleave_nodes); 285101f13bd6SLai Jiangshan for_each_node_state(nid, N_MEMORY) { 2852b71636e2SPaul Mundt unsigned long total_pages = node_present_pages(nid); 28531da177e4SLinus Torvalds 2854b71636e2SPaul Mundt /* Preserve the largest node */ 2855b71636e2SPaul Mundt if (largest < total_pages) { 2856b71636e2SPaul Mundt largest = total_pages; 2857b71636e2SPaul Mundt prefer = nid; 2858b71636e2SPaul Mundt } 2859b71636e2SPaul Mundt 2860b71636e2SPaul Mundt /* Interleave this node? 
*/ 2861b71636e2SPaul Mundt if ((total_pages << PAGE_SHIFT) >= (16 << 20)) 2862b71636e2SPaul Mundt node_set(nid, interleave_nodes); 2863b71636e2SPaul Mundt } 2864b71636e2SPaul Mundt 2865b71636e2SPaul Mundt /* All too small, use the largest */ 2866b71636e2SPaul Mundt if (unlikely(nodes_empty(interleave_nodes))) 2867b71636e2SPaul Mundt node_set(prefer, interleave_nodes); 2868b71636e2SPaul Mundt 2869028fec41SDavid Rientjes if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes)) 2870b1de0d13SMitchel Humpherys pr_err("%s: interleaving failed\n", __func__); 28711a687c2eSMel Gorman 28721a687c2eSMel Gorman check_numabalancing_enable(); 28731da177e4SLinus Torvalds } 28741da177e4SLinus Torvalds 28758bccd85fSChristoph Lameter /* Reset policy of current process to default */ 28761da177e4SLinus Torvalds void numa_default_policy(void) 28771da177e4SLinus Torvalds { 2878028fec41SDavid Rientjes do_set_mempolicy(MPOL_DEFAULT, 0, NULL); 28791da177e4SLinus Torvalds } 288068860ec1SPaul Jackson 28814225399aSPaul Jackson /* 2882095f1fc4SLee Schermerhorn * Parse and format mempolicy from/to strings 2883095f1fc4SLee Schermerhorn */ 2884095f1fc4SLee Schermerhorn 2885345ace9cSLee Schermerhorn static const char * const policy_modes[] = 2886345ace9cSLee Schermerhorn { 2887345ace9cSLee Schermerhorn [MPOL_DEFAULT] = "default", 2888345ace9cSLee Schermerhorn [MPOL_PREFERRED] = "prefer", 2889345ace9cSLee Schermerhorn [MPOL_BIND] = "bind", 2890345ace9cSLee Schermerhorn [MPOL_INTERLEAVE] = "interleave", 2891d3a71033SLee Schermerhorn [MPOL_LOCAL] = "local", 2892b27abaccSDave Hansen [MPOL_PREFERRED_MANY] = "prefer (many)", 2893345ace9cSLee Schermerhorn }; 28941a75a6c8SChristoph Lameter 2895095f1fc4SLee Schermerhorn 2896095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS 2897095f1fc4SLee Schermerhorn /** 2898f2a07f40SHugh Dickins * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option. 2899095f1fc4SLee Schermerhorn * @str: string containing mempolicy to parse 290071fe804bSLee Schermerhorn * @mpol: pointer to struct mempolicy pointer, returned on success. 
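 *
 * Typical @str values come from tmpfs "mpol=" mount options, for example
 * "interleave:0-3", "bind=static:0,2" or "local"; the accepted format is
 * described below.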
2901095f1fc4SLee Schermerhorn * 2902095f1fc4SLee Schermerhorn * Format of input: 2903095f1fc4SLee Schermerhorn * <mode>[=<flags>][:<nodelist>] 2904095f1fc4SLee Schermerhorn * 290571fe804bSLee Schermerhorn * On success, returns 0, else 1 2906095f1fc4SLee Schermerhorn */ 2907a7a88b23SHugh Dickins int mpol_parse_str(char *str, struct mempolicy **mpol) 2908095f1fc4SLee Schermerhorn { 290971fe804bSLee Schermerhorn struct mempolicy *new = NULL; 2910f2a07f40SHugh Dickins unsigned short mode_flags; 291171fe804bSLee Schermerhorn nodemask_t nodes; 2912095f1fc4SLee Schermerhorn char *nodelist = strchr(str, ':'); 2913095f1fc4SLee Schermerhorn char *flags = strchr(str, '='); 2914dedf2c73Szhong jiang int err = 1, mode; 2915095f1fc4SLee Schermerhorn 2916c7a91bc7SDan Carpenter if (flags) 2917c7a91bc7SDan Carpenter *flags++ = '\0'; /* terminate mode string */ 2918c7a91bc7SDan Carpenter 2919095f1fc4SLee Schermerhorn if (nodelist) { 2920095f1fc4SLee Schermerhorn /* NUL-terminate mode or flags string */ 2921095f1fc4SLee Schermerhorn *nodelist++ = '\0'; 292271fe804bSLee Schermerhorn if (nodelist_parse(nodelist, nodes)) 2923095f1fc4SLee Schermerhorn goto out; 292401f13bd6SLai Jiangshan if (!nodes_subset(nodes, node_states[N_MEMORY])) 2925095f1fc4SLee Schermerhorn goto out; 292671fe804bSLee Schermerhorn } else 292771fe804bSLee Schermerhorn nodes_clear(nodes); 292871fe804bSLee Schermerhorn 2929dedf2c73Szhong jiang mode = match_string(policy_modes, MPOL_MAX, str); 2930dedf2c73Szhong jiang if (mode < 0) 2931095f1fc4SLee Schermerhorn goto out; 2932095f1fc4SLee Schermerhorn 293371fe804bSLee Schermerhorn switch (mode) { 2934095f1fc4SLee Schermerhorn case MPOL_PREFERRED: 293571fe804bSLee Schermerhorn /* 2936aa9f7d51SRandy Dunlap * Insist on a nodelist of one node only, although later 2937aa9f7d51SRandy Dunlap * we use first_node(nodes) to grab a single node, so here 2938aa9f7d51SRandy Dunlap * nodelist (or nodes) cannot be empty. 
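 * ("prefer:2" parses, for instance, while "prefer:0-3" or "prefer:0,2"
 * is rejected by the isdigit() scan below.)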
293971fe804bSLee Schermerhorn */ 2940095f1fc4SLee Schermerhorn if (nodelist) { 2941095f1fc4SLee Schermerhorn char *rest = nodelist; 2942095f1fc4SLee Schermerhorn while (isdigit(*rest)) 2943095f1fc4SLee Schermerhorn rest++; 2944926f2ae0SKOSAKI Motohiro if (*rest) 2945926f2ae0SKOSAKI Motohiro goto out; 2946aa9f7d51SRandy Dunlap if (nodes_empty(nodes)) 2947aa9f7d51SRandy Dunlap goto out; 2948095f1fc4SLee Schermerhorn } 2949095f1fc4SLee Schermerhorn break; 2950095f1fc4SLee Schermerhorn case MPOL_INTERLEAVE: 2951095f1fc4SLee Schermerhorn /* 2952095f1fc4SLee Schermerhorn * Default to online nodes with memory if no nodelist 2953095f1fc4SLee Schermerhorn */ 2954095f1fc4SLee Schermerhorn if (!nodelist) 295501f13bd6SLai Jiangshan nodes = node_states[N_MEMORY]; 29563f226aa1SLee Schermerhorn break; 295771fe804bSLee Schermerhorn case MPOL_LOCAL: 29583f226aa1SLee Schermerhorn /* 295971fe804bSLee Schermerhorn * Don't allow a nodelist; mpol_new() checks flags 29603f226aa1SLee Schermerhorn */ 296171fe804bSLee Schermerhorn if (nodelist) 29623f226aa1SLee Schermerhorn goto out; 29633f226aa1SLee Schermerhorn break; 2964413b43deSRavikiran G Thirumalai case MPOL_DEFAULT: 2965413b43deSRavikiran G Thirumalai /* 2966413b43deSRavikiran G Thirumalai * Insist on a empty nodelist 2967413b43deSRavikiran G Thirumalai */ 2968413b43deSRavikiran G Thirumalai if (!nodelist) 2969413b43deSRavikiran G Thirumalai err = 0; 2970413b43deSRavikiran G Thirumalai goto out; 2971b27abaccSDave Hansen case MPOL_PREFERRED_MANY: 2972d69b2e63SKOSAKI Motohiro case MPOL_BIND: 297371fe804bSLee Schermerhorn /* 2974d69b2e63SKOSAKI Motohiro * Insist on a nodelist 297571fe804bSLee Schermerhorn */ 2976d69b2e63SKOSAKI Motohiro if (!nodelist) 2977d69b2e63SKOSAKI Motohiro goto out; 2978095f1fc4SLee Schermerhorn } 2979095f1fc4SLee Schermerhorn 298071fe804bSLee Schermerhorn mode_flags = 0; 2981095f1fc4SLee Schermerhorn if (flags) { 2982095f1fc4SLee Schermerhorn /* 2983095f1fc4SLee Schermerhorn * Currently, we only support two mutually exclusive 2984095f1fc4SLee Schermerhorn * mode flags. 2985095f1fc4SLee Schermerhorn */ 2986095f1fc4SLee Schermerhorn if (!strcmp(flags, "static")) 298771fe804bSLee Schermerhorn mode_flags |= MPOL_F_STATIC_NODES; 2988095f1fc4SLee Schermerhorn else if (!strcmp(flags, "relative")) 298971fe804bSLee Schermerhorn mode_flags |= MPOL_F_RELATIVE_NODES; 2990095f1fc4SLee Schermerhorn else 2991926f2ae0SKOSAKI Motohiro goto out; 2992095f1fc4SLee Schermerhorn } 299371fe804bSLee Schermerhorn 299471fe804bSLee Schermerhorn new = mpol_new(mode, mode_flags, &nodes); 299571fe804bSLee Schermerhorn if (IS_ERR(new)) 2996926f2ae0SKOSAKI Motohiro goto out; 2997926f2ae0SKOSAKI Motohiro 2998f2a07f40SHugh Dickins /* 2999f2a07f40SHugh Dickins * Save nodes for mpol_to_str() to show the tmpfs mount options 3000f2a07f40SHugh Dickins * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo. 3001f2a07f40SHugh Dickins */ 3002269fbe72SBen Widawsky if (mode != MPOL_PREFERRED) { 3003269fbe72SBen Widawsky new->nodes = nodes; 3004269fbe72SBen Widawsky } else if (nodelist) { 3005269fbe72SBen Widawsky nodes_clear(new->nodes); 3006269fbe72SBen Widawsky node_set(first_node(nodes), new->nodes); 3007269fbe72SBen Widawsky } else { 30087858d7bcSFeng Tang new->mode = MPOL_LOCAL; 3009269fbe72SBen Widawsky } 3010f2a07f40SHugh Dickins 3011f2a07f40SHugh Dickins /* 3012f2a07f40SHugh Dickins * Save nodes for contextualization: this will be used to "clone" 3013f2a07f40SHugh Dickins * the mempolicy in a specific context [cpuset] at a later time. 
3014f2a07f40SHugh Dickins */ 3015e17f74afSLee Schermerhorn new->w.user_nodemask = nodes; 3016f2a07f40SHugh Dickins 3017926f2ae0SKOSAKI Motohiro err = 0; 301871fe804bSLee Schermerhorn 3019095f1fc4SLee Schermerhorn out: 3020095f1fc4SLee Schermerhorn /* Restore string for error message */ 3021095f1fc4SLee Schermerhorn if (nodelist) 3022095f1fc4SLee Schermerhorn *--nodelist = ':'; 3023095f1fc4SLee Schermerhorn if (flags) 3024095f1fc4SLee Schermerhorn *--flags = '='; 302571fe804bSLee Schermerhorn if (!err) 302671fe804bSLee Schermerhorn *mpol = new; 3027095f1fc4SLee Schermerhorn return err; 3028095f1fc4SLee Schermerhorn } 3029095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */ 3030095f1fc4SLee Schermerhorn 303171fe804bSLee Schermerhorn /** 303271fe804bSLee Schermerhorn * mpol_to_str - format a mempolicy structure for printing 303371fe804bSLee Schermerhorn * @buffer: to contain formatted mempolicy string 303471fe804bSLee Schermerhorn * @maxlen: length of @buffer 303571fe804bSLee Schermerhorn * @pol: pointer to mempolicy to be formatted 303671fe804bSLee Schermerhorn * 3037948927eeSDavid Rientjes * Convert @pol into a string. If @buffer is too short, truncate the string. 3038948927eeSDavid Rientjes * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the 3039948927eeSDavid Rientjes * longest flag, "relative", and to display at least a few node ids. 30401a75a6c8SChristoph Lameter */ 3041948927eeSDavid Rientjes void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol) 30421a75a6c8SChristoph Lameter { 30431a75a6c8SChristoph Lameter char *p = buffer; 3044948927eeSDavid Rientjes nodemask_t nodes = NODE_MASK_NONE; 3045948927eeSDavid Rientjes unsigned short mode = MPOL_DEFAULT; 3046948927eeSDavid Rientjes unsigned short flags = 0; 30471a75a6c8SChristoph Lameter 30488790c71aSDavid Rientjes if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) { 3049bea904d5SLee Schermerhorn mode = pol->mode; 3050948927eeSDavid Rientjes flags = pol->flags; 3051948927eeSDavid Rientjes } 3052bea904d5SLee Schermerhorn 30531a75a6c8SChristoph Lameter switch (mode) { 30541a75a6c8SChristoph Lameter case MPOL_DEFAULT: 30557858d7bcSFeng Tang case MPOL_LOCAL: 30561a75a6c8SChristoph Lameter break; 30571a75a6c8SChristoph Lameter case MPOL_PREFERRED: 3058b27abaccSDave Hansen case MPOL_PREFERRED_MANY: 30591a75a6c8SChristoph Lameter case MPOL_BIND: 30601a75a6c8SChristoph Lameter case MPOL_INTERLEAVE: 3061269fbe72SBen Widawsky nodes = pol->nodes; 30621a75a6c8SChristoph Lameter break; 30631a75a6c8SChristoph Lameter default: 3064948927eeSDavid Rientjes WARN_ON_ONCE(1); 3065948927eeSDavid Rientjes snprintf(p, maxlen, "unknown"); 3066948927eeSDavid Rientjes return; 30671a75a6c8SChristoph Lameter } 30681a75a6c8SChristoph Lameter 3069b7a9f420SDavid Rientjes p += snprintf(p, maxlen, "%s", policy_modes[mode]); 30701a75a6c8SChristoph Lameter 3071fc36b8d3SLee Schermerhorn if (flags & MPOL_MODE_FLAGS) { 3072948927eeSDavid Rientjes p += snprintf(p, buffer + maxlen - p, "="); 3073f5b087b5SDavid Rientjes 30742291990aSLee Schermerhorn /* 30752291990aSLee Schermerhorn * Currently, the only defined flags are mutually exclusive 30762291990aSLee Schermerhorn */ 3077f5b087b5SDavid Rientjes if (flags & MPOL_F_STATIC_NODES) 30782291990aSLee Schermerhorn p += snprintf(p, buffer + maxlen - p, "static"); 30792291990aSLee Schermerhorn else if (flags & MPOL_F_RELATIVE_NODES) 30802291990aSLee Schermerhorn p += snprintf(p, buffer + maxlen - p, "relative"); 3081f5b087b5SDavid Rientjes } 3082f5b087b5SDavid Rientjes 
30839e763e0fSTejun Heo if (!nodes_empty(nodes)) 30849e763e0fSTejun Heo p += scnprintf(p, buffer + maxlen - p, ":%*pbl", 30859e763e0fSTejun Heo nodemask_pr_args(&nodes)); 30861a75a6c8SChristoph Lameter } 308720b51af1SHuang Ying 308820b51af1SHuang Ying bool numa_demotion_enabled = false; 308920b51af1SHuang Ying 309020b51af1SHuang Ying #ifdef CONFIG_SYSFS 309120b51af1SHuang Ying static ssize_t numa_demotion_enabled_show(struct kobject *kobj, 309220b51af1SHuang Ying struct kobj_attribute *attr, char *buf) 309320b51af1SHuang Ying { 309420b51af1SHuang Ying return sysfs_emit(buf, "%s\n", 309520b51af1SHuang Ying numa_demotion_enabled? "true" : "false"); 309620b51af1SHuang Ying } 309720b51af1SHuang Ying 309820b51af1SHuang Ying static ssize_t numa_demotion_enabled_store(struct kobject *kobj, 309920b51af1SHuang Ying struct kobj_attribute *attr, 310020b51af1SHuang Ying const char *buf, size_t count) 310120b51af1SHuang Ying { 310220b51af1SHuang Ying if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1)) 310320b51af1SHuang Ying numa_demotion_enabled = true; 310420b51af1SHuang Ying else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1)) 310520b51af1SHuang Ying numa_demotion_enabled = false; 310620b51af1SHuang Ying else 310720b51af1SHuang Ying return -EINVAL; 310820b51af1SHuang Ying 310920b51af1SHuang Ying return count; 311020b51af1SHuang Ying } 311120b51af1SHuang Ying 311220b51af1SHuang Ying static struct kobj_attribute numa_demotion_enabled_attr = 311320b51af1SHuang Ying __ATTR(demotion_enabled, 0644, numa_demotion_enabled_show, 311420b51af1SHuang Ying numa_demotion_enabled_store); 311520b51af1SHuang Ying 311620b51af1SHuang Ying static struct attribute *numa_attrs[] = { 311720b51af1SHuang Ying &numa_demotion_enabled_attr.attr, 311820b51af1SHuang Ying NULL, 311920b51af1SHuang Ying }; 312020b51af1SHuang Ying 312120b51af1SHuang Ying static const struct attribute_group numa_attr_group = { 312220b51af1SHuang Ying .attrs = numa_attrs, 312320b51af1SHuang Ying }; 312420b51af1SHuang Ying 312520b51af1SHuang Ying static int __init numa_init_sysfs(void) 312620b51af1SHuang Ying { 312720b51af1SHuang Ying int err; 312820b51af1SHuang Ying struct kobject *numa_kobj; 312920b51af1SHuang Ying 313020b51af1SHuang Ying numa_kobj = kobject_create_and_add("numa", mm_kobj); 313120b51af1SHuang Ying if (!numa_kobj) { 313220b51af1SHuang Ying pr_err("failed to create numa kobject\n"); 313320b51af1SHuang Ying return -ENOMEM; 313420b51af1SHuang Ying } 313520b51af1SHuang Ying err = sysfs_create_group(numa_kobj, &numa_attr_group); 313620b51af1SHuang Ying if (err) { 313720b51af1SHuang Ying pr_err("failed to register numa group\n"); 313820b51af1SHuang Ying goto delete_obj; 313920b51af1SHuang Ying } 314020b51af1SHuang Ying return 0; 314120b51af1SHuang Ying 314220b51af1SHuang Ying delete_obj: 314320b51af1SHuang Ying kobject_put(numa_kobj); 314420b51af1SHuang Ying return err; 314520b51af1SHuang Ying } 314620b51af1SHuang Ying subsys_initcall(numa_init_sysfs); 314720b51af1SHuang Ying #endif 3148
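/*
 * Illustrative user-space toggle for the demotion_enabled knob above.  The
 * path follows from the "numa" kobject being created under mm_kobj
 * (/sys/kernel/mm); this helper is only a sketch, not part of the kernel
 * tree:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	static int set_numa_demotion(int enable)
 *	{
 *		int fd = open("/sys/kernel/mm/numa/demotion_enabled", O_WRONLY);
 *		int ok;
 *
 *		if (fd < 0)
 *			return -1;
 *		ok = write(fd, enable ? "1" : "0", 1) == 1;
 *		close(fd);
 *		return ok ? 0 : -1;
 *	}
 */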