// SPDX-License-Identifier: GPL-2.0-only
/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 *
 * NUMA policy allows the user to give hints about the node(s) on which
 * memory should be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave	Allocate memory interleaved over a set of nodes,
 *		with normal fallback if it fails.
 *		For VMA based allocations this interleaves based on the
 *		offset into the backing object or offset into the mapping
 *		for anonymous memory. For process policy a process counter
 *		is used.
 *
 * bind		Only allocate memory on a specific set of nodes,
 *		no fallback.
 *		FIXME: memory is allocated starting with the first node
 *		to the last. It would be better if bind would truly restrict
 *		the allocation to memory nodes instead.
 *
 * preferred	Try a specific node first before normal fallback.
 *		As a special case NUMA_NO_NODE here means do the allocation
 *		on the local CPU. This is normally identical to default,
 *		but useful to set in a VMA when you have a non default
 *		process policy.
 *
 * default	Allocate on the local node first, or when on a VMA
 *		use the process policy. This is what Linux always did
 *		in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non-interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem, kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
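/*
 * Editor's illustration (not part of the original sources): from
 * userspace these policies are normally set with the set_mempolicy(2)
 * and mbind(2) syscalls, e.g.:
 *
 *	unsigned long nodes = 0x3;	// nodemask with nodes 0 and 1
 *	// interleave future process allocations across nodes 0 and 1
 *	set_mempolicy(MPOL_INTERLEAVE, &nodes, sizeof(nodes) * 8);
 *
 *	unsigned long node0 = 0x1;	// nodemask with node 0 only
 *	// bind an existing mapping to node 0, with no fallback
 *	mbind(addr, length, MPOL_BIND, &node0, sizeof(node0) * 8, 0);
 *
 * addr/length here stand for any page-aligned mapping; the calls are
 * serviced by do_set_mempolicy() resp. do_mbind() below.
 */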

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/pagewalk.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/ptrace.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>
#include <linux/swapops.h>

#include <asm/tlbflush.h>
#include <linux/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

/**
 * numa_map_to_online_node - Find closest online node
 * @node: Node id to start the search
 *
 * Lookup the next closest node by distance if @node is not online.
 */
int numa_map_to_online_node(int node)
{
	int min_dist = INT_MAX, dist, n, min_node;

	if (node == NUMA_NO_NODE || node_online(node))
		return node;

	min_node = node;
	for_each_online_node(n) {
		dist = node_distance(node, n);
		if (dist < min_dist) {
			min_dist = dist;
			min_node = n;
		}
	}

	return min_node;
}
EXPORT_SYMBOL_GPL(numa_map_to_online_node);

struct mempolicy *get_task_policy(struct task_struct *p)
{
	struct mempolicy *pol = p->mempolicy;
	int node;

	if (pol)
		return pol;

	node = numa_node_id();
	if (node != NUMA_NO_NODE) {
		pol = &preferred_node_policy[node];
		/* preferred_node_policy is not initialised early in boot */
		if (pol->mode)
			return pol;
	}

	return &default_policy;
}

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
} mpol_ops[MPOL_MAX];

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}
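
/*
 * Worked example (editor's note, not in the original sources): with
 * MPOL_F_RELATIVE_NODES the user nodemask is first folded modulo the
 * weight of the allowed mask and then mapped onto it.  For
 * orig = {0,2} relative to rel = {4,5,6} (weight 3), nodes_fold()
 * leaves {0,2} unchanged (both bits < 3) and nodes_onto() maps bit 0
 * to the 1st node of rel and bit 2 to the 3rd, so *ret = {4,6}.
 */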

static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;

	nodes_clear(pol->nodes);
	node_set(first_node(*nodes), pol->nodes);
	return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->nodes = *nodes;
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy. mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy. May also be called holding the mmap_lock for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/*
	 * Default (pol==NULL) resp. local memory policies are not a
	 * subject of any remapping. They also do not need any special
	 * constructor.
	 */
	if (!pol || pol->mode == MPOL_LOCAL)
		return 0;

	/* Check N_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_MEMORY]);

	VM_BUG_ON(!nodes);

	if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
	else
		nodes_and(nsc->mask2, *nodes, nsc->mask1);

	if (mpol_store_user_nodemask(pol))
		pol->w.user_nodemask = *nodes;
	else
		pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed;

	ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	return ret;
}

/*
 * This function just creates a new policy, does some checks and simple
 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);

			mode = MPOL_LOCAL;
		}
	} else if (mode == MPOL_LOCAL) {
		if (!nodes_empty(*nodes) ||
		    (flags & MPOL_F_STATIC_NODES) ||
		    (flags & MPOL_F_RELATIVE_NODES))
			return ERR_PTR(-EINVAL);
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);

	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	return policy;
}

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
{
}

static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed,
			    *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}

	if (nodes_empty(tmp))
		tmp = *nodes;

	pol->nodes = tmp;
}

static void mpol_rebind_preferred(struct mempolicy *pol,
				  const nodemask_t *nodes)
{
	pol->w.cpuset_mems_allowed = *nodes;
}

/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * Per-vma policies are protected by mmap_lock. Allocations using per-task
 * policies are protected by task->mems_allowed_seq to prevent a premature
 * OOM/allocation failure due to parallel nodemask modification.
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
{
	if (!pol)
		return;
	if (!mpol_store_user_nodemask(pol) &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;

	mpol_ops[pol->mode].rebind(pol, newmask);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires the task
 * pointer, and updates the task mempolicy.
 *
 * Called with task's alloc_lock held.
 */
void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
{
	mpol_rebind_policy(tsk->mempolicy, new);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_lock during call.
 */
void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	mmap_write_lock(mm);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new);
	mmap_write_unlock(mm);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_interleave,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_LOCAL] = {
		.rebind = mpol_rebind_default,
	},
};

static int migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

struct queue_pages {
	struct list_head *pagelist;
	unsigned long flags;
	nodemask_t *nmask;
	unsigned long start;
	unsigned long end;
	struct vm_area_struct *first;
};

/*
 * Check if the page's nid is in qp->nmask.
 *
 * If MPOL_MF_INVERT is set in qp->flags, the check is inverted: the page
 * qualifies if its nid is *not* in qp->nmask.
 */
static inline bool queue_pages_required(struct page *page,
					struct queue_pages *qp)
{
	int nid = page_to_nid(page);
	unsigned long flags = qp->flags;

	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
}

/*
 * queue_pages_pmd() has four possible return values:
 * 0 - pages are placed on the right node or queued successfully, or a
 *     special page (i.e. the huge zero page) was met.
 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * 2 - THP was split.
 * -EIO - a migration entry was found, or only MPOL_MF_STRICT was specified
 *        and an existing page was already on a node that does not follow
 *        the policy.
 */
static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
	__releases(ptl)
{
	int ret = 0;
	struct page *page;
	struct queue_pages *qp = walk->private;
	unsigned long flags;

	if (unlikely(is_pmd_migration_entry(*pmd))) {
		ret = -EIO;
		goto unlock;
	}
	page = pmd_page(*pmd);
	if (is_huge_zero_page(page)) {
		spin_unlock(ptl);
		walk->action = ACTION_CONTINUE;
		goto out;
	}
	if (!queue_pages_required(page, qp))
		goto unlock;

	flags = qp->flags;
	/* go to thp migration */
	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
		if (!vma_migratable(walk->vma) ||
		    migrate_page_add(page, qp->pagelist, flags)) {
			ret = 1;
			goto unlock;
		}
	} else
		ret = -EIO;
unlock:
	spin_unlock(ptl);
out:
	return ret;
}

/*
 * Scan through pages, checking whether they satisfy the required conditions,
 * and move them to the pagelist if they do.
 *
 * queue_pages_pte_range() has three possible return values:
 * 0 - pages are placed on the right node or queued successfully, or a
 *     special page (i.e. the zero page) was met.
 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
 *        on a node that does not follow the policy.
 */
static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct page *page;
	struct queue_pages *qp = walk->private;
	unsigned long flags = qp->flags;
	int ret;
	bool has_unmovable = false;
	pte_t *pte, *mapped_pte;
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
		if (ret != 2)
			return ret;
	}
	/* THP was split, fall through to pte walk */

	if (pmd_trans_unstable(pmd))
		return 0;

	mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * vm_normal_page() filters out zero pages, but there might
		 * still be PageReserved pages to skip, perhaps in a VDSO.
		 */
		if (PageReserved(page))
			continue;
		if (!queue_pages_required(page, qp))
			continue;
		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
			/* MPOL_MF_STRICT must be specified if we get here */
			if (!vma_migratable(vma)) {
				has_unmovable = true;
				break;
			}

			/*
			 * Do not abort immediately since there may be
			 * temporarily off-LRU pages in the range.  We still
			 * need to migrate the other LRU pages.
			 */
			if (migrate_page_add(page, qp->pagelist, flags))
				has_unmovable = true;
		} else
			break;
	}
	pte_unmap_unlock(mapped_pte, ptl);
	cond_resched();

	if (has_unmovable)
		return 1;

	return addr != end ? -EIO : 0;
}

static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
			       unsigned long addr, unsigned long end,
			       struct mm_walk *walk)
{
	int ret = 0;
#ifdef CONFIG_HUGETLB_PAGE
	struct queue_pages *qp = walk->private;
	unsigned long flags = (qp->flags & MPOL_MF_VALID);
	struct page *page;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry))
		goto unlock;
	page = pte_page(entry);
	if (!queue_pages_required(page, qp))
		goto unlock;

	if (flags == MPOL_MF_STRICT) {
		/*
		 * STRICT alone means only detecting misplaced pages, with no
		 * need to check other vmas further.
		 */
		ret = -EIO;
		goto unlock;
	}

	if (!vma_migratable(walk->vma)) {
		/*
		 * Must be STRICT with MOVE*, otherwise .test_walk() would
		 * have stopped walking the current vma.
		 * Detect the misplaced page, but still allow migrating pages
		 * which have been queued.
		 */
		ret = 1;
		goto unlock;
	}

	/* With MPOL_MF_MOVE, we migrate only unshared hugepages. */
	if (flags & (MPOL_MF_MOVE_ALL) ||
	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) {
		if (!isolate_huge_page(page, qp->pagelist) &&
		    (flags & MPOL_MF_STRICT))
			/*
			 * Failed to isolate the page, but still allow
			 * migrating pages which have been queued.
			 */
			ret = 1;
	}
unlock:
	spin_unlock(ptl);
#else
	BUG();
#endif
	return ret;
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * This is used to mark a range of virtual addresses as inaccessible.
 * These markings are later cleared by a NUMA hinting fault. Depending on
 * these faults, pages may be migrated for better NUMA placement.
 *
 * This assumes that NUMA faults are handled using PROT_NONE. If
 * an architecture makes a different choice, it will need further
 * changes to the core.
 */
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	int nr_updated;

	nr_updated = change_protection(vma, addr, end, PAGE_NONE, MM_CP_PROT_NUMA);
	if (nr_updated)
		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);

	return nr_updated;
}
#else
static unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

static int queue_pages_test_walk(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct queue_pages *qp = walk->private;
	unsigned long endvma = vma->vm_end;
	unsigned long flags = qp->flags;

	/* range check first */
	VM_BUG_ON_VMA(!range_in_vma(vma, start, end), vma);

	if (!qp->first) {
		qp->first = vma;
		if (!(flags & MPOL_MF_DISCONTIG_OK) &&
			(qp->start < vma->vm_start))
			/* hole at head side of range */
			return -EFAULT;
	}
	if (!(flags & MPOL_MF_DISCONTIG_OK) &&
		((vma->vm_end < qp->end) &&
		(!vma->vm_next || vma->vm_end < vma->vm_next->vm_start)))
		/* hole at middle or tail of range */
		return -EFAULT;

	/*
	 * Need to check MPOL_MF_STRICT so that -EIO can be returned when
	 * required, regardless of vma_migratable().
	 */
	if (!vma_migratable(vma) &&
	    !(flags & MPOL_MF_STRICT))
		return 1;

	if (endvma > end)
		endvma = end;

	if (flags & MPOL_MF_LAZY) {
		/* Similar to task_numa_work, skip inaccessible VMAs */
		if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) &&
			!(vma->vm_flags & VM_MIXEDMAP))
			change_prot_numa(vma, start, endvma);
		return 1;
	}

	/* queue pages from current vma */
	if (flags & MPOL_MF_VALID)
		return 0;
	return 1;
}

static const struct mm_walk_ops queue_pages_walk_ops = {
	.hugetlb_entry		= queue_pages_hugetlb,
	.pmd_entry		= queue_pages_pte_range,
	.test_walk		= queue_pages_test_walk,
};

/*
 * Walk through page tables and collect pages to be migrated.
 *
 * If pages found in a given range are on a set of nodes (determined by
 * @nodes and @flags), they are isolated and queued to the pagelist which
 * is passed via @pagelist.
 *
 * queue_pages_range() has three possible return values:
 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * 0 - pages queued successfully, or no misplaced page found.
 * errno - i.e. misplaced pages with MPOL_MF_STRICT specified (-EIO), or
 *         the memory range specified by nodemask and maxnode points
 *         outside your accessible address space (-EFAULT)
 */
static int
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		nodemask_t *nodes, unsigned long flags,
		struct list_head *pagelist)
{
	int err;
	struct queue_pages qp = {
		.pagelist = pagelist,
		.flags = flags,
		.nmask = nodes,
		.start = start,
		.end = end,
		.first = NULL,
	};

	err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);

	if (!qp.first)
		/* whole range in hole */
		err = -EFAULT;

	return err;
}
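
/*
 * Editor's note: callers pick the nodemask sense they need.
 * migrate_to_node() below passes a nodemask holding just the source node,
 * so exactly the pages resident there are queued, while do_mbind()
 * (further down, beyond this excerpt) adds MPOL_MF_INVERT so that the
 * pages *not* yet on the requested nodes are the ones collected.
 */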

/*
 * Apply policy to a single VMA
 * This must be called with the mmap_lock held for writing.
 */
static int vma_replace_policy(struct vm_area_struct *vma,
						struct mempolicy *pol)
{
	int err;
	struct mempolicy *old;
	struct mempolicy *new;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	new = mpol_dup(pol);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (vma->vm_ops && vma->vm_ops->set_policy) {
		err = vma->vm_ops->set_policy(vma, new);
		if (err)
			goto err_out;
	}

	old = vma->vm_policy;
	vma->vm_policy = new; /* protected by mmap_lock */
	mpol_put(old);

	return 0;
 err_out:
	mpol_put(new);
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end, struct mempolicy *new_pol)
{
	struct vm_area_struct *next;
	struct vm_area_struct *prev;
	struct vm_area_struct *vma;
	int err = 0;
	pgoff_t pgoff;
	unsigned long vmstart;
	unsigned long vmend;

	vma = find_vma(mm, start);
	VM_BUG_ON(!vma);

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
		next = vma->vm_next;
		vmstart = max(start, vma->vm_start);
		vmend   = min(end, vma->vm_end);

		if (mpol_equal(vma_policy(vma), new_pol))
			continue;

		pgoff = vma->vm_pgoff +
			((vmstart - vma->vm_start) >> PAGE_SHIFT);
		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
				 vma->anon_vma, vma->vm_file, pgoff,
				 new_pol, vma->vm_userfaultfd_ctx);
		if (prev) {
			vma = prev;
			next = vma->vm_next;
			if (mpol_equal(vma_policy(vma), new_pol))
				continue;
			/* vma_merge() joined vma && vma->next, case 8 */
			goto replace;
		}
		if (vma->vm_start != vmstart) {
			err = split_vma(vma->vm_mm, vma, vmstart, 1);
			if (err)
				goto out;
		}
		if (vma->vm_end != vmend) {
			err = split_vma(vma->vm_mm, vma, vmend, 0);
			if (err)
				goto out;
		}
 replace:
		err = vma_replace_policy(vma, new_pol);
		if (err)
			goto out;
	}

 out:
	return err;
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new, *old;
	NODEMASK_SCRATCH(scratch);
	int ret;

	if (!scratch)
		return -ENOMEM;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new)) {
		ret = PTR_ERR(new);
		goto out;
	}

	if (flags & MPOL_F_NUMA_BALANCING) {
		if (new && new->mode == MPOL_BIND) {
			new->flags |= (MPOL_F_MOF | MPOL_F_MORON);
		} else {
			ret = -EINVAL;
			mpol_put(new);
			goto out;
		}
	}

	ret = mpol_set_nodemask(new, nodes, scratch);
	if (ret) {
		mpol_put(new);
		goto out;
	}
	task_lock(current);
	old = current->mempolicy;
	current->mempolicy = new;
	if (new && new->mode == MPOL_INTERLEAVE)
		current->il_prev = MAX_NUMNODES-1;
	task_unlock(current);
	mpol_put(old);
	ret = 0;
out:
	NODEMASK_SCRATCH_FREE(scratch);
	return ret;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (p == &default_policy)
		return;

	switch (p->mode) {
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
	case MPOL_PREFERRED:
		*nodes = p->nodes;
		break;
	case MPOL_LOCAL:
		/* return empty node mask for local allocation */
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p = NULL;
	int err;

	int locked = 1;
	err = get_user_pages_locked(addr & PAGE_MASK, 1, 0, &p, &locked);
	if (err > 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	if (locked)
		mmap_read_unlock(mm);
	return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;

	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL.  We
		 * want to return MPOL_DEFAULT in this case.
		 */
		mmap_read_lock(mm);
		vma = vma_lookup(mm, addr);
		if (!vma) {
			mmap_read_unlock(mm);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			/*
			 * Take a refcount on the mpol, lookup_node()
			 * will drop the mmap_lock, so after calling
			 * lookup_node() only "pol" remains valid, "vma"
			 * is stale.
			 */
			pol_refcount = pol;
			vma = NULL;
			mpol_get(pol);
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = next_node_in(current->il_prev, pol->nodes);
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {
			*nmask = pol->w.user_nodemask;
		} else {
			task_lock(current);
			get_policy_nodemask(pol, nmask);
			task_unlock(current);
		}
	}

 out:
	mpol_cond_put(pol);
	if (vma)
		mmap_read_unlock(mm);
	if (pol_refcount)
		mpol_put(pol_refcount);
	return err;
}

#ifdef CONFIG_MIGRATION
/*
 * page migration, thp tail pages can be passed.
 */
static int migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	struct page *head = compound_head(page);
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
		if (!isolate_lru_page(head)) {
			list_add_tail(&head->lru, pagelist);
			mod_node_page_state(page_pgdat(head),
				NR_ISOLATED_ANON + page_is_file_lru(head),
				thp_nr_pages(head));
		} else if (flags & MPOL_MF_STRICT) {
			/*
			 * A non-movable page may reach here.  And, there may
			 * be temporarily off-LRU pages or non-LRU movable
			 * pages.  Treat them as unmovable pages since they
			 * can't be isolated, so they can't be moved at the
			 * moment.  It should return -EIO for this case too.
			 */
			return -EIO;
		}
	}

	return 0;
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;
	struct migration_target_control mtc = {
		.nid = dest,
		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
	};

	nodes_clear(nmask);
	node_set(source, nmask);

	/*
	 * This does not "check" the range but isolates all pages that
	 * need migration.  Between passing in the full user address
	 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
	 */
	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, alloc_migration_target, NULL,
				(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
		if (err)
			putback_movable_pages(&pagelist);
	}

	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	int busy = 0;
	int err = 0;
	nodemask_t tmp;

	lru_cache_disable();

	mmap_read_lock(mm);

	/*
	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
	 * bit in 'tmp', and return that <source, dest> pair for migration.
	 * The pair of nodemasks 'to' and 'from' define the map.
	 *
	 * If no pair of bits is found that way, fall back to picking some
	 * pair of 'source' and 'dest' bits that are not the same.  If the
	 * 'source' and 'dest' bits are the same, this represents a node
	 * that will be migrating to itself, so no pages need move.
	 *
	 * If no bits are left in 'tmp', or if all remaining bits left
	 * in 'tmp' correspond to the same bit in 'to', return false
	 * (nothing left to migrate).
	 *
	 * This lets us pick a pair of nodes to migrate between, such that
	 * if possible the dest node is not already occupied by some other
	 * source node, minimizing the risk of overloading the memory on a
	 * node that would happen if we migrated incoming memory to a node
	 * before migrating outgoing memory sourced from that same node.
	 *
	 * A single scan of tmp is sufficient.  As we go, we remember the
	 * most recent <s, d> pair that moved (s != d).  If we find a pair
	 * that not only moved, but what's better, moved to an empty slot
	 * (d is not set in tmp), then we break out then, with that pair.
	 * Otherwise when we finish scanning tmp, we at least have the
	 * most recent <s, d> pair that moved.  If we get all the way through
	 * the scan of tmp without finding any node that moved, much less
	 * moved to an empty node, then there is nothing left worth migrating.
	 */
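
	/*
	 * Worked example (editor's note, not in the original sources):
	 * for from = {0,1} and to = {1,2}, the first scan finds <0,1> but
	 * keeps going because node 1 is still set in tmp, then settles on
	 * <1,2> since node 2 is an empty slot.  Pages therefore move
	 * 1 -> 2 first and 0 -> 1 second, so node 1 is drained before it
	 * receives node 0's pages.
	 */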
11644a5b18ccSLarry Woodman */ 11654a5b18ccSLarry Woodman 11660ce72d4fSAndrew Morton if ((nodes_weight(*from) != nodes_weight(*to)) && 11670ce72d4fSAndrew Morton (node_isset(s, *to))) 11684a5b18ccSLarry Woodman continue; 11694a5b18ccSLarry Woodman 11700ce72d4fSAndrew Morton d = node_remap(s, *from, *to); 11717e2ab150SChristoph Lameter if (s == d) 11727e2ab150SChristoph Lameter continue; 11737e2ab150SChristoph Lameter 11747e2ab150SChristoph Lameter source = s; /* Node moved. Memorize */ 11757e2ab150SChristoph Lameter dest = d; 11767e2ab150SChristoph Lameter 11777e2ab150SChristoph Lameter /* dest not in remaining from nodes? */ 11787e2ab150SChristoph Lameter if (!node_isset(dest, tmp)) 11797e2ab150SChristoph Lameter break; 11807e2ab150SChristoph Lameter } 1181b76ac7e7SJianguo Wu if (source == NUMA_NO_NODE) 11827e2ab150SChristoph Lameter break; 11837e2ab150SChristoph Lameter 11847e2ab150SChristoph Lameter node_clear(source, tmp); 11857e2ab150SChristoph Lameter err = migrate_to_node(mm, source, dest, flags); 11867e2ab150SChristoph Lameter if (err > 0) 11877e2ab150SChristoph Lameter busy += err; 11887e2ab150SChristoph Lameter if (err < 0) 11897e2ab150SChristoph Lameter break; 119039743889SChristoph Lameter } 1191d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); 1192d479960eSMinchan Kim 1193361a2a22SMinchan Kim lru_cache_enable(); 11947e2ab150SChristoph Lameter if (err < 0) 11957e2ab150SChristoph Lameter return err; 11967e2ab150SChristoph Lameter return busy; 1197b20a3503SChristoph Lameter 119839743889SChristoph Lameter } 119939743889SChristoph Lameter 12003ad33b24SLee Schermerhorn /* 12013ad33b24SLee Schermerhorn * Allocate a new page for page migration based on vma policy. 1202d05f0cdcSHugh Dickins * Start by assuming the page is mapped by the same vma as contains @start. 12033ad33b24SLee Schermerhorn * Search forward from there, if not. N.B., this assumes that the 12043ad33b24SLee Schermerhorn * list of pages handed to migrate_pages()--which is how we get here-- 12053ad33b24SLee Schermerhorn * is in virtual address order. 
12063ad33b24SLee Schermerhorn */ 1207666feb21SMichal Hocko static struct page *new_page(struct page *page, unsigned long start) 120895a402c3SChristoph Lameter { 1209d05f0cdcSHugh Dickins struct vm_area_struct *vma; 12103f649ab7SKees Cook unsigned long address; 121195a402c3SChristoph Lameter 1212d05f0cdcSHugh Dickins vma = find_vma(current->mm, start); 12133ad33b24SLee Schermerhorn while (vma) { 12143ad33b24SLee Schermerhorn address = page_address_in_vma(page, vma); 12153ad33b24SLee Schermerhorn if (address != -EFAULT) 12163ad33b24SLee Schermerhorn break; 12173ad33b24SLee Schermerhorn vma = vma->vm_next; 12183ad33b24SLee Schermerhorn } 12193ad33b24SLee Schermerhorn 122011c731e8SWanpeng Li if (PageHuge(page)) { 1221389c8178SMichal Hocko return alloc_huge_page_vma(page_hstate(compound_head(page)), 1222389c8178SMichal Hocko vma, address); 122394723aafSMichal Hocko } else if (PageTransHuge(page)) { 1224c8633798SNaoya Horiguchi struct page *thp; 1225c8633798SNaoya Horiguchi 122619deb769SDavid Rientjes thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address, 122719deb769SDavid Rientjes HPAGE_PMD_ORDER); 1228c8633798SNaoya Horiguchi if (!thp) 1229c8633798SNaoya Horiguchi return NULL; 1230c8633798SNaoya Horiguchi prep_transhuge_page(thp); 1231c8633798SNaoya Horiguchi return thp; 123211c731e8SWanpeng Li } 123311c731e8SWanpeng Li /* 123411c731e8SWanpeng Li * if !vma, alloc_page_vma() will use task or system default policy 123511c731e8SWanpeng Li */ 12360f556856SMichal Hocko return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL, 12370f556856SMichal Hocko vma, address); 123895a402c3SChristoph Lameter } 1239b20a3503SChristoph Lameter #else 1240b20a3503SChristoph Lameter 1241a53190a4SYang Shi static int migrate_page_add(struct page *page, struct list_head *pagelist, 1242b20a3503SChristoph Lameter unsigned long flags) 1243b20a3503SChristoph Lameter { 1244a53190a4SYang Shi return -EIO; 1245b20a3503SChristoph Lameter } 1246b20a3503SChristoph Lameter 12470ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, 12480ce72d4fSAndrew Morton const nodemask_t *to, int flags) 1249b20a3503SChristoph Lameter { 1250b20a3503SChristoph Lameter return -ENOSYS; 1251b20a3503SChristoph Lameter } 125295a402c3SChristoph Lameter 1253666feb21SMichal Hocko static struct page *new_page(struct page *page, unsigned long start) 125495a402c3SChristoph Lameter { 125595a402c3SChristoph Lameter return NULL; 125695a402c3SChristoph Lameter } 1257b20a3503SChristoph Lameter #endif 1258b20a3503SChristoph Lameter 1259dbcb0f19SAdrian Bunk static long do_mbind(unsigned long start, unsigned long len, 1260028fec41SDavid Rientjes unsigned short mode, unsigned short mode_flags, 1261028fec41SDavid Rientjes nodemask_t *nmask, unsigned long flags) 12626ce3c4c0SChristoph Lameter { 12636ce3c4c0SChristoph Lameter struct mm_struct *mm = current->mm; 12646ce3c4c0SChristoph Lameter struct mempolicy *new; 12656ce3c4c0SChristoph Lameter unsigned long end; 12666ce3c4c0SChristoph Lameter int err; 1267d8835445SYang Shi int ret; 12686ce3c4c0SChristoph Lameter LIST_HEAD(pagelist); 12696ce3c4c0SChristoph Lameter 1270b24f53a0SLee Schermerhorn if (flags & ~(unsigned long)MPOL_MF_VALID) 12716ce3c4c0SChristoph Lameter return -EINVAL; 127274c00241SChristoph Lameter if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE)) 12736ce3c4c0SChristoph Lameter return -EPERM; 12746ce3c4c0SChristoph Lameter 12756ce3c4c0SChristoph Lameter if (start & ~PAGE_MASK) 12766ce3c4c0SChristoph Lameter return -EINVAL; 12776ce3c4c0SChristoph Lameter 
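	/*
	 * Illustrative user-space view of the checks above and below
	 * (example only, not kernel code): @start must be page aligned,
	 * while @len is rounded up to a page boundary, so
	 *
	 *	mbind(p + 1, len, MPOL_BIND, &mask, maxnode, 0)
	 *
	 * fails with -EINVAL for a page-aligned p, whereas a short @len
	 * is silently extended to cover the whole page.
	 */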
12786ce3c4c0SChristoph Lameter if (mode == MPOL_DEFAULT) 12796ce3c4c0SChristoph Lameter flags &= ~MPOL_MF_STRICT; 12806ce3c4c0SChristoph Lameter 12816ce3c4c0SChristoph Lameter len = (len + PAGE_SIZE - 1) & PAGE_MASK; 12826ce3c4c0SChristoph Lameter end = start + len; 12836ce3c4c0SChristoph Lameter 12846ce3c4c0SChristoph Lameter if (end < start) 12856ce3c4c0SChristoph Lameter return -EINVAL; 12866ce3c4c0SChristoph Lameter if (end == start) 12876ce3c4c0SChristoph Lameter return 0; 12886ce3c4c0SChristoph Lameter 1289028fec41SDavid Rientjes new = mpol_new(mode, mode_flags, nmask); 12906ce3c4c0SChristoph Lameter if (IS_ERR(new)) 12916ce3c4c0SChristoph Lameter return PTR_ERR(new); 12926ce3c4c0SChristoph Lameter 1293b24f53a0SLee Schermerhorn if (flags & MPOL_MF_LAZY) 1294b24f53a0SLee Schermerhorn new->flags |= MPOL_F_MOF; 1295b24f53a0SLee Schermerhorn 12966ce3c4c0SChristoph Lameter /* 12976ce3c4c0SChristoph Lameter * If we are using the default policy then operation 12986ce3c4c0SChristoph Lameter * on discontinuous address spaces is okay after all 12996ce3c4c0SChristoph Lameter */ 13006ce3c4c0SChristoph Lameter if (!new) 13016ce3c4c0SChristoph Lameter flags |= MPOL_MF_DISCONTIG_OK; 13026ce3c4c0SChristoph Lameter 1303028fec41SDavid Rientjes pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n", 1304028fec41SDavid Rientjes start, start + len, mode, mode_flags, 130500ef2d2fSDavid Rientjes nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE); 13066ce3c4c0SChristoph Lameter 13070aedadf9SChristoph Lameter if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 13080aedadf9SChristoph Lameter 1309361a2a22SMinchan Kim lru_cache_disable(); 13100aedadf9SChristoph Lameter } 13114bfc4495SKAMEZAWA Hiroyuki { 13124bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 13134bfc4495SKAMEZAWA Hiroyuki if (scratch) { 1314d8ed45c5SMichel Lespinasse mmap_write_lock(mm); 13154bfc4495SKAMEZAWA Hiroyuki err = mpol_set_nodemask(new, nmask, scratch); 13164bfc4495SKAMEZAWA Hiroyuki if (err) 1317d8ed45c5SMichel Lespinasse mmap_write_unlock(mm); 13184bfc4495SKAMEZAWA Hiroyuki } else 13194bfc4495SKAMEZAWA Hiroyuki err = -ENOMEM; 13204bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 13214bfc4495SKAMEZAWA Hiroyuki } 1322b05ca738SKOSAKI Motohiro if (err) 1323b05ca738SKOSAKI Motohiro goto mpol_out; 1324b05ca738SKOSAKI Motohiro 1325d8835445SYang Shi ret = queue_pages_range(mm, start, end, nmask, 13266ce3c4c0SChristoph Lameter flags | MPOL_MF_INVERT, &pagelist); 1327d8835445SYang Shi 1328d8835445SYang Shi if (ret < 0) { 1329a85dfc30SYang Shi err = ret; 1330d8835445SYang Shi goto up_out; 1331d8835445SYang Shi } 1332d8835445SYang Shi 13339d8cebd4SKOSAKI Motohiro err = mbind_range(mm, start, end, new); 13347e2ab150SChristoph Lameter 1335b24f53a0SLee Schermerhorn if (!err) { 1336b24f53a0SLee Schermerhorn int nr_failed = 0; 1337b24f53a0SLee Schermerhorn 1338cf608ac1SMinchan Kim if (!list_empty(&pagelist)) { 1339b24f53a0SLee Schermerhorn WARN_ON_ONCE(flags & MPOL_MF_LAZY); 1340d05f0cdcSHugh Dickins nr_failed = migrate_pages(&pagelist, new_page, NULL, 13415ac95884SYang Shi start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND, NULL); 1342cf608ac1SMinchan Kim if (nr_failed) 134374060e4dSNaoya Horiguchi putback_movable_pages(&pagelist); 1344cf608ac1SMinchan Kim } 13456ce3c4c0SChristoph Lameter 1346d8835445SYang Shi if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT))) 13476ce3c4c0SChristoph Lameter err = -EIO; 1348a85dfc30SYang Shi } else { 1349d8835445SYang Shi up_out: 1350a85dfc30SYang Shi if (!list_empty(&pagelist)) 1351a85dfc30SYang Shi 
			putback_movable_pages(&pagelist);
1352a85dfc30SYang Shi 	}
1353a85dfc30SYang Shi 
1354d8ed45c5SMichel Lespinasse 	mmap_write_unlock(mm);
1355b05ca738SKOSAKI Motohiro mpol_out:
1356f0be3d32SLee Schermerhorn 	mpol_put(new);
1357d479960eSMinchan Kim 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
1358361a2a22SMinchan Kim 		lru_cache_enable();
13596ce3c4c0SChristoph Lameter 	return err;
13606ce3c4c0SChristoph Lameter }
13616ce3c4c0SChristoph Lameter 
136239743889SChristoph Lameter /*
13638bccd85fSChristoph Lameter  * User space interface with variable sized bitmaps for nodelists.
13648bccd85fSChristoph Lameter  */
13658bccd85fSChristoph Lameter 
13668bccd85fSChristoph Lameter /* Copy a node mask from user space. */
136739743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
13688bccd85fSChristoph Lameter 		     unsigned long maxnode)
13698bccd85fSChristoph Lameter {
13708bccd85fSChristoph Lameter 	unsigned long k;
137156521e7aSYisheng Xie 	unsigned long t;
13728bccd85fSChristoph Lameter 	unsigned long nlongs;
13738bccd85fSChristoph Lameter 	unsigned long endmask;
13748bccd85fSChristoph Lameter 
13758bccd85fSChristoph Lameter 	--maxnode;
13768bccd85fSChristoph Lameter 	nodes_clear(*nodes);
13778bccd85fSChristoph Lameter 	if (maxnode == 0 || !nmask)
13788bccd85fSChristoph Lameter 		return 0;
1379a9c930baSAndi Kleen 	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1380636f13c1SChris Wright 		return -EINVAL;
13818bccd85fSChristoph Lameter 
13828bccd85fSChristoph Lameter 	nlongs = BITS_TO_LONGS(maxnode);
13838bccd85fSChristoph Lameter 	if ((maxnode % BITS_PER_LONG) == 0)
13848bccd85fSChristoph Lameter 		endmask = ~0UL;
13858bccd85fSChristoph Lameter 	else
13868bccd85fSChristoph Lameter 		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
13878bccd85fSChristoph Lameter 
138856521e7aSYisheng Xie 	/*
138956521e7aSYisheng Xie 	 * When the user specified more nodes than supported just check
139056521e7aSYisheng Xie 	 * if the non supported part is all zero.
139156521e7aSYisheng Xie 	 *
139256521e7aSYisheng Xie 	 * If maxnode has more longs than MAX_NUMNODES, check
139356521e7aSYisheng Xie 	 * the bits in that area first.  And then go through to
139456521e7aSYisheng Xie 	 * check the remaining bits, which are at or above MAX_NUMNODES.
139556521e7aSYisheng Xie 	 * Otherwise, just check bits [MAX_NUMNODES, maxnode).
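	 *
	 * Worked example (illustrative sizes): with MAX_NUMNODES == 64
	 * and a user maxnode of 257, nlongs is BITS_TO_LONGS(256) == 4,
	 * so longs 1..3 of the user mask must be all zero for the call
	 * to be accepted.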
139656521e7aSYisheng Xie */ 13978bccd85fSChristoph Lameter if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) { 13988bccd85fSChristoph Lameter for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) { 13998bccd85fSChristoph Lameter if (get_user(t, nmask + k)) 14008bccd85fSChristoph Lameter return -EFAULT; 14018bccd85fSChristoph Lameter if (k == nlongs - 1) { 14028bccd85fSChristoph Lameter if (t & endmask) 14038bccd85fSChristoph Lameter return -EINVAL; 14048bccd85fSChristoph Lameter } else if (t) 14058bccd85fSChristoph Lameter return -EINVAL; 14068bccd85fSChristoph Lameter } 14078bccd85fSChristoph Lameter nlongs = BITS_TO_LONGS(MAX_NUMNODES); 14088bccd85fSChristoph Lameter endmask = ~0UL; 14098bccd85fSChristoph Lameter } 14108bccd85fSChristoph Lameter 141156521e7aSYisheng Xie if (maxnode > MAX_NUMNODES && MAX_NUMNODES % BITS_PER_LONG != 0) { 141256521e7aSYisheng Xie unsigned long valid_mask = endmask; 141356521e7aSYisheng Xie 141456521e7aSYisheng Xie valid_mask &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1); 141556521e7aSYisheng Xie if (get_user(t, nmask + nlongs - 1)) 141656521e7aSYisheng Xie return -EFAULT; 141756521e7aSYisheng Xie if (t & valid_mask) 141856521e7aSYisheng Xie return -EINVAL; 141956521e7aSYisheng Xie } 142056521e7aSYisheng Xie 14218bccd85fSChristoph Lameter if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long))) 14228bccd85fSChristoph Lameter return -EFAULT; 14238bccd85fSChristoph Lameter nodes_addr(*nodes)[nlongs-1] &= endmask; 14248bccd85fSChristoph Lameter return 0; 14258bccd85fSChristoph Lameter } 14268bccd85fSChristoph Lameter 14278bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */ 14288bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode, 14298bccd85fSChristoph Lameter nodemask_t *nodes) 14308bccd85fSChristoph Lameter { 14318bccd85fSChristoph Lameter unsigned long copy = ALIGN(maxnode-1, 64) / 8; 1432050c17f2SRalph Campbell unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long); 14338bccd85fSChristoph Lameter 14348bccd85fSChristoph Lameter if (copy > nbytes) { 14358bccd85fSChristoph Lameter if (copy > PAGE_SIZE) 14368bccd85fSChristoph Lameter return -EINVAL; 14378bccd85fSChristoph Lameter if (clear_user((char __user *)mask + nbytes, copy - nbytes)) 14388bccd85fSChristoph Lameter return -EFAULT; 14398bccd85fSChristoph Lameter copy = nbytes; 14408bccd85fSChristoph Lameter } 14418bccd85fSChristoph Lameter return copy_to_user(mask, nodes_addr(*nodes), copy) ? 
-EFAULT : 0; 14428bccd85fSChristoph Lameter } 14438bccd85fSChristoph Lameter 144495837924SFeng Tang /* Basic parameter sanity check used by both mbind() and set_mempolicy() */ 144595837924SFeng Tang static inline int sanitize_mpol_flags(int *mode, unsigned short *flags) 144695837924SFeng Tang { 144795837924SFeng Tang *flags = *mode & MPOL_MODE_FLAGS; 144895837924SFeng Tang *mode &= ~MPOL_MODE_FLAGS; 144995837924SFeng Tang if ((unsigned int)(*mode) >= MPOL_MAX) 145095837924SFeng Tang return -EINVAL; 145195837924SFeng Tang if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES)) 145295837924SFeng Tang return -EINVAL; 145395837924SFeng Tang 145495837924SFeng Tang return 0; 145595837924SFeng Tang } 145695837924SFeng Tang 1457e7dc9ad6SDominik Brodowski static long kernel_mbind(unsigned long start, unsigned long len, 1458e7dc9ad6SDominik Brodowski unsigned long mode, const unsigned long __user *nmask, 1459e7dc9ad6SDominik Brodowski unsigned long maxnode, unsigned int flags) 14608bccd85fSChristoph Lameter { 1461028fec41SDavid Rientjes unsigned short mode_flags; 146295837924SFeng Tang nodemask_t nodes; 146395837924SFeng Tang int lmode = mode; 146495837924SFeng Tang int err; 14658bccd85fSChristoph Lameter 1466057d3389SAndrey Konovalov start = untagged_addr(start); 146795837924SFeng Tang err = sanitize_mpol_flags(&lmode, &mode_flags); 146895837924SFeng Tang if (err) 146995837924SFeng Tang return err; 147095837924SFeng Tang 14718bccd85fSChristoph Lameter err = get_nodes(&nodes, nmask, maxnode); 14728bccd85fSChristoph Lameter if (err) 14738bccd85fSChristoph Lameter return err; 147495837924SFeng Tang 147595837924SFeng Tang return do_mbind(start, len, lmode, mode_flags, &nodes, flags); 14768bccd85fSChristoph Lameter } 14778bccd85fSChristoph Lameter 1478e7dc9ad6SDominik Brodowski SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len, 1479e7dc9ad6SDominik Brodowski unsigned long, mode, const unsigned long __user *, nmask, 1480e7dc9ad6SDominik Brodowski unsigned long, maxnode, unsigned int, flags) 1481e7dc9ad6SDominik Brodowski { 1482e7dc9ad6SDominik Brodowski return kernel_mbind(start, len, mode, nmask, maxnode, flags); 1483e7dc9ad6SDominik Brodowski } 1484e7dc9ad6SDominik Brodowski 14858bccd85fSChristoph Lameter /* Set the process memory policy */ 1486af03c4acSDominik Brodowski static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask, 1487af03c4acSDominik Brodowski unsigned long maxnode) 14888bccd85fSChristoph Lameter { 148995837924SFeng Tang unsigned short mode_flags; 14908bccd85fSChristoph Lameter nodemask_t nodes; 149195837924SFeng Tang int lmode = mode; 149295837924SFeng Tang int err; 14938bccd85fSChristoph Lameter 149495837924SFeng Tang err = sanitize_mpol_flags(&lmode, &mode_flags); 149595837924SFeng Tang if (err) 149695837924SFeng Tang return err; 149795837924SFeng Tang 14988bccd85fSChristoph Lameter err = get_nodes(&nodes, nmask, maxnode); 14998bccd85fSChristoph Lameter if (err) 15008bccd85fSChristoph Lameter return err; 150195837924SFeng Tang 150295837924SFeng Tang return do_set_mempolicy(lmode, mode_flags, &nodes); 15038bccd85fSChristoph Lameter } 15048bccd85fSChristoph Lameter 1505af03c4acSDominik Brodowski SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask, 1506af03c4acSDominik Brodowski unsigned long, maxnode) 1507af03c4acSDominik Brodowski { 1508af03c4acSDominik Brodowski return kernel_set_mempolicy(mode, nmask, maxnode); 1509af03c4acSDominik Brodowski } 1510af03c4acSDominik Brodowski 1511b6e9b0baSDominik Brodowski 
static int kernel_migrate_pages(pid_t pid, unsigned long maxnode, 1512b6e9b0baSDominik Brodowski const unsigned long __user *old_nodes, 1513b6e9b0baSDominik Brodowski const unsigned long __user *new_nodes) 151439743889SChristoph Lameter { 1515596d7cfaSKOSAKI Motohiro struct mm_struct *mm = NULL; 151639743889SChristoph Lameter struct task_struct *task; 151739743889SChristoph Lameter nodemask_t task_nodes; 151839743889SChristoph Lameter int err; 1519596d7cfaSKOSAKI Motohiro nodemask_t *old; 1520596d7cfaSKOSAKI Motohiro nodemask_t *new; 1521596d7cfaSKOSAKI Motohiro NODEMASK_SCRATCH(scratch); 152239743889SChristoph Lameter 1523596d7cfaSKOSAKI Motohiro if (!scratch) 1524596d7cfaSKOSAKI Motohiro return -ENOMEM; 152539743889SChristoph Lameter 1526596d7cfaSKOSAKI Motohiro old = &scratch->mask1; 1527596d7cfaSKOSAKI Motohiro new = &scratch->mask2; 1528596d7cfaSKOSAKI Motohiro 1529596d7cfaSKOSAKI Motohiro err = get_nodes(old, old_nodes, maxnode); 153039743889SChristoph Lameter if (err) 1531596d7cfaSKOSAKI Motohiro goto out; 1532596d7cfaSKOSAKI Motohiro 1533596d7cfaSKOSAKI Motohiro err = get_nodes(new, new_nodes, maxnode); 1534596d7cfaSKOSAKI Motohiro if (err) 1535596d7cfaSKOSAKI Motohiro goto out; 153639743889SChristoph Lameter 153739743889SChristoph Lameter /* Find the mm_struct */ 153855cfaa3cSZeng Zhaoming rcu_read_lock(); 1539228ebcbeSPavel Emelyanov task = pid ? find_task_by_vpid(pid) : current; 154039743889SChristoph Lameter if (!task) { 154155cfaa3cSZeng Zhaoming rcu_read_unlock(); 1542596d7cfaSKOSAKI Motohiro err = -ESRCH; 1543596d7cfaSKOSAKI Motohiro goto out; 154439743889SChristoph Lameter } 15453268c63eSChristoph Lameter get_task_struct(task); 154639743889SChristoph Lameter 1547596d7cfaSKOSAKI Motohiro err = -EINVAL; 154839743889SChristoph Lameter 154939743889SChristoph Lameter /* 155031367466SOtto Ebeling * Check if this process has the right to modify the specified process. 155131367466SOtto Ebeling * Use the regular "ptrace_may_access()" checks. 155239743889SChristoph Lameter */ 155331367466SOtto Ebeling if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) { 1554c69e8d9cSDavid Howells rcu_read_unlock(); 155539743889SChristoph Lameter err = -EPERM; 15563268c63eSChristoph Lameter goto out_put; 155739743889SChristoph Lameter } 1558c69e8d9cSDavid Howells rcu_read_unlock(); 155939743889SChristoph Lameter 156039743889SChristoph Lameter task_nodes = cpuset_mems_allowed(task); 156139743889SChristoph Lameter /* Is the user allowed to access the target nodes? 
*/ 1562596d7cfaSKOSAKI Motohiro if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) { 156339743889SChristoph Lameter err = -EPERM; 15643268c63eSChristoph Lameter goto out_put; 156539743889SChristoph Lameter } 156639743889SChristoph Lameter 15670486a38bSYisheng Xie task_nodes = cpuset_mems_allowed(current); 15680486a38bSYisheng Xie nodes_and(*new, *new, task_nodes); 15690486a38bSYisheng Xie if (nodes_empty(*new)) 15703268c63eSChristoph Lameter goto out_put; 15710486a38bSYisheng Xie 157286c3a764SDavid Quigley err = security_task_movememory(task); 157386c3a764SDavid Quigley if (err) 15743268c63eSChristoph Lameter goto out_put; 157586c3a764SDavid Quigley 15763268c63eSChristoph Lameter mm = get_task_mm(task); 15773268c63eSChristoph Lameter put_task_struct(task); 1578f2a9ef88SSasha Levin 1579f2a9ef88SSasha Levin if (!mm) { 1580f2a9ef88SSasha Levin err = -EINVAL; 1581f2a9ef88SSasha Levin goto out; 1582f2a9ef88SSasha Levin } 1583f2a9ef88SSasha Levin 1584596d7cfaSKOSAKI Motohiro err = do_migrate_pages(mm, old, new, 158574c00241SChristoph Lameter capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE); 15863268c63eSChristoph Lameter 158739743889SChristoph Lameter mmput(mm); 15883268c63eSChristoph Lameter out: 1589596d7cfaSKOSAKI Motohiro NODEMASK_SCRATCH_FREE(scratch); 1590596d7cfaSKOSAKI Motohiro 159139743889SChristoph Lameter return err; 15923268c63eSChristoph Lameter 15933268c63eSChristoph Lameter out_put: 15943268c63eSChristoph Lameter put_task_struct(task); 15953268c63eSChristoph Lameter goto out; 15963268c63eSChristoph Lameter 159739743889SChristoph Lameter } 159839743889SChristoph Lameter 1599b6e9b0baSDominik Brodowski SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode, 1600b6e9b0baSDominik Brodowski const unsigned long __user *, old_nodes, 1601b6e9b0baSDominik Brodowski const unsigned long __user *, new_nodes) 1602b6e9b0baSDominik Brodowski { 1603b6e9b0baSDominik Brodowski return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes); 1604b6e9b0baSDominik Brodowski } 1605b6e9b0baSDominik Brodowski 160639743889SChristoph Lameter 16078bccd85fSChristoph Lameter /* Retrieve NUMA policy */ 1608af03c4acSDominik Brodowski static int kernel_get_mempolicy(int __user *policy, 1609af03c4acSDominik Brodowski unsigned long __user *nmask, 1610af03c4acSDominik Brodowski unsigned long maxnode, 1611af03c4acSDominik Brodowski unsigned long addr, 1612af03c4acSDominik Brodowski unsigned long flags) 16138bccd85fSChristoph Lameter { 1614dbcb0f19SAdrian Bunk int err; 16153f649ab7SKees Cook int pval; 16168bccd85fSChristoph Lameter nodemask_t nodes; 16178bccd85fSChristoph Lameter 1618050c17f2SRalph Campbell if (nmask != NULL && maxnode < nr_node_ids) 16198bccd85fSChristoph Lameter return -EINVAL; 16208bccd85fSChristoph Lameter 16214605f057SWenchao Hao addr = untagged_addr(addr); 16224605f057SWenchao Hao 16238bccd85fSChristoph Lameter err = do_get_mempolicy(&pval, &nodes, addr, flags); 16248bccd85fSChristoph Lameter 16258bccd85fSChristoph Lameter if (err) 16268bccd85fSChristoph Lameter return err; 16278bccd85fSChristoph Lameter 16288bccd85fSChristoph Lameter if (policy && put_user(pval, policy)) 16298bccd85fSChristoph Lameter return -EFAULT; 16308bccd85fSChristoph Lameter 16318bccd85fSChristoph Lameter if (nmask) 16328bccd85fSChristoph Lameter err = copy_nodes_to_user(nmask, maxnode, &nodes); 16338bccd85fSChristoph Lameter 16348bccd85fSChristoph Lameter return err; 16358bccd85fSChristoph Lameter } 16368bccd85fSChristoph Lameter 1637af03c4acSDominik Brodowski SYSCALL_DEFINE5(get_mempolicy, 
int __user *, policy, 1638af03c4acSDominik Brodowski unsigned long __user *, nmask, unsigned long, maxnode, 1639af03c4acSDominik Brodowski unsigned long, addr, unsigned long, flags) 1640af03c4acSDominik Brodowski { 1641af03c4acSDominik Brodowski return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags); 1642af03c4acSDominik Brodowski } 1643af03c4acSDominik Brodowski 16441da177e4SLinus Torvalds #ifdef CONFIG_COMPAT 16451da177e4SLinus Torvalds 1646c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, 1647c93e0f6cSHeiko Carstens compat_ulong_t __user *, nmask, 1648c93e0f6cSHeiko Carstens compat_ulong_t, maxnode, 1649c93e0f6cSHeiko Carstens compat_ulong_t, addr, compat_ulong_t, flags) 16501da177e4SLinus Torvalds { 16511da177e4SLinus Torvalds long err; 16521da177e4SLinus Torvalds unsigned long __user *nm = NULL; 16531da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 16541da177e4SLinus Torvalds DECLARE_BITMAP(bm, MAX_NUMNODES); 16551da177e4SLinus Torvalds 1656050c17f2SRalph Campbell nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids); 16571da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 16581da177e4SLinus Torvalds 16591da177e4SLinus Torvalds if (nmask) 16601da177e4SLinus Torvalds nm = compat_alloc_user_space(alloc_size); 16611da177e4SLinus Torvalds 1662af03c4acSDominik Brodowski err = kernel_get_mempolicy(policy, nm, nr_bits+1, addr, flags); 16631da177e4SLinus Torvalds 16641da177e4SLinus Torvalds if (!err && nmask) { 16652bbff6c7SKAMEZAWA Hiroyuki unsigned long copy_size; 16662bbff6c7SKAMEZAWA Hiroyuki copy_size = min_t(unsigned long, sizeof(bm), alloc_size); 16672bbff6c7SKAMEZAWA Hiroyuki err = copy_from_user(bm, nm, copy_size); 16681da177e4SLinus Torvalds /* ensure entire bitmap is zeroed */ 16691da177e4SLinus Torvalds err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8); 16701da177e4SLinus Torvalds err |= compat_put_bitmap(nmask, bm, nr_bits); 16711da177e4SLinus Torvalds } 16721da177e4SLinus Torvalds 16731da177e4SLinus Torvalds return err; 16741da177e4SLinus Torvalds } 16751da177e4SLinus Torvalds 1676c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask, 1677c93e0f6cSHeiko Carstens compat_ulong_t, maxnode) 16781da177e4SLinus Torvalds { 16791da177e4SLinus Torvalds unsigned long __user *nm = NULL; 16801da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 16811da177e4SLinus Torvalds DECLARE_BITMAP(bm, MAX_NUMNODES); 16821da177e4SLinus Torvalds 16831da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 16841da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 16851da177e4SLinus Torvalds 16861da177e4SLinus Torvalds if (nmask) { 1687cf01fb99SChris Salls if (compat_get_bitmap(bm, nmask, nr_bits)) 16881da177e4SLinus Torvalds return -EFAULT; 1689cf01fb99SChris Salls nm = compat_alloc_user_space(alloc_size); 1690cf01fb99SChris Salls if (copy_to_user(nm, bm, alloc_size)) 1691cf01fb99SChris Salls return -EFAULT; 1692cf01fb99SChris Salls } 16931da177e4SLinus Torvalds 1694af03c4acSDominik Brodowski return kernel_set_mempolicy(mode, nm, nr_bits+1); 16951da177e4SLinus Torvalds } 16961da177e4SLinus Torvalds 1697c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len, 1698c93e0f6cSHeiko Carstens compat_ulong_t, mode, compat_ulong_t __user *, nmask, 1699c93e0f6cSHeiko Carstens compat_ulong_t, maxnode, compat_ulong_t, flags) 17001da177e4SLinus Torvalds { 17011da177e4SLinus Torvalds unsigned long __user *nm = 
NULL; 17021da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 1703dfcd3c0dSAndi Kleen nodemask_t bm; 17041da177e4SLinus Torvalds 17051da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 17061da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 17071da177e4SLinus Torvalds 17081da177e4SLinus Torvalds if (nmask) { 1709cf01fb99SChris Salls if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits)) 17101da177e4SLinus Torvalds return -EFAULT; 1711cf01fb99SChris Salls nm = compat_alloc_user_space(alloc_size); 1712cf01fb99SChris Salls if (copy_to_user(nm, nodes_addr(bm), alloc_size)) 1713cf01fb99SChris Salls return -EFAULT; 1714cf01fb99SChris Salls } 17151da177e4SLinus Torvalds 1716e7dc9ad6SDominik Brodowski return kernel_mbind(start, len, mode, nm, nr_bits+1, flags); 17171da177e4SLinus Torvalds } 17181da177e4SLinus Torvalds 1719b6e9b0baSDominik Brodowski COMPAT_SYSCALL_DEFINE4(migrate_pages, compat_pid_t, pid, 1720b6e9b0baSDominik Brodowski compat_ulong_t, maxnode, 1721b6e9b0baSDominik Brodowski const compat_ulong_t __user *, old_nodes, 1722b6e9b0baSDominik Brodowski const compat_ulong_t __user *, new_nodes) 1723b6e9b0baSDominik Brodowski { 1724b6e9b0baSDominik Brodowski unsigned long __user *old = NULL; 1725b6e9b0baSDominik Brodowski unsigned long __user *new = NULL; 1726b6e9b0baSDominik Brodowski nodemask_t tmp_mask; 1727b6e9b0baSDominik Brodowski unsigned long nr_bits; 1728b6e9b0baSDominik Brodowski unsigned long size; 1729b6e9b0baSDominik Brodowski 1730b6e9b0baSDominik Brodowski nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES); 1731b6e9b0baSDominik Brodowski size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 1732b6e9b0baSDominik Brodowski if (old_nodes) { 1733b6e9b0baSDominik Brodowski if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits)) 1734b6e9b0baSDominik Brodowski return -EFAULT; 1735b6e9b0baSDominik Brodowski old = compat_alloc_user_space(new_nodes ? size * 2 : size); 1736b6e9b0baSDominik Brodowski if (new_nodes) 1737b6e9b0baSDominik Brodowski new = old + size / sizeof(unsigned long); 1738b6e9b0baSDominik Brodowski if (copy_to_user(old, nodes_addr(tmp_mask), size)) 1739b6e9b0baSDominik Brodowski return -EFAULT; 1740b6e9b0baSDominik Brodowski } 1741b6e9b0baSDominik Brodowski if (new_nodes) { 1742b6e9b0baSDominik Brodowski if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits)) 1743b6e9b0baSDominik Brodowski return -EFAULT; 1744b6e9b0baSDominik Brodowski if (new == NULL) 1745b6e9b0baSDominik Brodowski new = compat_alloc_user_space(size); 1746b6e9b0baSDominik Brodowski if (copy_to_user(new, nodes_addr(tmp_mask), size)) 1747b6e9b0baSDominik Brodowski return -EFAULT; 1748b6e9b0baSDominik Brodowski } 1749b6e9b0baSDominik Brodowski return kernel_migrate_pages(pid, nr_bits + 1, old, new); 1750b6e9b0baSDominik Brodowski } 1751b6e9b0baSDominik Brodowski 1752b6e9b0baSDominik Brodowski #endif /* CONFIG_COMPAT */ 17531da177e4SLinus Torvalds 175420ca87f2SLi Xinhai bool vma_migratable(struct vm_area_struct *vma) 175520ca87f2SLi Xinhai { 175620ca87f2SLi Xinhai if (vma->vm_flags & (VM_IO | VM_PFNMAP)) 175720ca87f2SLi Xinhai return false; 175820ca87f2SLi Xinhai 175920ca87f2SLi Xinhai /* 176020ca87f2SLi Xinhai * DAX device mappings require predictable access latency, so avoid 176120ca87f2SLi Xinhai * incurring periodic faults. 
176220ca87f2SLi Xinhai */ 176320ca87f2SLi Xinhai if (vma_is_dax(vma)) 176420ca87f2SLi Xinhai return false; 176520ca87f2SLi Xinhai 176620ca87f2SLi Xinhai if (is_vm_hugetlb_page(vma) && 176720ca87f2SLi Xinhai !hugepage_migration_supported(hstate_vma(vma))) 176820ca87f2SLi Xinhai return false; 176920ca87f2SLi Xinhai 177020ca87f2SLi Xinhai /* 177120ca87f2SLi Xinhai * Migration allocates pages in the highest zone. If we cannot 177220ca87f2SLi Xinhai * do so then migration (at least from node to node) is not 177320ca87f2SLi Xinhai * possible. 177420ca87f2SLi Xinhai */ 177520ca87f2SLi Xinhai if (vma->vm_file && 177620ca87f2SLi Xinhai gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping)) 177720ca87f2SLi Xinhai < policy_zone) 177820ca87f2SLi Xinhai return false; 177920ca87f2SLi Xinhai return true; 178020ca87f2SLi Xinhai } 178120ca87f2SLi Xinhai 178274d2c3a0SOleg Nesterov struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, 178374d2c3a0SOleg Nesterov unsigned long addr) 17841da177e4SLinus Torvalds { 17858d90274bSOleg Nesterov struct mempolicy *pol = NULL; 17861da177e4SLinus Torvalds 17871da177e4SLinus Torvalds if (vma) { 1788480eccf9SLee Schermerhorn if (vma->vm_ops && vma->vm_ops->get_policy) { 17898d90274bSOleg Nesterov pol = vma->vm_ops->get_policy(vma, addr); 179000442ad0SMel Gorman } else if (vma->vm_policy) { 17911da177e4SLinus Torvalds pol = vma->vm_policy; 179200442ad0SMel Gorman 179300442ad0SMel Gorman /* 179400442ad0SMel Gorman * shmem_alloc_page() passes MPOL_F_SHARED policy with 179500442ad0SMel Gorman * a pseudo vma whose vma->vm_ops=NULL. Take a reference 179600442ad0SMel Gorman * count on these policies which will be dropped by 179700442ad0SMel Gorman * mpol_cond_put() later 179800442ad0SMel Gorman */ 179900442ad0SMel Gorman if (mpol_needs_cond_ref(pol)) 180000442ad0SMel Gorman mpol_get(pol); 180100442ad0SMel Gorman } 18021da177e4SLinus Torvalds } 1803f15ca78eSOleg Nesterov 180474d2c3a0SOleg Nesterov return pol; 180574d2c3a0SOleg Nesterov } 180674d2c3a0SOleg Nesterov 180774d2c3a0SOleg Nesterov /* 1808dd6eecb9SOleg Nesterov * get_vma_policy(@vma, @addr) 180974d2c3a0SOleg Nesterov * @vma: virtual memory area whose policy is sought 181074d2c3a0SOleg Nesterov * @addr: address in @vma for shared policy lookup 181174d2c3a0SOleg Nesterov * 181274d2c3a0SOleg Nesterov * Returns effective policy for a VMA at specified address. 1813dd6eecb9SOleg Nesterov * Falls back to current->mempolicy or system default policy, as necessary. 181474d2c3a0SOleg Nesterov * Shared policies [those marked as MPOL_F_SHARED] require an extra reference 181574d2c3a0SOleg Nesterov * count--added by the get_policy() vm_op, as appropriate--to protect against 181674d2c3a0SOleg Nesterov * freeing by another task. It is the caller's responsibility to free the 181774d2c3a0SOleg Nesterov * extra reference for shared policies. 
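 *
 * Typical call pattern (sketch):
 *
 *	pol = get_vma_policy(vma, addr);
 *	... use pol for the allocation ...
 *	mpol_cond_put(pol);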
181874d2c3a0SOleg Nesterov  */
1819ac79f78dSDavid Rientjes static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1820dd6eecb9SOleg Nesterov 						unsigned long addr)
182174d2c3a0SOleg Nesterov {
182274d2c3a0SOleg Nesterov 	struct mempolicy *pol = __get_vma_policy(vma, addr);
182374d2c3a0SOleg Nesterov 
18248d90274bSOleg Nesterov 	if (!pol)
1825dd6eecb9SOleg Nesterov 		pol = get_task_policy(current);
18268d90274bSOleg Nesterov 
18271da177e4SLinus Torvalds 	return pol;
18281da177e4SLinus Torvalds }
18291da177e4SLinus Torvalds 
18306b6482bbSOleg Nesterov bool vma_policy_mof(struct vm_area_struct *vma)
1831fc314724SMel Gorman {
18326b6482bbSOleg Nesterov 	struct mempolicy *pol;
1833f15ca78eSOleg Nesterov 
1834fc314724SMel Gorman 	if (vma->vm_ops && vma->vm_ops->get_policy) {
1835fc314724SMel Gorman 		bool ret = false;
1836fc314724SMel Gorman 
1837fc314724SMel Gorman 		pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1838fc314724SMel Gorman 		if (pol && (pol->flags & MPOL_F_MOF))
1839fc314724SMel Gorman 			ret = true;
1840fc314724SMel Gorman 		mpol_cond_put(pol);
1841fc314724SMel Gorman 
1842fc314724SMel Gorman 		return ret;
18438d90274bSOleg Nesterov 	}
18448d90274bSOleg Nesterov 
1845fc314724SMel Gorman 	pol = vma->vm_policy;
18468d90274bSOleg Nesterov 	if (!pol)
18476b6482bbSOleg Nesterov 		pol = get_task_policy(current);
1848fc314724SMel Gorman 
1849fc314724SMel Gorman 	return pol->flags & MPOL_F_MOF;
1850fc314724SMel Gorman }
1851fc314724SMel Gorman 
1852d3eb1570SLai Jiangshan static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1853d3eb1570SLai Jiangshan {
1854d3eb1570SLai Jiangshan 	enum zone_type dynamic_policy_zone = policy_zone;
1855d3eb1570SLai Jiangshan 
1856d3eb1570SLai Jiangshan 	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1857d3eb1570SLai Jiangshan 
1858d3eb1570SLai Jiangshan 	/*
1859269fbe72SBen Widawsky 	 * If policy->nodes has movable memory only,
1860d3eb1570SLai Jiangshan 	 * we apply the policy only when gfp_zone(gfp) == ZONE_MOVABLE.
1861d3eb1570SLai Jiangshan 	 *
1862269fbe72SBen Widawsky 	 * policy->nodes is intersected with node_states[N_MEMORY],
1863f0953a1bSIngo Molnar 	 * so if the following test fails, it implies
1864269fbe72SBen Widawsky 	 * policy->nodes has movable memory only.
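	 *
	 * E.g. (illustrative): if every node in policy->nodes is
	 * movable-only, a GFP_KERNEL allocation maps to ZONE_NORMAL,
	 * which is below ZONE_MOVABLE, so the policy is not applied.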
1865d3eb1570SLai Jiangshan */ 1866269fbe72SBen Widawsky if (!nodes_intersects(policy->nodes, node_states[N_HIGH_MEMORY])) 1867d3eb1570SLai Jiangshan dynamic_policy_zone = ZONE_MOVABLE; 1868d3eb1570SLai Jiangshan 1869d3eb1570SLai Jiangshan return zone >= dynamic_policy_zone; 1870d3eb1570SLai Jiangshan } 1871d3eb1570SLai Jiangshan 187252cd3b07SLee Schermerhorn /* 187352cd3b07SLee Schermerhorn * Return a nodemask representing a mempolicy for filtering nodes for 187452cd3b07SLee Schermerhorn * page allocation 187552cd3b07SLee Schermerhorn */ 18768ca39e68SMuchun Song nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy) 187719770b32SMel Gorman { 187819770b32SMel Gorman /* Lower zones don't get a nodemask applied for MPOL_BIND */ 187945c4745aSLee Schermerhorn if (unlikely(policy->mode == MPOL_BIND) && 1880d3eb1570SLai Jiangshan apply_policy_zone(policy, gfp_zone(gfp)) && 1881269fbe72SBen Widawsky cpuset_nodemask_valid_mems_allowed(&policy->nodes)) 1882269fbe72SBen Widawsky return &policy->nodes; 188319770b32SMel Gorman 188419770b32SMel Gorman return NULL; 188519770b32SMel Gorman } 188619770b32SMel Gorman 188704ec6264SVlastimil Babka /* Return the node id preferred by the given mempolicy, or the given id */ 1888f8fd5253SWei Yang static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd) 18891da177e4SLinus Torvalds { 18907858d7bcSFeng Tang if (policy->mode == MPOL_PREFERRED) { 1891269fbe72SBen Widawsky nd = first_node(policy->nodes); 18927858d7bcSFeng Tang } else { 189319770b32SMel Gorman /* 18946d840958SMichal Hocko * __GFP_THISNODE shouldn't even be used with the bind policy 18956d840958SMichal Hocko * because we might easily break the expectation to stay on the 18966d840958SMichal Hocko * requested node and not break the policy. 189719770b32SMel Gorman */ 18986d840958SMichal Hocko WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE)); 18991da177e4SLinus Torvalds } 19006d840958SMichal Hocko 190104ec6264SVlastimil Babka return nd; 19021da177e4SLinus Torvalds } 19031da177e4SLinus Torvalds 19041da177e4SLinus Torvalds /* Do dynamic interleaving for a process */ 19051da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy) 19061da177e4SLinus Torvalds { 190745816682SVlastimil Babka unsigned next; 19081da177e4SLinus Torvalds struct task_struct *me = current; 19091da177e4SLinus Torvalds 1910269fbe72SBen Widawsky next = next_node_in(me->il_prev, policy->nodes); 1911f5b087b5SDavid Rientjes if (next < MAX_NUMNODES) 191245816682SVlastimil Babka me->il_prev = next; 191345816682SVlastimil Babka return next; 19141da177e4SLinus Torvalds } 19151da177e4SLinus Torvalds 1916dc85da15SChristoph Lameter /* 1917dc85da15SChristoph Lameter * Depending on the memory policy provide a node from which to allocate the 1918dc85da15SChristoph Lameter * next slab entry. 
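 *
 * E.g. (illustrative) under MPOL_INTERLEAVE over nodes {0,2},
 * successive calls return 0, 2, 0, 2, ... as interleave_nodes()
 * advances il_prev.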
1919dc85da15SChristoph Lameter */ 19202a389610SDavid Rientjes unsigned int mempolicy_slab_node(void) 1921dc85da15SChristoph Lameter { 1922e7b691b0SAndi Kleen struct mempolicy *policy; 19232a389610SDavid Rientjes int node = numa_mem_id(); 1924e7b691b0SAndi Kleen 1925e7b691b0SAndi Kleen if (in_interrupt()) 19262a389610SDavid Rientjes return node; 1927e7b691b0SAndi Kleen 1928e7b691b0SAndi Kleen policy = current->mempolicy; 19297858d7bcSFeng Tang if (!policy) 19302a389610SDavid Rientjes return node; 1931765c4507SChristoph Lameter 1932bea904d5SLee Schermerhorn switch (policy->mode) { 1933bea904d5SLee Schermerhorn case MPOL_PREFERRED: 1934269fbe72SBen Widawsky return first_node(policy->nodes); 1935bea904d5SLee Schermerhorn 1936dc85da15SChristoph Lameter case MPOL_INTERLEAVE: 1937dc85da15SChristoph Lameter return interleave_nodes(policy); 1938dc85da15SChristoph Lameter 1939dd1a239fSMel Gorman case MPOL_BIND: { 1940c33d6c06SMel Gorman struct zoneref *z; 1941c33d6c06SMel Gorman 1942dc85da15SChristoph Lameter /* 1943dc85da15SChristoph Lameter * Follow bind policy behavior and start allocation at the 1944dc85da15SChristoph Lameter * first node. 1945dc85da15SChristoph Lameter */ 194619770b32SMel Gorman struct zonelist *zonelist; 194719770b32SMel Gorman enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL); 1948c9634cf0SAneesh Kumar K.V zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK]; 1949c33d6c06SMel Gorman z = first_zones_zonelist(zonelist, highest_zoneidx, 1950269fbe72SBen Widawsky &policy->nodes); 1951c1093b74SPavel Tatashin return z->zone ? zone_to_nid(z->zone) : node; 1952dd1a239fSMel Gorman } 19537858d7bcSFeng Tang case MPOL_LOCAL: 19547858d7bcSFeng Tang return node; 1955dc85da15SChristoph Lameter 1956dc85da15SChristoph Lameter default: 1957bea904d5SLee Schermerhorn BUG(); 1958dc85da15SChristoph Lameter } 1959dc85da15SChristoph Lameter } 1960dc85da15SChristoph Lameter 1961fee83b3aSAndrew Morton /* 1962fee83b3aSAndrew Morton * Do static interleaving for a VMA with known offset @n. Returns the n'th 1963269fbe72SBen Widawsky * node in pol->nodes (starting from n=0), wrapping around if n exceeds the 1964fee83b3aSAndrew Morton * number of present nodes. 
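 *
 * Worked example (illustrative): with pol->nodes = {1,3,5} and
 * n == 4, target is 4 % 3 == 1, so the walk below lands on node 3.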
1965fee83b3aSAndrew Morton  */
196698c70baaSLaurent Dufour static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
19671da177e4SLinus Torvalds {
1968269fbe72SBen Widawsky 	unsigned nnodes = nodes_weight(pol->nodes);
1969f5b087b5SDavid Rientjes 	unsigned target;
1970fee83b3aSAndrew Morton 	int i;
1971fee83b3aSAndrew Morton 	int nid;
19721da177e4SLinus Torvalds 
1973f5b087b5SDavid Rientjes 	if (!nnodes)
1974f5b087b5SDavid Rientjes 		return numa_node_id();
1975fee83b3aSAndrew Morton 	target = (unsigned int)n % nnodes;
1976269fbe72SBen Widawsky 	nid = first_node(pol->nodes);
1977fee83b3aSAndrew Morton 	for (i = 0; i < target; i++)
1978269fbe72SBen Widawsky 		nid = next_node(nid, pol->nodes);
19791da177e4SLinus Torvalds 	return nid;
19801da177e4SLinus Torvalds }
19811da177e4SLinus Torvalds 
19825da7ca86SChristoph Lameter /* Determine a node number for interleave */
19835da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol,
19845da7ca86SChristoph Lameter 		 struct vm_area_struct *vma, unsigned long addr, int shift)
19855da7ca86SChristoph Lameter {
19865da7ca86SChristoph Lameter 	if (vma) {
19875da7ca86SChristoph Lameter 		unsigned long off;
19885da7ca86SChristoph Lameter 
19893b98b087SNishanth Aravamudan 		/*
19903b98b087SNishanth Aravamudan 		 * for small pages, there is no difference between
19913b98b087SNishanth Aravamudan 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
19923b98b087SNishanth Aravamudan 		 * for huge pages, since vm_pgoff is in units of small
19933b98b087SNishanth Aravamudan 		 * pages, we need to shift off the always 0 bits to get
19943b98b087SNishanth Aravamudan 		 * a useful offset.
19953b98b087SNishanth Aravamudan 		 */
19963b98b087SNishanth Aravamudan 		BUG_ON(shift < PAGE_SHIFT);
19973b98b087SNishanth Aravamudan 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
19985da7ca86SChristoph Lameter 		off += (addr - vma->vm_start) >> shift;
199998c70baaSLaurent Dufour 		return offset_il_node(pol, off);
20005da7ca86SChristoph Lameter 	} else
20015da7ca86SChristoph Lameter 		return interleave_nodes(pol);
20025da7ca86SChristoph Lameter }
20035da7ca86SChristoph Lameter 
200400ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS
2005480eccf9SLee Schermerhorn /*
200604ec6264SVlastimil Babka  * huge_node(@vma, @addr, @gfp_flags, @mpol)
2007b46e14acSFabian Frederick  * @vma: virtual memory area whose policy is sought
2008b46e14acSFabian Frederick  * @addr: address in @vma for shared policy lookup and interleave policy
2009b46e14acSFabian Frederick  * @gfp_flags: for requested zone
2010b46e14acSFabian Frederick  * @mpol: pointer to mempolicy pointer for reference counted mempolicy
2011b46e14acSFabian Frederick  * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
2012480eccf9SLee Schermerhorn  *
201304ec6264SVlastimil Babka  * Returns a nid suitable for a huge page allocation and a pointer
201452cd3b07SLee Schermerhorn  * to the struct mempolicy for conditional unref after allocation.
201552cd3b07SLee Schermerhorn  * If the effective policy is 'bind', returns a pointer to the mempolicy's
201652cd3b07SLee Schermerhorn  * @nodemask for filtering the zonelist.
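 *
 * Typical use (sketch, mirroring the hugetlb fault path):
 *
 *	nid = huge_node(vma, addr, gfp, &mpol, &nodemask);
 *	page = ...allocate a huge page on @nid, filtered by @nodemask...;
 *	mpol_cond_put(mpol);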
2017c0ff7453SMiao Xie  *
2018d26914d1SMel Gorman  * Must be protected by read_mems_allowed_begin()
2019480eccf9SLee Schermerhorn  */
202004ec6264SVlastimil Babka int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
202104ec6264SVlastimil Babka 				struct mempolicy **mpol, nodemask_t **nodemask)
20225da7ca86SChristoph Lameter {
202304ec6264SVlastimil Babka 	int nid;
20245da7ca86SChristoph Lameter 
2025dd6eecb9SOleg Nesterov 	*mpol = get_vma_policy(vma, addr);
202619770b32SMel Gorman 	*nodemask = NULL;	/* assume !MPOL_BIND */
20275da7ca86SChristoph Lameter 
202852cd3b07SLee Schermerhorn 	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
202904ec6264SVlastimil Babka 		nid = interleave_nid(*mpol, vma, addr,
203004ec6264SVlastimil Babka 					huge_page_shift(hstate_vma(vma)));
203152cd3b07SLee Schermerhorn 	} else {
203204ec6264SVlastimil Babka 		nid = policy_node(gfp_flags, *mpol, numa_node_id());
203352cd3b07SLee Schermerhorn 		if ((*mpol)->mode == MPOL_BIND)
2034269fbe72SBen Widawsky 			*nodemask = &(*mpol)->nodes;
2035480eccf9SLee Schermerhorn 	}
203604ec6264SVlastimil Babka 	return nid;
20375da7ca86SChristoph Lameter }
203806808b08SLee Schermerhorn 
203906808b08SLee Schermerhorn /*
204006808b08SLee Schermerhorn  * init_nodemask_of_mempolicy
204106808b08SLee Schermerhorn  *
204206808b08SLee Schermerhorn  * If the current task's mempolicy is "default" [NULL], return 'false'
204306808b08SLee Schermerhorn  * to indicate default policy.  Otherwise, extract the policy nodemask
204406808b08SLee Schermerhorn  * for 'bind' or 'interleave' policy into the argument nodemask, or
204506808b08SLee Schermerhorn  * initialize the argument nodemask to contain the single node for
204606808b08SLee Schermerhorn  * 'preferred' or 'local' policy and return 'true' to indicate presence
204706808b08SLee Schermerhorn  * of non-default mempolicy.
204806808b08SLee Schermerhorn  *
204906808b08SLee Schermerhorn  * We don't bother with reference counting the mempolicy [mpol_get/put]
205006808b08SLee Schermerhorn  * because the current task is examining its own mempolicy and a task's
205106808b08SLee Schermerhorn  * mempolicy is only ever changed by the task itself.
205206808b08SLee Schermerhorn  *
205306808b08SLee Schermerhorn  * N.B., it is the caller's responsibility to free a returned nodemask.
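 *
 * Sketch of the usual pattern (as in the hugetlb sysctl handlers):
 *
 *	NODEMASK_ALLOC(nodemask_t, mask, GFP_KERNEL | __GFP_NORETRY);
 *	if (mask && init_nodemask_of_mempolicy(mask))
 *		... allocate against *mask ...
 *	NODEMASK_FREE(mask);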
205406808b08SLee Schermerhorn */ 205506808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask) 205606808b08SLee Schermerhorn { 205706808b08SLee Schermerhorn struct mempolicy *mempolicy; 205806808b08SLee Schermerhorn 205906808b08SLee Schermerhorn if (!(mask && current->mempolicy)) 206006808b08SLee Schermerhorn return false; 206106808b08SLee Schermerhorn 2062c0ff7453SMiao Xie task_lock(current); 206306808b08SLee Schermerhorn mempolicy = current->mempolicy; 206406808b08SLee Schermerhorn switch (mempolicy->mode) { 206506808b08SLee Schermerhorn case MPOL_PREFERRED: 206606808b08SLee Schermerhorn case MPOL_BIND: 206706808b08SLee Schermerhorn case MPOL_INTERLEAVE: 2068269fbe72SBen Widawsky *mask = mempolicy->nodes; 206906808b08SLee Schermerhorn break; 207006808b08SLee Schermerhorn 20717858d7bcSFeng Tang case MPOL_LOCAL: 2072269fbe72SBen Widawsky init_nodemask_of_node(mask, numa_node_id()); 20737858d7bcSFeng Tang break; 20747858d7bcSFeng Tang 207506808b08SLee Schermerhorn default: 207606808b08SLee Schermerhorn BUG(); 207706808b08SLee Schermerhorn } 2078c0ff7453SMiao Xie task_unlock(current); 207906808b08SLee Schermerhorn 208006808b08SLee Schermerhorn return true; 208106808b08SLee Schermerhorn } 208200ac59adSChen, Kenneth W #endif 20835da7ca86SChristoph Lameter 20846f48d0ebSDavid Rientjes /* 2085b26e517aSFeng Tang * mempolicy_in_oom_domain 20866f48d0ebSDavid Rientjes * 2087b26e517aSFeng Tang * If tsk's mempolicy is "bind", check for intersection between mask and 2088b26e517aSFeng Tang * the policy nodemask. Otherwise, return true for all other policies 2089b26e517aSFeng Tang * including "interleave", as a tsk with "interleave" policy may have 2090b26e517aSFeng Tang * memory allocated from all nodes in system. 20916f48d0ebSDavid Rientjes * 20926f48d0ebSDavid Rientjes * Takes task_lock(tsk) to prevent freeing of its mempolicy. 20936f48d0ebSDavid Rientjes */ 2094b26e517aSFeng Tang bool mempolicy_in_oom_domain(struct task_struct *tsk, 20956f48d0ebSDavid Rientjes const nodemask_t *mask) 20966f48d0ebSDavid Rientjes { 20976f48d0ebSDavid Rientjes struct mempolicy *mempolicy; 20986f48d0ebSDavid Rientjes bool ret = true; 20996f48d0ebSDavid Rientjes 21006f48d0ebSDavid Rientjes if (!mask) 21016f48d0ebSDavid Rientjes return ret; 2102b26e517aSFeng Tang 21036f48d0ebSDavid Rientjes task_lock(tsk); 21046f48d0ebSDavid Rientjes mempolicy = tsk->mempolicy; 2105b26e517aSFeng Tang if (mempolicy && mempolicy->mode == MPOL_BIND) 2106269fbe72SBen Widawsky ret = nodes_intersects(mempolicy->nodes, *mask); 21076f48d0ebSDavid Rientjes task_unlock(tsk); 2108b26e517aSFeng Tang 21096f48d0ebSDavid Rientjes return ret; 21106f48d0ebSDavid Rientjes } 21116f48d0ebSDavid Rientjes 21121da177e4SLinus Torvalds /* Allocate a page in interleaved policy. 21131da177e4SLinus Torvalds Own path because it needs to do special accounting. 
*/ 2114662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, 2115662f3a0bSAndi Kleen unsigned nid) 21161da177e4SLinus Torvalds { 21171da177e4SLinus Torvalds struct page *page; 21181da177e4SLinus Torvalds 211984172f4bSMatthew Wilcox (Oracle) page = __alloc_pages(gfp, order, nid, NULL); 21204518085eSKemi Wang /* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */ 21214518085eSKemi Wang if (!static_branch_likely(&vm_numa_stat_key)) 21224518085eSKemi Wang return page; 2123de55c8b2SAndrey Ryabinin if (page && page_to_nid(page) == nid) { 2124de55c8b2SAndrey Ryabinin preempt_disable(); 2125f19298b9SMel Gorman __count_numa_event(page_zone(page), NUMA_INTERLEAVE_HIT); 2126de55c8b2SAndrey Ryabinin preempt_enable(); 2127de55c8b2SAndrey Ryabinin } 21281da177e4SLinus Torvalds return page; 21291da177e4SLinus Torvalds } 21301da177e4SLinus Torvalds 21311da177e4SLinus Torvalds /** 21320bbbc0b3SAndrea Arcangeli * alloc_pages_vma - Allocate a page for a VMA. 2133eb350739SMatthew Wilcox (Oracle) * @gfp: GFP flags. 21340bbbc0b3SAndrea Arcangeli * @order: Order of the GFP allocation. 21351da177e4SLinus Torvalds * @vma: Pointer to VMA or NULL if not available. 2136eb350739SMatthew Wilcox (Oracle) * @addr: Virtual address of the allocation. Must be inside @vma. 2137be97a41bSVlastimil Babka * @node: Which node to prefer for allocation (modulo policy). 2138eb350739SMatthew Wilcox (Oracle) * @hugepage: For hugepages try only the preferred node if possible. 21391da177e4SLinus Torvalds * 2140eb350739SMatthew Wilcox (Oracle) * Allocate a page for a specific address in @vma, using the appropriate 2141eb350739SMatthew Wilcox (Oracle) * NUMA policy. When @vma is not NULL the caller must hold the mmap_lock 2142eb350739SMatthew Wilcox (Oracle) * of the mm_struct of the VMA to prevent it from going away. Should be 2143eb350739SMatthew Wilcox (Oracle) * used for all allocations for pages that will be mapped into user space. 2144eb350739SMatthew Wilcox (Oracle) * 2145eb350739SMatthew Wilcox (Oracle) * Return: The page on success or NULL if allocation fails. 
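 *
 * Sketch of a typical call (this mirrors the alloc_page_vma() wrapper;
 * exact flags depend on the caller):
 *
 *	page = alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, addr,
 *			       numa_node_id(), false);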
21461da177e4SLinus Torvalds */ 2147eb350739SMatthew Wilcox (Oracle) struct page *alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, 214819deb769SDavid Rientjes unsigned long addr, int node, bool hugepage) 21491da177e4SLinus Torvalds { 2150cc9a6c87SMel Gorman struct mempolicy *pol; 2151c0ff7453SMiao Xie struct page *page; 215204ec6264SVlastimil Babka int preferred_nid; 2153be97a41bSVlastimil Babka nodemask_t *nmask; 21541da177e4SLinus Torvalds 2155dd6eecb9SOleg Nesterov pol = get_vma_policy(vma, addr); 2156cc9a6c87SMel Gorman 2157be97a41bSVlastimil Babka if (pol->mode == MPOL_INTERLEAVE) { 21581da177e4SLinus Torvalds unsigned nid; 21595da7ca86SChristoph Lameter 21608eac563cSAndi Kleen nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order); 216152cd3b07SLee Schermerhorn mpol_cond_put(pol); 21620bbbc0b3SAndrea Arcangeli page = alloc_page_interleave(gfp, order, nid); 2163be97a41bSVlastimil Babka goto out; 21641da177e4SLinus Torvalds } 21651da177e4SLinus Torvalds 216619deb769SDavid Rientjes if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) { 216719deb769SDavid Rientjes int hpage_node = node; 216819deb769SDavid Rientjes 216919deb769SDavid Rientjes /* 217019deb769SDavid Rientjes * For hugepage allocation and non-interleave policy which 217119deb769SDavid Rientjes * allows the current node (or other explicitly preferred 217219deb769SDavid Rientjes * node) we only try to allocate from the current/preferred 217319deb769SDavid Rientjes * node and don't fall back to other nodes, as the cost of 217419deb769SDavid Rientjes * remote accesses would likely offset THP benefits. 217519deb769SDavid Rientjes * 217619deb769SDavid Rientjes * If the policy is interleave, or does not allow the current 217719deb769SDavid Rientjes * node in its nodemask, we allocate the standard way. 217819deb769SDavid Rientjes */ 21797858d7bcSFeng Tang if (pol->mode == MPOL_PREFERRED) 2180269fbe72SBen Widawsky hpage_node = first_node(pol->nodes); 218119deb769SDavid Rientjes 218219deb769SDavid Rientjes nmask = policy_nodemask(gfp, pol); 218319deb769SDavid Rientjes if (!nmask || node_isset(hpage_node, *nmask)) { 218419deb769SDavid Rientjes mpol_cond_put(pol); 2185cc638f32SVlastimil Babka /* 2186cc638f32SVlastimil Babka * First, try to allocate THP only on local node, but 2187cc638f32SVlastimil Babka * don't reclaim unnecessarily, just compact. 2188cc638f32SVlastimil Babka */ 218919deb769SDavid Rientjes page = __alloc_pages_node(hpage_node, 2190cc638f32SVlastimil Babka gfp | __GFP_THISNODE | __GFP_NORETRY, order); 219176e654ccSDavid Rientjes 219276e654ccSDavid Rientjes /* 219376e654ccSDavid Rientjes * If hugepage allocations are configured to always 219476e654ccSDavid Rientjes * synchronous compact or the vma has been madvised 219576e654ccSDavid Rientjes * to prefer hugepage backing, retry allowing remote 2196cc638f32SVlastimil Babka * memory with both reclaim and compact as well. 
219776e654ccSDavid Rientjes */ 219876e654ccSDavid Rientjes if (!page && (gfp & __GFP_DIRECT_RECLAIM)) 219976e654ccSDavid Rientjes page = __alloc_pages_node(hpage_node, 2200cc638f32SVlastimil Babka gfp, order); 220176e654ccSDavid Rientjes 220219deb769SDavid Rientjes goto out; 220319deb769SDavid Rientjes } 220419deb769SDavid Rientjes } 220519deb769SDavid Rientjes 2206077fcf11SAneesh Kumar K.V nmask = policy_nodemask(gfp, pol); 220704ec6264SVlastimil Babka preferred_nid = policy_node(gfp, pol, node); 220884172f4bSMatthew Wilcox (Oracle) page = __alloc_pages(gfp, order, preferred_nid, nmask); 2209d51e9894SVlastimil Babka mpol_cond_put(pol); 2210be97a41bSVlastimil Babka out: 2211077fcf11SAneesh Kumar K.V return page; 2212077fcf11SAneesh Kumar K.V } 221369262215SChristoph Hellwig EXPORT_SYMBOL(alloc_pages_vma); 2214077fcf11SAneesh Kumar K.V 22151da177e4SLinus Torvalds /** 2216d7f946d0SMatthew Wilcox (Oracle) * alloc_pages - Allocate pages. 22176421ec76SMatthew Wilcox (Oracle) * @gfp: GFP flags. 22186421ec76SMatthew Wilcox (Oracle) * @order: Power of two of number of pages to allocate. 22191da177e4SLinus Torvalds * 22206421ec76SMatthew Wilcox (Oracle) * Allocate 1 << @order contiguous pages. The physical address of the 22216421ec76SMatthew Wilcox (Oracle) * first page is naturally aligned (eg an order-3 allocation will be aligned 22226421ec76SMatthew Wilcox (Oracle) * to a multiple of 8 * PAGE_SIZE bytes). The NUMA policy of the current 22236421ec76SMatthew Wilcox (Oracle) * process is honoured when in process context. 22241da177e4SLinus Torvalds * 22256421ec76SMatthew Wilcox (Oracle) * Context: Can be called from any context, providing the appropriate GFP 22266421ec76SMatthew Wilcox (Oracle) * flags are used. 22276421ec76SMatthew Wilcox (Oracle) * Return: The page on success or NULL if allocation fails. 
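 *
 * E.g. (illustrative) allocating a single zeroed page:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);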
22281da177e4SLinus Torvalds  */
2229d7f946d0SMatthew Wilcox (Oracle) struct page *alloc_pages(gfp_t gfp, unsigned order)
22301da177e4SLinus Torvalds {
22318d90274bSOleg Nesterov 	struct mempolicy *pol = &default_policy;
2232c0ff7453SMiao Xie 	struct page *page;
22331da177e4SLinus Torvalds 
22348d90274bSOleg Nesterov 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
22358d90274bSOleg Nesterov 		pol = get_task_policy(current);
223652cd3b07SLee Schermerhorn 
223752cd3b07SLee Schermerhorn 	/*
223852cd3b07SLee Schermerhorn 	 * No reference counting needed for current->mempolicy
223952cd3b07SLee Schermerhorn 	 * nor system default_policy
224052cd3b07SLee Schermerhorn 	 */
224145c4745aSLee Schermerhorn 	if (pol->mode == MPOL_INTERLEAVE)
2242c0ff7453SMiao Xie 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2243c0ff7453SMiao Xie 	else
224484172f4bSMatthew Wilcox (Oracle) 		page = __alloc_pages(gfp, order,
224504ec6264SVlastimil Babka 				policy_node(gfp, pol, numa_node_id()),
22465c4b4be3SAndi Kleen 				policy_nodemask(gfp, pol));
2247cc9a6c87SMel Gorman 
2248c0ff7453SMiao Xie 	return page;
22491da177e4SLinus Torvalds }
2250d7f946d0SMatthew Wilcox (Oracle) EXPORT_SYMBOL(alloc_pages);
22511da177e4SLinus Torvalds 
2252ef0855d3SOleg Nesterov int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2253ef0855d3SOleg Nesterov {
2254ef0855d3SOleg Nesterov 	struct mempolicy *pol = mpol_dup(vma_policy(src));
2255ef0855d3SOleg Nesterov 
2256ef0855d3SOleg Nesterov 	if (IS_ERR(pol))
2257ef0855d3SOleg Nesterov 		return PTR_ERR(pol);
2258ef0855d3SOleg Nesterov 	dst->vm_policy = pol;
2259ef0855d3SOleg Nesterov 	return 0;
2260ef0855d3SOleg Nesterov }
2261ef0855d3SOleg Nesterov 
22624225399aSPaul Jackson /*
2263846a16bfSLee Schermerhorn  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
22644225399aSPaul Jackson  * rebinds the mempolicy it's copying by calling mpol_rebind_policy()
22654225399aSPaul Jackson  * with the mems_allowed returned by cpuset_mems_allowed().  This
22664225399aSPaul Jackson  * keeps mempolicies cpuset relative after its cpuset moves.  See
22674225399aSPaul Jackson  * further kernel/cpuset.c update_nodemask().
2268708c1bbcSMiao Xie  *
2269708c1bbcSMiao Xie  * current's mempolicy may be rebound by another task (the task that changes
2270708c1bbcSMiao Xie  * cpuset's mems), so we needn't do rebind work for the current task.
2252ef0855d3SOleg Nesterov int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst) 2253ef0855d3SOleg Nesterov { 2254ef0855d3SOleg Nesterov struct mempolicy *pol = mpol_dup(vma_policy(src)); 2255ef0855d3SOleg Nesterov 2256ef0855d3SOleg Nesterov if (IS_ERR(pol)) 2257ef0855d3SOleg Nesterov return PTR_ERR(pol); 2258ef0855d3SOleg Nesterov dst->vm_policy = pol; 2259ef0855d3SOleg Nesterov return 0; 2260ef0855d3SOleg Nesterov } 2261ef0855d3SOleg Nesterov 22624225399aSPaul Jackson /* 2263846a16bfSLee Schermerhorn * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it 22644225399aSPaul Jackson * rebinds the mempolicy it's copying by calling mpol_rebind_policy() 22654225399aSPaul Jackson * with the mems_allowed returned by cpuset_mems_allowed(). This 22664225399aSPaul Jackson * keeps mempolicies cpuset relative after their cpuset moves. See 22674225399aSPaul Jackson * further kernel/cpuset.c update_nodemask(). 2268708c1bbcSMiao Xie * 2269708c1bbcSMiao Xie * current's mempolicy may be rebound by another task (the task that changes 2270708c1bbcSMiao Xie * the cpuset's mems), so we needn't do the rebind work for the current task. 22714225399aSPaul Jackson */ 22724225399aSPaul Jackson 2273846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */ 2274846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old) 22751da177e4SLinus Torvalds { 22761da177e4SLinus Torvalds struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL); 22771da177e4SLinus Torvalds 22781da177e4SLinus Torvalds if (!new) 22791da177e4SLinus Torvalds return ERR_PTR(-ENOMEM); 2280708c1bbcSMiao Xie 2281708c1bbcSMiao Xie /* task's mempolicy is protected by alloc_lock */ 2282708c1bbcSMiao Xie if (old == current->mempolicy) { 2283708c1bbcSMiao Xie task_lock(current); 2284708c1bbcSMiao Xie *new = *old; 2285708c1bbcSMiao Xie task_unlock(current); 2286708c1bbcSMiao Xie } else 2287708c1bbcSMiao Xie *new = *old; 2288708c1bbcSMiao Xie 22894225399aSPaul Jackson if (current_cpuset_is_being_rebound()) { 22904225399aSPaul Jackson nodemask_t mems = cpuset_mems_allowed(current); 2291213980c0SVlastimil Babka mpol_rebind_policy(new, &mems); 22924225399aSPaul Jackson } 22931da177e4SLinus Torvalds atomic_set(&new->refcnt, 1); 22941da177e4SLinus Torvalds return new; 22951da177e4SLinus Torvalds } 22961da177e4SLinus Torvalds 22971da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */ 2298fcfb4dccSKOSAKI Motohiro bool __mpol_equal(struct mempolicy *a, struct mempolicy *b) 22991da177e4SLinus Torvalds { 23001da177e4SLinus Torvalds if (!a || !b) 2301fcfb4dccSKOSAKI Motohiro return false; 230245c4745aSLee Schermerhorn if (a->mode != b->mode) 2303fcfb4dccSKOSAKI Motohiro return false; 230419800502SBob Liu if (a->flags != b->flags) 2305fcfb4dccSKOSAKI Motohiro return false; 230619800502SBob Liu if (mpol_store_user_nodemask(a)) 230719800502SBob Liu if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask)) 2308fcfb4dccSKOSAKI Motohiro return false; 230919800502SBob Liu 231045c4745aSLee Schermerhorn switch (a->mode) { 231119770b32SMel Gorman case MPOL_BIND: 23121da177e4SLinus Torvalds case MPOL_INTERLEAVE: 23131da177e4SLinus Torvalds case MPOL_PREFERRED: 2314269fbe72SBen Widawsky return !!nodes_equal(a->nodes, b->nodes); 23157858d7bcSFeng Tang case MPOL_LOCAL: 23167858d7bcSFeng Tang return true; 23171da177e4SLinus Torvalds default: 23181da177e4SLinus Torvalds BUG(); 2319fcfb4dccSKOSAKI Motohiro return false; 23201da177e4SLinus Torvalds } 23211da177e4SLinus Torvalds } 23221da177e4SLinus Torvalds 23231da177e4SLinus Torvalds /* 23241da177e4SLinus Torvalds * Shared memory backing store policy support. 23251da177e4SLinus Torvalds * 23261da177e4SLinus Torvalds * Remember policies even when nobody has shared memory mapped. 23271da177e4SLinus Torvalds * The policies are kept in Red-Black tree linked from the inode. 23284a8c7bb5SNathan Zimmer * They are protected by the sp->lock rwlock, which should be held 23291da177e4SLinus Torvalds * for any accesses to the tree. 23301da177e4SLinus Torvalds */ 23311da177e4SLinus Torvalds 23324a8c7bb5SNathan Zimmer /* 23334a8c7bb5SNathan Zimmer * lookup first element intersecting start-end.
Caller holds sp->lock for 23344a8c7bb5SNathan Zimmer * reading or for writing 23354a8c7bb5SNathan Zimmer */ 23361da177e4SLinus Torvalds static struct sp_node * 23371da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end) 23381da177e4SLinus Torvalds { 23391da177e4SLinus Torvalds struct rb_node *n = sp->root.rb_node; 23401da177e4SLinus Torvalds 23411da177e4SLinus Torvalds while (n) { 23421da177e4SLinus Torvalds struct sp_node *p = rb_entry(n, struct sp_node, nd); 23431da177e4SLinus Torvalds 23441da177e4SLinus Torvalds if (start >= p->end) 23451da177e4SLinus Torvalds n = n->rb_right; 23461da177e4SLinus Torvalds else if (end <= p->start) 23471da177e4SLinus Torvalds n = n->rb_left; 23481da177e4SLinus Torvalds else 23491da177e4SLinus Torvalds break; 23501da177e4SLinus Torvalds } 23511da177e4SLinus Torvalds if (!n) 23521da177e4SLinus Torvalds return NULL; 23531da177e4SLinus Torvalds for (;;) { 23541da177e4SLinus Torvalds struct sp_node *w = NULL; 23551da177e4SLinus Torvalds struct rb_node *prev = rb_prev(n); 23561da177e4SLinus Torvalds if (!prev) 23571da177e4SLinus Torvalds break; 23581da177e4SLinus Torvalds w = rb_entry(prev, struct sp_node, nd); 23591da177e4SLinus Torvalds if (w->end <= start) 23601da177e4SLinus Torvalds break; 23611da177e4SLinus Torvalds n = prev; 23621da177e4SLinus Torvalds } 23631da177e4SLinus Torvalds return rb_entry(n, struct sp_node, nd); 23641da177e4SLinus Torvalds } 23651da177e4SLinus Torvalds 23664a8c7bb5SNathan Zimmer /* 23674a8c7bb5SNathan Zimmer * Insert a new shared policy into the list. Caller holds sp->lock for 23684a8c7bb5SNathan Zimmer * writing. 23694a8c7bb5SNathan Zimmer */ 23701da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new) 23711da177e4SLinus Torvalds { 23721da177e4SLinus Torvalds struct rb_node **p = &sp->root.rb_node; 23731da177e4SLinus Torvalds struct rb_node *parent = NULL; 23741da177e4SLinus Torvalds struct sp_node *nd; 23751da177e4SLinus Torvalds 23761da177e4SLinus Torvalds while (*p) { 23771da177e4SLinus Torvalds parent = *p; 23781da177e4SLinus Torvalds nd = rb_entry(parent, struct sp_node, nd); 23791da177e4SLinus Torvalds if (new->start < nd->start) 23801da177e4SLinus Torvalds p = &(*p)->rb_left; 23811da177e4SLinus Torvalds else if (new->end > nd->end) 23821da177e4SLinus Torvalds p = &(*p)->rb_right; 23831da177e4SLinus Torvalds else 23841da177e4SLinus Torvalds BUG(); 23851da177e4SLinus Torvalds } 23861da177e4SLinus Torvalds rb_link_node(&new->nd, parent, p); 23871da177e4SLinus Torvalds rb_insert_color(&new->nd, &sp->root); 2388140d5a49SPaul Mundt pr_debug("inserting %lx-%lx: %d\n", new->start, new->end, 238945c4745aSLee Schermerhorn new->policy ? 
new->policy->mode : 0); 23901da177e4SLinus Torvalds } 23911da177e4SLinus Torvalds 23921da177e4SLinus Torvalds /* Find shared policy intersecting idx */ 23931da177e4SLinus Torvalds struct mempolicy * 23941da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx) 23951da177e4SLinus Torvalds { 23961da177e4SLinus Torvalds struct mempolicy *pol = NULL; 23971da177e4SLinus Torvalds struct sp_node *sn; 23981da177e4SLinus Torvalds 23991da177e4SLinus Torvalds if (!sp->root.rb_node) 24001da177e4SLinus Torvalds return NULL; 24014a8c7bb5SNathan Zimmer read_lock(&sp->lock); 24021da177e4SLinus Torvalds sn = sp_lookup(sp, idx, idx+1); 24031da177e4SLinus Torvalds if (sn) { 24041da177e4SLinus Torvalds mpol_get(sn->policy); 24051da177e4SLinus Torvalds pol = sn->policy; 24061da177e4SLinus Torvalds } 24074a8c7bb5SNathan Zimmer read_unlock(&sp->lock); 24081da177e4SLinus Torvalds return pol; 24091da177e4SLinus Torvalds } 24101da177e4SLinus Torvalds 241163f74ca2SKOSAKI Motohiro static void sp_free(struct sp_node *n) 241263f74ca2SKOSAKI Motohiro { 241363f74ca2SKOSAKI Motohiro mpol_put(n->policy); 241463f74ca2SKOSAKI Motohiro kmem_cache_free(sn_cache, n); 241563f74ca2SKOSAKI Motohiro } 241663f74ca2SKOSAKI Motohiro 2417771fb4d8SLee Schermerhorn /** 2418771fb4d8SLee Schermerhorn * mpol_misplaced - check whether current page node is valid in policy 2419771fb4d8SLee Schermerhorn * 2420b46e14acSFabian Frederick * @page: page to be checked 2421b46e14acSFabian Frederick * @vma: vm area where page mapped 2422b46e14acSFabian Frederick * @addr: virtual address where page mapped 2423771fb4d8SLee Schermerhorn * 2424771fb4d8SLee Schermerhorn * Lookup current policy node id for vma,addr and "compare to" page's 24255f076944SMatthew Wilcox (Oracle) * node id. Policy determination "mimics" alloc_page_vma(). 2426771fb4d8SLee Schermerhorn * Called from fault path where we know the vma and faulting address. 24275f076944SMatthew Wilcox (Oracle) * 2428*062db293SBaolin Wang * Return: NUMA_NO_NODE if the page is in a node that is valid for this 2429*062db293SBaolin Wang * policy, or a suitable node ID to allocate a replacement page from. 
2430771fb4d8SLee Schermerhorn */ 2431771fb4d8SLee Schermerhorn int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr) 2432771fb4d8SLee Schermerhorn { 2433771fb4d8SLee Schermerhorn struct mempolicy *pol; 2434c33d6c06SMel Gorman struct zoneref *z; 2435771fb4d8SLee Schermerhorn int curnid = page_to_nid(page); 2436771fb4d8SLee Schermerhorn unsigned long pgoff; 243790572890SPeter Zijlstra int thiscpu = raw_smp_processor_id(); 243890572890SPeter Zijlstra int thisnid = cpu_to_node(thiscpu); 243998fa15f3SAnshuman Khandual int polnid = NUMA_NO_NODE; 2440*062db293SBaolin Wang int ret = NUMA_NO_NODE; 2441771fb4d8SLee Schermerhorn 2442dd6eecb9SOleg Nesterov pol = get_vma_policy(vma, addr); 2443771fb4d8SLee Schermerhorn if (!(pol->flags & MPOL_F_MOF)) 2444771fb4d8SLee Schermerhorn goto out; 2445771fb4d8SLee Schermerhorn 2446771fb4d8SLee Schermerhorn switch (pol->mode) { 2447771fb4d8SLee Schermerhorn case MPOL_INTERLEAVE: 2448771fb4d8SLee Schermerhorn pgoff = vma->vm_pgoff; 2449771fb4d8SLee Schermerhorn pgoff += (addr - vma->vm_start) >> PAGE_SHIFT; 245098c70baaSLaurent Dufour polnid = offset_il_node(pol, pgoff); 2451771fb4d8SLee Schermerhorn break; 2452771fb4d8SLee Schermerhorn 2453771fb4d8SLee Schermerhorn case MPOL_PREFERRED: 2454269fbe72SBen Widawsky polnid = first_node(pol->nodes); 2455771fb4d8SLee Schermerhorn break; 2456771fb4d8SLee Schermerhorn 24577858d7bcSFeng Tang case MPOL_LOCAL: 24587858d7bcSFeng Tang polnid = numa_node_id(); 24597858d7bcSFeng Tang break; 24607858d7bcSFeng Tang 2461771fb4d8SLee Schermerhorn case MPOL_BIND: 2462bda420b9SHuang Ying /* Optimize placement among multiple nodes via NUMA balancing */ 2463bda420b9SHuang Ying if (pol->flags & MPOL_F_MORON) { 2464269fbe72SBen Widawsky if (node_isset(thisnid, pol->nodes)) 2465bda420b9SHuang Ying break; 2466bda420b9SHuang Ying goto out; 2467bda420b9SHuang Ying } 2468c33d6c06SMel Gorman 2469771fb4d8SLee Schermerhorn /* 2470771fb4d8SLee Schermerhorn * allows binding to multiple nodes. 2471771fb4d8SLee Schermerhorn * use current page if in policy nodemask, 2472771fb4d8SLee Schermerhorn * else select nearest allowed node, if any. 2473771fb4d8SLee Schermerhorn * If no allowed nodes, use current [!misplaced]. 
2474771fb4d8SLee Schermerhorn */ 2475269fbe72SBen Widawsky if (node_isset(curnid, pol->nodes)) 2476771fb4d8SLee Schermerhorn goto out; 2477c33d6c06SMel Gorman z = first_zones_zonelist( 2478771fb4d8SLee Schermerhorn node_zonelist(numa_node_id(), GFP_HIGHUSER), 2479771fb4d8SLee Schermerhorn gfp_zone(GFP_HIGHUSER), 2480269fbe72SBen Widawsky &pol->nodes); 2481c1093b74SPavel Tatashin polnid = zone_to_nid(z->zone); 2482771fb4d8SLee Schermerhorn break; 2483771fb4d8SLee Schermerhorn 2484771fb4d8SLee Schermerhorn default: 2485771fb4d8SLee Schermerhorn BUG(); 2486771fb4d8SLee Schermerhorn } 24875606e387SMel Gorman 24885606e387SMel Gorman /* Migrate the page towards the node whose CPU is referencing it */ 2489e42c8ff2SMel Gorman if (pol->flags & MPOL_F_MORON) { 249090572890SPeter Zijlstra polnid = thisnid; 24915606e387SMel Gorman 249210f39042SRik van Riel if (!should_numa_migrate_memory(current, page, curnid, thiscpu)) 2493de1c9ce6SRik van Riel goto out; 2494de1c9ce6SRik van Riel } 2495e42c8ff2SMel Gorman 2496771fb4d8SLee Schermerhorn if (curnid != polnid) 2497771fb4d8SLee Schermerhorn ret = polnid; 2498771fb4d8SLee Schermerhorn out: 2499771fb4d8SLee Schermerhorn mpol_cond_put(pol); 2500771fb4d8SLee Schermerhorn 2501771fb4d8SLee Schermerhorn return ret; 2502771fb4d8SLee Schermerhorn } 2503771fb4d8SLee Schermerhorn
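/*
 * Illustrative sketch, not from this file: how a NUMA hinting fault handler
 * consults mpol_misplaced() above (the real caller is do_numa_page() in
 * mm/memory.c). The function name here is hypothetical.
 */
#if 0
static int example_pick_migration_target(struct page *page,
					 struct vm_area_struct *vma,
					 unsigned long addr)
{
	int target_nid = mpol_misplaced(page, vma, addr);

	if (target_nid == NUMA_NO_NODE)
		return NUMA_NO_NODE;	/* page already sits on a policy-valid node */

	/* the caller would now try to migrate @page towards target_nid */
	return target_nid;
}
#endif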
2504c11600e4SDavid Rientjes /* 2505c11600e4SDavid Rientjes * Drop the (possibly final) reference to task->mempolicy. It needs to be 2506c11600e4SDavid Rientjes * dropped after task->mempolicy is set to NULL so that any allocation done as 2507c11600e4SDavid Rientjes * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed 2508c11600e4SDavid Rientjes * policy. 2509c11600e4SDavid Rientjes */ 2510c11600e4SDavid Rientjes void mpol_put_task_policy(struct task_struct *task) 2511c11600e4SDavid Rientjes { 2512c11600e4SDavid Rientjes struct mempolicy *pol; 2513c11600e4SDavid Rientjes 2514c11600e4SDavid Rientjes task_lock(task); 2515c11600e4SDavid Rientjes pol = task->mempolicy; 2516c11600e4SDavid Rientjes task->mempolicy = NULL; 2517c11600e4SDavid Rientjes task_unlock(task); 2518c11600e4SDavid Rientjes mpol_put(pol); 2519c11600e4SDavid Rientjes } 2520c11600e4SDavid Rientjes 25211da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n) 25221da177e4SLinus Torvalds { 2523140d5a49SPaul Mundt pr_debug("deleting %lx-%lx\n", n->start, n->end); 25241da177e4SLinus Torvalds rb_erase(&n->nd, &sp->root); 252563f74ca2SKOSAKI Motohiro sp_free(n); 25261da177e4SLinus Torvalds } 25271da177e4SLinus Torvalds 252842288fe3SMel Gorman static void sp_node_init(struct sp_node *node, unsigned long start, 252842288fe3SMel Gorman unsigned long end, struct mempolicy *pol) 253042288fe3SMel Gorman { 253142288fe3SMel Gorman node->start = start; 253242288fe3SMel Gorman node->end = end; 253342288fe3SMel Gorman node->policy = pol; 253442288fe3SMel Gorman } 253542288fe3SMel Gorman 2536dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end, 2537dbcb0f19SAdrian Bunk struct mempolicy *pol) 25381da177e4SLinus Torvalds { 2539869833f2SKOSAKI Motohiro struct sp_node *n; 2540869833f2SKOSAKI Motohiro struct mempolicy *newpol; 25411da177e4SLinus Torvalds 2542869833f2SKOSAKI Motohiro n = kmem_cache_alloc(sn_cache, GFP_KERNEL); 25431da177e4SLinus Torvalds if (!n) 25441da177e4SLinus Torvalds return NULL; 2545869833f2SKOSAKI Motohiro 2546869833f2SKOSAKI Motohiro newpol = mpol_dup(pol); 2547869833f2SKOSAKI Motohiro if (IS_ERR(newpol)) { 2548869833f2SKOSAKI Motohiro kmem_cache_free(sn_cache, n); 2549869833f2SKOSAKI Motohiro return NULL; 2550869833f2SKOSAKI Motohiro } 2551869833f2SKOSAKI Motohiro newpol->flags |= MPOL_F_SHARED; 255242288fe3SMel Gorman sp_node_init(n, start, end, newpol); 2553869833f2SKOSAKI Motohiro 25541da177e4SLinus Torvalds return n; 25551da177e4SLinus Torvalds } 25561da177e4SLinus Torvalds 25571da177e4SLinus Torvalds /* Replace a policy range. */ 25581da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start, 25591da177e4SLinus Torvalds unsigned long end, struct sp_node *new) 25601da177e4SLinus Torvalds { 2561b22d127aSMel Gorman struct sp_node *n; 256242288fe3SMel Gorman struct sp_node *n_new = NULL; 256242288fe3SMel Gorman struct mempolicy *mpol_new = NULL; 2564b22d127aSMel Gorman int ret = 0; 25651da177e4SLinus Torvalds 256642288fe3SMel Gorman restart: 25674a8c7bb5SNathan Zimmer write_lock(&sp->lock); 25681da177e4SLinus Torvalds n = sp_lookup(sp, start, end); 25691da177e4SLinus Torvalds /* Take care of old policies in the same range. */ 25701da177e4SLinus Torvalds while (n && n->start < end) { 25711da177e4SLinus Torvalds struct rb_node *next = rb_next(&n->nd); 25721da177e4SLinus Torvalds if (n->start >= start) { 25731da177e4SLinus Torvalds if (n->end <= end) 25741da177e4SLinus Torvalds sp_delete(sp, n); 25751da177e4SLinus Torvalds else 25761da177e4SLinus Torvalds n->start = end; 25771da177e4SLinus Torvalds } else { 25781da177e4SLinus Torvalds /* Old policy spanning whole new range. */ 25791da177e4SLinus Torvalds if (n->end > end) { 258042288fe3SMel Gorman if (!n_new) 258142288fe3SMel Gorman goto alloc_new; 258242288fe3SMel Gorman 258342288fe3SMel Gorman *mpol_new = *n->policy; 258442288fe3SMel Gorman atomic_set(&mpol_new->refcnt, 1); 25857880639cSKOSAKI Motohiro sp_node_init(n_new, end, n->end, mpol_new); 25861da177e4SLinus Torvalds n->end = start; 25875ca39575SHillf Danton sp_insert(sp, n_new); 258842288fe3SMel Gorman n_new = NULL; 258942288fe3SMel Gorman mpol_new = NULL; 25901da177e4SLinus Torvalds break; 25911da177e4SLinus Torvalds } else 25921da177e4SLinus Torvalds n->end = start; 25931da177e4SLinus Torvalds } 25941da177e4SLinus Torvalds if (!next) 25951da177e4SLinus Torvalds break; 25961da177e4SLinus Torvalds n = rb_entry(next, struct sp_node, nd); 25971da177e4SLinus Torvalds } 25981da177e4SLinus Torvalds if (new) 25991da177e4SLinus Torvalds sp_insert(sp, new); 26004a8c7bb5SNathan Zimmer write_unlock(&sp->lock); 260142288fe3SMel Gorman ret = 0; 260242288fe3SMel Gorman 260342288fe3SMel Gorman err_out: 260442288fe3SMel Gorman if (mpol_new) 260542288fe3SMel Gorman mpol_put(mpol_new); 260642288fe3SMel Gorman if (n_new) 260742288fe3SMel Gorman kmem_cache_free(sn_cache, n_new); 260842288fe3SMel Gorman 2609b22d127aSMel Gorman return ret; 261042288fe3SMel Gorman 261142288fe3SMel Gorman alloc_new: 26124a8c7bb5SNathan Zimmer write_unlock(&sp->lock); 261342288fe3SMel Gorman ret = -ENOMEM; 261442288fe3SMel Gorman n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL); 261542288fe3SMel Gorman if (!n_new) 261642288fe3SMel Gorman goto err_out; 261742288fe3SMel Gorman mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL); 261842288fe3SMel Gorman if (!mpol_new) 261942288fe3SMel Gorman goto err_out; 262042288fe3SMel Gorman goto restart; 26211da177e4SLinus Torvalds } 26221da177e4SLinus Torvalds
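/*
 * Illustrative effect of shared_policy_replace() above, not code from this
 * file: installing [2, 5) into a tree that holds a single range [0, 8)
 * splits the old node around the new one:
 *
 *   before:  [0, 8) -> polA
 *   after:   [0, 2) -> polA   [2, 5) -> polNew   [5, 8) -> polA
 *
 * The right-hand remainder needs a fresh sp_node and mempolicy, which is
 * why the alloc_new path pre-allocates both and then restarts under the lock.
 */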
262371fe804bSLee Schermerhorn /** 262471fe804bSLee Schermerhorn * mpol_shared_policy_init - initialize shared policy for inode 262571fe804bSLee Schermerhorn * @sp: pointer to inode shared policy 262671fe804bSLee Schermerhorn * @mpol: struct mempolicy to install 262771fe804bSLee Schermerhorn * 262871fe804bSLee Schermerhorn * Install non-NULL @mpol in inode's shared policy rb-tree. 262971fe804bSLee Schermerhorn * On entry, the current task has a reference on a non-NULL @mpol. 263071fe804bSLee Schermerhorn * This must be released on exit. 26314bfc4495SKAMEZAWA Hiroyuki * This is called during get_inode() calls, so we can use GFP_KERNEL. 263271fe804bSLee Schermerhorn */ 263371fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) 26347339ff83SRobin Holt { 263558568d2aSMiao Xie int ret; 263658568d2aSMiao Xie 263771fe804bSLee Schermerhorn sp->root = RB_ROOT; /* empty tree == default mempolicy */ 26384a8c7bb5SNathan Zimmer rwlock_init(&sp->lock); 26397339ff83SRobin Holt 264071fe804bSLee Schermerhorn if (mpol) { 26417339ff83SRobin Holt struct vm_area_struct pvma; 264271fe804bSLee Schermerhorn struct mempolicy *new; 26434bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 26447339ff83SRobin Holt 26454bfc4495SKAMEZAWA Hiroyuki if (!scratch) 26465c0c1654SLee Schermerhorn goto put_mpol; 264771fe804bSLee Schermerhorn /* contextualize the tmpfs mount point mempolicy */ 264871fe804bSLee Schermerhorn new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask); 264915d77835SLee Schermerhorn if (IS_ERR(new)) 26500cae3457SDan Carpenter goto free_scratch; /* no valid nodemask intersection */ 265158568d2aSMiao Xie 265258568d2aSMiao Xie task_lock(current); 26534bfc4495SKAMEZAWA Hiroyuki ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch); 265458568d2aSMiao Xie task_unlock(current); 265515d77835SLee Schermerhorn if (ret) 26565c0c1654SLee Schermerhorn goto put_new; 265771fe804bSLee Schermerhorn 265871fe804bSLee Schermerhorn /* Create pseudo-vma that contains just the policy */ 26592c4541e2SKirill A. Shutemov vma_init(&pvma, NULL); 266071fe804bSLee Schermerhorn pvma.vm_end = TASK_SIZE; /* policy covers entire file */ 266171fe804bSLee Schermerhorn mpol_set_shared_policy(sp, &pvma, new); /* adds ref */ 266215d77835SLee Schermerhorn 26635c0c1654SLee Schermerhorn put_new: 266471fe804bSLee Schermerhorn mpol_put(new); /* drop initial ref */ 26650cae3457SDan Carpenter free_scratch: 26664bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 26675c0c1654SLee Schermerhorn put_mpol: 26685c0c1654SLee Schermerhorn mpol_put(mpol); /* drop our incoming ref on sb mpol */ 26697339ff83SRobin Holt } 26707339ff83SRobin Holt } 26717339ff83SRobin Holt 26721da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info, 26731da177e4SLinus Torvalds struct vm_area_struct *vma, struct mempolicy *npol) 26741da177e4SLinus Torvalds { 26751da177e4SLinus Torvalds int err; 26761da177e4SLinus Torvalds struct sp_node *new = NULL; 26771da177e4SLinus Torvalds unsigned long sz = vma_pages(vma); 26781da177e4SLinus Torvalds 2679028fec41SDavid Rientjes pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n", 26801da177e4SLinus Torvalds vma->vm_pgoff, 268145c4745aSLee Schermerhorn sz, npol ? npol->mode : -1, 2682028fec41SDavid Rientjes npol ? npol->flags : -1, 2683269fbe72SBen Widawsky npol ?
nodes_addr(npol->nodes)[0] : NUMA_NO_NODE); 26841da177e4SLinus Torvalds 26851da177e4SLinus Torvalds if (npol) { 26861da177e4SLinus Torvalds new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol); 26871da177e4SLinus Torvalds if (!new) 26881da177e4SLinus Torvalds return -ENOMEM; 26891da177e4SLinus Torvalds } 26901da177e4SLinus Torvalds err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new); 26911da177e4SLinus Torvalds if (err && new) 269263f74ca2SKOSAKI Motohiro sp_free(new); 26931da177e4SLinus Torvalds return err; 26941da177e4SLinus Torvalds } 26951da177e4SLinus Torvalds 26961da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */ 26971da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p) 26981da177e4SLinus Torvalds { 26991da177e4SLinus Torvalds struct sp_node *n; 27001da177e4SLinus Torvalds struct rb_node *next; 27011da177e4SLinus Torvalds 27021da177e4SLinus Torvalds if (!p->root.rb_node) 27031da177e4SLinus Torvalds return; 27044a8c7bb5SNathan Zimmer write_lock(&p->lock); 27051da177e4SLinus Torvalds next = rb_first(&p->root); 27061da177e4SLinus Torvalds while (next) { 27071da177e4SLinus Torvalds n = rb_entry(next, struct sp_node, nd); 27081da177e4SLinus Torvalds next = rb_next(&n->nd); 270963f74ca2SKOSAKI Motohiro sp_delete(p, n); 27101da177e4SLinus Torvalds } 27114a8c7bb5SNathan Zimmer write_unlock(&p->lock); 27121da177e4SLinus Torvalds } 27131da177e4SLinus Torvalds
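/*
 * Illustrative sketch, not from this file: the lifecycle a tmpfs-style
 * filesystem gives the shared policy tree managed above. Names prefixed
 * "example_" are hypothetical; shmem's real hooks live in mm/shmem.c.
 */
#if 0
struct example_inode_info {
	struct shared_policy	policy;
};

/* inode creation: start with an empty tree (== default policy) */
static void example_inode_init(struct example_inode_info *info,
			       struct mempolicy *mount_mpol)
{
	mpol_shared_policy_init(&info->policy, mount_mpol);
}

/* mbind() on a mapping of the file installs a range policy */
static int example_set_policy(struct example_inode_info *info,
			      struct vm_area_struct *vma,
			      struct mempolicy *mpol)
{
	return mpol_set_shared_policy(&info->policy, vma, mpol);
}

/* inode eviction: drop every range policy still in the tree */
static void example_inode_evict(struct example_inode_info *info)
{
	mpol_free_shared_policy(&info->policy);
}
#endif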
"Enabling" : "Disabling"); 27311a687c2eSMel Gorman set_numabalancing_state(numabalancing_default); 27321a687c2eSMel Gorman } 27331a687c2eSMel Gorman } 27341a687c2eSMel Gorman 27351a687c2eSMel Gorman static int __init setup_numabalancing(char *str) 27361a687c2eSMel Gorman { 27371a687c2eSMel Gorman int ret = 0; 27381a687c2eSMel Gorman if (!str) 27391a687c2eSMel Gorman goto out; 27401a687c2eSMel Gorman 27411a687c2eSMel Gorman if (!strcmp(str, "enable")) { 2742c297663cSMel Gorman numabalancing_override = 1; 27431a687c2eSMel Gorman ret = 1; 27441a687c2eSMel Gorman } else if (!strcmp(str, "disable")) { 2745c297663cSMel Gorman numabalancing_override = -1; 27461a687c2eSMel Gorman ret = 1; 27471a687c2eSMel Gorman } 27481a687c2eSMel Gorman out: 27491a687c2eSMel Gorman if (!ret) 27504a404beaSAndrew Morton pr_warn("Unable to parse numa_balancing=\n"); 27511a687c2eSMel Gorman 27521a687c2eSMel Gorman return ret; 27531a687c2eSMel Gorman } 27541a687c2eSMel Gorman __setup("numa_balancing=", setup_numabalancing); 27551a687c2eSMel Gorman #else 27561a687c2eSMel Gorman static inline void __init check_numabalancing_enable(void) 27571a687c2eSMel Gorman { 27581a687c2eSMel Gorman } 27591a687c2eSMel Gorman #endif /* CONFIG_NUMA_BALANCING */ 27601a687c2eSMel Gorman 27611da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */ 27621da177e4SLinus Torvalds void __init numa_policy_init(void) 27631da177e4SLinus Torvalds { 2764b71636e2SPaul Mundt nodemask_t interleave_nodes; 2765b71636e2SPaul Mundt unsigned long largest = 0; 2766b71636e2SPaul Mundt int nid, prefer = 0; 2767b71636e2SPaul Mundt 27681da177e4SLinus Torvalds policy_cache = kmem_cache_create("numa_policy", 27691da177e4SLinus Torvalds sizeof(struct mempolicy), 277020c2df83SPaul Mundt 0, SLAB_PANIC, NULL); 27711da177e4SLinus Torvalds 27721da177e4SLinus Torvalds sn_cache = kmem_cache_create("shared_policy_node", 27731da177e4SLinus Torvalds sizeof(struct sp_node), 277420c2df83SPaul Mundt 0, SLAB_PANIC, NULL); 27751da177e4SLinus Torvalds 27765606e387SMel Gorman for_each_node(nid) { 27775606e387SMel Gorman preferred_node_policy[nid] = (struct mempolicy) { 27785606e387SMel Gorman .refcnt = ATOMIC_INIT(1), 27795606e387SMel Gorman .mode = MPOL_PREFERRED, 27805606e387SMel Gorman .flags = MPOL_F_MOF | MPOL_F_MORON, 2781269fbe72SBen Widawsky .nodes = nodemask_of_node(nid), 27825606e387SMel Gorman }; 27835606e387SMel Gorman } 27845606e387SMel Gorman 2785b71636e2SPaul Mundt /* 2786b71636e2SPaul Mundt * Set interleaving policy for system init. Interleaving is only 2787b71636e2SPaul Mundt * enabled across suitably sized nodes (default is >= 16MB), or 2788b71636e2SPaul Mundt * fall back to the largest node if they're all smaller. 2789b71636e2SPaul Mundt */ 2790b71636e2SPaul Mundt nodes_clear(interleave_nodes); 279101f13bd6SLai Jiangshan for_each_node_state(nid, N_MEMORY) { 2792b71636e2SPaul Mundt unsigned long total_pages = node_present_pages(nid); 27931da177e4SLinus Torvalds 2794b71636e2SPaul Mundt /* Preserve the largest node */ 2795b71636e2SPaul Mundt if (largest < total_pages) { 2796b71636e2SPaul Mundt largest = total_pages; 2797b71636e2SPaul Mundt prefer = nid; 2798b71636e2SPaul Mundt } 2799b71636e2SPaul Mundt 2800b71636e2SPaul Mundt /* Interleave this node? 
*/ 2801b71636e2SPaul Mundt if ((total_pages << PAGE_SHIFT) >= (16 << 20)) 2802b71636e2SPaul Mundt node_set(nid, interleave_nodes); 2803b71636e2SPaul Mundt } 2804b71636e2SPaul Mundt 2805b71636e2SPaul Mundt /* All too small, use the largest */ 2806b71636e2SPaul Mundt if (unlikely(nodes_empty(interleave_nodes))) 2807b71636e2SPaul Mundt node_set(prefer, interleave_nodes); 2808b71636e2SPaul Mundt 2809028fec41SDavid Rientjes if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes)) 2810b1de0d13SMitchel Humpherys pr_err("%s: interleaving failed\n", __func__); 28111a687c2eSMel Gorman 28121a687c2eSMel Gorman check_numabalancing_enable(); 28131da177e4SLinus Torvalds } 28141da177e4SLinus Torvalds 28158bccd85fSChristoph Lameter /* Reset policy of current process to default */ 28161da177e4SLinus Torvalds void numa_default_policy(void) 28171da177e4SLinus Torvalds { 2818028fec41SDavid Rientjes do_set_mempolicy(MPOL_DEFAULT, 0, NULL); 28191da177e4SLinus Torvalds } 282068860ec1SPaul Jackson 28214225399aSPaul Jackson /* 2822095f1fc4SLee Schermerhorn * Parse and format mempolicy from/to strings 2823095f1fc4SLee Schermerhorn */ 2824095f1fc4SLee Schermerhorn 2825345ace9cSLee Schermerhorn static const char * const policy_modes[] = 2826345ace9cSLee Schermerhorn { 2827345ace9cSLee Schermerhorn [MPOL_DEFAULT] = "default", 2828345ace9cSLee Schermerhorn [MPOL_PREFERRED] = "prefer", 2829345ace9cSLee Schermerhorn [MPOL_BIND] = "bind", 2830345ace9cSLee Schermerhorn [MPOL_INTERLEAVE] = "interleave", 2831d3a71033SLee Schermerhorn [MPOL_LOCAL] = "local", 2832345ace9cSLee Schermerhorn }; 28331a75a6c8SChristoph Lameter 2834095f1fc4SLee Schermerhorn 2835095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS 2836095f1fc4SLee Schermerhorn /** 2837f2a07f40SHugh Dickins * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option. 2838095f1fc4SLee Schermerhorn * @str: string containing mempolicy to parse 283971fe804bSLee Schermerhorn * @mpol: pointer to struct mempolicy pointer, returned on success. 
2840095f1fc4SLee Schermerhorn * 2841095f1fc4SLee Schermerhorn * Format of input: 2842095f1fc4SLee Schermerhorn * <mode>[=<flags>][:<nodelist>] 2843095f1fc4SLee Schermerhorn * 284471fe804bSLee Schermerhorn * On success, returns 0, else 1 2845095f1fc4SLee Schermerhorn */ 2846a7a88b23SHugh Dickins int mpol_parse_str(char *str, struct mempolicy **mpol) 2847095f1fc4SLee Schermerhorn { 284871fe804bSLee Schermerhorn struct mempolicy *new = NULL; 2849f2a07f40SHugh Dickins unsigned short mode_flags; 285071fe804bSLee Schermerhorn nodemask_t nodes; 2851095f1fc4SLee Schermerhorn char *nodelist = strchr(str, ':'); 2852095f1fc4SLee Schermerhorn char *flags = strchr(str, '='); 2853dedf2c73Szhong jiang int err = 1, mode; 2854095f1fc4SLee Schermerhorn 2855c7a91bc7SDan Carpenter if (flags) 2856c7a91bc7SDan Carpenter *flags++ = '\0'; /* terminate mode string */ 2857c7a91bc7SDan Carpenter 2858095f1fc4SLee Schermerhorn if (nodelist) { 2859095f1fc4SLee Schermerhorn /* NUL-terminate mode or flags string */ 2860095f1fc4SLee Schermerhorn *nodelist++ = '\0'; 286171fe804bSLee Schermerhorn if (nodelist_parse(nodelist, nodes)) 2862095f1fc4SLee Schermerhorn goto out; 286301f13bd6SLai Jiangshan if (!nodes_subset(nodes, node_states[N_MEMORY])) 2864095f1fc4SLee Schermerhorn goto out; 286571fe804bSLee Schermerhorn } else 286671fe804bSLee Schermerhorn nodes_clear(nodes); 286771fe804bSLee Schermerhorn 2868dedf2c73Szhong jiang mode = match_string(policy_modes, MPOL_MAX, str); 2869dedf2c73Szhong jiang if (mode < 0) 2870095f1fc4SLee Schermerhorn goto out; 2871095f1fc4SLee Schermerhorn 287271fe804bSLee Schermerhorn switch (mode) { 2873095f1fc4SLee Schermerhorn case MPOL_PREFERRED: 287471fe804bSLee Schermerhorn /* 2875aa9f7d51SRandy Dunlap * Insist on a nodelist of one node only, although later 2876aa9f7d51SRandy Dunlap * we use first_node(nodes) to grab a single node, so here 2877aa9f7d51SRandy Dunlap * nodelist (or nodes) cannot be empty. 
287871fe804bSLee Schermerhorn */ 2879095f1fc4SLee Schermerhorn if (nodelist) { 2880095f1fc4SLee Schermerhorn char *rest = nodelist; 2881095f1fc4SLee Schermerhorn while (isdigit(*rest)) 2882095f1fc4SLee Schermerhorn rest++; 2883926f2ae0SKOSAKI Motohiro if (*rest) 2884926f2ae0SKOSAKI Motohiro goto out; 2885aa9f7d51SRandy Dunlap if (nodes_empty(nodes)) 2886aa9f7d51SRandy Dunlap goto out; 2887095f1fc4SLee Schermerhorn } 2888095f1fc4SLee Schermerhorn break; 2889095f1fc4SLee Schermerhorn case MPOL_INTERLEAVE: 2890095f1fc4SLee Schermerhorn /* 2891095f1fc4SLee Schermerhorn * Default to online nodes with memory if no nodelist 2892095f1fc4SLee Schermerhorn */ 2893095f1fc4SLee Schermerhorn if (!nodelist) 289401f13bd6SLai Jiangshan nodes = node_states[N_MEMORY]; 28953f226aa1SLee Schermerhorn break; 289671fe804bSLee Schermerhorn case MPOL_LOCAL: 28973f226aa1SLee Schermerhorn /* 289871fe804bSLee Schermerhorn * Don't allow a nodelist; mpol_new() checks flags 28993f226aa1SLee Schermerhorn */ 290071fe804bSLee Schermerhorn if (nodelist) 29013f226aa1SLee Schermerhorn goto out; 29023f226aa1SLee Schermerhorn break; 2903413b43deSRavikiran G Thirumalai case MPOL_DEFAULT: 2904413b43deSRavikiran G Thirumalai /* 2905413b43deSRavikiran G Thirumalai * Insist on an empty nodelist 2906413b43deSRavikiran G Thirumalai */ 2907413b43deSRavikiran G Thirumalai if (!nodelist) 2908413b43deSRavikiran G Thirumalai err = 0; 2909413b43deSRavikiran G Thirumalai goto out; 2910d69b2e63SKOSAKI Motohiro case MPOL_BIND: 291171fe804bSLee Schermerhorn /* 2912d69b2e63SKOSAKI Motohiro * Insist on a nodelist 291371fe804bSLee Schermerhorn */ 2914d69b2e63SKOSAKI Motohiro if (!nodelist) 2915d69b2e63SKOSAKI Motohiro goto out; 2916095f1fc4SLee Schermerhorn } 2917095f1fc4SLee Schermerhorn 291871fe804bSLee Schermerhorn mode_flags = 0; 2919095f1fc4SLee Schermerhorn if (flags) { 2920095f1fc4SLee Schermerhorn /* 2921095f1fc4SLee Schermerhorn * Currently, we only support two mutually exclusive 2922095f1fc4SLee Schermerhorn * mode flags. 2923095f1fc4SLee Schermerhorn */ 2924095f1fc4SLee Schermerhorn if (!strcmp(flags, "static")) 292571fe804bSLee Schermerhorn mode_flags |= MPOL_F_STATIC_NODES; 2926095f1fc4SLee Schermerhorn else if (!strcmp(flags, "relative")) 292771fe804bSLee Schermerhorn mode_flags |= MPOL_F_RELATIVE_NODES; 2928095f1fc4SLee Schermerhorn else 2929926f2ae0SKOSAKI Motohiro goto out; 2930095f1fc4SLee Schermerhorn } 293171fe804bSLee Schermerhorn 293271fe804bSLee Schermerhorn new = mpol_new(mode, mode_flags, &nodes); 293371fe804bSLee Schermerhorn if (IS_ERR(new)) 2934926f2ae0SKOSAKI Motohiro goto out; 2935926f2ae0SKOSAKI Motohiro 2936f2a07f40SHugh Dickins /* 2937f2a07f40SHugh Dickins * Save nodes for mpol_to_str() to show the tmpfs mount options 2938f2a07f40SHugh Dickins * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo. 2939f2a07f40SHugh Dickins */ 2940269fbe72SBen Widawsky if (mode != MPOL_PREFERRED) { 2941269fbe72SBen Widawsky new->nodes = nodes; 2942269fbe72SBen Widawsky } else if (nodelist) { 2943269fbe72SBen Widawsky nodes_clear(new->nodes); 2944269fbe72SBen Widawsky node_set(first_node(nodes), new->nodes); 2945269fbe72SBen Widawsky } else { 29467858d7bcSFeng Tang new->mode = MPOL_LOCAL; 2947269fbe72SBen Widawsky } 2948f2a07f40SHugh Dickins 2949f2a07f40SHugh Dickins /* 2950f2a07f40SHugh Dickins * Save nodes for contextualization: this will be used to "clone" 2951f2a07f40SHugh Dickins * the mempolicy in a specific context [cpuset] at a later time.
2952f2a07f40SHugh Dickins */ 2953e17f74afSLee Schermerhorn new->w.user_nodemask = nodes; 2954f2a07f40SHugh Dickins 2955926f2ae0SKOSAKI Motohiro err = 0; 295671fe804bSLee Schermerhorn 2957095f1fc4SLee Schermerhorn out: 2958095f1fc4SLee Schermerhorn /* Restore string for error message */ 2959095f1fc4SLee Schermerhorn if (nodelist) 2960095f1fc4SLee Schermerhorn *--nodelist = ':'; 2961095f1fc4SLee Schermerhorn if (flags) 2962095f1fc4SLee Schermerhorn *--flags = '='; 296371fe804bSLee Schermerhorn if (!err) 296471fe804bSLee Schermerhorn *mpol = new; 2965095f1fc4SLee Schermerhorn return err; 2966095f1fc4SLee Schermerhorn } 2967095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */ 2968095f1fc4SLee Schermerhorn
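/*
 * Illustrative sketch, not from this file: parsing a tmpfs "mpol=" mount
 * option with mpol_parse_str() above. The buffer must be writable, since
 * the parser temporarily NUL-terminates substrings in place. The function
 * name is hypothetical.
 */
#if 0
static struct mempolicy *example_parse_mount_mpol(void)
{
	char str[] = "interleave=static:0-3";	/* <mode>[=<flags>][:<nodelist>] */
	struct mempolicy *mpol;

	if (mpol_parse_str(str, &mpol))		/* returns 0 on success, 1 on error */
		return NULL;
	return mpol;	/* caller owns a reference; drop it with mpol_put() */
}
#endif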
296971fe804bSLee Schermerhorn /** 297071fe804bSLee Schermerhorn * mpol_to_str - format a mempolicy structure for printing 297171fe804bSLee Schermerhorn * @buffer: to contain formatted mempolicy string 297271fe804bSLee Schermerhorn * @maxlen: length of @buffer 297371fe804bSLee Schermerhorn * @pol: pointer to mempolicy to be formatted 297471fe804bSLee Schermerhorn * 2975948927eeSDavid Rientjes * Convert @pol into a string. If @buffer is too short, truncate the string. 2976948927eeSDavid Rientjes * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the 2977948927eeSDavid Rientjes * longest flag, "relative", and to display at least a few node ids. 29781a75a6c8SChristoph Lameter */ 2979948927eeSDavid Rientjes void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol) 29801a75a6c8SChristoph Lameter { 29811a75a6c8SChristoph Lameter char *p = buffer; 2982948927eeSDavid Rientjes nodemask_t nodes = NODE_MASK_NONE; 2983948927eeSDavid Rientjes unsigned short mode = MPOL_DEFAULT; 2984948927eeSDavid Rientjes unsigned short flags = 0; 29851a75a6c8SChristoph Lameter 29868790c71aSDavid Rientjes if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) { 2987bea904d5SLee Schermerhorn mode = pol->mode; 2988948927eeSDavid Rientjes flags = pol->flags; 2989948927eeSDavid Rientjes } 2990bea904d5SLee Schermerhorn 29911a75a6c8SChristoph Lameter switch (mode) { 29921a75a6c8SChristoph Lameter case MPOL_DEFAULT: 29937858d7bcSFeng Tang case MPOL_LOCAL: 29941a75a6c8SChristoph Lameter break; 29951a75a6c8SChristoph Lameter case MPOL_PREFERRED: 29961a75a6c8SChristoph Lameter case MPOL_BIND: 29971a75a6c8SChristoph Lameter case MPOL_INTERLEAVE: 2998269fbe72SBen Widawsky nodes = pol->nodes; 29991a75a6c8SChristoph Lameter break; 30001a75a6c8SChristoph Lameter default: 3001948927eeSDavid Rientjes WARN_ON_ONCE(1); 3002948927eeSDavid Rientjes snprintf(p, maxlen, "unknown"); 3003948927eeSDavid Rientjes return; 30041a75a6c8SChristoph Lameter } 30051a75a6c8SChristoph Lameter 3006b7a9f420SDavid Rientjes p += snprintf(p, maxlen, "%s", policy_modes[mode]); 30071a75a6c8SChristoph Lameter 3008fc36b8d3SLee Schermerhorn if (flags & MPOL_MODE_FLAGS) { 3009948927eeSDavid Rientjes p += snprintf(p, buffer + maxlen - p, "="); 3010f5b087b5SDavid Rientjes 30112291990aSLee Schermerhorn /* 30122291990aSLee Schermerhorn * Currently, the only defined flags are mutually exclusive 30132291990aSLee Schermerhorn */ 3014f5b087b5SDavid Rientjes if (flags & MPOL_F_STATIC_NODES) 30152291990aSLee Schermerhorn p += snprintf(p, buffer + maxlen - p, "static"); 30162291990aSLee Schermerhorn else if (flags & MPOL_F_RELATIVE_NODES) 30172291990aSLee Schermerhorn p += snprintf(p, buffer + maxlen - p, "relative"); 3018f5b087b5SDavid Rientjes } 3019f5b087b5SDavid Rientjes 30209e763e0fSTejun Heo if (!nodes_empty(nodes)) 30219e763e0fSTejun Heo p += scnprintf(p, buffer + maxlen - p, ":%*pbl", 30229e763e0fSTejun Heo nodemask_pr_args(&nodes)); 30231a75a6c8SChristoph Lameter } 302420b51af1SHuang Ying 302520b51af1SHuang Ying bool numa_demotion_enabled = false; 302620b51af1SHuang Ying 302720b51af1SHuang Ying #ifdef CONFIG_SYSFS 302820b51af1SHuang Ying static ssize_t numa_demotion_enabled_show(struct kobject *kobj, 302920b51af1SHuang Ying struct kobj_attribute *attr, char *buf) 303020b51af1SHuang Ying { 303120b51af1SHuang Ying return sysfs_emit(buf, "%s\n", 303220b51af1SHuang Ying numa_demotion_enabled ? "true" : "false"); 303320b51af1SHuang Ying } 303420b51af1SHuang Ying 303520b51af1SHuang Ying static ssize_t numa_demotion_enabled_store(struct kobject *kobj, 303620b51af1SHuang Ying struct kobj_attribute *attr, 303720b51af1SHuang Ying const char *buf, size_t count) 303820b51af1SHuang Ying { 303920b51af1SHuang Ying if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1)) 304020b51af1SHuang Ying numa_demotion_enabled = true; 304120b51af1SHuang Ying else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1)) 304220b51af1SHuang Ying numa_demotion_enabled = false; 304320b51af1SHuang Ying else 304420b51af1SHuang Ying return -EINVAL; 304520b51af1SHuang Ying 304620b51af1SHuang Ying return count; 304720b51af1SHuang Ying } 304820b51af1SHuang Ying 304920b51af1SHuang Ying static struct kobj_attribute numa_demotion_enabled_attr = 305020b51af1SHuang Ying __ATTR(demotion_enabled, 0644, numa_demotion_enabled_show, 305120b51af1SHuang Ying numa_demotion_enabled_store); 305220b51af1SHuang Ying 305320b51af1SHuang Ying static struct attribute *numa_attrs[] = { 305420b51af1SHuang Ying &numa_demotion_enabled_attr.attr, 305520b51af1SHuang Ying NULL, 305620b51af1SHuang Ying }; 305720b51af1SHuang Ying 305820b51af1SHuang Ying static const struct attribute_group numa_attr_group = { 305920b51af1SHuang Ying .attrs = numa_attrs, 306020b51af1SHuang Ying }; 306120b51af1SHuang Ying 306220b51af1SHuang Ying static int __init numa_init_sysfs(void) 306320b51af1SHuang Ying { 306420b51af1SHuang Ying int err; 306520b51af1SHuang Ying struct kobject *numa_kobj; 306620b51af1SHuang Ying 306720b51af1SHuang Ying numa_kobj = kobject_create_and_add("numa", mm_kobj); 306820b51af1SHuang Ying if (!numa_kobj) { 306920b51af1SHuang Ying pr_err("failed to create numa kobject\n"); 307020b51af1SHuang Ying return -ENOMEM; 307120b51af1SHuang Ying } 307220b51af1SHuang Ying err = sysfs_create_group(numa_kobj, &numa_attr_group); 307320b51af1SHuang Ying if (err) { 307420b51af1SHuang Ying pr_err("failed to register numa group\n"); 307520b51af1SHuang Ying goto delete_obj; 307620b51af1SHuang Ying } 307720b51af1SHuang Ying return 0; 307820b51af1SHuang Ying 307920b51af1SHuang Ying delete_obj: 308020b51af1SHuang Ying kobject_put(numa_kobj); 308120b51af1SHuang Ying return err; 308220b51af1SHuang Ying } 308320b51af1SHuang Ying subsys_initcall(numa_init_sysfs); 308420b51af1SHuang Ying #endif 3085
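/*
 * Illustrative userspace usage of the sysfs knob defined above, assuming
 * CONFIG_SYSFS and the "numa" kobject created by numa_init_sysfs() under
 * /sys/kernel/mm:
 *
 *   # echo true > /sys/kernel/mm/numa/demotion_enabled
 *   # cat /sys/kernel/mm/numa/demotion_enabled
 *   true
 */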