// SPDX-License-Identifier: GPL-2.0-only
/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave	Allocate memory interleaved over a set of nodes,
 *		with normal fallback if it fails.
 *		For VMA based allocations this interleaves based on the
 *		offset into the backing object or offset into the mapping
 *		for anonymous memory. For process policy a process counter
 *		is used.
 *
 * bind		Only allocate memory on a specific set of nodes,
 *		no fallback.
 *		FIXME: memory is allocated starting with the first node
 *		to the last. It would be better if bind would truly restrict
 *		the allocation to memory nodes instead
 *
 * preferred	Try a specific node first before normal fallback.
 *		As a special case NUMA_NO_NODE here means do the allocation
 *		on the local CPU. This is normally identical to default,
 *		but useful to set in a VMA when you have a non-default
 *		process policy.
 *
 * default	Allocate on the local node first, or when on a VMA
 *		use the process policy. This is what Linux always did
 *		in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
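/*
 * Illustrative userspace sketch (not part of this file): selecting the
 * policies described above via the set_mempolicy(2) and mbind(2) syscalls.
 * The wrappers and MPOL_* constants come from <numaif.h> (libnuma, link
 * with -lnuma); error handling is elided.
 *
 *	unsigned long nodes01 = 0x3;	// nodemask bits for nodes 0 and 1
 *	unsigned long node0   = 0x1;	// nodemask bit for node 0
 *
 *	// Process policy: interleave future allocations over nodes 0,1.
 *	set_mempolicy(MPOL_INTERLEAVE, &nodes01, 8 * sizeof(nodes01));
 *
 *	// VMA policy: bind one anonymous mapping to node 0 only.
 *	void *p = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	mbind(p, 1 << 20, MPOL_BIND, &node0, 8 * sizeof(node0), 0);
 */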
/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/pagewalk.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/ptrace.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>
#include <linux/swapops.h>

#include <asm/tlbflush.h>
#include <linux/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;
/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

/**
 * numa_map_to_online_node - Find closest online node
 * @node: Node id to start the search
 *
 * Lookup the next closest node by distance if @node is not online.
 */
int numa_map_to_online_node(int node)
{
	int min_dist = INT_MAX, dist, n, min_node;

	if (node == NUMA_NO_NODE || node_online(node))
		return node;

	min_node = node;
	for_each_online_node(n) {
		dist = node_distance(node, n);
		if (dist < min_dist) {
			min_dist = dist;
			min_node = n;
		}
	}

	return min_node;
}
EXPORT_SYMBOL_GPL(numa_map_to_online_node);

struct mempolicy *get_task_policy(struct task_struct *p)
{
	struct mempolicy *pol = p->mempolicy;
	int node;

	if (pol)
		return pol;

	node = numa_node_id();
	if (node != NUMA_NO_NODE) {
		pol = &preferred_node_policy[node];
		/* preferred_node_policy is not initialised early in boot */
		if (pol->mode)
			return pol;
	}

	return &default_policy;
}

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
} mpol_ops[MPOL_MAX];

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}
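/*
 * Worked example for the helper above (illustrative): with *rel = {4,5,6}
 * (weight 3) and *orig = {0,2,5}, nodes_fold() wraps *orig modulo 3 into
 * tmp = {0,2}, and nodes_onto() maps bit i of tmp onto the i-th set bit
 * of *rel, so *ret = {4,6}.  This is the MPOL_F_RELATIVE_NODES semantic:
 * the user's mask is interpreted relative to the currently allowed set.
 */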
static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.preferred_node = first_node(*nodes);
	return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy.  mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy.  May also be called holding the mmap_lock for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/*
	 * Default (pol==NULL) resp. local memory policies are not a
	 * subject of any remapping. They also do not need any special
	 * constructor.
	 */
	if (!pol || pol->mode == MPOL_LOCAL)
		return 0;

	/* Check N_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_MEMORY]);

	VM_BUG_ON(!nodes);

	if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
	else
		nodes_and(nsc->mask2, *nodes, nsc->mask1);

	if (mpol_store_user_nodemask(pol))
		pol->w.user_nodemask = *nodes;
	else
		pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed;

	ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	return ret;
}
/*
 * This function just creates a new policy, does some checks and simple
 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);

			mode = MPOL_LOCAL;
		}
	} else if (mode == MPOL_LOCAL) {
		if (!nodes_empty(*nodes) ||
		    (flags & MPOL_F_STATIC_NODES) ||
		    (flags & MPOL_F_RELATIVE_NODES))
			return ERR_PTR(-EINVAL);
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	return policy;
}
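/*
 * Sketch of the two-step construction this implies for callers (this is
 * what do_set_mempolicy() and do_mbind() below do; error handling elided):
 *
 *	NODEMASK_SCRATCH(scratch);
 *	struct mempolicy *new = mpol_new(mode, flags, nodes);
 *
 *	if (!IS_ERR(new))
 *		mpol_set_nodemask(new, nodes, scratch);	// step 2: nodes
 *	NODEMASK_SCRATCH_FREE(scratch);
 */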
/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
{
}

static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed,
								*nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}

	if (nodes_empty(tmp))
		tmp = *nodes;

	pol->v.nodes = tmp;
}

static void mpol_rebind_preferred(struct mempolicy *pol,
						const nodemask_t *nodes)
{
	pol->w.cpuset_mems_allowed = *nodes;
}

/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * Per-vma policies are protected by mmap_lock. Allocations using per-task
 * policies are protected by task->mems_allowed_seq to prevent a premature
 * OOM/allocation failure due to parallel nodemask modification.
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
{
	if (!pol)
		return;
	if (!mpol_store_user_nodemask(pol) &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;

	mpol_ops[pol->mode].rebind(pol, newmask);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires a task
 * pointer, and updates the task's mempolicy.
 *
 * Called with task's alloc_lock held.
 */
void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
{
	mpol_rebind_policy(tsk->mempolicy, new);
}
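/*
 * Rebind example (illustrative): a policy over nodes {0,1} whose cpuset
 * moves to {2,3}.  With MPOL_F_STATIC_NODES the user mask is kept as-is,
 * so {0,1} & {2,3} is empty and tmp falls back to {2,3}; with
 * MPOL_F_RELATIVE_NODES the user mask {0,1} is re-applied onto {2,3},
 * giving {2,3}; with neither flag, nodes_remap() translates {0,1}
 * positionally into {2,3}.
 */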
/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_lock during call.
 */
void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	mmap_write_lock(mm);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new);
	mmap_write_unlock(mm);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_interleave,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_LOCAL] = {
		.rebind = mpol_rebind_default,
	},
};

static int migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

struct queue_pages {
	struct list_head *pagelist;
	unsigned long flags;
	nodemask_t *nmask;
	unsigned long start;
	unsigned long end;
	struct vm_area_struct *first;
};

/*
 * Check if the page's nid is in qp->nmask.
 *
 * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
 * in the invert of qp->nmask.
 */
static inline bool queue_pages_required(struct page *page,
					struct queue_pages *qp)
{
	int nid = page_to_nid(page);
	unsigned long flags = qp->flags;

	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
}
/*
 * queue_pages_pmd() has four possible return values:
 * 0 - pages are placed on the right node or queued successfully, or
 *     special page is met, i.e. huge zero page.
 * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * 2 - THP was split.
 * -EIO - a migration entry was found, or only MPOL_MF_STRICT was
 *        specified and an existing page was already on a node that
 *        does not follow the policy.
 */
static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
	__releases(ptl)
{
	int ret = 0;
	struct page *page;
	struct queue_pages *qp = walk->private;
	unsigned long flags;

	if (unlikely(is_pmd_migration_entry(*pmd))) {
		ret = -EIO;
		goto unlock;
	}
	page = pmd_page(*pmd);
	if (is_huge_zero_page(page)) {
		spin_unlock(ptl);
		walk->action = ACTION_CONTINUE;
		goto out;
	}
	if (!queue_pages_required(page, qp))
		goto unlock;

	flags = qp->flags;
	/* go to thp migration */
	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
		if (!vma_migratable(walk->vma) ||
		    migrate_page_add(page, qp->pagelist, flags)) {
			ret = 1;
			goto unlock;
		}
	} else
		ret = -EIO;
unlock:
	spin_unlock(ptl);
out:
	return ret;
}
/*
 * Scan through pages checking if pages follow certain conditions,
 * and move them to the pagelist if they do.
 *
 * queue_pages_pte_range() has three possible return values:
 * 0 - pages are placed on the right node or queued successfully, or
 *     special page is met, i.e. zero page.
 * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
 *        on a node that does not follow the policy.
 */
static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct page *page;
	struct queue_pages *qp = walk->private;
	unsigned long flags = qp->flags;
	int ret;
	bool has_unmovable = false;
	pte_t *pte, *mapped_pte;
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
		if (ret != 2)
			return ret;
	}
	/* THP was split, fall through to pte walk */

	if (pmd_trans_unstable(pmd))
		return 0;

	mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * vm_normal_page() filters out zero pages, but there might
		 * still be PageReserved pages to skip, perhaps in a VDSO.
		 */
		if (PageReserved(page))
			continue;
		if (!queue_pages_required(page, qp))
			continue;
		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
			/* MPOL_MF_STRICT must be specified if we get here */
			if (!vma_migratable(vma)) {
				has_unmovable = true;
				break;
			}

			/*
			 * Do not abort immediately since there may be
			 * temporary off LRU pages in the range.  Still
			 * need to migrate other LRU pages.
			 */
			if (migrate_page_add(page, qp->pagelist, flags))
				has_unmovable = true;
		} else
			break;
	}
	pte_unmap_unlock(mapped_pte, ptl);
	cond_resched();

	if (has_unmovable)
		return 1;

	return addr != end ? -EIO : 0;
}
static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
			       unsigned long addr, unsigned long end,
			       struct mm_walk *walk)
{
	int ret = 0;
#ifdef CONFIG_HUGETLB_PAGE
	struct queue_pages *qp = walk->private;
	unsigned long flags = (qp->flags & MPOL_MF_VALID);
	struct page *page;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry))
		goto unlock;
	page = pte_page(entry);
	if (!queue_pages_required(page, qp))
		goto unlock;

	if (flags == MPOL_MF_STRICT) {
		/*
		 * STRICT alone means only detecting misplaced page and no
		 * need to further check other vma.
		 */
		ret = -EIO;
		goto unlock;
	}

	if (!vma_migratable(walk->vma)) {
		/*
		 * Must be STRICT with MOVE*, otherwise .test_walk() would
		 * have stopped walking current vma.
		 * Detecting misplaced page but allow migrating pages which
		 * have been queued.
		 */
		ret = 1;
		goto unlock;
	}

	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
	if (flags & (MPOL_MF_MOVE_ALL) ||
	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) {
		if (!isolate_huge_page(page, qp->pagelist) &&
		    (flags & MPOL_MF_STRICT))
			/*
			 * Failed to isolate page but allow migrating pages
			 * which have been queued.
			 */
			ret = 1;
	}
unlock:
	spin_unlock(ptl);
#else
	BUG();
#endif
	return ret;
}
#ifdef CONFIG_NUMA_BALANCING
/*
 * This is used to mark a range of virtual addresses to be inaccessible.
 * These are later cleared by a NUMA hinting fault. Depending on these
 * faults, pages may be migrated for better NUMA placement.
 *
 * This is assuming that NUMA faults are handled using PROT_NONE. If
 * an architecture makes a different choice, it will need further
 * changes to the core.
 */
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	int nr_updated;

	nr_updated = change_protection(vma, addr, end, PAGE_NONE, MM_CP_PROT_NUMA);
	if (nr_updated)
		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);

	return nr_updated;
}
#else
static unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

static int queue_pages_test_walk(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct queue_pages *qp = walk->private;
	unsigned long endvma = vma->vm_end;
	unsigned long flags = qp->flags;

	/* range check first */
	VM_BUG_ON_VMA(!range_in_vma(vma, start, end), vma);

	if (!qp->first) {
		qp->first = vma;
		if (!(flags & MPOL_MF_DISCONTIG_OK) &&
			(qp->start < vma->vm_start))
			/* hole at head side of range */
			return -EFAULT;
	}
	if (!(flags & MPOL_MF_DISCONTIG_OK) &&
		((vma->vm_end < qp->end) &&
		(!vma->vm_next || vma->vm_end < vma->vm_next->vm_start)))
		/* hole at middle or tail of range */
		return -EFAULT;

	/*
	 * Need to check MPOL_MF_STRICT to return -EIO if possible
	 * regardless of vma_migratable
	 */
	if (!vma_migratable(vma) &&
	    !(flags & MPOL_MF_STRICT))
		return 1;

	if (endvma > end)
		endvma = end;

	if (flags & MPOL_MF_LAZY) {
		/* Similar to task_numa_work, skip inaccessible VMAs */
		if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) &&
			!(vma->vm_flags & VM_MIXEDMAP))
			change_prot_numa(vma, start, endvma);
		return 1;
	}

	/* queue pages from current vma */
	if (flags & MPOL_MF_VALID)
		return 0;
	return 1;
}

static const struct mm_walk_ops queue_pages_walk_ops = {
	.hugetlb_entry		= queue_pages_hugetlb,
	.pmd_entry		= queue_pages_pte_range,
	.test_walk		= queue_pages_test_walk,
};
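/*
 * Flag/return contract of the walk above, restated (illustrative summary
 * of the per-callback comments):
 *
 *	MPOL_MF_STRICT alone	- report misplaced pages with -EIO,
 *				  nothing is queued.
 *	MPOL_MF_MOVE/MOVE_ALL	- queue misplaced pages for migration,
 *				  skipping unmovable VMAs.
 *	MOVE* | MPOL_MF_STRICT	- queue what can be queued; return 1 if
 *				  an unmovable page was encountered.
 */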
/*
 * Walk through page tables and collect pages to be migrated.
 *
 * If pages found in a given range are on a set of nodes (determined by
 * @nodes and @flags), they are isolated and queued to the pagelist which
 * is passed via @private.
 *
 * queue_pages_range() has three possible return values:
 * 1 - there is unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * 0 - queue pages successfully or no misplaced page.
 * errno - i.e. misplaced pages with MPOL_MF_STRICT specified (-EIO) or
 *         memory range specified by nodemask and maxnode points outside
 *         your accessible address space (-EFAULT)
 */
static int
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		nodemask_t *nodes, unsigned long flags,
		struct list_head *pagelist)
{
	int err;
	struct queue_pages qp = {
		.pagelist = pagelist,
		.flags = flags,
		.nmask = nodes,
		.start = start,
		.end = end,
		.first = NULL,
	};

	err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);

	if (!qp.first)
		/* whole range in hole */
		err = -EFAULT;

	return err;
}

/*
 * Apply policy to a single VMA
 * This must be called with the mmap_lock held for writing.
 */
static int vma_replace_policy(struct vm_area_struct *vma,
						struct mempolicy *pol)
{
	int err;
	struct mempolicy *old;
	struct mempolicy *new;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	new = mpol_dup(pol);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (vma->vm_ops && vma->vm_ops->set_policy) {
		err = vma->vm_ops->set_policy(vma, new);
		if (err)
			goto err_out;
	}

	old = vma->vm_policy;
	vma->vm_policy = new; /* protected by mmap_lock */
	mpol_put(old);

	return 0;
 err_out:
	mpol_put(new);
	return err;
}
/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end, struct mempolicy *new_pol)
{
	struct vm_area_struct *next;
	struct vm_area_struct *prev;
	struct vm_area_struct *vma;
	int err = 0;
	pgoff_t pgoff;
	unsigned long vmstart;
	unsigned long vmend;

	vma = find_vma(mm, start);
	VM_BUG_ON(!vma);

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
		next = vma->vm_next;
		vmstart = max(start, vma->vm_start);
		vmend   = min(end, vma->vm_end);

		if (mpol_equal(vma_policy(vma), new_pol))
			continue;

		pgoff = vma->vm_pgoff +
			((vmstart - vma->vm_start) >> PAGE_SHIFT);
		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
				 vma->anon_vma, vma->vm_file, pgoff,
				 new_pol, vma->vm_userfaultfd_ctx);
		if (prev) {
			vma = prev;
			next = vma->vm_next;
			if (mpol_equal(vma_policy(vma), new_pol))
				continue;
			/* vma_merge() joined vma && vma->next, case 8 */
			goto replace;
		}
		if (vma->vm_start != vmstart) {
			err = split_vma(vma->vm_mm, vma, vmstart, 1);
			if (err)
				goto out;
		}
		if (vma->vm_end != vmend) {
			err = split_vma(vma->vm_mm, vma, vmend, 0);
			if (err)
				goto out;
		}
 replace:
		err = vma_replace_policy(vma, new_pol);
		if (err)
			goto out;
	}

 out:
	return err;
}
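/*
 * Example (illustrative): applying a new policy to the middle of a single
 * VMA.  vma_merge() cannot merge, so the loop above calls split_vma() at
 * vmstart and again at vmend, leaving three VMAs, and vma_replace_policy()
 * then installs new_pol on the middle one only.
 */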
/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new, *old;
	NODEMASK_SCRATCH(scratch);
	int ret;

	if (!scratch)
		return -ENOMEM;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new)) {
		ret = PTR_ERR(new);
		goto out;
	}

	if (flags & MPOL_F_NUMA_BALANCING) {
		if (new && new->mode == MPOL_BIND) {
			new->flags |= (MPOL_F_MOF | MPOL_F_MORON);
		} else {
			ret = -EINVAL;
			mpol_put(new);
			goto out;
		}
	}

	ret = mpol_set_nodemask(new, nodes, scratch);
	if (ret) {
		mpol_put(new);
		goto out;
	}
	task_lock(current);
	old = current->mempolicy;
	current->mempolicy = new;
	if (new && new->mode == MPOL_INTERLEAVE)
		current->il_prev = MAX_NUMNODES-1;
	task_unlock(current);
	mpol_put(old);
	ret = 0;
out:
	NODEMASK_SCRATCH_FREE(scratch);
	return ret;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (p == &default_policy)
		return;

	switch (p->mode) {
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_LOCAL:
		/* return empty node mask for local allocation */
		break;

	case MPOL_PREFERRED:
		node_set(p->v.preferred_node, *nodes);
		break;
	default:
		BUG();
	}
}
static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p = NULL;
	int err;

	int locked = 1;
	err = get_user_pages_locked(addr & PAGE_MASK, 1, 0, &p, &locked);
	if (err > 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	if (locked)
		mmap_read_unlock(mm);
	return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;

	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL.  We
		 * want to return MPOL_DEFAULT in this case.
		 */
		mmap_read_lock(mm);
		vma = vma_lookup(mm, addr);
		if (!vma) {
			mmap_read_unlock(mm);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			/*
			 * Take a refcount on the mpol, lookup_node()
			 * will drop the mmap_lock, so after calling
			 * lookup_node() only "pol" remains valid, "vma"
			 * is stale.
			 */
			pol_refcount = pol;
			vma = NULL;
			mpol_get(pol);
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = next_node_in(current->il_prev, pol->v.nodes);
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {
			*nmask = pol->w.user_nodemask;
		} else {
			task_lock(current);
			get_policy_nodemask(pol, nmask);
			task_unlock(current);
		}
	}

 out:
	mpol_cond_put(pol);
	if (vma)
		mmap_read_unlock(mm);
	if (pol_refcount)
		mpol_put(pol_refcount);
	return err;
}
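/*
 * Userspace view of the above (sketch; wrappers and constants are in
 * <numaif.h>, error handling elided):
 *
 *	int mode;
 *	unsigned long nmask = 0;
 *
 *	// Which node currently backs the page at addr?
 *	get_mempolicy(&mode, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR);
 *
 *	// What are the calling thread's policy mode and nodemask?
 *	get_mempolicy(&mode, &nmask, 8 * sizeof(nmask), NULL, 0);
 */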
#ifdef CONFIG_MIGRATION
/*
 * page migration, thp tail pages can be passed.
 */
static int migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	struct page *head = compound_head(page);
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
		if (!isolate_lru_page(head)) {
			list_add_tail(&head->lru, pagelist);
			mod_node_page_state(page_pgdat(head),
				NR_ISOLATED_ANON + page_is_file_lru(head),
				thp_nr_pages(head));
		} else if (flags & MPOL_MF_STRICT) {
			/*
			 * Non-movable page may reach here.  And, there may be
			 * temporary off LRU pages or non-LRU movable pages.
			 * Treat them as unmovable pages since they can't be
			 * isolated, so they can't be moved at the moment.  It
			 * should return -EIO for this case too.
			 */
			return -EIO;
		}
	}

	return 0;
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;
	struct migration_target_control mtc = {
		.nid = dest,
		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
	};

	nodes_clear(nmask);
	node_set(source, nmask);

	/*
	 * This does not "check" the range but isolates all pages that
	 * need migration.  Between passing in the full user address
	 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
	 */
	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, alloc_migration_target, NULL,
				(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL);
		if (err)
			putback_movable_pages(&pagelist);
	}

	return err;
}
/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	int busy = 0;
	int err = 0;
	nodemask_t tmp;

	lru_cache_disable();

	mmap_read_lock(mm);

	/*
	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
	 * bit in 'tmp', and return that <source, dest> pair for migration.
	 * The pair of nodemasks 'to' and 'from' define the map.
	 *
	 * If no pair of bits is found that way, fall back to picking some
	 * pair of 'source' and 'dest' bits that are not the same.  If the
	 * 'source' and 'dest' bits are the same, this represents a node
	 * that will be migrating to itself, so no pages need move.
	 *
	 * If no bits are left in 'tmp', or if all remaining bits left
	 * in 'tmp' correspond to the same bit in 'to', return false
	 * (nothing left to migrate).
	 *
	 * This lets us pick a pair of nodes to migrate between, such that
	 * if possible the dest node is not already occupied by some other
	 * source node, minimizing the risk of overloading the memory on a
	 * node that would happen if we migrated incoming memory to a node
	 * before migrating outgoing memory sourced from that same node.
	 *
	 * A single scan of tmp is sufficient.  As we go, we remember the
	 * most recent <s, d> pair that moved (s != d).  If we find a pair
	 * that not only moved, but what's better, moved to an empty slot
	 * (d is not set in tmp), then we break out then, with that pair.
	 * Otherwise when we finish scanning tmp, we at least have the
	 * most recent <s, d> pair that moved.  If we get all the way through
	 * the scan of tmp without finding any node that moved, much less
	 * moved to an empty node, then there is nothing left worth migrating.
	 */

	tmp = *from;
	while (!nodes_empty(tmp)) {
		int s, d;
		int source = NUMA_NO_NODE;
		int dest = 0;

		for_each_node_mask(s, tmp) {

			/*
			 * do_migrate_pages() tries to maintain the relative
			 * node relationship of the pages established between
			 * threads and memory areas.
			 *
			 * However if the number of source nodes is not equal to
			 * the number of destination nodes we can not preserve
			 * this node relative relationship.  In that case, skip
			 * copying memory from a node that is in the destination
			 * mask.
			 *
			 * Example: [2,3,4] -> [3,4,5] moves everything.
			 *          [0-7] -> [3,4,5] moves only 0,1,2,6,7.
			 */

			if ((nodes_weight(*from) != nodes_weight(*to)) &&
						(node_isset(s, *to)))
				continue;

			d = node_remap(s, *from, *to);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
				break;
		}
		if (source == NUMA_NO_NODE)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;
		if (err < 0)
			break;
	}
	mmap_read_unlock(mm);

	lru_cache_enable();
	if (err < 0)
		return err;
	return busy;
}
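/*
 * Worked example of the scan above (illustrative): from = {0,1},
 * to = {1,2}, so tmp starts as {0,1}.  s=0 remaps to d=1, but node 1 is
 * still in tmp, so the scan goes on; s=1 remaps to d=2, an empty slot,
 * so 1 -> 2 migrates first.  After clearing bit 1, the next pass moves
 * 0 -> 1.  Draining node 1 before refilling it is exactly the overload
 * avoidance described in the comment above.
 */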
11654a5b18ccSLarry Woodman */ 11664a5b18ccSLarry Woodman 11670ce72d4fSAndrew Morton if ((nodes_weight(*from) != nodes_weight(*to)) && 11680ce72d4fSAndrew Morton (node_isset(s, *to))) 11694a5b18ccSLarry Woodman continue; 11704a5b18ccSLarry Woodman 11710ce72d4fSAndrew Morton d = node_remap(s, *from, *to); 11727e2ab150SChristoph Lameter if (s == d) 11737e2ab150SChristoph Lameter continue; 11747e2ab150SChristoph Lameter 11757e2ab150SChristoph Lameter source = s; /* Node moved. Memorize */ 11767e2ab150SChristoph Lameter dest = d; 11777e2ab150SChristoph Lameter 11787e2ab150SChristoph Lameter /* dest not in remaining from nodes? */ 11797e2ab150SChristoph Lameter if (!node_isset(dest, tmp)) 11807e2ab150SChristoph Lameter break; 11817e2ab150SChristoph Lameter } 1182b76ac7e7SJianguo Wu if (source == NUMA_NO_NODE) 11837e2ab150SChristoph Lameter break; 11847e2ab150SChristoph Lameter 11857e2ab150SChristoph Lameter node_clear(source, tmp); 11867e2ab150SChristoph Lameter err = migrate_to_node(mm, source, dest, flags); 11877e2ab150SChristoph Lameter if (err > 0) 11887e2ab150SChristoph Lameter busy += err; 11897e2ab150SChristoph Lameter if (err < 0) 11907e2ab150SChristoph Lameter break; 119139743889SChristoph Lameter } 1192d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); 1193d479960eSMinchan Kim 1194361a2a22SMinchan Kim lru_cache_enable(); 11957e2ab150SChristoph Lameter if (err < 0) 11967e2ab150SChristoph Lameter return err; 11977e2ab150SChristoph Lameter return busy; 1198b20a3503SChristoph Lameter 119939743889SChristoph Lameter } 120039743889SChristoph Lameter 12013ad33b24SLee Schermerhorn /* 12023ad33b24SLee Schermerhorn * Allocate a new page for page migration based on vma policy. 1203d05f0cdcSHugh Dickins * Start by assuming the page is mapped by the same vma as contains @start. 12043ad33b24SLee Schermerhorn * Search forward from there, if not. N.B., this assumes that the 12053ad33b24SLee Schermerhorn * list of pages handed to migrate_pages()--which is how we get here-- 12063ad33b24SLee Schermerhorn * is in virtual address order. 
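A usage aside before the allocation callback below: do_migrate_pages() above is the engine behind the migrate_pages(2) syscall, whose entry points appear further down in this file. A minimal userspace sketch (illustrative, not part of mempolicy.c; assumes a machine with nodes 0 and 1):

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	unsigned long old_nodes = 1UL << 0;	/* assumed source: node 0 */
	unsigned long new_nodes = 1UL << 1;	/* assumed target: node 1 */
	long ret;

	/* maxnode = 65: get_nodes() below decrements it first, leaving
	 * 64 significant bits, i.e. one unsigned long of node bits */
	ret = syscall(SYS_migrate_pages, (long)getpid(), 65UL,
		      &old_nodes, &new_nodes);
	if (ret < 0)
		perror("migrate_pages");
	else
		printf("pages not moved: %ld\n", ret);
	return 0;
}

As with do_migrate_pages(), the return value is the number of pages that could not be moved, or a negative errno.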
12073ad33b24SLee Schermerhorn */ 1208666feb21SMichal Hocko static struct page *new_page(struct page *page, unsigned long start) 120995a402c3SChristoph Lameter { 1210d05f0cdcSHugh Dickins struct vm_area_struct *vma; 12113f649ab7SKees Cook unsigned long address; 121295a402c3SChristoph Lameter 1213d05f0cdcSHugh Dickins vma = find_vma(current->mm, start); 12143ad33b24SLee Schermerhorn while (vma) { 12153ad33b24SLee Schermerhorn address = page_address_in_vma(page, vma); 12163ad33b24SLee Schermerhorn if (address != -EFAULT) 12173ad33b24SLee Schermerhorn break; 12183ad33b24SLee Schermerhorn vma = vma->vm_next; 12193ad33b24SLee Schermerhorn } 12203ad33b24SLee Schermerhorn 122111c731e8SWanpeng Li if (PageHuge(page)) { 1222389c8178SMichal Hocko return alloc_huge_page_vma(page_hstate(compound_head(page)), 1223389c8178SMichal Hocko vma, address); 122494723aafSMichal Hocko } else if (PageTransHuge(page)) { 1225c8633798SNaoya Horiguchi struct page *thp; 1226c8633798SNaoya Horiguchi 122719deb769SDavid Rientjes thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address, 122819deb769SDavid Rientjes HPAGE_PMD_ORDER); 1229c8633798SNaoya Horiguchi if (!thp) 1230c8633798SNaoya Horiguchi return NULL; 1231c8633798SNaoya Horiguchi prep_transhuge_page(thp); 1232c8633798SNaoya Horiguchi return thp; 123311c731e8SWanpeng Li } 123411c731e8SWanpeng Li /* 123511c731e8SWanpeng Li * if !vma, alloc_page_vma() will use task or system default policy 123611c731e8SWanpeng Li */ 12370f556856SMichal Hocko return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL, 12380f556856SMichal Hocko vma, address); 123995a402c3SChristoph Lameter } 1240b20a3503SChristoph Lameter #else 1241b20a3503SChristoph Lameter 1242a53190a4SYang Shi static int migrate_page_add(struct page *page, struct list_head *pagelist, 1243b20a3503SChristoph Lameter unsigned long flags) 1244b20a3503SChristoph Lameter { 1245a53190a4SYang Shi return -EIO; 1246b20a3503SChristoph Lameter } 1247b20a3503SChristoph Lameter 12480ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, 12490ce72d4fSAndrew Morton const nodemask_t *to, int flags) 1250b20a3503SChristoph Lameter { 1251b20a3503SChristoph Lameter return -ENOSYS; 1252b20a3503SChristoph Lameter } 125395a402c3SChristoph Lameter 1254666feb21SMichal Hocko static struct page *new_page(struct page *page, unsigned long start) 125595a402c3SChristoph Lameter { 125695a402c3SChristoph Lameter return NULL; 125795a402c3SChristoph Lameter } 1258b20a3503SChristoph Lameter #endif 1259b20a3503SChristoph Lameter 1260dbcb0f19SAdrian Bunk static long do_mbind(unsigned long start, unsigned long len, 1261028fec41SDavid Rientjes unsigned short mode, unsigned short mode_flags, 1262028fec41SDavid Rientjes nodemask_t *nmask, unsigned long flags) 12636ce3c4c0SChristoph Lameter { 12646ce3c4c0SChristoph Lameter struct mm_struct *mm = current->mm; 12656ce3c4c0SChristoph Lameter struct mempolicy *new; 12666ce3c4c0SChristoph Lameter unsigned long end; 12676ce3c4c0SChristoph Lameter int err; 1268d8835445SYang Shi int ret; 12696ce3c4c0SChristoph Lameter LIST_HEAD(pagelist); 12706ce3c4c0SChristoph Lameter 1271b24f53a0SLee Schermerhorn if (flags & ~(unsigned long)MPOL_MF_VALID) 12726ce3c4c0SChristoph Lameter return -EINVAL; 127374c00241SChristoph Lameter if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE)) 12746ce3c4c0SChristoph Lameter return -EPERM; 12756ce3c4c0SChristoph Lameter 12766ce3c4c0SChristoph Lameter if (start & ~PAGE_MASK) 12776ce3c4c0SChristoph Lameter return -EINVAL; 12786ce3c4c0SChristoph Lameter 
12796ce3c4c0SChristoph Lameter if (mode == MPOL_DEFAULT) 12806ce3c4c0SChristoph Lameter flags &= ~MPOL_MF_STRICT; 12816ce3c4c0SChristoph Lameter 12826ce3c4c0SChristoph Lameter len = (len + PAGE_SIZE - 1) & PAGE_MASK; 12836ce3c4c0SChristoph Lameter end = start + len; 12846ce3c4c0SChristoph Lameter 12856ce3c4c0SChristoph Lameter if (end < start) 12866ce3c4c0SChristoph Lameter return -EINVAL; 12876ce3c4c0SChristoph Lameter if (end == start) 12886ce3c4c0SChristoph Lameter return 0; 12896ce3c4c0SChristoph Lameter 1290028fec41SDavid Rientjes new = mpol_new(mode, mode_flags, nmask); 12916ce3c4c0SChristoph Lameter if (IS_ERR(new)) 12926ce3c4c0SChristoph Lameter return PTR_ERR(new); 12936ce3c4c0SChristoph Lameter 1294b24f53a0SLee Schermerhorn if (flags & MPOL_MF_LAZY) 1295b24f53a0SLee Schermerhorn new->flags |= MPOL_F_MOF; 1296b24f53a0SLee Schermerhorn 12976ce3c4c0SChristoph Lameter /* 12986ce3c4c0SChristoph Lameter * If we are using the default policy then operation 12996ce3c4c0SChristoph Lameter * on discontinuous address spaces is okay after all 13006ce3c4c0SChristoph Lameter */ 13016ce3c4c0SChristoph Lameter if (!new) 13026ce3c4c0SChristoph Lameter flags |= MPOL_MF_DISCONTIG_OK; 13036ce3c4c0SChristoph Lameter 1304028fec41SDavid Rientjes pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n", 1305028fec41SDavid Rientjes start, start + len, mode, mode_flags, 130600ef2d2fSDavid Rientjes nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE); 13076ce3c4c0SChristoph Lameter 13080aedadf9SChristoph Lameter if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 13090aedadf9SChristoph Lameter 1310361a2a22SMinchan Kim lru_cache_disable(); 13110aedadf9SChristoph Lameter } 13124bfc4495SKAMEZAWA Hiroyuki { 13134bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 13144bfc4495SKAMEZAWA Hiroyuki if (scratch) { 1315d8ed45c5SMichel Lespinasse mmap_write_lock(mm); 13164bfc4495SKAMEZAWA Hiroyuki err = mpol_set_nodemask(new, nmask, scratch); 13174bfc4495SKAMEZAWA Hiroyuki if (err) 1318d8ed45c5SMichel Lespinasse mmap_write_unlock(mm); 13194bfc4495SKAMEZAWA Hiroyuki } else 13204bfc4495SKAMEZAWA Hiroyuki err = -ENOMEM; 13214bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 13224bfc4495SKAMEZAWA Hiroyuki } 1323b05ca738SKOSAKI Motohiro if (err) 1324b05ca738SKOSAKI Motohiro goto mpol_out; 1325b05ca738SKOSAKI Motohiro 1326d8835445SYang Shi ret = queue_pages_range(mm, start, end, nmask, 13276ce3c4c0SChristoph Lameter flags | MPOL_MF_INVERT, &pagelist); 1328d8835445SYang Shi 1329d8835445SYang Shi if (ret < 0) { 1330a85dfc30SYang Shi err = ret; 1331d8835445SYang Shi goto up_out; 1332d8835445SYang Shi } 1333d8835445SYang Shi 13349d8cebd4SKOSAKI Motohiro err = mbind_range(mm, start, end, new); 13357e2ab150SChristoph Lameter 1336b24f53a0SLee Schermerhorn if (!err) { 1337b24f53a0SLee Schermerhorn int nr_failed = 0; 1338b24f53a0SLee Schermerhorn 1339cf608ac1SMinchan Kim if (!list_empty(&pagelist)) { 1340b24f53a0SLee Schermerhorn WARN_ON_ONCE(flags & MPOL_MF_LAZY); 1341d05f0cdcSHugh Dickins nr_failed = migrate_pages(&pagelist, new_page, NULL, 1342d05f0cdcSHugh Dickins start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND); 1343cf608ac1SMinchan Kim if (nr_failed) 134474060e4dSNaoya Horiguchi putback_movable_pages(&pagelist); 1345cf608ac1SMinchan Kim } 13466ce3c4c0SChristoph Lameter 1347d8835445SYang Shi if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT))) 13486ce3c4c0SChristoph Lameter err = -EIO; 1349a85dfc30SYang Shi } else { 1350d8835445SYang Shi up_out: 1351a85dfc30SYang Shi if (!list_empty(&pagelist)) 1352a85dfc30SYang Shi 
putback_movable_pages(&pagelist); 1353a85dfc30SYang Shi } 1354a85dfc30SYang Shi 1355d8ed45c5SMichel Lespinasse mmap_write_unlock(mm); 1356b05ca738SKOSAKI Motohiro mpol_out: 1357f0be3d32SLee Schermerhorn mpol_put(new); 1358d479960eSMinchan Kim if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) 1359361a2a22SMinchan Kim lru_cache_enable(); 13606ce3c4c0SChristoph Lameter return err; 13616ce3c4c0SChristoph Lameter } 13626ce3c4c0SChristoph Lameter 136339743889SChristoph Lameter /* 13648bccd85fSChristoph Lameter * User space interface with variable-sized bitmaps for nodelists. 13658bccd85fSChristoph Lameter */ 13668bccd85fSChristoph Lameter 13678bccd85fSChristoph Lameter /* Copy a node mask from user space. */ 136839743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask, 13698bccd85fSChristoph Lameter unsigned long maxnode) 13708bccd85fSChristoph Lameter { 13718bccd85fSChristoph Lameter unsigned long k; 137256521e7aSYisheng Xie unsigned long t; 13738bccd85fSChristoph Lameter unsigned long nlongs; 13748bccd85fSChristoph Lameter unsigned long endmask; 13758bccd85fSChristoph Lameter 13768bccd85fSChristoph Lameter --maxnode; 13778bccd85fSChristoph Lameter nodes_clear(*nodes); 13788bccd85fSChristoph Lameter if (maxnode == 0 || !nmask) 13798bccd85fSChristoph Lameter return 0; 1380a9c930baSAndi Kleen if (maxnode > PAGE_SIZE*BITS_PER_BYTE) 1381636f13c1SChris Wright return -EINVAL; 13828bccd85fSChristoph Lameter 13838bccd85fSChristoph Lameter nlongs = BITS_TO_LONGS(maxnode); 13848bccd85fSChristoph Lameter if ((maxnode % BITS_PER_LONG) == 0) 13858bccd85fSChristoph Lameter endmask = ~0UL; 13868bccd85fSChristoph Lameter else 13878bccd85fSChristoph Lameter endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1; 13888bccd85fSChristoph Lameter 138956521e7aSYisheng Xie /* 139056521e7aSYisheng Xie * When the user specifies more nodes than supported, just check 139156521e7aSYisheng Xie * that the unsupported part is all zero. 139256521e7aSYisheng Xie * 139356521e7aSYisheng Xie * If maxnode has more longs than MAX_NUMNODES, check 139456521e7aSYisheng Xie * the bits in that area first, and then go through to 139556521e7aSYisheng Xie * check the remaining bits, which lie at or beyond MAX_NUMNODES. 139656521e7aSYisheng Xie * Otherwise, just check bits [MAX_NUMNODES, maxnode).
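A worked instance of the copy-in arithmetic just described (a standalone model, not kernel code; BITS_PER_LONG is assumed to be 64):

#include <stdio.h>

#define BITS_PER_LONG	64
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static void show(unsigned long maxnode)
{
	unsigned long endmask, nlongs;

	--maxnode;		/* as get_nodes() does first */
	nlongs = BITS_TO_LONGS(maxnode);
	if ((maxnode % BITS_PER_LONG) == 0)
		endmask = ~0UL;
	else
		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
	printf("bits=%lu -> nlongs=%lu endmask=%#lx\n",
	       maxnode, nlongs, endmask);
}

int main(void)
{
	show(17);	/* 16 significant bits: one long, endmask 0xffff */
	show(1025);	/* 1024 bits: 16 longs, last long fully used    */
	return 0;
}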
139756521e7aSYisheng Xie */ 13988bccd85fSChristoph Lameter if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) { 13998bccd85fSChristoph Lameter for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) { 14008bccd85fSChristoph Lameter if (get_user(t, nmask + k)) 14018bccd85fSChristoph Lameter return -EFAULT; 14028bccd85fSChristoph Lameter if (k == nlongs - 1) { 14038bccd85fSChristoph Lameter if (t & endmask) 14048bccd85fSChristoph Lameter return -EINVAL; 14058bccd85fSChristoph Lameter } else if (t) 14068bccd85fSChristoph Lameter return -EINVAL; 14078bccd85fSChristoph Lameter } 14088bccd85fSChristoph Lameter nlongs = BITS_TO_LONGS(MAX_NUMNODES); 14098bccd85fSChristoph Lameter endmask = ~0UL; 14108bccd85fSChristoph Lameter } 14118bccd85fSChristoph Lameter 141256521e7aSYisheng Xie if (maxnode > MAX_NUMNODES && MAX_NUMNODES % BITS_PER_LONG != 0) { 141356521e7aSYisheng Xie unsigned long valid_mask = endmask; 141456521e7aSYisheng Xie 141556521e7aSYisheng Xie valid_mask &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1); 141656521e7aSYisheng Xie if (get_user(t, nmask + nlongs - 1)) 141756521e7aSYisheng Xie return -EFAULT; 141856521e7aSYisheng Xie if (t & valid_mask) 141956521e7aSYisheng Xie return -EINVAL; 142056521e7aSYisheng Xie } 142156521e7aSYisheng Xie 14228bccd85fSChristoph Lameter if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long))) 14238bccd85fSChristoph Lameter return -EFAULT; 14248bccd85fSChristoph Lameter nodes_addr(*nodes)[nlongs-1] &= endmask; 14258bccd85fSChristoph Lameter return 0; 14268bccd85fSChristoph Lameter } 14278bccd85fSChristoph Lameter 14288bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */ 14298bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode, 14308bccd85fSChristoph Lameter nodemask_t *nodes) 14318bccd85fSChristoph Lameter { 14328bccd85fSChristoph Lameter unsigned long copy = ALIGN(maxnode-1, 64) / 8; 1433050c17f2SRalph Campbell unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long); 14348bccd85fSChristoph Lameter 14358bccd85fSChristoph Lameter if (copy > nbytes) { 14368bccd85fSChristoph Lameter if (copy > PAGE_SIZE) 14378bccd85fSChristoph Lameter return -EINVAL; 14388bccd85fSChristoph Lameter if (clear_user((char __user *)mask + nbytes, copy - nbytes)) 14398bccd85fSChristoph Lameter return -EFAULT; 14408bccd85fSChristoph Lameter copy = nbytes; 14418bccd85fSChristoph Lameter } 14428bccd85fSChristoph Lameter return copy_to_user(mask, nodes_addr(*nodes), copy) ? 
-EFAULT : 0; 14438bccd85fSChristoph Lameter } 14448bccd85fSChristoph Lameter 144595837924SFeng Tang /* Basic parameter sanity check used by both mbind() and set_mempolicy() */ 144695837924SFeng Tang static inline int sanitize_mpol_flags(int *mode, unsigned short *flags) 144795837924SFeng Tang { 144895837924SFeng Tang *flags = *mode & MPOL_MODE_FLAGS; 144995837924SFeng Tang *mode &= ~MPOL_MODE_FLAGS; 145095837924SFeng Tang if ((unsigned int)(*mode) >= MPOL_MAX) 145195837924SFeng Tang return -EINVAL; 145295837924SFeng Tang if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES)) 145395837924SFeng Tang return -EINVAL; 145495837924SFeng Tang 145595837924SFeng Tang return 0; 145695837924SFeng Tang } 145795837924SFeng Tang 1458e7dc9ad6SDominik Brodowski static long kernel_mbind(unsigned long start, unsigned long len, 1459e7dc9ad6SDominik Brodowski unsigned long mode, const unsigned long __user *nmask, 1460e7dc9ad6SDominik Brodowski unsigned long maxnode, unsigned int flags) 14618bccd85fSChristoph Lameter { 1462028fec41SDavid Rientjes unsigned short mode_flags; 146395837924SFeng Tang nodemask_t nodes; 146495837924SFeng Tang int lmode = mode; 146595837924SFeng Tang int err; 14668bccd85fSChristoph Lameter 1467057d3389SAndrey Konovalov start = untagged_addr(start); 146895837924SFeng Tang err = sanitize_mpol_flags(&lmode, &mode_flags); 146995837924SFeng Tang if (err) 147095837924SFeng Tang return err; 147195837924SFeng Tang 14728bccd85fSChristoph Lameter err = get_nodes(&nodes, nmask, maxnode); 14738bccd85fSChristoph Lameter if (err) 14748bccd85fSChristoph Lameter return err; 147595837924SFeng Tang 147695837924SFeng Tang return do_mbind(start, len, lmode, mode_flags, &nodes, flags); 14778bccd85fSChristoph Lameter } 14788bccd85fSChristoph Lameter 1479e7dc9ad6SDominik Brodowski SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len, 1480e7dc9ad6SDominik Brodowski unsigned long, mode, const unsigned long __user *, nmask, 1481e7dc9ad6SDominik Brodowski unsigned long, maxnode, unsigned int, flags) 1482e7dc9ad6SDominik Brodowski { 1483e7dc9ad6SDominik Brodowski return kernel_mbind(start, len, mode, nmask, maxnode, flags); 1484e7dc9ad6SDominik Brodowski } 1485e7dc9ad6SDominik Brodowski 14868bccd85fSChristoph Lameter /* Set the process memory policy */ 1487af03c4acSDominik Brodowski static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask, 1488af03c4acSDominik Brodowski unsigned long maxnode) 14898bccd85fSChristoph Lameter { 149095837924SFeng Tang unsigned short mode_flags; 14918bccd85fSChristoph Lameter nodemask_t nodes; 149295837924SFeng Tang int lmode = mode; 149395837924SFeng Tang int err; 14948bccd85fSChristoph Lameter 149595837924SFeng Tang err = sanitize_mpol_flags(&lmode, &mode_flags); 149695837924SFeng Tang if (err) 149795837924SFeng Tang return err; 149895837924SFeng Tang 14998bccd85fSChristoph Lameter err = get_nodes(&nodes, nmask, maxnode); 15008bccd85fSChristoph Lameter if (err) 15018bccd85fSChristoph Lameter return err; 150295837924SFeng Tang 150395837924SFeng Tang return do_set_mempolicy(lmode, mode_flags, &nodes); 15048bccd85fSChristoph Lameter } 15058bccd85fSChristoph Lameter 1506af03c4acSDominik Brodowski SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask, 1507af03c4acSDominik Brodowski unsigned long, maxnode) 1508af03c4acSDominik Brodowski { 1509af03c4acSDominik Brodowski return kernel_set_mempolicy(mode, nmask, maxnode); 1510af03c4acSDominik Brodowski } 1511af03c4acSDominik Brodowski 1512b6e9b0baSDominik Brodowski 
static int kernel_migrate_pages(pid_t pid, unsigned long maxnode, 1513b6e9b0baSDominik Brodowski const unsigned long __user *old_nodes, 1514b6e9b0baSDominik Brodowski const unsigned long __user *new_nodes) 151539743889SChristoph Lameter { 1516596d7cfaSKOSAKI Motohiro struct mm_struct *mm = NULL; 151739743889SChristoph Lameter struct task_struct *task; 151839743889SChristoph Lameter nodemask_t task_nodes; 151939743889SChristoph Lameter int err; 1520596d7cfaSKOSAKI Motohiro nodemask_t *old; 1521596d7cfaSKOSAKI Motohiro nodemask_t *new; 1522596d7cfaSKOSAKI Motohiro NODEMASK_SCRATCH(scratch); 152339743889SChristoph Lameter 1524596d7cfaSKOSAKI Motohiro if (!scratch) 1525596d7cfaSKOSAKI Motohiro return -ENOMEM; 152639743889SChristoph Lameter 1527596d7cfaSKOSAKI Motohiro old = &scratch->mask1; 1528596d7cfaSKOSAKI Motohiro new = &scratch->mask2; 1529596d7cfaSKOSAKI Motohiro 1530596d7cfaSKOSAKI Motohiro err = get_nodes(old, old_nodes, maxnode); 153139743889SChristoph Lameter if (err) 1532596d7cfaSKOSAKI Motohiro goto out; 1533596d7cfaSKOSAKI Motohiro 1534596d7cfaSKOSAKI Motohiro err = get_nodes(new, new_nodes, maxnode); 1535596d7cfaSKOSAKI Motohiro if (err) 1536596d7cfaSKOSAKI Motohiro goto out; 153739743889SChristoph Lameter 153839743889SChristoph Lameter /* Find the mm_struct */ 153955cfaa3cSZeng Zhaoming rcu_read_lock(); 1540228ebcbeSPavel Emelyanov task = pid ? find_task_by_vpid(pid) : current; 154139743889SChristoph Lameter if (!task) { 154255cfaa3cSZeng Zhaoming rcu_read_unlock(); 1543596d7cfaSKOSAKI Motohiro err = -ESRCH; 1544596d7cfaSKOSAKI Motohiro goto out; 154539743889SChristoph Lameter } 15463268c63eSChristoph Lameter get_task_struct(task); 154739743889SChristoph Lameter 1548596d7cfaSKOSAKI Motohiro err = -EINVAL; 154939743889SChristoph Lameter 155039743889SChristoph Lameter /* 155131367466SOtto Ebeling * Check if this process has the right to modify the specified process. 155231367466SOtto Ebeling * Use the regular "ptrace_may_access()" checks. 155339743889SChristoph Lameter */ 155431367466SOtto Ebeling if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) { 1555c69e8d9cSDavid Howells rcu_read_unlock(); 155639743889SChristoph Lameter err = -EPERM; 15573268c63eSChristoph Lameter goto out_put; 155839743889SChristoph Lameter } 1559c69e8d9cSDavid Howells rcu_read_unlock(); 156039743889SChristoph Lameter 156139743889SChristoph Lameter task_nodes = cpuset_mems_allowed(task); 156239743889SChristoph Lameter /* Is the user allowed to access the target nodes? 
*/ 1563596d7cfaSKOSAKI Motohiro if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) { 156439743889SChristoph Lameter err = -EPERM; 15653268c63eSChristoph Lameter goto out_put; 156639743889SChristoph Lameter } 156739743889SChristoph Lameter 15680486a38bSYisheng Xie task_nodes = cpuset_mems_allowed(current); 15690486a38bSYisheng Xie nodes_and(*new, *new, task_nodes); 15700486a38bSYisheng Xie if (nodes_empty(*new)) 15713268c63eSChristoph Lameter goto out_put; 15720486a38bSYisheng Xie 157386c3a764SDavid Quigley err = security_task_movememory(task); 157486c3a764SDavid Quigley if (err) 15753268c63eSChristoph Lameter goto out_put; 157686c3a764SDavid Quigley 15773268c63eSChristoph Lameter mm = get_task_mm(task); 15783268c63eSChristoph Lameter put_task_struct(task); 1579f2a9ef88SSasha Levin 1580f2a9ef88SSasha Levin if (!mm) { 1581f2a9ef88SSasha Levin err = -EINVAL; 1582f2a9ef88SSasha Levin goto out; 1583f2a9ef88SSasha Levin } 1584f2a9ef88SSasha Levin 1585596d7cfaSKOSAKI Motohiro err = do_migrate_pages(mm, old, new, 158674c00241SChristoph Lameter capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE); 15873268c63eSChristoph Lameter 158839743889SChristoph Lameter mmput(mm); 15893268c63eSChristoph Lameter out: 1590596d7cfaSKOSAKI Motohiro NODEMASK_SCRATCH_FREE(scratch); 1591596d7cfaSKOSAKI Motohiro 159239743889SChristoph Lameter return err; 15933268c63eSChristoph Lameter 15943268c63eSChristoph Lameter out_put: 15953268c63eSChristoph Lameter put_task_struct(task); 15963268c63eSChristoph Lameter goto out; 15973268c63eSChristoph Lameter 159839743889SChristoph Lameter } 159939743889SChristoph Lameter 1600b6e9b0baSDominik Brodowski SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode, 1601b6e9b0baSDominik Brodowski const unsigned long __user *, old_nodes, 1602b6e9b0baSDominik Brodowski const unsigned long __user *, new_nodes) 1603b6e9b0baSDominik Brodowski { 1604b6e9b0baSDominik Brodowski return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes); 1605b6e9b0baSDominik Brodowski } 1606b6e9b0baSDominik Brodowski 160739743889SChristoph Lameter 16088bccd85fSChristoph Lameter /* Retrieve NUMA policy */ 1609af03c4acSDominik Brodowski static int kernel_get_mempolicy(int __user *policy, 1610af03c4acSDominik Brodowski unsigned long __user *nmask, 1611af03c4acSDominik Brodowski unsigned long maxnode, 1612af03c4acSDominik Brodowski unsigned long addr, 1613af03c4acSDominik Brodowski unsigned long flags) 16148bccd85fSChristoph Lameter { 1615dbcb0f19SAdrian Bunk int err; 16163f649ab7SKees Cook int pval; 16178bccd85fSChristoph Lameter nodemask_t nodes; 16188bccd85fSChristoph Lameter 1619050c17f2SRalph Campbell if (nmask != NULL && maxnode < nr_node_ids) 16208bccd85fSChristoph Lameter return -EINVAL; 16218bccd85fSChristoph Lameter 16224605f057SWenchao Hao addr = untagged_addr(addr); 16234605f057SWenchao Hao 16248bccd85fSChristoph Lameter err = do_get_mempolicy(&pval, &nodes, addr, flags); 16258bccd85fSChristoph Lameter 16268bccd85fSChristoph Lameter if (err) 16278bccd85fSChristoph Lameter return err; 16288bccd85fSChristoph Lameter 16298bccd85fSChristoph Lameter if (policy && put_user(pval, policy)) 16308bccd85fSChristoph Lameter return -EFAULT; 16318bccd85fSChristoph Lameter 16328bccd85fSChristoph Lameter if (nmask) 16338bccd85fSChristoph Lameter err = copy_nodes_to_user(nmask, maxnode, &nodes); 16348bccd85fSChristoph Lameter 16358bccd85fSChristoph Lameter return err; 16368bccd85fSChristoph Lameter } 16378bccd85fSChristoph Lameter 1638af03c4acSDominik Brodowski SYSCALL_DEFINE5(get_mempolicy, 
int __user *, policy, 1639af03c4acSDominik Brodowski unsigned long __user *, nmask, unsigned long, maxnode, 1640af03c4acSDominik Brodowski unsigned long, addr, unsigned long, flags) 1641af03c4acSDominik Brodowski { 1642af03c4acSDominik Brodowski return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags); 1643af03c4acSDominik Brodowski } 1644af03c4acSDominik Brodowski 16451da177e4SLinus Torvalds #ifdef CONFIG_COMPAT 16461da177e4SLinus Torvalds 1647c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, 1648c93e0f6cSHeiko Carstens compat_ulong_t __user *, nmask, 1649c93e0f6cSHeiko Carstens compat_ulong_t, maxnode, 1650c93e0f6cSHeiko Carstens compat_ulong_t, addr, compat_ulong_t, flags) 16511da177e4SLinus Torvalds { 16521da177e4SLinus Torvalds long err; 16531da177e4SLinus Torvalds unsigned long __user *nm = NULL; 16541da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 16551da177e4SLinus Torvalds DECLARE_BITMAP(bm, MAX_NUMNODES); 16561da177e4SLinus Torvalds 1657050c17f2SRalph Campbell nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids); 16581da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 16591da177e4SLinus Torvalds 16601da177e4SLinus Torvalds if (nmask) 16611da177e4SLinus Torvalds nm = compat_alloc_user_space(alloc_size); 16621da177e4SLinus Torvalds 1663af03c4acSDominik Brodowski err = kernel_get_mempolicy(policy, nm, nr_bits+1, addr, flags); 16641da177e4SLinus Torvalds 16651da177e4SLinus Torvalds if (!err && nmask) { 16662bbff6c7SKAMEZAWA Hiroyuki unsigned long copy_size; 16672bbff6c7SKAMEZAWA Hiroyuki copy_size = min_t(unsigned long, sizeof(bm), alloc_size); 16682bbff6c7SKAMEZAWA Hiroyuki err = copy_from_user(bm, nm, copy_size); 16691da177e4SLinus Torvalds /* ensure entire bitmap is zeroed */ 16701da177e4SLinus Torvalds err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8); 16711da177e4SLinus Torvalds err |= compat_put_bitmap(nmask, bm, nr_bits); 16721da177e4SLinus Torvalds } 16731da177e4SLinus Torvalds 16741da177e4SLinus Torvalds return err; 16751da177e4SLinus Torvalds } 16761da177e4SLinus Torvalds 1677c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask, 1678c93e0f6cSHeiko Carstens compat_ulong_t, maxnode) 16791da177e4SLinus Torvalds { 16801da177e4SLinus Torvalds unsigned long __user *nm = NULL; 16811da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 16821da177e4SLinus Torvalds DECLARE_BITMAP(bm, MAX_NUMNODES); 16831da177e4SLinus Torvalds 16841da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 16851da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 16861da177e4SLinus Torvalds 16871da177e4SLinus Torvalds if (nmask) { 1688cf01fb99SChris Salls if (compat_get_bitmap(bm, nmask, nr_bits)) 16891da177e4SLinus Torvalds return -EFAULT; 1690cf01fb99SChris Salls nm = compat_alloc_user_space(alloc_size); 1691cf01fb99SChris Salls if (copy_to_user(nm, bm, alloc_size)) 1692cf01fb99SChris Salls return -EFAULT; 1693cf01fb99SChris Salls } 16941da177e4SLinus Torvalds 1695af03c4acSDominik Brodowski return kernel_set_mempolicy(mode, nm, nr_bits+1); 16961da177e4SLinus Torvalds } 16971da177e4SLinus Torvalds 1698c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len, 1699c93e0f6cSHeiko Carstens compat_ulong_t, mode, compat_ulong_t __user *, nmask, 1700c93e0f6cSHeiko Carstens compat_ulong_t, maxnode, compat_ulong_t, flags) 17011da177e4SLinus Torvalds { 17021da177e4SLinus Torvalds unsigned long __user *nm = 
NULL; 17031da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 1704dfcd3c0dSAndi Kleen nodemask_t bm; 17051da177e4SLinus Torvalds 17061da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 17071da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 17081da177e4SLinus Torvalds 17091da177e4SLinus Torvalds if (nmask) { 1710cf01fb99SChris Salls if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits)) 17111da177e4SLinus Torvalds return -EFAULT; 1712cf01fb99SChris Salls nm = compat_alloc_user_space(alloc_size); 1713cf01fb99SChris Salls if (copy_to_user(nm, nodes_addr(bm), alloc_size)) 1714cf01fb99SChris Salls return -EFAULT; 1715cf01fb99SChris Salls } 17161da177e4SLinus Torvalds 1717e7dc9ad6SDominik Brodowski return kernel_mbind(start, len, mode, nm, nr_bits+1, flags); 17181da177e4SLinus Torvalds } 17191da177e4SLinus Torvalds 1720b6e9b0baSDominik Brodowski COMPAT_SYSCALL_DEFINE4(migrate_pages, compat_pid_t, pid, 1721b6e9b0baSDominik Brodowski compat_ulong_t, maxnode, 1722b6e9b0baSDominik Brodowski const compat_ulong_t __user *, old_nodes, 1723b6e9b0baSDominik Brodowski const compat_ulong_t __user *, new_nodes) 1724b6e9b0baSDominik Brodowski { 1725b6e9b0baSDominik Brodowski unsigned long __user *old = NULL; 1726b6e9b0baSDominik Brodowski unsigned long __user *new = NULL; 1727b6e9b0baSDominik Brodowski nodemask_t tmp_mask; 1728b6e9b0baSDominik Brodowski unsigned long nr_bits; 1729b6e9b0baSDominik Brodowski unsigned long size; 1730b6e9b0baSDominik Brodowski 1731b6e9b0baSDominik Brodowski nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES); 1732b6e9b0baSDominik Brodowski size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 1733b6e9b0baSDominik Brodowski if (old_nodes) { 1734b6e9b0baSDominik Brodowski if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits)) 1735b6e9b0baSDominik Brodowski return -EFAULT; 1736b6e9b0baSDominik Brodowski old = compat_alloc_user_space(new_nodes ? size * 2 : size); 1737b6e9b0baSDominik Brodowski if (new_nodes) 1738b6e9b0baSDominik Brodowski new = old + size / sizeof(unsigned long); 1739b6e9b0baSDominik Brodowski if (copy_to_user(old, nodes_addr(tmp_mask), size)) 1740b6e9b0baSDominik Brodowski return -EFAULT; 1741b6e9b0baSDominik Brodowski } 1742b6e9b0baSDominik Brodowski if (new_nodes) { 1743b6e9b0baSDominik Brodowski if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits)) 1744b6e9b0baSDominik Brodowski return -EFAULT; 1745b6e9b0baSDominik Brodowski if (new == NULL) 1746b6e9b0baSDominik Brodowski new = compat_alloc_user_space(size); 1747b6e9b0baSDominik Brodowski if (copy_to_user(new, nodes_addr(tmp_mask), size)) 1748b6e9b0baSDominik Brodowski return -EFAULT; 1749b6e9b0baSDominik Brodowski } 1750b6e9b0baSDominik Brodowski return kernel_migrate_pages(pid, nr_bits + 1, old, new); 1751b6e9b0baSDominik Brodowski } 1752b6e9b0baSDominik Brodowski 1753b6e9b0baSDominik Brodowski #endif /* CONFIG_COMPAT */ 17541da177e4SLinus Torvalds 175520ca87f2SLi Xinhai bool vma_migratable(struct vm_area_struct *vma) 175620ca87f2SLi Xinhai { 175720ca87f2SLi Xinhai if (vma->vm_flags & (VM_IO | VM_PFNMAP)) 175820ca87f2SLi Xinhai return false; 175920ca87f2SLi Xinhai 176020ca87f2SLi Xinhai /* 176120ca87f2SLi Xinhai * DAX device mappings require predictable access latency, so avoid 176220ca87f2SLi Xinhai * incurring periodic faults. 
176320ca87f2SLi Xinhai */ 176420ca87f2SLi Xinhai if (vma_is_dax(vma)) 176520ca87f2SLi Xinhai return false; 176620ca87f2SLi Xinhai 176720ca87f2SLi Xinhai if (is_vm_hugetlb_page(vma) && 176820ca87f2SLi Xinhai !hugepage_migration_supported(hstate_vma(vma))) 176920ca87f2SLi Xinhai return false; 177020ca87f2SLi Xinhai 177120ca87f2SLi Xinhai /* 177220ca87f2SLi Xinhai * Migration allocates pages in the highest zone. If we cannot 177320ca87f2SLi Xinhai * do so then migration (at least from node to node) is not 177420ca87f2SLi Xinhai * possible. 177520ca87f2SLi Xinhai */ 177620ca87f2SLi Xinhai if (vma->vm_file && 177720ca87f2SLi Xinhai gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping)) 177820ca87f2SLi Xinhai < policy_zone) 177920ca87f2SLi Xinhai return false; 178020ca87f2SLi Xinhai return true; 178120ca87f2SLi Xinhai } 178220ca87f2SLi Xinhai 178374d2c3a0SOleg Nesterov struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, 178474d2c3a0SOleg Nesterov unsigned long addr) 17851da177e4SLinus Torvalds { 17868d90274bSOleg Nesterov struct mempolicy *pol = NULL; 17871da177e4SLinus Torvalds 17881da177e4SLinus Torvalds if (vma) { 1789480eccf9SLee Schermerhorn if (vma->vm_ops && vma->vm_ops->get_policy) { 17908d90274bSOleg Nesterov pol = vma->vm_ops->get_policy(vma, addr); 179100442ad0SMel Gorman } else if (vma->vm_policy) { 17921da177e4SLinus Torvalds pol = vma->vm_policy; 179300442ad0SMel Gorman 179400442ad0SMel Gorman /* 179500442ad0SMel Gorman * shmem_alloc_page() passes MPOL_F_SHARED policy with 179600442ad0SMel Gorman * a pseudo vma whose vma->vm_ops=NULL. Take a reference 179700442ad0SMel Gorman * count on these policies which will be dropped by 179800442ad0SMel Gorman * mpol_cond_put() later 179900442ad0SMel Gorman */ 180000442ad0SMel Gorman if (mpol_needs_cond_ref(pol)) 180100442ad0SMel Gorman mpol_get(pol); 180200442ad0SMel Gorman } 18031da177e4SLinus Torvalds } 1804f15ca78eSOleg Nesterov 180574d2c3a0SOleg Nesterov return pol; 180674d2c3a0SOleg Nesterov } 180774d2c3a0SOleg Nesterov 180874d2c3a0SOleg Nesterov /* 1809dd6eecb9SOleg Nesterov * get_vma_policy(@vma, @addr) 181074d2c3a0SOleg Nesterov * @vma: virtual memory area whose policy is sought 181174d2c3a0SOleg Nesterov * @addr: address in @vma for shared policy lookup 181274d2c3a0SOleg Nesterov * 181374d2c3a0SOleg Nesterov * Returns effective policy for a VMA at specified address. 1814dd6eecb9SOleg Nesterov * Falls back to current->mempolicy or system default policy, as necessary. 181574d2c3a0SOleg Nesterov * Shared policies [those marked as MPOL_F_SHARED] require an extra reference 181674d2c3a0SOleg Nesterov * count--added by the get_policy() vm_op, as appropriate--to protect against 181774d2c3a0SOleg Nesterov * freeing by another task. It is the caller's responsibility to free the 181874d2c3a0SOleg Nesterov * extra reference for shared policies. 
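The userspace window onto this lookup is get_mempolicy(2) with MPOL_F_ADDR, which resolves a VMA's policy with the same fallback to the task policy. An illustrative sketch, assuming libnuma's <numaif.h> is available (link with -lnuma):

#include <stdio.h>
#include <stdlib.h>
#include <numaif.h>	/* get_mempolicy(), MPOL_* (libnuma) */

int main(void)
{
	void *buf = malloc(4096);
	int mode = -1;

	/* MPOL_F_ADDR: resolve the policy of the VMA containing buf,
	 * falling back to the task policy, as described above */
	if (get_mempolicy(&mode, NULL, 0, buf, MPOL_F_ADDR) != 0) {
		perror("get_mempolicy");
		return 1;
	}
	printf("effective mode = %d (0 == MPOL_DEFAULT)\n", mode);
	free(buf);
	return 0;
}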
181974d2c3a0SOleg Nesterov */ 1820ac79f78dSDavid Rientjes static struct mempolicy *get_vma_policy(struct vm_area_struct *vma, 1821dd6eecb9SOleg Nesterov unsigned long addr) 182274d2c3a0SOleg Nesterov { 182374d2c3a0SOleg Nesterov struct mempolicy *pol = __get_vma_policy(vma, addr); 182474d2c3a0SOleg Nesterov 18258d90274bSOleg Nesterov if (!pol) 1826dd6eecb9SOleg Nesterov pol = get_task_policy(current); 18278d90274bSOleg Nesterov 18281da177e4SLinus Torvalds return pol; 18291da177e4SLinus Torvalds } 18301da177e4SLinus Torvalds 18316b6482bbSOleg Nesterov bool vma_policy_mof(struct vm_area_struct *vma) 1832fc314724SMel Gorman { 18336b6482bbSOleg Nesterov struct mempolicy *pol; 1834f15ca78eSOleg Nesterov 1835fc314724SMel Gorman if (vma->vm_ops && vma->vm_ops->get_policy) { 1836fc314724SMel Gorman bool ret = false; 1837fc314724SMel Gorman 1838fc314724SMel Gorman pol = vma->vm_ops->get_policy(vma, vma->vm_start); 1839fc314724SMel Gorman if (pol && (pol->flags & MPOL_F_MOF)) 1840fc314724SMel Gorman ret = true; 1841fc314724SMel Gorman mpol_cond_put(pol); 1842fc314724SMel Gorman 1843fc314724SMel Gorman return ret; 18448d90274bSOleg Nesterov } 18458d90274bSOleg Nesterov 1846fc314724SMel Gorman pol = vma->vm_policy; 18478d90274bSOleg Nesterov if (!pol) 18486b6482bbSOleg Nesterov pol = get_task_policy(current); 1849fc314724SMel Gorman 1850fc314724SMel Gorman return pol->flags & MPOL_F_MOF; 1851fc314724SMel Gorman } 1852fc314724SMel Gorman 1853d3eb1570SLai Jiangshan static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone) 1854d3eb1570SLai Jiangshan { 1855d3eb1570SLai Jiangshan enum zone_type dynamic_policy_zone = policy_zone; 1856d3eb1570SLai Jiangshan 1857d3eb1570SLai Jiangshan BUG_ON(dynamic_policy_zone == ZONE_MOVABLE); 1858d3eb1570SLai Jiangshan 1859d3eb1570SLai Jiangshan /* 1860d3eb1570SLai Jiangshan * If policy->v.nodes has movable memory only, 1861d3eb1570SLai Jiangshan * apply the policy only when gfp_zone(gfp) == ZONE_MOVABLE. 1862d3eb1570SLai Jiangshan * 1863d3eb1570SLai Jiangshan * policy->v.nodes is intersected with node_states[N_MEMORY], 1864f0953a1bSIngo Molnar * so if the following test fails, it implies 1865d3eb1570SLai Jiangshan * policy->v.nodes has movable memory only.
1866d3eb1570SLai Jiangshan */ 1867d3eb1570SLai Jiangshan if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY])) 1868d3eb1570SLai Jiangshan dynamic_policy_zone = ZONE_MOVABLE; 1869d3eb1570SLai Jiangshan 1870d3eb1570SLai Jiangshan return zone >= dynamic_policy_zone; 1871d3eb1570SLai Jiangshan } 1872d3eb1570SLai Jiangshan 187352cd3b07SLee Schermerhorn /* 187452cd3b07SLee Schermerhorn * Return a nodemask representing a mempolicy for filtering nodes for 187552cd3b07SLee Schermerhorn * page allocation 187652cd3b07SLee Schermerhorn */ 18778ca39e68SMuchun Song nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy) 187819770b32SMel Gorman { 187919770b32SMel Gorman /* Lower zones don't get a nodemask applied for MPOL_BIND */ 188045c4745aSLee Schermerhorn if (unlikely(policy->mode == MPOL_BIND) && 1881d3eb1570SLai Jiangshan apply_policy_zone(policy, gfp_zone(gfp)) && 188219770b32SMel Gorman cpuset_nodemask_valid_mems_allowed(&policy->v.nodes)) 188319770b32SMel Gorman return &policy->v.nodes; 188419770b32SMel Gorman 188519770b32SMel Gorman return NULL; 188619770b32SMel Gorman } 188719770b32SMel Gorman 188804ec6264SVlastimil Babka /* Return the node id preferred by the given mempolicy, or the given id */ 1889f8fd5253SWei Yang static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd) 18901da177e4SLinus Torvalds { 18917858d7bcSFeng Tang if (policy->mode == MPOL_PREFERRED) { 18921da177e4SLinus Torvalds nd = policy->v.preferred_node; 18937858d7bcSFeng Tang } else { 189419770b32SMel Gorman /* 18956d840958SMichal Hocko * __GFP_THISNODE shouldn't even be used with the bind policy 18966d840958SMichal Hocko * because we might easily break the expectation to stay on the 18976d840958SMichal Hocko * requested node and not break the policy. 189819770b32SMel Gorman */ 18996d840958SMichal Hocko WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE)); 19001da177e4SLinus Torvalds } 19016d840958SMichal Hocko 190204ec6264SVlastimil Babka return nd; 19031da177e4SLinus Torvalds } 19041da177e4SLinus Torvalds 19051da177e4SLinus Torvalds /* Do dynamic interleaving for a process */ 19061da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy) 19071da177e4SLinus Torvalds { 190845816682SVlastimil Babka unsigned next; 19091da177e4SLinus Torvalds struct task_struct *me = current; 19101da177e4SLinus Torvalds 191145816682SVlastimil Babka next = next_node_in(me->il_prev, policy->v.nodes); 1912f5b087b5SDavid Rientjes if (next < MAX_NUMNODES) 191345816682SVlastimil Babka me->il_prev = next; 191445816682SVlastimil Babka return next; 19151da177e4SLinus Torvalds } 19161da177e4SLinus Torvalds 1917dc85da15SChristoph Lameter /* 1918dc85da15SChristoph Lameter * Depending on the memory policy provide a node from which to allocate the 1919dc85da15SChristoph Lameter * next slab entry. 
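interleave_nodes() above is a circular next-set-bit walk. A standalone model (illustrative only; a plain unsigned long stands in for the nodemask):

#include <stdio.h>

/* circular next-set-bit walk over a 64-bit mask, modeling next_node_in() */
static int next_node_wrap(int prev, unsigned long mask)
{
	for (int i = 1; i <= 64; i++) {
		int bit = (prev + i) % 64;
		if (mask & (1UL << bit))
			return bit;
	}
	return -1;	/* empty mask */
}

int main(void)
{
	unsigned long nodes = (1UL << 0) | (1UL << 2) | (1UL << 5);
	int il_prev = 5;	/* pretend the last interleave hit node 5 */

	for (int i = 0; i < 6; i++) {
		il_prev = next_node_wrap(il_prev, nodes);
		printf("%d ", il_prev);	/* prints: 0 2 5 0 2 5 */
	}
	printf("\n");
	return 0;
}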
1920dc85da15SChristoph Lameter */ 19212a389610SDavid Rientjes unsigned int mempolicy_slab_node(void) 1922dc85da15SChristoph Lameter { 1923e7b691b0SAndi Kleen struct mempolicy *policy; 19242a389610SDavid Rientjes int node = numa_mem_id(); 1925e7b691b0SAndi Kleen 1926e7b691b0SAndi Kleen if (in_interrupt()) 19272a389610SDavid Rientjes return node; 1928e7b691b0SAndi Kleen 1929e7b691b0SAndi Kleen policy = current->mempolicy; 19307858d7bcSFeng Tang if (!policy) 19312a389610SDavid Rientjes return node; 1932765c4507SChristoph Lameter 1933bea904d5SLee Schermerhorn switch (policy->mode) { 1934bea904d5SLee Schermerhorn case MPOL_PREFERRED: 1935bea904d5SLee Schermerhorn return policy->v.preferred_node; 1936bea904d5SLee Schermerhorn 1937dc85da15SChristoph Lameter case MPOL_INTERLEAVE: 1938dc85da15SChristoph Lameter return interleave_nodes(policy); 1939dc85da15SChristoph Lameter 1940dd1a239fSMel Gorman case MPOL_BIND: { 1941c33d6c06SMel Gorman struct zoneref *z; 1942c33d6c06SMel Gorman 1943dc85da15SChristoph Lameter /* 1944dc85da15SChristoph Lameter * Follow bind policy behavior and start allocation at the 1945dc85da15SChristoph Lameter * first node. 1946dc85da15SChristoph Lameter */ 194719770b32SMel Gorman struct zonelist *zonelist; 194819770b32SMel Gorman enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL); 1949c9634cf0SAneesh Kumar K.V zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK]; 1950c33d6c06SMel Gorman z = first_zones_zonelist(zonelist, highest_zoneidx, 1951c33d6c06SMel Gorman &policy->v.nodes); 1952c1093b74SPavel Tatashin return z->zone ? zone_to_nid(z->zone) : node; 1953dd1a239fSMel Gorman } 19547858d7bcSFeng Tang case MPOL_LOCAL: 19557858d7bcSFeng Tang return node; 1956dc85da15SChristoph Lameter 1957dc85da15SChristoph Lameter default: 1958bea904d5SLee Schermerhorn BUG(); 1959dc85da15SChristoph Lameter } 1960dc85da15SChristoph Lameter } 1961dc85da15SChristoph Lameter 1962fee83b3aSAndrew Morton /* 1963fee83b3aSAndrew Morton * Do static interleaving for a VMA with known offset @n. Returns the n'th 1964fee83b3aSAndrew Morton * node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the 1965fee83b3aSAndrew Morton * number of present nodes. 
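offset_il_node() below therefore reduces to "take the (n mod weight)-th set bit". A tiny standalone model of that rule (illustrative, not kernel code):

#include <stdio.h>

/* (n mod weight)-th set bit of mask, mirroring offset_il_node() below */
static int offset_node(unsigned long mask, unsigned long n)
{
	int target = n % __builtin_popcountl(mask);

	for (int bit = 0; bit < 64; bit++)
		if ((mask & (1UL << bit)) && target-- == 0)
			return bit;
	return -1;
}

int main(void)
{
	unsigned long nodes = (1UL << 0) | (1UL << 2) | (1UL << 5);

	for (unsigned long n = 0; n < 6; n++)
		printf("offset %lu -> node %d\n", n, offset_node(nodes, n));
	return 0;	/* cycles 0, 2, 5, 0, 2, 5 */
}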
1966fee83b3aSAndrew Morton */ 196798c70baaSLaurent Dufour static unsigned offset_il_node(struct mempolicy *pol, unsigned long n) 19681da177e4SLinus Torvalds { 1969dfcd3c0dSAndi Kleen unsigned nnodes = nodes_weight(pol->v.nodes); 1970f5b087b5SDavid Rientjes unsigned target; 1971fee83b3aSAndrew Morton int i; 1972fee83b3aSAndrew Morton int nid; 19731da177e4SLinus Torvalds 1974f5b087b5SDavid Rientjes if (!nnodes) 1975f5b087b5SDavid Rientjes return numa_node_id(); 1976fee83b3aSAndrew Morton target = (unsigned int)n % nnodes; 1977fee83b3aSAndrew Morton nid = first_node(pol->v.nodes); 1978fee83b3aSAndrew Morton for (i = 0; i < target; i++) 1979dfcd3c0dSAndi Kleen nid = next_node(nid, pol->v.nodes); 19801da177e4SLinus Torvalds return nid; 19811da177e4SLinus Torvalds } 19821da177e4SLinus Torvalds 19835da7ca86SChristoph Lameter /* Determine a node number for interleave */ 19845da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol, 19855da7ca86SChristoph Lameter struct vm_area_struct *vma, unsigned long addr, int shift) 19865da7ca86SChristoph Lameter { 19875da7ca86SChristoph Lameter if (vma) { 19885da7ca86SChristoph Lameter unsigned long off; 19895da7ca86SChristoph Lameter 19903b98b087SNishanth Aravamudan /* 19913b98b087SNishanth Aravamudan * for small pages, there is no difference between 19923b98b087SNishanth Aravamudan * shift and PAGE_SHIFT, so the bit-shift is safe. 19933b98b087SNishanth Aravamudan * for huge pages, since vm_pgoff is in units of small 19943b98b087SNishanth Aravamudan * pages, we need to shift off the always 0 bits to get 19953b98b087SNishanth Aravamudan * a useful offset. 19963b98b087SNishanth Aravamudan */ 19973b98b087SNishanth Aravamudan BUG_ON(shift < PAGE_SHIFT); 19983b98b087SNishanth Aravamudan off = vma->vm_pgoff >> (shift - PAGE_SHIFT); 19995da7ca86SChristoph Lameter off += (addr - vma->vm_start) >> shift; 200098c70baaSLaurent Dufour return offset_il_node(pol, off); 20015da7ca86SChristoph Lameter } else 20025da7ca86SChristoph Lameter return interleave_nodes(pol); 20035da7ca86SChristoph Lameter } 20045da7ca86SChristoph Lameter 200500ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS 2006480eccf9SLee Schermerhorn /* 200704ec6264SVlastimil Babka * huge_node(@vma, @addr, @gfp_flags, @mpol) 2008b46e14acSFabian Frederick * @vma: virtual memory area whose policy is sought 2009b46e14acSFabian Frederick * @addr: address in @vma for shared policy lookup and interleave policy 2010b46e14acSFabian Frederick * @gfp_flags: for requested zone 2011b46e14acSFabian Frederick * @mpol: pointer to mempolicy pointer for reference counted mempolicy 2012b46e14acSFabian Frederick * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask 2013480eccf9SLee Schermerhorn * 201404ec6264SVlastimil Babka * Returns a nid suitable for a huge page allocation and a pointer 201552cd3b07SLee Schermerhorn * to the struct mempolicy for conditional unref after allocation. 201652cd3b07SLee Schermerhorn * If the effective policy is 'BIND', returns a pointer to the mempolicy's 201752cd3b07SLee Schermerhorn * @nodemask for filtering the zonelist.
2018c0ff7453SMiao Xie * 2019d26914d1SMel Gorman * Must be protected by read_mems_allowed_begin() 2020480eccf9SLee Schermerhorn */ 202104ec6264SVlastimil Babka int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags, 202204ec6264SVlastimil Babka struct mempolicy **mpol, nodemask_t **nodemask) 20235da7ca86SChristoph Lameter { 202404ec6264SVlastimil Babka int nid; 20255da7ca86SChristoph Lameter 2026dd6eecb9SOleg Nesterov *mpol = get_vma_policy(vma, addr); 202719770b32SMel Gorman *nodemask = NULL; /* assume !MPOL_BIND */ 20285da7ca86SChristoph Lameter 202952cd3b07SLee Schermerhorn if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) { 203004ec6264SVlastimil Babka nid = interleave_nid(*mpol, vma, addr, 203104ec6264SVlastimil Babka huge_page_shift(hstate_vma(vma))); 203252cd3b07SLee Schermerhorn } else { 203304ec6264SVlastimil Babka nid = policy_node(gfp_flags, *mpol, numa_node_id()); 203452cd3b07SLee Schermerhorn if ((*mpol)->mode == MPOL_BIND) 203552cd3b07SLee Schermerhorn *nodemask = &(*mpol)->v.nodes; 2036480eccf9SLee Schermerhorn } 203704ec6264SVlastimil Babka return nid; 20385da7ca86SChristoph Lameter } 203906808b08SLee Schermerhorn 204006808b08SLee Schermerhorn /* 204106808b08SLee Schermerhorn * init_nodemask_of_mempolicy 204206808b08SLee Schermerhorn * 204306808b08SLee Schermerhorn * If the current task's mempolicy is "default" [NULL], return 'false' 204406808b08SLee Schermerhorn * to indicate default policy. Otherwise, extract the policy nodemask 204506808b08SLee Schermerhorn * for 'bind' or 'interleave' policy into the argument nodemask, or 204606808b08SLee Schermerhorn * initialize the argument nodemask to contain the single node for 204706808b08SLee Schermerhorn * 'preferred' or 'local' policy and return 'true' to indicate presence 204806808b08SLee Schermerhorn * of non-default mempolicy. 204906808b08SLee Schermerhorn * 205006808b08SLee Schermerhorn * We don't bother with reference counting the mempolicy [mpol_get/put] 205106808b08SLee Schermerhorn * because the current task is examining its own mempolicy and a task's 205206808b08SLee Schermerhorn * mempolicy is only ever changed by the task itself. 205306808b08SLee Schermerhorn * 205406808b08SLee Schermerhorn * N.B., it is the caller's responsibility to free a returned nodemask.
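For reference, the task mempolicy this helper inspects is the one installed via set_mempolicy(2). A minimal sketch (illustrative; assumes libnuma's <numaif.h> and that nodes 0 and 1 exist; link with -lnuma):

#include <stdio.h>
#include <numaif.h>	/* set_mempolicy(), MPOL_INTERLEAVE (libnuma) */

int main(void)
{
	unsigned long nodes = (1UL << 0) | (1UL << 1);	/* nodes 0 and 1 */

	/* maxnode = 65 covers one 64-bit word of node bits */
	if (set_mempolicy(MPOL_INTERLEAVE, &nodes, 65) != 0) {
		perror("set_mempolicy");
		return 1;
	}
	return 0;
}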
205506808b08SLee Schermerhorn */ 205606808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask) 205706808b08SLee Schermerhorn { 205806808b08SLee Schermerhorn struct mempolicy *mempolicy; 205906808b08SLee Schermerhorn int nid; 206006808b08SLee Schermerhorn 206106808b08SLee Schermerhorn if (!(mask && current->mempolicy)) 206206808b08SLee Schermerhorn return false; 206306808b08SLee Schermerhorn 2064c0ff7453SMiao Xie task_lock(current); 206506808b08SLee Schermerhorn mempolicy = current->mempolicy; 206606808b08SLee Schermerhorn switch (mempolicy->mode) { 206706808b08SLee Schermerhorn case MPOL_PREFERRED: 206806808b08SLee Schermerhorn nid = mempolicy->v.preferred_node; 206906808b08SLee Schermerhorn init_nodemask_of_node(mask, nid); 207006808b08SLee Schermerhorn break; 207106808b08SLee Schermerhorn 207206808b08SLee Schermerhorn case MPOL_BIND: 207306808b08SLee Schermerhorn case MPOL_INTERLEAVE: 207406808b08SLee Schermerhorn *mask = mempolicy->v.nodes; 207506808b08SLee Schermerhorn break; 207606808b08SLee Schermerhorn 20777858d7bcSFeng Tang case MPOL_LOCAL: 20787858d7bcSFeng Tang nid = numa_node_id(); 20797858d7bcSFeng Tang init_nodemask_of_node(mask, nid); 20807858d7bcSFeng Tang break; 20817858d7bcSFeng Tang 208206808b08SLee Schermerhorn default: 208306808b08SLee Schermerhorn BUG(); 208406808b08SLee Schermerhorn } 2085c0ff7453SMiao Xie task_unlock(current); 208606808b08SLee Schermerhorn 208706808b08SLee Schermerhorn return true; 208806808b08SLee Schermerhorn } 208900ac59adSChen, Kenneth W #endif 20905da7ca86SChristoph Lameter 20916f48d0ebSDavid Rientjes /* 2092b26e517aSFeng Tang * mempolicy_in_oom_domain 20936f48d0ebSDavid Rientjes * 2094b26e517aSFeng Tang * If tsk's mempolicy is "bind", check for intersection between mask and 2095b26e517aSFeng Tang * the policy nodemask. Otherwise, return true for all other policies 2096b26e517aSFeng Tang * including "interleave", as a tsk with "interleave" policy may have 2097b26e517aSFeng Tang * memory allocated from all nodes in system. 20986f48d0ebSDavid Rientjes * 20996f48d0ebSDavid Rientjes * Takes task_lock(tsk) to prevent freeing of its mempolicy. 21006f48d0ebSDavid Rientjes */ 2101b26e517aSFeng Tang bool mempolicy_in_oom_domain(struct task_struct *tsk, 21026f48d0ebSDavid Rientjes const nodemask_t *mask) 21036f48d0ebSDavid Rientjes { 21046f48d0ebSDavid Rientjes struct mempolicy *mempolicy; 21056f48d0ebSDavid Rientjes bool ret = true; 21066f48d0ebSDavid Rientjes 21076f48d0ebSDavid Rientjes if (!mask) 21086f48d0ebSDavid Rientjes return ret; 2109b26e517aSFeng Tang 21106f48d0ebSDavid Rientjes task_lock(tsk); 21116f48d0ebSDavid Rientjes mempolicy = tsk->mempolicy; 2112b26e517aSFeng Tang if (mempolicy && mempolicy->mode == MPOL_BIND) 21136f48d0ebSDavid Rientjes ret = nodes_intersects(mempolicy->v.nodes, *mask); 21146f48d0ebSDavid Rientjes task_unlock(tsk); 2115b26e517aSFeng Tang 21166f48d0ebSDavid Rientjes return ret; 21176f48d0ebSDavid Rientjes } 21186f48d0ebSDavid Rientjes 21191da177e4SLinus Torvalds /* Allocate a page in interleaved policy. 21201da177e4SLinus Torvalds Own path because it needs to do special accounting. 
*/ 2121662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, 2122662f3a0bSAndi Kleen unsigned nid) 21231da177e4SLinus Torvalds { 21241da177e4SLinus Torvalds struct page *page; 21251da177e4SLinus Torvalds 212684172f4bSMatthew Wilcox (Oracle) page = __alloc_pages(gfp, order, nid, NULL); 21274518085eSKemi Wang /* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */ 21284518085eSKemi Wang if (!static_branch_likely(&vm_numa_stat_key)) 21294518085eSKemi Wang return page; 2130de55c8b2SAndrey Ryabinin if (page && page_to_nid(page) == nid) { 2131de55c8b2SAndrey Ryabinin preempt_disable(); 2132f19298b9SMel Gorman __count_numa_event(page_zone(page), NUMA_INTERLEAVE_HIT); 2133de55c8b2SAndrey Ryabinin preempt_enable(); 2134de55c8b2SAndrey Ryabinin } 21351da177e4SLinus Torvalds return page; 21361da177e4SLinus Torvalds } 21371da177e4SLinus Torvalds 21381da177e4SLinus Torvalds /** 21390bbbc0b3SAndrea Arcangeli * alloc_pages_vma - Allocate a page for a VMA. 2140eb350739SMatthew Wilcox (Oracle) * @gfp: GFP flags. 21410bbbc0b3SAndrea Arcangeli * @order: Order of the GFP allocation. 21421da177e4SLinus Torvalds * @vma: Pointer to VMA or NULL if not available. 2143eb350739SMatthew Wilcox (Oracle) * @addr: Virtual address of the allocation. Must be inside @vma. 2144be97a41bSVlastimil Babka * @node: Which node to prefer for allocation (modulo policy). 2145eb350739SMatthew Wilcox (Oracle) * @hugepage: For hugepages try only the preferred node if possible. 21461da177e4SLinus Torvalds * 2147eb350739SMatthew Wilcox (Oracle) * Allocate a page for a specific address in @vma, using the appropriate 2148eb350739SMatthew Wilcox (Oracle) * NUMA policy. When @vma is not NULL the caller must hold the mmap_lock 2149eb350739SMatthew Wilcox (Oracle) * of the mm_struct of the VMA to prevent it from going away. Should be 2150eb350739SMatthew Wilcox (Oracle) * used for all allocations for pages that will be mapped into user space. 2151eb350739SMatthew Wilcox (Oracle) * 2152eb350739SMatthew Wilcox (Oracle) * Return: The page on success or NULL if allocation fails. 
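The effect of this per-VMA lookup can be observed from userspace: install a VMA policy with mbind(2), fault a page in, then use the query form of move_pages(2) (a NULL nodes argument) to ask where it landed. An illustrative sketch, assuming libnuma's <numaif.h>, a machine with a node 0, and -lnuma at link time:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <numaif.h>	/* mbind(), move_pages(), MPOL_BIND (libnuma) */

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	unsigned long nodemask = 1UL << 0;	/* assumes node 0 exists */
	int status = -1;
	void *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	mbind(p, pagesz, MPOL_BIND, &nodemask, 65, 0);	/* VMA policy */
	memset(p, 1, pagesz);				/* fault page in */

	/* nodes == NULL turns move_pages() into a placement query */
	move_pages(0, 1, &p, NULL, &status, 0);
	printf("page sits on node %d\n", status);
	return 0;
}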
21531da177e4SLinus Torvalds */ 2154eb350739SMatthew Wilcox (Oracle) struct page *alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, 215519deb769SDavid Rientjes unsigned long addr, int node, bool hugepage) 21561da177e4SLinus Torvalds { 2157cc9a6c87SMel Gorman struct mempolicy *pol; 2158c0ff7453SMiao Xie struct page *page; 215904ec6264SVlastimil Babka int preferred_nid; 2160be97a41bSVlastimil Babka nodemask_t *nmask; 21611da177e4SLinus Torvalds 2162dd6eecb9SOleg Nesterov pol = get_vma_policy(vma, addr); 2163cc9a6c87SMel Gorman 2164be97a41bSVlastimil Babka if (pol->mode == MPOL_INTERLEAVE) { 21651da177e4SLinus Torvalds unsigned nid; 21665da7ca86SChristoph Lameter 21678eac563cSAndi Kleen nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order); 216852cd3b07SLee Schermerhorn mpol_cond_put(pol); 21690bbbc0b3SAndrea Arcangeli page = alloc_page_interleave(gfp, order, nid); 2170be97a41bSVlastimil Babka goto out; 21711da177e4SLinus Torvalds } 21721da177e4SLinus Torvalds 217319deb769SDavid Rientjes if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) { 217419deb769SDavid Rientjes int hpage_node = node; 217519deb769SDavid Rientjes 217619deb769SDavid Rientjes /* 217719deb769SDavid Rientjes * For hugepage allocation and non-interleave policy which 217819deb769SDavid Rientjes * allows the current node (or other explicitly preferred 217919deb769SDavid Rientjes * node) we only try to allocate from the current/preferred 218019deb769SDavid Rientjes * node and don't fall back to other nodes, as the cost of 218119deb769SDavid Rientjes * remote accesses would likely offset THP benefits. 218219deb769SDavid Rientjes * 218319deb769SDavid Rientjes * If the policy is interleave, or does not allow the current 218419deb769SDavid Rientjes * node in its nodemask, we allocate the standard way. 218519deb769SDavid Rientjes */ 21867858d7bcSFeng Tang if (pol->mode == MPOL_PREFERRED) 218719deb769SDavid Rientjes hpage_node = pol->v.preferred_node; 218819deb769SDavid Rientjes 218919deb769SDavid Rientjes nmask = policy_nodemask(gfp, pol); 219019deb769SDavid Rientjes if (!nmask || node_isset(hpage_node, *nmask)) { 219119deb769SDavid Rientjes mpol_cond_put(pol); 2192cc638f32SVlastimil Babka /* 2193cc638f32SVlastimil Babka * First, try to allocate THP only on local node, but 2194cc638f32SVlastimil Babka * don't reclaim unnecessarily, just compact. 2195cc638f32SVlastimil Babka */ 219619deb769SDavid Rientjes page = __alloc_pages_node(hpage_node, 2197cc638f32SVlastimil Babka gfp | __GFP_THISNODE | __GFP_NORETRY, order); 219876e654ccSDavid Rientjes 219976e654ccSDavid Rientjes /* 220076e654ccSDavid Rientjes * If hugepage allocations are configured to always 220176e654ccSDavid Rientjes * synchronous compact or the vma has been madvised 220276e654ccSDavid Rientjes * to prefer hugepage backing, retry allowing remote 2203cc638f32SVlastimil Babka * memory with both reclaim and compact as well. 
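 * [Editorial note, not part of the kernel source:] to recap the ladder
 * implemented here: the first attempt above was local-only (__GFP_THISNODE)
 * and deliberately cheap (__GFP_NORETRY: compact, but no heavy reclaim);
 * the retry below runs only when the caller's @gfp allowed direct reclaim,
 * and it drops both restrictions by passing the original @gfp unchanged.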
220476e654ccSDavid Rientjes */ 220576e654ccSDavid Rientjes if (!page && (gfp & __GFP_DIRECT_RECLAIM)) 220676e654ccSDavid Rientjes page = __alloc_pages_node(hpage_node, 2207cc638f32SVlastimil Babka gfp, order); 220876e654ccSDavid Rientjes 220919deb769SDavid Rientjes goto out; 221019deb769SDavid Rientjes } 221119deb769SDavid Rientjes } 221219deb769SDavid Rientjes 2213077fcf11SAneesh Kumar K.V nmask = policy_nodemask(gfp, pol); 221404ec6264SVlastimil Babka preferred_nid = policy_node(gfp, pol, node); 221584172f4bSMatthew Wilcox (Oracle) page = __alloc_pages(gfp, order, preferred_nid, nmask); 2216d51e9894SVlastimil Babka mpol_cond_put(pol); 2217be97a41bSVlastimil Babka out: 2218077fcf11SAneesh Kumar K.V return page; 2219077fcf11SAneesh Kumar K.V } 222069262215SChristoph Hellwig EXPORT_SYMBOL(alloc_pages_vma); 2221077fcf11SAneesh Kumar K.V 22221da177e4SLinus Torvalds /** 2223d7f946d0SMatthew Wilcox (Oracle) * alloc_pages - Allocate pages. 22246421ec76SMatthew Wilcox (Oracle) * @gfp: GFP flags. 22256421ec76SMatthew Wilcox (Oracle) * @order: Power of two of number of pages to allocate. 22261da177e4SLinus Torvalds * 22276421ec76SMatthew Wilcox (Oracle) * Allocate 1 << @order contiguous pages. The physical address of the 22286421ec76SMatthew Wilcox (Oracle) * first page is naturally aligned (eg an order-3 allocation will be aligned 22296421ec76SMatthew Wilcox (Oracle) * to a multiple of 8 * PAGE_SIZE bytes). The NUMA policy of the current 22306421ec76SMatthew Wilcox (Oracle) * process is honoured when in process context. 22311da177e4SLinus Torvalds * 22326421ec76SMatthew Wilcox (Oracle) * Context: Can be called from any context, providing the appropriate GFP 22336421ec76SMatthew Wilcox (Oracle) * flags are used. 22346421ec76SMatthew Wilcox (Oracle) * Return: The page on success or NULL if allocation fails. 
22351da177e4SLinus Torvalds */ 2236d7f946d0SMatthew Wilcox (Oracle) struct page *alloc_pages(gfp_t gfp, unsigned order) 22371da177e4SLinus Torvalds { 22388d90274bSOleg Nesterov struct mempolicy *pol = &default_policy; 2239c0ff7453SMiao Xie struct page *page; 22401da177e4SLinus Torvalds 22418d90274bSOleg Nesterov if (!in_interrupt() && !(gfp & __GFP_THISNODE)) 22428d90274bSOleg Nesterov pol = get_task_policy(current); 224352cd3b07SLee Schermerhorn 224452cd3b07SLee Schermerhorn /* 224552cd3b07SLee Schermerhorn * No reference counting needed for current->mempolicy 224652cd3b07SLee Schermerhorn * nor system default_policy 224752cd3b07SLee Schermerhorn */ 224845c4745aSLee Schermerhorn if (pol->mode == MPOL_INTERLEAVE) 2249c0ff7453SMiao Xie page = alloc_page_interleave(gfp, order, interleave_nodes(pol)); 2250c0ff7453SMiao Xie else 225184172f4bSMatthew Wilcox (Oracle) page = __alloc_pages(gfp, order, 225204ec6264SVlastimil Babka policy_node(gfp, pol, numa_node_id()), 22535c4b4be3SAndi Kleen policy_nodemask(gfp, pol)); 2254cc9a6c87SMel Gorman 2255c0ff7453SMiao Xie return page; 22561da177e4SLinus Torvalds } 2257d7f946d0SMatthew Wilcox (Oracle) EXPORT_SYMBOL(alloc_pages); 22581da177e4SLinus Torvalds 2259ef0855d3SOleg Nesterov int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst) 2260ef0855d3SOleg Nesterov { 2261ef0855d3SOleg Nesterov struct mempolicy *pol = mpol_dup(vma_policy(src)); 2262ef0855d3SOleg Nesterov 2263ef0855d3SOleg Nesterov if (IS_ERR(pol)) 2264ef0855d3SOleg Nesterov return PTR_ERR(pol); 2265ef0855d3SOleg Nesterov dst->vm_policy = pol; 2266ef0855d3SOleg Nesterov return 0; 2267ef0855d3SOleg Nesterov } 2268ef0855d3SOleg Nesterov 22694225399aSPaul Jackson /* 2270846a16bfSLee Schermerhorn * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it 22714225399aSPaul Jackson * rebinds the mempolicy it is copying by calling mpol_rebind_policy() 22724225399aSPaul Jackson * with the mems_allowed returned by cpuset_mems_allowed(). This 22734225399aSPaul Jackson * keeps mempolicies cpuset relative after its cpuset moves. See 22744225399aSPaul Jackson * further kernel/cpuset.c update_nodemask(). 2275708c1bbcSMiao Xie * 2276708c1bbcSMiao Xie * current's mempolicy may be rebound by another task (the task that changes 2277708c1bbcSMiao Xie * cpuset's mems), so we needn't do rebind work for the current task.
22784225399aSPaul Jackson */ 22794225399aSPaul Jackson 2280846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */ 2281846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old) 22821da177e4SLinus Torvalds { 22831da177e4SLinus Torvalds struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL); 22841da177e4SLinus Torvalds 22851da177e4SLinus Torvalds if (!new) 22861da177e4SLinus Torvalds return ERR_PTR(-ENOMEM); 2287708c1bbcSMiao Xie 2288708c1bbcSMiao Xie /* task's mempolicy is protected by alloc_lock */ 2289708c1bbcSMiao Xie if (old == current->mempolicy) { 2290708c1bbcSMiao Xie task_lock(current); 2291708c1bbcSMiao Xie *new = *old; 2292708c1bbcSMiao Xie task_unlock(current); 2293708c1bbcSMiao Xie } else 2294708c1bbcSMiao Xie *new = *old; 2295708c1bbcSMiao Xie 22964225399aSPaul Jackson if (current_cpuset_is_being_rebound()) { 22974225399aSPaul Jackson nodemask_t mems = cpuset_mems_allowed(current); 2298213980c0SVlastimil Babka mpol_rebind_policy(new, &mems); 22994225399aSPaul Jackson } 23001da177e4SLinus Torvalds atomic_set(&new->refcnt, 1); 23011da177e4SLinus Torvalds return new; 23021da177e4SLinus Torvalds } 23031da177e4SLinus Torvalds 23041da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */ 2305fcfb4dccSKOSAKI Motohiro bool __mpol_equal(struct mempolicy *a, struct mempolicy *b) 23061da177e4SLinus Torvalds { 23071da177e4SLinus Torvalds if (!a || !b) 2308fcfb4dccSKOSAKI Motohiro return false; 230945c4745aSLee Schermerhorn if (a->mode != b->mode) 2310fcfb4dccSKOSAKI Motohiro return false; 231119800502SBob Liu if (a->flags != b->flags) 2312fcfb4dccSKOSAKI Motohiro return false; 231319800502SBob Liu if (mpol_store_user_nodemask(a)) 231419800502SBob Liu if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask)) 2315fcfb4dccSKOSAKI Motohiro return false; 231619800502SBob Liu 231745c4745aSLee Schermerhorn switch (a->mode) { 231819770b32SMel Gorman case MPOL_BIND: 23191da177e4SLinus Torvalds case MPOL_INTERLEAVE: 2320fcfb4dccSKOSAKI Motohiro return !!nodes_equal(a->v.nodes, b->v.nodes); 23211da177e4SLinus Torvalds case MPOL_PREFERRED: 232275719661SNamhyung Kim return a->v.preferred_node == b->v.preferred_node; 23237858d7bcSFeng Tang case MPOL_LOCAL: 23247858d7bcSFeng Tang return true; 23251da177e4SLinus Torvalds default: 23261da177e4SLinus Torvalds BUG(); 2327fcfb4dccSKOSAKI Motohiro return false; 23281da177e4SLinus Torvalds } 23291da177e4SLinus Torvalds } 23301da177e4SLinus Torvalds 23311da177e4SLinus Torvalds /* 23321da177e4SLinus Torvalds * Shared memory backing store policy support. 23331da177e4SLinus Torvalds * 23341da177e4SLinus Torvalds * Remember policies even when nobody has shared memory mapped. 23351da177e4SLinus Torvalds * The policies are kept in a red-black tree linked from the inode. 23364a8c7bb5SNathan Zimmer * They are protected by the sp->lock rwlock, which should be held 23371da177e4SLinus Torvalds * for any accesses to the tree. 23381da177e4SLinus Torvalds */ 23391da177e4SLinus Torvalds 23404a8c7bb5SNathan Zimmer /* 23414a8c7bb5SNathan Zimmer * Look up the first element intersecting start-end.
Caller holds sp->lock for 23424a8c7bb5SNathan Zimmer * reading or for writing 23434a8c7bb5SNathan Zimmer */ 23441da177e4SLinus Torvalds static struct sp_node * 23451da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end) 23461da177e4SLinus Torvalds { 23471da177e4SLinus Torvalds struct rb_node *n = sp->root.rb_node; 23481da177e4SLinus Torvalds 23491da177e4SLinus Torvalds while (n) { 23501da177e4SLinus Torvalds struct sp_node *p = rb_entry(n, struct sp_node, nd); 23511da177e4SLinus Torvalds 23521da177e4SLinus Torvalds if (start >= p->end) 23531da177e4SLinus Torvalds n = n->rb_right; 23541da177e4SLinus Torvalds else if (end <= p->start) 23551da177e4SLinus Torvalds n = n->rb_left; 23561da177e4SLinus Torvalds else 23571da177e4SLinus Torvalds break; 23581da177e4SLinus Torvalds } 23591da177e4SLinus Torvalds if (!n) 23601da177e4SLinus Torvalds return NULL; 23611da177e4SLinus Torvalds for (;;) { 23621da177e4SLinus Torvalds struct sp_node *w = NULL; 23631da177e4SLinus Torvalds struct rb_node *prev = rb_prev(n); 23641da177e4SLinus Torvalds if (!prev) 23651da177e4SLinus Torvalds break; 23661da177e4SLinus Torvalds w = rb_entry(prev, struct sp_node, nd); 23671da177e4SLinus Torvalds if (w->end <= start) 23681da177e4SLinus Torvalds break; 23691da177e4SLinus Torvalds n = prev; 23701da177e4SLinus Torvalds } 23711da177e4SLinus Torvalds return rb_entry(n, struct sp_node, nd); 23721da177e4SLinus Torvalds } 23731da177e4SLinus Torvalds 23744a8c7bb5SNathan Zimmer /* 23754a8c7bb5SNathan Zimmer * Insert a new shared policy into the tree. Caller holds sp->lock for 23764a8c7bb5SNathan Zimmer * writing. 23774a8c7bb5SNathan Zimmer */ 23781da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new) 23791da177e4SLinus Torvalds { 23801da177e4SLinus Torvalds struct rb_node **p = &sp->root.rb_node; 23811da177e4SLinus Torvalds struct rb_node *parent = NULL; 23821da177e4SLinus Torvalds struct sp_node *nd; 23831da177e4SLinus Torvalds 23841da177e4SLinus Torvalds while (*p) { 23851da177e4SLinus Torvalds parent = *p; 23861da177e4SLinus Torvalds nd = rb_entry(parent, struct sp_node, nd); 23871da177e4SLinus Torvalds if (new->start < nd->start) 23881da177e4SLinus Torvalds p = &(*p)->rb_left; 23891da177e4SLinus Torvalds else if (new->end > nd->end) 23901da177e4SLinus Torvalds p = &(*p)->rb_right; 23911da177e4SLinus Torvalds else 23921da177e4SLinus Torvalds BUG(); 23931da177e4SLinus Torvalds } 23941da177e4SLinus Torvalds rb_link_node(&new->nd, parent, p); 23951da177e4SLinus Torvalds rb_insert_color(&new->nd, &sp->root); 2396140d5a49SPaul Mundt pr_debug("inserting %lx-%lx: %d\n", new->start, new->end, 239745c4745aSLee Schermerhorn new->policy ?
new->policy->mode : 0); 23981da177e4SLinus Torvalds } 23991da177e4SLinus Torvalds 24001da177e4SLinus Torvalds /* Find shared policy intersecting idx */ 24011da177e4SLinus Torvalds struct mempolicy * 24021da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx) 24031da177e4SLinus Torvalds { 24041da177e4SLinus Torvalds struct mempolicy *pol = NULL; 24051da177e4SLinus Torvalds struct sp_node *sn; 24061da177e4SLinus Torvalds 24071da177e4SLinus Torvalds if (!sp->root.rb_node) 24081da177e4SLinus Torvalds return NULL; 24094a8c7bb5SNathan Zimmer read_lock(&sp->lock); 24101da177e4SLinus Torvalds sn = sp_lookup(sp, idx, idx+1); 24111da177e4SLinus Torvalds if (sn) { 24121da177e4SLinus Torvalds mpol_get(sn->policy); 24131da177e4SLinus Torvalds pol = sn->policy; 24141da177e4SLinus Torvalds } 24154a8c7bb5SNathan Zimmer read_unlock(&sp->lock); 24161da177e4SLinus Torvalds return pol; 24171da177e4SLinus Torvalds } 24181da177e4SLinus Torvalds 241963f74ca2SKOSAKI Motohiro static void sp_free(struct sp_node *n) 242063f74ca2SKOSAKI Motohiro { 242163f74ca2SKOSAKI Motohiro mpol_put(n->policy); 242263f74ca2SKOSAKI Motohiro kmem_cache_free(sn_cache, n); 242363f74ca2SKOSAKI Motohiro } 242463f74ca2SKOSAKI Motohiro 2425771fb4d8SLee Schermerhorn /** 2426771fb4d8SLee Schermerhorn * mpol_misplaced - check whether current page node is valid in policy 2427771fb4d8SLee Schermerhorn * 2428b46e14acSFabian Frederick * @page: page to be checked 2429b46e14acSFabian Frederick * @vma: vm area where page mapped 2430b46e14acSFabian Frederick * @addr: virtual address where page mapped 2431771fb4d8SLee Schermerhorn * 2432771fb4d8SLee Schermerhorn * Lookup current policy node id for vma,addr and "compare to" page's 24335f076944SMatthew Wilcox (Oracle) * node id. Policy determination "mimics" alloc_page_vma(). 2434771fb4d8SLee Schermerhorn * Called from fault path where we know the vma and faulting address. 24355f076944SMatthew Wilcox (Oracle) * 24365f076944SMatthew Wilcox (Oracle) * Return: -1 if the page is in a node that is valid for this policy, or a 24375f076944SMatthew Wilcox (Oracle) * suitable node ID to allocate a replacement page from. 
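 *
 * Simplified sketch of how the NUMA fault path consumes this result
 * (editor's illustration; see do_numa_page() in mm/memory.c):
 *
 *	int nid = mpol_misplaced(page, vma, addr);
 *	if (nid != NUMA_NO_NODE)	// -1 (NUMA_NO_NODE) means well placed
 *		migrate_misplaced_page(page, vma, nid);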
2438771fb4d8SLee Schermerhorn */ 2439771fb4d8SLee Schermerhorn int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr) 2440771fb4d8SLee Schermerhorn { 2441771fb4d8SLee Schermerhorn struct mempolicy *pol; 2442c33d6c06SMel Gorman struct zoneref *z; 2443771fb4d8SLee Schermerhorn int curnid = page_to_nid(page); 2444771fb4d8SLee Schermerhorn unsigned long pgoff; 244590572890SPeter Zijlstra int thiscpu = raw_smp_processor_id(); 244690572890SPeter Zijlstra int thisnid = cpu_to_node(thiscpu); 244798fa15f3SAnshuman Khandual int polnid = NUMA_NO_NODE; 2448771fb4d8SLee Schermerhorn int ret = -1; 2449771fb4d8SLee Schermerhorn 2450dd6eecb9SOleg Nesterov pol = get_vma_policy(vma, addr); 2451771fb4d8SLee Schermerhorn if (!(pol->flags & MPOL_F_MOF)) 2452771fb4d8SLee Schermerhorn goto out; 2453771fb4d8SLee Schermerhorn 2454771fb4d8SLee Schermerhorn switch (pol->mode) { 2455771fb4d8SLee Schermerhorn case MPOL_INTERLEAVE: 2456771fb4d8SLee Schermerhorn pgoff = vma->vm_pgoff; 2457771fb4d8SLee Schermerhorn pgoff += (addr - vma->vm_start) >> PAGE_SHIFT; 245898c70baaSLaurent Dufour polnid = offset_il_node(pol, pgoff); 2459771fb4d8SLee Schermerhorn break; 2460771fb4d8SLee Schermerhorn 2461771fb4d8SLee Schermerhorn case MPOL_PREFERRED: 2462771fb4d8SLee Schermerhorn polnid = pol->v.preferred_node; 2463771fb4d8SLee Schermerhorn break; 2464771fb4d8SLee Schermerhorn 24657858d7bcSFeng Tang case MPOL_LOCAL: 24667858d7bcSFeng Tang polnid = numa_node_id(); 24677858d7bcSFeng Tang break; 24687858d7bcSFeng Tang 2469771fb4d8SLee Schermerhorn case MPOL_BIND: 2470bda420b9SHuang Ying /* Optimize placement among multiple nodes via NUMA balancing */ 2471bda420b9SHuang Ying if (pol->flags & MPOL_F_MORON) { 2472bda420b9SHuang Ying if (node_isset(thisnid, pol->v.nodes)) 2473bda420b9SHuang Ying break; 2474bda420b9SHuang Ying goto out; 2475bda420b9SHuang Ying } 2476c33d6c06SMel Gorman 2477771fb4d8SLee Schermerhorn /* 2478771fb4d8SLee Schermerhorn * allows binding to multiple nodes. 2479771fb4d8SLee Schermerhorn * use current page if in policy nodemask, 2480771fb4d8SLee Schermerhorn * else select nearest allowed node, if any. 2481771fb4d8SLee Schermerhorn * If no allowed nodes, use current [!misplaced]. 
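 * For example (editor's illustration): with pol->v.nodes = {2,3} and
 * the page currently on node 0, the zonelist lookup below picks the
 * nearest zone on node 2 or 3, and that node becomes polnid.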
2482771fb4d8SLee Schermerhorn */ 2483771fb4d8SLee Schermerhorn if (node_isset(curnid, pol->v.nodes)) 2484771fb4d8SLee Schermerhorn goto out; 2485c33d6c06SMel Gorman z = first_zones_zonelist( 2486771fb4d8SLee Schermerhorn node_zonelist(numa_node_id(), GFP_HIGHUSER), 2487771fb4d8SLee Schermerhorn gfp_zone(GFP_HIGHUSER), 2488c33d6c06SMel Gorman &pol->v.nodes); 2489c1093b74SPavel Tatashin polnid = zone_to_nid(z->zone); 2490771fb4d8SLee Schermerhorn break; 2491771fb4d8SLee Schermerhorn 2492771fb4d8SLee Schermerhorn default: 2493771fb4d8SLee Schermerhorn BUG(); 2494771fb4d8SLee Schermerhorn } 24955606e387SMel Gorman 24965606e387SMel Gorman /* Migrate the page towards the node whose CPU is referencing it */ 2497e42c8ff2SMel Gorman if (pol->flags & MPOL_F_MORON) { 249890572890SPeter Zijlstra polnid = thisnid; 24995606e387SMel Gorman 250010f39042SRik van Riel if (!should_numa_migrate_memory(current, page, curnid, thiscpu)) 2501de1c9ce6SRik van Riel goto out; 2502de1c9ce6SRik van Riel } 2503e42c8ff2SMel Gorman 2504771fb4d8SLee Schermerhorn if (curnid != polnid) 2505771fb4d8SLee Schermerhorn ret = polnid; 2506771fb4d8SLee Schermerhorn out: 2507771fb4d8SLee Schermerhorn mpol_cond_put(pol); 2508771fb4d8SLee Schermerhorn 2509771fb4d8SLee Schermerhorn return ret; 2510771fb4d8SLee Schermerhorn } 2511771fb4d8SLee Schermerhorn 2512c11600e4SDavid Rientjes /* 2513c11600e4SDavid Rientjes * Drop the (possibly final) reference to task->mempolicy. It needs to be 2514c11600e4SDavid Rientjes * dropped after task->mempolicy is set to NULL so that any allocation done as 2515c11600e4SDavid Rientjes * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed 2516c11600e4SDavid Rientjes * policy. 2517c11600e4SDavid Rientjes */ 2518c11600e4SDavid Rientjes void mpol_put_task_policy(struct task_struct *task) 2519c11600e4SDavid Rientjes { 2520c11600e4SDavid Rientjes struct mempolicy *pol; 2521c11600e4SDavid Rientjes 2522c11600e4SDavid Rientjes task_lock(task); 2523c11600e4SDavid Rientjes pol = task->mempolicy; 2524c11600e4SDavid Rientjes task->mempolicy = NULL; 2525c11600e4SDavid Rientjes task_unlock(task); 2526c11600e4SDavid Rientjes mpol_put(pol); 2527c11600e4SDavid Rientjes } 2528c11600e4SDavid Rientjes 25291da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n) 25301da177e4SLinus Torvalds { 2531140d5a49SPaul Mundt pr_debug("deleting %lx-%lx\n", n->start, n->end); 25321da177e4SLinus Torvalds rb_erase(&n->nd, &sp->root); 253363f74ca2SKOSAKI Motohiro sp_free(n); 25341da177e4SLinus Torvalds } 25351da177e4SLinus Torvalds 253642288fe3SMel Gorman static void sp_node_init(struct sp_node *node, unsigned long start, 253742288fe3SMel Gorman unsigned long end, struct mempolicy *pol) 253842288fe3SMel Gorman { 253942288fe3SMel Gorman node->start = start; 254042288fe3SMel Gorman node->end = end; 254142288fe3SMel Gorman node->policy = pol; 254242288fe3SMel Gorman } 254342288fe3SMel Gorman 2544dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end, 2545dbcb0f19SAdrian Bunk struct mempolicy *pol) 25461da177e4SLinus Torvalds { 2547869833f2SKOSAKI Motohiro struct sp_node *n; 2548869833f2SKOSAKI Motohiro struct mempolicy *newpol; 25491da177e4SLinus Torvalds 2550869833f2SKOSAKI Motohiro n = kmem_cache_alloc(sn_cache, GFP_KERNEL); 25511da177e4SLinus Torvalds if (!n) 25521da177e4SLinus Torvalds return NULL; 2553869833f2SKOSAKI Motohiro 2554869833f2SKOSAKI Motohiro newpol = mpol_dup(pol); 2555869833f2SKOSAKI Motohiro if (IS_ERR(newpol)) {
2556869833f2SKOSAKI Motohiro kmem_cache_free(sn_cache, n); 2557869833f2SKOSAKI Motohiro return NULL; 2558869833f2SKOSAKI Motohiro } 2559869833f2SKOSAKI Motohiro newpol->flags |= MPOL_F_SHARED; 256042288fe3SMel Gorman sp_node_init(n, start, end, newpol); 2561869833f2SKOSAKI Motohiro 25621da177e4SLinus Torvalds return n; 25631da177e4SLinus Torvalds } 25641da177e4SLinus Torvalds 25651da177e4SLinus Torvalds /* Replace a policy range. */ 25661da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start, 25671da177e4SLinus Torvalds unsigned long end, struct sp_node *new) 25681da177e4SLinus Torvalds { 2569b22d127aSMel Gorman struct sp_node *n; 257042288fe3SMel Gorman struct sp_node *n_new = NULL; 257142288fe3SMel Gorman struct mempolicy *mpol_new = NULL; 2572b22d127aSMel Gorman int ret = 0; 25731da177e4SLinus Torvalds 257442288fe3SMel Gorman restart: 25754a8c7bb5SNathan Zimmer write_lock(&sp->lock); 25761da177e4SLinus Torvalds n = sp_lookup(sp, start, end); 25771da177e4SLinus Torvalds /* Take care of old policies in the same range. */ 25781da177e4SLinus Torvalds while (n && n->start < end) { 25791da177e4SLinus Torvalds struct rb_node *next = rb_next(&n->nd); 25801da177e4SLinus Torvalds if (n->start >= start) { 25811da177e4SLinus Torvalds if (n->end <= end) 25821da177e4SLinus Torvalds sp_delete(sp, n); 25831da177e4SLinus Torvalds else 25841da177e4SLinus Torvalds n->start = end; 25851da177e4SLinus Torvalds } else { 25861da177e4SLinus Torvalds /* Old policy spanning whole new range. */ 25871da177e4SLinus Torvalds if (n->end > end) { 258842288fe3SMel Gorman if (!n_new) 258942288fe3SMel Gorman goto alloc_new; 259042288fe3SMel Gorman 259142288fe3SMel Gorman *mpol_new = *n->policy; 259242288fe3SMel Gorman atomic_set(&mpol_new->refcnt, 1); 25937880639cSKOSAKI Motohiro sp_node_init(n_new, end, n->end, mpol_new); 25941da177e4SLinus Torvalds n->end = start; 25955ca39575SHillf Danton sp_insert(sp, n_new); 259642288fe3SMel Gorman n_new = NULL; 259742288fe3SMel Gorman mpol_new = NULL; 25981da177e4SLinus Torvalds break; 25991da177e4SLinus Torvalds } else 26001da177e4SLinus Torvalds n->end = start; 26011da177e4SLinus Torvalds } 26021da177e4SLinus Torvalds if (!next) 26031da177e4SLinus Torvalds break; 26041da177e4SLinus Torvalds n = rb_entry(next, struct sp_node, nd); 26051da177e4SLinus Torvalds } 26061da177e4SLinus Torvalds if (new) 26071da177e4SLinus Torvalds sp_insert(sp, new); 26084a8c7bb5SNathan Zimmer write_unlock(&sp->lock); 260942288fe3SMel Gorman ret = 0; 261042288fe3SMel Gorman 261142288fe3SMel Gorman err_out: 261242288fe3SMel Gorman if (mpol_new) 261342288fe3SMel Gorman mpol_put(mpol_new); 261442288fe3SMel Gorman if (n_new) 261542288fe3SMel Gorman kmem_cache_free(sn_cache, n_new); 261642288fe3SMel Gorman 2617b22d127aSMel Gorman return ret; 261842288fe3SMel Gorman 261942288fe3SMel Gorman alloc_new: 26204a8c7bb5SNathan Zimmer write_unlock(&sp->lock); 262142288fe3SMel Gorman ret = -ENOMEM; 262242288fe3SMel Gorman n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL); 262342288fe3SMel Gorman if (!n_new) 262442288fe3SMel Gorman goto err_out; 262542288fe3SMel Gorman mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL); 262642288fe3SMel Gorman if (!mpol_new) 262742288fe3SMel Gorman goto err_out; 262842288fe3SMel Gorman goto restart; 26291da177e4SLinus Torvalds } 26301da177e4SLinus Torvalds 263171fe804bSLee Schermerhorn /** 263271fe804bSLee Schermerhorn * mpol_shared_policy_init - initialize shared policy for inode 263371fe804bSLee Schermerhorn * @sp: pointer to inode shared 
policy 263471fe804bSLee Schermerhorn * @mpol: struct mempolicy to install 263571fe804bSLee Schermerhorn * 263671fe804bSLee Schermerhorn * Install non-NULL @mpol in the inode's shared policy rb-tree. 263771fe804bSLee Schermerhorn * On entry, the current task has a reference on a non-NULL @mpol. 263871fe804bSLee Schermerhorn * This must be released on exit. 26394bfc4495SKAMEZAWA Hiroyuki * This is called from get_inode(), so we can use GFP_KERNEL. 264071fe804bSLee Schermerhorn */ 264171fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) 26427339ff83SRobin Holt { 264358568d2aSMiao Xie int ret; 264458568d2aSMiao Xie 264571fe804bSLee Schermerhorn sp->root = RB_ROOT; /* empty tree == default mempolicy */ 26464a8c7bb5SNathan Zimmer rwlock_init(&sp->lock); 26477339ff83SRobin Holt 264871fe804bSLee Schermerhorn if (mpol) { 26497339ff83SRobin Holt struct vm_area_struct pvma; 265071fe804bSLee Schermerhorn struct mempolicy *new; 26514bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 26527339ff83SRobin Holt 26534bfc4495SKAMEZAWA Hiroyuki if (!scratch) 26545c0c1654SLee Schermerhorn goto put_mpol; 265571fe804bSLee Schermerhorn /* contextualize the tmpfs mount point mempolicy */ 265671fe804bSLee Schermerhorn new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask); 265715d77835SLee Schermerhorn if (IS_ERR(new)) 26580cae3457SDan Carpenter goto free_scratch; /* no valid nodemask intersection */ 265958568d2aSMiao Xie 266058568d2aSMiao Xie task_lock(current); 26614bfc4495SKAMEZAWA Hiroyuki ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch); 266258568d2aSMiao Xie task_unlock(current); 266315d77835SLee Schermerhorn if (ret) 26645c0c1654SLee Schermerhorn goto put_new; 266571fe804bSLee Schermerhorn 266671fe804bSLee Schermerhorn /* Create pseudo-vma that contains just the policy */ 26672c4541e2SKirill A. Shutemov vma_init(&pvma, NULL); 266871fe804bSLee Schermerhorn pvma.vm_end = TASK_SIZE; /* policy covers entire file */ 266971fe804bSLee Schermerhorn mpol_set_shared_policy(sp, &pvma, new); /* adds ref */ 267015d77835SLee Schermerhorn 26715c0c1654SLee Schermerhorn put_new: 267271fe804bSLee Schermerhorn mpol_put(new); /* drop initial ref */ 26730cae3457SDan Carpenter free_scratch: 26744bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 26755c0c1654SLee Schermerhorn put_mpol: 26765c0c1654SLee Schermerhorn mpol_put(mpol); /* drop our incoming ref on sb mpol */ 26777339ff83SRobin Holt } 26787339ff83SRobin Holt } 26797339ff83SRobin Holt 26801da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info, 26811da177e4SLinus Torvalds struct vm_area_struct *vma, struct mempolicy *npol) 26821da177e4SLinus Torvalds { 26831da177e4SLinus Torvalds int err; 26841da177e4SLinus Torvalds struct sp_node *new = NULL; 26851da177e4SLinus Torvalds unsigned long sz = vma_pages(vma); 26861da177e4SLinus Torvalds 2687028fec41SDavid Rientjes pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n", 26881da177e4SLinus Torvalds vma->vm_pgoff, 268945c4745aSLee Schermerhorn sz, npol ? npol->mode : -1, 2690028fec41SDavid Rientjes npol ? npol->flags : -1, 269100ef2d2fSDavid Rientjes npol ?
nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE); 26921da177e4SLinus Torvalds 26931da177e4SLinus Torvalds if (npol) { 26941da177e4SLinus Torvalds new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol); 26951da177e4SLinus Torvalds if (!new) 26961da177e4SLinus Torvalds return -ENOMEM; 26971da177e4SLinus Torvalds } 26981da177e4SLinus Torvalds err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new); 26991da177e4SLinus Torvalds if (err && new) 270063f74ca2SKOSAKI Motohiro sp_free(new); 27011da177e4SLinus Torvalds return err; 27021da177e4SLinus Torvalds } 27031da177e4SLinus Torvalds 27041da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */ 27051da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p) 27061da177e4SLinus Torvalds { 27071da177e4SLinus Torvalds struct sp_node *n; 27081da177e4SLinus Torvalds struct rb_node *next; 27091da177e4SLinus Torvalds 27101da177e4SLinus Torvalds if (!p->root.rb_node) 27111da177e4SLinus Torvalds return; 27124a8c7bb5SNathan Zimmer write_lock(&p->lock); 27131da177e4SLinus Torvalds next = rb_first(&p->root); 27141da177e4SLinus Torvalds while (next) { 27151da177e4SLinus Torvalds n = rb_entry(next, struct sp_node, nd); 27161da177e4SLinus Torvalds next = rb_next(&n->nd); 271763f74ca2SKOSAKI Motohiro sp_delete(p, n); 27181da177e4SLinus Torvalds } 27194a8c7bb5SNathan Zimmer write_unlock(&p->lock); 27201da177e4SLinus Torvalds } 27211da177e4SLinus Torvalds 27221a687c2eSMel Gorman #ifdef CONFIG_NUMA_BALANCING 2723c297663cSMel Gorman static int __initdata numabalancing_override; 27241a687c2eSMel Gorman 27251a687c2eSMel Gorman static void __init check_numabalancing_enable(void) 27261a687c2eSMel Gorman { 27271a687c2eSMel Gorman bool numabalancing_default = false; 27281a687c2eSMel Gorman 27291a687c2eSMel Gorman if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED)) 27301a687c2eSMel Gorman numabalancing_default = true; 27311a687c2eSMel Gorman 2732c297663cSMel Gorman /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */ 2733c297663cSMel Gorman if (numabalancing_override) 2734c297663cSMel Gorman set_numabalancing_state(numabalancing_override == 1); 2735c297663cSMel Gorman 2736b0dc2b9bSMel Gorman if (num_online_nodes() > 1 && !numabalancing_override) { 2737756a025fSJoe Perches pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n", 2738c297663cSMel Gorman numabalancing_default ? 
"Enabling" : "Disabling"); 27391a687c2eSMel Gorman set_numabalancing_state(numabalancing_default); 27401a687c2eSMel Gorman } 27411a687c2eSMel Gorman } 27421a687c2eSMel Gorman 27431a687c2eSMel Gorman static int __init setup_numabalancing(char *str) 27441a687c2eSMel Gorman { 27451a687c2eSMel Gorman int ret = 0; 27461a687c2eSMel Gorman if (!str) 27471a687c2eSMel Gorman goto out; 27481a687c2eSMel Gorman 27491a687c2eSMel Gorman if (!strcmp(str, "enable")) { 2750c297663cSMel Gorman numabalancing_override = 1; 27511a687c2eSMel Gorman ret = 1; 27521a687c2eSMel Gorman } else if (!strcmp(str, "disable")) { 2753c297663cSMel Gorman numabalancing_override = -1; 27541a687c2eSMel Gorman ret = 1; 27551a687c2eSMel Gorman } 27561a687c2eSMel Gorman out: 27571a687c2eSMel Gorman if (!ret) 27584a404beaSAndrew Morton pr_warn("Unable to parse numa_balancing=\n"); 27591a687c2eSMel Gorman 27601a687c2eSMel Gorman return ret; 27611a687c2eSMel Gorman } 27621a687c2eSMel Gorman __setup("numa_balancing=", setup_numabalancing); 27631a687c2eSMel Gorman #else 27641a687c2eSMel Gorman static inline void __init check_numabalancing_enable(void) 27651a687c2eSMel Gorman { 27661a687c2eSMel Gorman } 27671a687c2eSMel Gorman #endif /* CONFIG_NUMA_BALANCING */ 27681a687c2eSMel Gorman 27691da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */ 27701da177e4SLinus Torvalds void __init numa_policy_init(void) 27711da177e4SLinus Torvalds { 2772b71636e2SPaul Mundt nodemask_t interleave_nodes; 2773b71636e2SPaul Mundt unsigned long largest = 0; 2774b71636e2SPaul Mundt int nid, prefer = 0; 2775b71636e2SPaul Mundt 27761da177e4SLinus Torvalds policy_cache = kmem_cache_create("numa_policy", 27771da177e4SLinus Torvalds sizeof(struct mempolicy), 277820c2df83SPaul Mundt 0, SLAB_PANIC, NULL); 27791da177e4SLinus Torvalds 27801da177e4SLinus Torvalds sn_cache = kmem_cache_create("shared_policy_node", 27811da177e4SLinus Torvalds sizeof(struct sp_node), 278220c2df83SPaul Mundt 0, SLAB_PANIC, NULL); 27831da177e4SLinus Torvalds 27845606e387SMel Gorman for_each_node(nid) { 27855606e387SMel Gorman preferred_node_policy[nid] = (struct mempolicy) { 27865606e387SMel Gorman .refcnt = ATOMIC_INIT(1), 27875606e387SMel Gorman .mode = MPOL_PREFERRED, 27885606e387SMel Gorman .flags = MPOL_F_MOF | MPOL_F_MORON, 27895606e387SMel Gorman .v = { .preferred_node = nid, }, 27905606e387SMel Gorman }; 27915606e387SMel Gorman } 27925606e387SMel Gorman 2793b71636e2SPaul Mundt /* 2794b71636e2SPaul Mundt * Set interleaving policy for system init. Interleaving is only 2795b71636e2SPaul Mundt * enabled across suitably sized nodes (default is >= 16MB), or 2796b71636e2SPaul Mundt * fall back to the largest node if they're all smaller. 2797b71636e2SPaul Mundt */ 2798b71636e2SPaul Mundt nodes_clear(interleave_nodes); 279901f13bd6SLai Jiangshan for_each_node_state(nid, N_MEMORY) { 2800b71636e2SPaul Mundt unsigned long total_pages = node_present_pages(nid); 28011da177e4SLinus Torvalds 2802b71636e2SPaul Mundt /* Preserve the largest node */ 2803b71636e2SPaul Mundt if (largest < total_pages) { 2804b71636e2SPaul Mundt largest = total_pages; 2805b71636e2SPaul Mundt prefer = nid; 2806b71636e2SPaul Mundt } 2807b71636e2SPaul Mundt 2808b71636e2SPaul Mundt /* Interleave this node? 
*/ 2809b71636e2SPaul Mundt if ((total_pages << PAGE_SHIFT) >= (16 << 20)) 2810b71636e2SPaul Mundt node_set(nid, interleave_nodes); 2811b71636e2SPaul Mundt } 2812b71636e2SPaul Mundt 2813b71636e2SPaul Mundt /* All too small, use the largest */ 2814b71636e2SPaul Mundt if (unlikely(nodes_empty(interleave_nodes))) 2815b71636e2SPaul Mundt node_set(prefer, interleave_nodes); 2816b71636e2SPaul Mundt 2817028fec41SDavid Rientjes if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes)) 2818b1de0d13SMitchel Humpherys pr_err("%s: interleaving failed\n", __func__); 28191a687c2eSMel Gorman 28201a687c2eSMel Gorman check_numabalancing_enable(); 28211da177e4SLinus Torvalds } 28221da177e4SLinus Torvalds 28238bccd85fSChristoph Lameter /* Reset policy of current process to default */ 28241da177e4SLinus Torvalds void numa_default_policy(void) 28251da177e4SLinus Torvalds { 2826028fec41SDavid Rientjes do_set_mempolicy(MPOL_DEFAULT, 0, NULL); 28271da177e4SLinus Torvalds } 282868860ec1SPaul Jackson 28294225399aSPaul Jackson /* 2830095f1fc4SLee Schermerhorn * Parse and format mempolicy from/to strings 2831095f1fc4SLee Schermerhorn */ 2832095f1fc4SLee Schermerhorn 2833345ace9cSLee Schermerhorn static const char * const policy_modes[] = 2834345ace9cSLee Schermerhorn { 2835345ace9cSLee Schermerhorn [MPOL_DEFAULT] = "default", 2836345ace9cSLee Schermerhorn [MPOL_PREFERRED] = "prefer", 2837345ace9cSLee Schermerhorn [MPOL_BIND] = "bind", 2838345ace9cSLee Schermerhorn [MPOL_INTERLEAVE] = "interleave", 2839d3a71033SLee Schermerhorn [MPOL_LOCAL] = "local", 2840345ace9cSLee Schermerhorn }; 28411a75a6c8SChristoph Lameter 2842095f1fc4SLee Schermerhorn 2843095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS 2844095f1fc4SLee Schermerhorn /** 2845f2a07f40SHugh Dickins * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option. 2846095f1fc4SLee Schermerhorn * @str: string containing mempolicy to parse 284771fe804bSLee Schermerhorn * @mpol: pointer to struct mempolicy pointer, returned on success. 
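 * (Editor's illustrative examples of accepted strings, per the format
 * described below: "interleave:0-3", "prefer=static:1",
 * "bind=relative:0,2", "local".)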
2848095f1fc4SLee Schermerhorn * 2849095f1fc4SLee Schermerhorn * Format of input: 2850095f1fc4SLee Schermerhorn * <mode>[=<flags>][:<nodelist>] 2851095f1fc4SLee Schermerhorn * 285271fe804bSLee Schermerhorn * On success, returns 0, else 1 2853095f1fc4SLee Schermerhorn */ 2854a7a88b23SHugh Dickins int mpol_parse_str(char *str, struct mempolicy **mpol) 2855095f1fc4SLee Schermerhorn { 285671fe804bSLee Schermerhorn struct mempolicy *new = NULL; 2857f2a07f40SHugh Dickins unsigned short mode_flags; 285871fe804bSLee Schermerhorn nodemask_t nodes; 2859095f1fc4SLee Schermerhorn char *nodelist = strchr(str, ':'); 2860095f1fc4SLee Schermerhorn char *flags = strchr(str, '='); 2861dedf2c73Szhong jiang int err = 1, mode; 2862095f1fc4SLee Schermerhorn 2863c7a91bc7SDan Carpenter if (flags) 2864c7a91bc7SDan Carpenter *flags++ = '\0'; /* terminate mode string */ 2865c7a91bc7SDan Carpenter 2866095f1fc4SLee Schermerhorn if (nodelist) { 2867095f1fc4SLee Schermerhorn /* NUL-terminate mode or flags string */ 2868095f1fc4SLee Schermerhorn *nodelist++ = '\0'; 286971fe804bSLee Schermerhorn if (nodelist_parse(nodelist, nodes)) 2870095f1fc4SLee Schermerhorn goto out; 287101f13bd6SLai Jiangshan if (!nodes_subset(nodes, node_states[N_MEMORY])) 2872095f1fc4SLee Schermerhorn goto out; 287371fe804bSLee Schermerhorn } else 287471fe804bSLee Schermerhorn nodes_clear(nodes); 287571fe804bSLee Schermerhorn 2876dedf2c73Szhong jiang mode = match_string(policy_modes, MPOL_MAX, str); 2877dedf2c73Szhong jiang if (mode < 0) 2878095f1fc4SLee Schermerhorn goto out; 2879095f1fc4SLee Schermerhorn 288071fe804bSLee Schermerhorn switch (mode) { 2881095f1fc4SLee Schermerhorn case MPOL_PREFERRED: 288271fe804bSLee Schermerhorn /* 2883aa9f7d51SRandy Dunlap * Insist on a nodelist of one node only, although later 2884aa9f7d51SRandy Dunlap * we use first_node(nodes) to grab a single node, so here 2885aa9f7d51SRandy Dunlap * nodelist (or nodes) cannot be empty. 
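 * For example (illustrative): "prefer:2" is accepted, while
 * "prefer:2-3" (more than one node) is rejected.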
288671fe804bSLee Schermerhorn */ 2887095f1fc4SLee Schermerhorn if (nodelist) { 2888095f1fc4SLee Schermerhorn char *rest = nodelist; 2889095f1fc4SLee Schermerhorn while (isdigit(*rest)) 2890095f1fc4SLee Schermerhorn rest++; 2891926f2ae0SKOSAKI Motohiro if (*rest) 2892926f2ae0SKOSAKI Motohiro goto out; 2893aa9f7d51SRandy Dunlap if (nodes_empty(nodes)) 2894aa9f7d51SRandy Dunlap goto out; 2895095f1fc4SLee Schermerhorn } 2896095f1fc4SLee Schermerhorn break; 2897095f1fc4SLee Schermerhorn case MPOL_INTERLEAVE: 2898095f1fc4SLee Schermerhorn /* 2899095f1fc4SLee Schermerhorn * Default to online nodes with memory if no nodelist 2900095f1fc4SLee Schermerhorn */ 2901095f1fc4SLee Schermerhorn if (!nodelist) 290201f13bd6SLai Jiangshan nodes = node_states[N_MEMORY]; 29033f226aa1SLee Schermerhorn break; 290471fe804bSLee Schermerhorn case MPOL_LOCAL: 29053f226aa1SLee Schermerhorn /* 290671fe804bSLee Schermerhorn * Don't allow a nodelist; mpol_new() checks flags 29073f226aa1SLee Schermerhorn */ 290871fe804bSLee Schermerhorn if (nodelist) 29093f226aa1SLee Schermerhorn goto out; 29103f226aa1SLee Schermerhorn break; 2911413b43deSRavikiran G Thirumalai case MPOL_DEFAULT: 2912413b43deSRavikiran G Thirumalai /* 2913413b43deSRavikiran G Thirumalai * Insist on an empty nodelist 2914413b43deSRavikiran G Thirumalai */ 2915413b43deSRavikiran G Thirumalai if (!nodelist) 2916413b43deSRavikiran G Thirumalai err = 0; 2917413b43deSRavikiran G Thirumalai goto out; 2918d69b2e63SKOSAKI Motohiro case MPOL_BIND: 291971fe804bSLee Schermerhorn /* 2920d69b2e63SKOSAKI Motohiro * Insist on a nodelist 292171fe804bSLee Schermerhorn */ 2922d69b2e63SKOSAKI Motohiro if (!nodelist) 2923d69b2e63SKOSAKI Motohiro goto out; 2924095f1fc4SLee Schermerhorn } 2925095f1fc4SLee Schermerhorn 292671fe804bSLee Schermerhorn mode_flags = 0; 2927095f1fc4SLee Schermerhorn if (flags) { 2928095f1fc4SLee Schermerhorn /* 2929095f1fc4SLee Schermerhorn * Currently, we only support two mutually exclusive 2930095f1fc4SLee Schermerhorn * mode flags. 2931095f1fc4SLee Schermerhorn */ 2932095f1fc4SLee Schermerhorn if (!strcmp(flags, "static")) 293371fe804bSLee Schermerhorn mode_flags |= MPOL_F_STATIC_NODES; 2934095f1fc4SLee Schermerhorn else if (!strcmp(flags, "relative")) 293571fe804bSLee Schermerhorn mode_flags |= MPOL_F_RELATIVE_NODES; 2936095f1fc4SLee Schermerhorn else 2937926f2ae0SKOSAKI Motohiro goto out; 2938095f1fc4SLee Schermerhorn } 293971fe804bSLee Schermerhorn 294071fe804bSLee Schermerhorn new = mpol_new(mode, mode_flags, &nodes); 294171fe804bSLee Schermerhorn if (IS_ERR(new)) 2942926f2ae0SKOSAKI Motohiro goto out; 2943926f2ae0SKOSAKI Motohiro 2944f2a07f40SHugh Dickins /* 2945f2a07f40SHugh Dickins * Save nodes for mpol_to_str() to show the tmpfs mount options 2946f2a07f40SHugh Dickins * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo. 2947f2a07f40SHugh Dickins */ 2948f2a07f40SHugh Dickins if (mode != MPOL_PREFERRED) 2949f2a07f40SHugh Dickins new->v.nodes = nodes; 2950f2a07f40SHugh Dickins else if (nodelist) 2951f2a07f40SHugh Dickins new->v.preferred_node = first_node(nodes); 2952f2a07f40SHugh Dickins else 29537858d7bcSFeng Tang new->mode = MPOL_LOCAL; 2954f2a07f40SHugh Dickins 2955f2a07f40SHugh Dickins /* 2956f2a07f40SHugh Dickins * Save nodes for contextualization: this will be used to "clone" 2957f2a07f40SHugh Dickins * the mempolicy in a specific context [cpuset] at a later time.
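 * (For instance, a tmpfs mount policy is re-created from
 * w.user_nodemask by mpol_shared_policy_init() once the mount's
 * cpuset context is known; see above.)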
2958f2a07f40SHugh Dickins */ 2959e17f74afSLee Schermerhorn new->w.user_nodemask = nodes; 2960f2a07f40SHugh Dickins 2961926f2ae0SKOSAKI Motohiro err = 0; 296271fe804bSLee Schermerhorn 2963095f1fc4SLee Schermerhorn out: 2964095f1fc4SLee Schermerhorn /* Restore string for error message */ 2965095f1fc4SLee Schermerhorn if (nodelist) 2966095f1fc4SLee Schermerhorn *--nodelist = ':'; 2967095f1fc4SLee Schermerhorn if (flags) 2968095f1fc4SLee Schermerhorn *--flags = '='; 296971fe804bSLee Schermerhorn if (!err) 297071fe804bSLee Schermerhorn *mpol = new; 2971095f1fc4SLee Schermerhorn return err; 2972095f1fc4SLee Schermerhorn } 2973095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */ 2974095f1fc4SLee Schermerhorn 297571fe804bSLee Schermerhorn /** 297671fe804bSLee Schermerhorn * mpol_to_str - format a mempolicy structure for printing 297771fe804bSLee Schermerhorn * @buffer: to contain formatted mempolicy string 297871fe804bSLee Schermerhorn * @maxlen: length of @buffer 297971fe804bSLee Schermerhorn * @pol: pointer to mempolicy to be formatted 298071fe804bSLee Schermerhorn * 2981948927eeSDavid Rientjes * Convert @pol into a string. If @buffer is too short, truncate the string. 2982948927eeSDavid Rientjes * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the 2983948927eeSDavid Rientjes * longest flag, "relative", and to display at least a few node ids. 29841a75a6c8SChristoph Lameter */ 2985948927eeSDavid Rientjes void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol) 29861a75a6c8SChristoph Lameter { 29871a75a6c8SChristoph Lameter char *p = buffer; 2988948927eeSDavid Rientjes nodemask_t nodes = NODE_MASK_NONE; 2989948927eeSDavid Rientjes unsigned short mode = MPOL_DEFAULT; 2990948927eeSDavid Rientjes unsigned short flags = 0; 29911a75a6c8SChristoph Lameter 29928790c71aSDavid Rientjes if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) { 2993bea904d5SLee Schermerhorn mode = pol->mode; 2994948927eeSDavid Rientjes flags = pol->flags; 2995948927eeSDavid Rientjes } 2996bea904d5SLee Schermerhorn 29971a75a6c8SChristoph Lameter switch (mode) { 29981a75a6c8SChristoph Lameter case MPOL_DEFAULT: 29997858d7bcSFeng Tang case MPOL_LOCAL: 30001a75a6c8SChristoph Lameter break; 30011a75a6c8SChristoph Lameter case MPOL_PREFERRED: 3002fc36b8d3SLee Schermerhorn node_set(pol->v.preferred_node, nodes); 30031a75a6c8SChristoph Lameter break; 30041a75a6c8SChristoph Lameter case MPOL_BIND: 30051a75a6c8SChristoph Lameter case MPOL_INTERLEAVE: 30061a75a6c8SChristoph Lameter nodes = pol->v.nodes; 30071a75a6c8SChristoph Lameter break; 30081a75a6c8SChristoph Lameter default: 3009948927eeSDavid Rientjes WARN_ON_ONCE(1); 3010948927eeSDavid Rientjes snprintf(p, maxlen, "unknown"); 3011948927eeSDavid Rientjes return; 30121a75a6c8SChristoph Lameter } 30131a75a6c8SChristoph Lameter 3014b7a9f420SDavid Rientjes p += snprintf(p, maxlen, "%s", policy_modes[mode]); 30151a75a6c8SChristoph Lameter 3016fc36b8d3SLee Schermerhorn if (flags & MPOL_MODE_FLAGS) { 3017948927eeSDavid Rientjes p += snprintf(p, buffer + maxlen - p, "="); 3018f5b087b5SDavid Rientjes 30192291990aSLee Schermerhorn /* 30202291990aSLee Schermerhorn * Currently, the only defined flags are mutually exclusive 30212291990aSLee Schermerhorn */ 3022f5b087b5SDavid Rientjes if (flags & MPOL_F_STATIC_NODES) 30232291990aSLee Schermerhorn p += snprintf(p, buffer + maxlen - p, "static"); 30242291990aSLee Schermerhorn else if (flags & MPOL_F_RELATIVE_NODES) 30252291990aSLee Schermerhorn p += snprintf(p, buffer + maxlen - p, "relative"); 
3026f5b087b5SDavid Rientjes } 3027f5b087b5SDavid Rientjes 30289e763e0fSTejun Heo if (!nodes_empty(nodes)) 30299e763e0fSTejun Heo p += scnprintf(p, buffer + maxlen - p, ":%*pbl", 30309e763e0fSTejun Heo nodemask_pr_args(&nodes)); 30311a75a6c8SChristoph Lameter }
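
/*
 * Example use of mpol_to_str() (editor's illustrative sketch; "pol" is
 * assumed to be a mempolicy reference held by the caller):
 *
 *	char buf[64];
 *	mpol_to_str(buf, sizeof(buf), pol);
 *	pr_info("mempolicy: %s\n", buf);
 */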