// SPDX-License-Identifier: GPL-2.0-only
/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support multiple policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a per-process
 *                counter is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead.
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case NUMA_NO_NODE here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non-default
 *                process policy.
 *
 * preferred many Try a set of nodes first before normal fallback. This is
 *                similar to preferred without the special case.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non-interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem, kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
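
/*
 * Illustrative userspace sketch (not part of this file): roughly how the
 * policies documented above are requested through set_mempolicy(2) and
 * mbind(2), e.g. via the <numaif.h> wrappers from libnuma.  The node
 * numbers and mapping size below are arbitrary examples and assume a
 * machine with at least two online NUMA nodes; error handling is omitted.
 *
 *	#include <numaif.h>		// set_mempolicy(), mbind(), MPOL_*
 *	#include <sys/mman.h>
 *
 *	unsigned long nodes01 = 0x3;	// nodemask covering nodes 0 and 1
 *
 *	// Process policy: interleave new allocations across nodes 0 and 1.
 *	set_mempolicy(MPOL_INTERLEAVE, &nodes01, sizeof(nodes01) * 8);
 *
 *	// VMA policy: bind one mapping to node 0 only, with no fallback.
 *	void *buf = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	unsigned long node0 = 0x1;
 *	mbind(buf, 1 << 20, MPOL_BIND, &node0, sizeof(node0) * 8,
 *	      MPOL_MF_STRICT);
 *
 *	// Reset the process policy back to the default (local) policy.
 *	set_mempolicy(MPOL_DEFAULT, NULL, 0);
 */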

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/pagewalk.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/ptrace.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>
#include <linux/swapops.h>

#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <linux/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT       (MPOL_MF_INTERNAL << 1)	/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct
kmem_cache *sn_cache; 1181da177e4SLinus Torvalds 1191da177e4SLinus Torvalds /* Highest zone. An specific allocation for a zone below that is not 1201da177e4SLinus Torvalds policied. */ 1216267276fSChristoph Lameter enum zone_type policy_zone = 0; 1221da177e4SLinus Torvalds 123bea904d5SLee Schermerhorn /* 124bea904d5SLee Schermerhorn * run-time system-wide default policy => local allocation 125bea904d5SLee Schermerhorn */ 126e754d79dSH Hartley Sweeten static struct mempolicy default_policy = { 1271da177e4SLinus Torvalds .refcnt = ATOMIC_INIT(1), /* never free it */ 1287858d7bcSFeng Tang .mode = MPOL_LOCAL, 1291da177e4SLinus Torvalds }; 1301da177e4SLinus Torvalds 1315606e387SMel Gorman static struct mempolicy preferred_node_policy[MAX_NUMNODES]; 1325606e387SMel Gorman 133b2ca916cSDan Williams /** 134b2ca916cSDan Williams * numa_map_to_online_node - Find closest online node 135f6e92f40SKrzysztof Kozlowski * @node: Node id to start the search 136b2ca916cSDan Williams * 137b2ca916cSDan Williams * Lookup the next closest node by distance if @nid is not online. 138dad5b023SRandy Dunlap * 139dad5b023SRandy Dunlap * Return: this @node if it is online, otherwise the closest node by distance 140b2ca916cSDan Williams */ 141b2ca916cSDan Williams int numa_map_to_online_node(int node) 142b2ca916cSDan Williams { 1434fcbe96eSDan Williams int min_dist = INT_MAX, dist, n, min_node; 144b2ca916cSDan Williams 1454fcbe96eSDan Williams if (node == NUMA_NO_NODE || node_online(node)) 1464fcbe96eSDan Williams return node; 147b2ca916cSDan Williams 148b2ca916cSDan Williams min_node = node; 149b2ca916cSDan Williams for_each_online_node(n) { 150b2ca916cSDan Williams dist = node_distance(node, n); 151b2ca916cSDan Williams if (dist < min_dist) { 152b2ca916cSDan Williams min_dist = dist; 153b2ca916cSDan Williams min_node = n; 154b2ca916cSDan Williams } 155b2ca916cSDan Williams } 156b2ca916cSDan Williams 157b2ca916cSDan Williams return min_node; 158b2ca916cSDan Williams } 159b2ca916cSDan Williams EXPORT_SYMBOL_GPL(numa_map_to_online_node); 160b2ca916cSDan Williams 16174d2c3a0SOleg Nesterov struct mempolicy *get_task_policy(struct task_struct *p) 1625606e387SMel Gorman { 1635606e387SMel Gorman struct mempolicy *pol = p->mempolicy; 164f15ca78eSOleg Nesterov int node; 1655606e387SMel Gorman 166f15ca78eSOleg Nesterov if (pol) 167f15ca78eSOleg Nesterov return pol; 1685606e387SMel Gorman 169f15ca78eSOleg Nesterov node = numa_node_id(); 1701da6f0e1SJianguo Wu if (node != NUMA_NO_NODE) { 1711da6f0e1SJianguo Wu pol = &preferred_node_policy[node]; 172f15ca78eSOleg Nesterov /* preferred_node_policy is not initialised early in boot */ 173f15ca78eSOleg Nesterov if (pol->mode) 174f15ca78eSOleg Nesterov return pol; 1751da6f0e1SJianguo Wu } 1765606e387SMel Gorman 177f15ca78eSOleg Nesterov return &default_policy; 1785606e387SMel Gorman } 1795606e387SMel Gorman 18037012946SDavid Rientjes static const struct mempolicy_operations { 18137012946SDavid Rientjes int (*create)(struct mempolicy *pol, const nodemask_t *nodes); 182213980c0SVlastimil Babka void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes); 18337012946SDavid Rientjes } mpol_ops[MPOL_MAX]; 18437012946SDavid Rientjes 185f5b087b5SDavid Rientjes static inline int mpol_store_user_nodemask(const struct mempolicy *pol) 186f5b087b5SDavid Rientjes { 1876d556294SBob Liu return pol->flags & MPOL_MODE_FLAGS; 1884c50bc01SDavid Rientjes } 1894c50bc01SDavid Rientjes 1904c50bc01SDavid Rientjes static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig, 1914c50bc01SDavid 
Rientjes const nodemask_t *rel) 1924c50bc01SDavid Rientjes { 1934c50bc01SDavid Rientjes nodemask_t tmp; 1944c50bc01SDavid Rientjes nodes_fold(tmp, *orig, nodes_weight(*rel)); 1954c50bc01SDavid Rientjes nodes_onto(*ret, tmp, *rel); 196f5b087b5SDavid Rientjes } 197f5b087b5SDavid Rientjes 198be897d48SFeng Tang static int mpol_new_nodemask(struct mempolicy *pol, const nodemask_t *nodes) 19937012946SDavid Rientjes { 20037012946SDavid Rientjes if (nodes_empty(*nodes)) 20137012946SDavid Rientjes return -EINVAL; 202269fbe72SBen Widawsky pol->nodes = *nodes; 20337012946SDavid Rientjes return 0; 20437012946SDavid Rientjes } 20537012946SDavid Rientjes 20637012946SDavid Rientjes static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes) 20737012946SDavid Rientjes { 2087858d7bcSFeng Tang if (nodes_empty(*nodes)) 2097858d7bcSFeng Tang return -EINVAL; 210269fbe72SBen Widawsky 211269fbe72SBen Widawsky nodes_clear(pol->nodes); 212269fbe72SBen Widawsky node_set(first_node(*nodes), pol->nodes); 21337012946SDavid Rientjes return 0; 21437012946SDavid Rientjes } 21537012946SDavid Rientjes 21658568d2aSMiao Xie /* 21758568d2aSMiao Xie * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if 21858568d2aSMiao Xie * any, for the new policy. mpol_new() has already validated the nodes 2197858d7bcSFeng Tang * parameter with respect to the policy mode and flags. 22058568d2aSMiao Xie * 22158568d2aSMiao Xie * Must be called holding task's alloc_lock to protect task's mems_allowed 222c1e8d7c6SMichel Lespinasse * and mempolicy. May also be called holding the mmap_lock for write. 22358568d2aSMiao Xie */ 2244bfc4495SKAMEZAWA Hiroyuki static int mpol_set_nodemask(struct mempolicy *pol, 2254bfc4495SKAMEZAWA Hiroyuki const nodemask_t *nodes, struct nodemask_scratch *nsc) 22658568d2aSMiao Xie { 22758568d2aSMiao Xie int ret; 22858568d2aSMiao Xie 2297858d7bcSFeng Tang /* 2307858d7bcSFeng Tang * Default (pol==NULL) resp. local memory policies are not a 2317858d7bcSFeng Tang * subject of any remapping. They also do not need any special 2327858d7bcSFeng Tang * constructor. 2337858d7bcSFeng Tang */ 2347858d7bcSFeng Tang if (!pol || pol->mode == MPOL_LOCAL) 23558568d2aSMiao Xie return 0; 2367858d7bcSFeng Tang 23701f13bd6SLai Jiangshan /* Check N_MEMORY */ 2384bfc4495SKAMEZAWA Hiroyuki nodes_and(nsc->mask1, 23901f13bd6SLai Jiangshan cpuset_current_mems_allowed, node_states[N_MEMORY]); 24058568d2aSMiao Xie 24158568d2aSMiao Xie VM_BUG_ON(!nodes); 2427858d7bcSFeng Tang 24358568d2aSMiao Xie if (pol->flags & MPOL_F_RELATIVE_NODES) 2444bfc4495SKAMEZAWA Hiroyuki mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1); 24558568d2aSMiao Xie else 2464bfc4495SKAMEZAWA Hiroyuki nodes_and(nsc->mask2, *nodes, nsc->mask1); 2474bfc4495SKAMEZAWA Hiroyuki 24858568d2aSMiao Xie if (mpol_store_user_nodemask(pol)) 24958568d2aSMiao Xie pol->w.user_nodemask = *nodes; 25058568d2aSMiao Xie else 2517858d7bcSFeng Tang pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed; 25258568d2aSMiao Xie 2534bfc4495SKAMEZAWA Hiroyuki ret = mpol_ops[pol->mode].create(pol, &nsc->mask2); 25458568d2aSMiao Xie return ret; 25558568d2aSMiao Xie } 25658568d2aSMiao Xie 25758568d2aSMiao Xie /* 25858568d2aSMiao Xie * This function just creates a new policy, does some check and simple 25958568d2aSMiao Xie * initialization. You must invoke mpol_set_nodemask() to set nodes. 
26058568d2aSMiao Xie */ 261028fec41SDavid Rientjes static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags, 262028fec41SDavid Rientjes nodemask_t *nodes) 2631da177e4SLinus Torvalds { 2641da177e4SLinus Torvalds struct mempolicy *policy; 2651da177e4SLinus Torvalds 266028fec41SDavid Rientjes pr_debug("setting mode %d flags %d nodes[0] %lx\n", 26700ef2d2fSDavid Rientjes mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE); 268140d5a49SPaul Mundt 2693e1f0645SDavid Rientjes if (mode == MPOL_DEFAULT) { 2703e1f0645SDavid Rientjes if (nodes && !nodes_empty(*nodes)) 27137012946SDavid Rientjes return ERR_PTR(-EINVAL); 272d3a71033SLee Schermerhorn return NULL; 27337012946SDavid Rientjes } 2743e1f0645SDavid Rientjes VM_BUG_ON(!nodes); 2753e1f0645SDavid Rientjes 2763e1f0645SDavid Rientjes /* 2773e1f0645SDavid Rientjes * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or 2783e1f0645SDavid Rientjes * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation). 2793e1f0645SDavid Rientjes * All other modes require a valid pointer to a non-empty nodemask. 2803e1f0645SDavid Rientjes */ 2813e1f0645SDavid Rientjes if (mode == MPOL_PREFERRED) { 2823e1f0645SDavid Rientjes if (nodes_empty(*nodes)) { 2833e1f0645SDavid Rientjes if (((flags & MPOL_F_STATIC_NODES) || 2843e1f0645SDavid Rientjes (flags & MPOL_F_RELATIVE_NODES))) 2853e1f0645SDavid Rientjes return ERR_PTR(-EINVAL); 2867858d7bcSFeng Tang 2877858d7bcSFeng Tang mode = MPOL_LOCAL; 2883e1f0645SDavid Rientjes } 289479e2802SPeter Zijlstra } else if (mode == MPOL_LOCAL) { 2908d303e44SPiotr Kwapulinski if (!nodes_empty(*nodes) || 2918d303e44SPiotr Kwapulinski (flags & MPOL_F_STATIC_NODES) || 2928d303e44SPiotr Kwapulinski (flags & MPOL_F_RELATIVE_NODES)) 293479e2802SPeter Zijlstra return ERR_PTR(-EINVAL); 2943e1f0645SDavid Rientjes } else if (nodes_empty(*nodes)) 2953e1f0645SDavid Rientjes return ERR_PTR(-EINVAL); 2961da177e4SLinus Torvalds policy = kmem_cache_alloc(policy_cache, GFP_KERNEL); 2971da177e4SLinus Torvalds if (!policy) 2981da177e4SLinus Torvalds return ERR_PTR(-ENOMEM); 2991da177e4SLinus Torvalds atomic_set(&policy->refcnt, 1); 30045c4745aSLee Schermerhorn policy->mode = mode; 30137012946SDavid Rientjes policy->flags = flags; 302c6018b4bSAneesh Kumar K.V policy->home_node = NUMA_NO_NODE; 3033e1f0645SDavid Rientjes 30437012946SDavid Rientjes return policy; 30537012946SDavid Rientjes } 30637012946SDavid Rientjes 30752cd3b07SLee Schermerhorn /* Slow path of a mpol destructor. 
*/ 30852cd3b07SLee Schermerhorn void __mpol_put(struct mempolicy *p) 30952cd3b07SLee Schermerhorn { 31052cd3b07SLee Schermerhorn if (!atomic_dec_and_test(&p->refcnt)) 31152cd3b07SLee Schermerhorn return; 31252cd3b07SLee Schermerhorn kmem_cache_free(policy_cache, p); 31352cd3b07SLee Schermerhorn } 31452cd3b07SLee Schermerhorn 315213980c0SVlastimil Babka static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes) 31637012946SDavid Rientjes { 31737012946SDavid Rientjes } 31837012946SDavid Rientjes 319213980c0SVlastimil Babka static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes) 3201d0d2680SDavid Rientjes { 3211d0d2680SDavid Rientjes nodemask_t tmp; 3221d0d2680SDavid Rientjes 32337012946SDavid Rientjes if (pol->flags & MPOL_F_STATIC_NODES) 32437012946SDavid Rientjes nodes_and(tmp, pol->w.user_nodemask, *nodes); 32537012946SDavid Rientjes else if (pol->flags & MPOL_F_RELATIVE_NODES) 32637012946SDavid Rientjes mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes); 3271d0d2680SDavid Rientjes else { 328269fbe72SBen Widawsky nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed, 329213980c0SVlastimil Babka *nodes); 33029b190faSzhong jiang pol->w.cpuset_mems_allowed = *nodes; 3311d0d2680SDavid Rientjes } 33237012946SDavid Rientjes 333708c1bbcSMiao Xie if (nodes_empty(tmp)) 334708c1bbcSMiao Xie tmp = *nodes; 335708c1bbcSMiao Xie 336269fbe72SBen Widawsky pol->nodes = tmp; 33737012946SDavid Rientjes } 33837012946SDavid Rientjes 33937012946SDavid Rientjes static void mpol_rebind_preferred(struct mempolicy *pol, 340213980c0SVlastimil Babka const nodemask_t *nodes) 34137012946SDavid Rientjes { 34237012946SDavid Rientjes pol->w.cpuset_mems_allowed = *nodes; 3431d0d2680SDavid Rientjes } 34437012946SDavid Rientjes 345708c1bbcSMiao Xie /* 346708c1bbcSMiao Xie * mpol_rebind_policy - Migrate a policy to a different set of nodes 347708c1bbcSMiao Xie * 348c1e8d7c6SMichel Lespinasse * Per-vma policies are protected by mmap_lock. Allocations using per-task 349213980c0SVlastimil Babka * policies are protected by task->mems_allowed_seq to prevent a premature 350213980c0SVlastimil Babka * OOM/allocation failure due to parallel nodemask modification. 351708c1bbcSMiao Xie */ 352213980c0SVlastimil Babka static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask) 35337012946SDavid Rientjes { 354018160adSWang Cheng if (!pol || pol->mode == MPOL_LOCAL) 35537012946SDavid Rientjes return; 3567858d7bcSFeng Tang if (!mpol_store_user_nodemask(pol) && 35737012946SDavid Rientjes nodes_equal(pol->w.cpuset_mems_allowed, *newmask)) 35837012946SDavid Rientjes return; 359708c1bbcSMiao Xie 360213980c0SVlastimil Babka mpol_ops[pol->mode].rebind(pol, newmask); 3611d0d2680SDavid Rientjes } 3621d0d2680SDavid Rientjes 3631d0d2680SDavid Rientjes /* 3641d0d2680SDavid Rientjes * Wrapper for mpol_rebind_policy() that just requires task 3651d0d2680SDavid Rientjes * pointer, and updates task mempolicy. 36658568d2aSMiao Xie * 36758568d2aSMiao Xie * Called with task's alloc_lock held. 3681d0d2680SDavid Rientjes */ 3691d0d2680SDavid Rientjes 370213980c0SVlastimil Babka void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new) 3711d0d2680SDavid Rientjes { 372213980c0SVlastimil Babka mpol_rebind_policy(tsk->mempolicy, new); 3731d0d2680SDavid Rientjes } 3741d0d2680SDavid Rientjes 3751d0d2680SDavid Rientjes /* 3761d0d2680SDavid Rientjes * Rebind each vma in mm to new nodemask. 3771d0d2680SDavid Rientjes * 378c1e8d7c6SMichel Lespinasse * Call holding a reference to mm. 
Takes mm->mmap_lock during call. 3791d0d2680SDavid Rientjes */ 3801d0d2680SDavid Rientjes 3811d0d2680SDavid Rientjes void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new) 3821d0d2680SDavid Rientjes { 3831d0d2680SDavid Rientjes struct vm_area_struct *vma; 38466850be5SLiam R. Howlett VMA_ITERATOR(vmi, mm, 0); 3851d0d2680SDavid Rientjes 386d8ed45c5SMichel Lespinasse mmap_write_lock(mm); 38766850be5SLiam R. Howlett for_each_vma(vmi, vma) 388213980c0SVlastimil Babka mpol_rebind_policy(vma->vm_policy, new); 389d8ed45c5SMichel Lespinasse mmap_write_unlock(mm); 3901d0d2680SDavid Rientjes } 3911d0d2680SDavid Rientjes 39237012946SDavid Rientjes static const struct mempolicy_operations mpol_ops[MPOL_MAX] = { 39337012946SDavid Rientjes [MPOL_DEFAULT] = { 39437012946SDavid Rientjes .rebind = mpol_rebind_default, 39537012946SDavid Rientjes }, 39637012946SDavid Rientjes [MPOL_INTERLEAVE] = { 397be897d48SFeng Tang .create = mpol_new_nodemask, 39837012946SDavid Rientjes .rebind = mpol_rebind_nodemask, 39937012946SDavid Rientjes }, 40037012946SDavid Rientjes [MPOL_PREFERRED] = { 40137012946SDavid Rientjes .create = mpol_new_preferred, 40237012946SDavid Rientjes .rebind = mpol_rebind_preferred, 40337012946SDavid Rientjes }, 40437012946SDavid Rientjes [MPOL_BIND] = { 405be897d48SFeng Tang .create = mpol_new_nodemask, 40637012946SDavid Rientjes .rebind = mpol_rebind_nodemask, 40737012946SDavid Rientjes }, 4087858d7bcSFeng Tang [MPOL_LOCAL] = { 4097858d7bcSFeng Tang .rebind = mpol_rebind_default, 4107858d7bcSFeng Tang }, 411b27abaccSDave Hansen [MPOL_PREFERRED_MANY] = { 412be897d48SFeng Tang .create = mpol_new_nodemask, 413b27abaccSDave Hansen .rebind = mpol_rebind_preferred, 414b27abaccSDave Hansen }, 41537012946SDavid Rientjes }; 41637012946SDavid Rientjes 417a53190a4SYang Shi static int migrate_page_add(struct page *page, struct list_head *pagelist, 418fc301289SChristoph Lameter unsigned long flags); 4191a75a6c8SChristoph Lameter 4206f4576e3SNaoya Horiguchi struct queue_pages { 4216f4576e3SNaoya Horiguchi struct list_head *pagelist; 4226f4576e3SNaoya Horiguchi unsigned long flags; 4236f4576e3SNaoya Horiguchi nodemask_t *nmask; 424f18da660SLi Xinhai unsigned long start; 425f18da660SLi Xinhai unsigned long end; 426f18da660SLi Xinhai struct vm_area_struct *first; 4276f4576e3SNaoya Horiguchi }; 4286f4576e3SNaoya Horiguchi 42998094945SNaoya Horiguchi /* 43088aaa2a1SNaoya Horiguchi * Check if the page's nid is in qp->nmask. 43188aaa2a1SNaoya Horiguchi * 43288aaa2a1SNaoya Horiguchi * If MPOL_MF_INVERT is set in qp->flags, check if the nid is 43388aaa2a1SNaoya Horiguchi * in the invert of qp->nmask. 43488aaa2a1SNaoya Horiguchi */ 43588aaa2a1SNaoya Horiguchi static inline bool queue_pages_required(struct page *page, 43688aaa2a1SNaoya Horiguchi struct queue_pages *qp) 43788aaa2a1SNaoya Horiguchi { 43888aaa2a1SNaoya Horiguchi int nid = page_to_nid(page); 43988aaa2a1SNaoya Horiguchi unsigned long flags = qp->flags; 44088aaa2a1SNaoya Horiguchi 44188aaa2a1SNaoya Horiguchi return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT); 44288aaa2a1SNaoya Horiguchi } 44388aaa2a1SNaoya Horiguchi 444a7f40cfeSYang Shi /* 445bc78b5edSMiaohe Lin * queue_pages_pmd() has three possible return values: 446e5947d23SYang Shi * 0 - pages are placed on the right node or queued successfully, or 447e5947d23SYang Shi * special page is met, i.e. huge zero page. 448d8835445SYang Shi * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were 449d8835445SYang Shi * specified. 
450d8835445SYang Shi * -EIO - is migration entry or only MPOL_MF_STRICT was specified and an 451d8835445SYang Shi * existing page was already on a node that does not follow the 452d8835445SYang Shi * policy. 453a7f40cfeSYang Shi */ 454c8633798SNaoya Horiguchi static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr, 455c8633798SNaoya Horiguchi unsigned long end, struct mm_walk *walk) 456959a7e13SJules Irenge __releases(ptl) 457c8633798SNaoya Horiguchi { 458c8633798SNaoya Horiguchi int ret = 0; 459c8633798SNaoya Horiguchi struct page *page; 460c8633798SNaoya Horiguchi struct queue_pages *qp = walk->private; 461c8633798SNaoya Horiguchi unsigned long flags; 462c8633798SNaoya Horiguchi 463c8633798SNaoya Horiguchi if (unlikely(is_pmd_migration_entry(*pmd))) { 464a7f40cfeSYang Shi ret = -EIO; 465c8633798SNaoya Horiguchi goto unlock; 466c8633798SNaoya Horiguchi } 467c8633798SNaoya Horiguchi page = pmd_page(*pmd); 468c8633798SNaoya Horiguchi if (is_huge_zero_page(page)) { 469e5947d23SYang Shi walk->action = ACTION_CONTINUE; 4706d97cf88SMiaohe Lin goto unlock; 471c8633798SNaoya Horiguchi } 472d8835445SYang Shi if (!queue_pages_required(page, qp)) 473c8633798SNaoya Horiguchi goto unlock; 474c8633798SNaoya Horiguchi 475c8633798SNaoya Horiguchi flags = qp->flags; 476c8633798SNaoya Horiguchi /* go to thp migration */ 477a7f40cfeSYang Shi if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 478a53190a4SYang Shi if (!vma_migratable(walk->vma) || 479a53190a4SYang Shi migrate_page_add(page, qp->pagelist, flags)) { 480d8835445SYang Shi ret = 1; 481a7f40cfeSYang Shi goto unlock; 482a7f40cfeSYang Shi } 483a7f40cfeSYang Shi } else 484a7f40cfeSYang Shi ret = -EIO; 485c8633798SNaoya Horiguchi unlock: 486c8633798SNaoya Horiguchi spin_unlock(ptl); 487c8633798SNaoya Horiguchi return ret; 488c8633798SNaoya Horiguchi } 489c8633798SNaoya Horiguchi 49088aaa2a1SNaoya Horiguchi /* 49198094945SNaoya Horiguchi * Scan through pages checking if pages follow certain conditions, 49298094945SNaoya Horiguchi * and move them to the pagelist if they do. 493d8835445SYang Shi * 494d8835445SYang Shi * queue_pages_pte_range() has three possible return values: 495e5947d23SYang Shi * 0 - pages are placed on the right node or queued successfully, or 496e5947d23SYang Shi * special page is met, i.e. zero page. 497d8835445SYang Shi * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were 498d8835445SYang Shi * specified. 499d8835445SYang Shi * -EIO - only MPOL_MF_STRICT was specified and an existing page was already 500d8835445SYang Shi * on a node that does not follow the policy. 
50198094945SNaoya Horiguchi */ 5026f4576e3SNaoya Horiguchi static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr, 5036f4576e3SNaoya Horiguchi unsigned long end, struct mm_walk *walk) 5041da177e4SLinus Torvalds { 5056f4576e3SNaoya Horiguchi struct vm_area_struct *vma = walk->vma; 5066f4576e3SNaoya Horiguchi struct page *page; 5076f4576e3SNaoya Horiguchi struct queue_pages *qp = walk->private; 5086f4576e3SNaoya Horiguchi unsigned long flags = qp->flags; 509d8835445SYang Shi bool has_unmovable = false; 5103f088420SShijie Luo pte_t *pte, *mapped_pte; 511705e87c0SHugh Dickins spinlock_t *ptl; 512941150a3SHugh Dickins 513c8633798SNaoya Horiguchi ptl = pmd_trans_huge_lock(pmd, vma); 514bc78b5edSMiaohe Lin if (ptl) 515bc78b5edSMiaohe Lin return queue_pages_pmd(pmd, ptl, addr, end, walk); 51691612e0dSHugh Dickins 517337d9abfSNaoya Horiguchi if (pmd_trans_unstable(pmd)) 518337d9abfSNaoya Horiguchi return 0; 51994723aafSMichal Hocko 5203f088420SShijie Luo mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); 5216f4576e3SNaoya Horiguchi for (; addr != end; pte++, addr += PAGE_SIZE) { 52291612e0dSHugh Dickins if (!pte_present(*pte)) 52391612e0dSHugh Dickins continue; 5246aab341eSLinus Torvalds page = vm_normal_page(vma, addr, *pte); 5253218f871SAlex Sierra if (!page || is_zone_device_page(page)) 52691612e0dSHugh Dickins continue; 527053837fcSNick Piggin /* 52862b61f61SHugh Dickins * vm_normal_page() filters out zero pages, but there might 52962b61f61SHugh Dickins * still be PageReserved pages to skip, perhaps in a VDSO. 530053837fcSNick Piggin */ 531b79bc0a0SHugh Dickins if (PageReserved(page)) 532f4598c8bSChristoph Lameter continue; 53388aaa2a1SNaoya Horiguchi if (!queue_pages_required(page, qp)) 53438e35860SChristoph Lameter continue; 535a7f40cfeSYang Shi if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 536d8835445SYang Shi /* MPOL_MF_STRICT must be specified if we get here */ 537d8835445SYang Shi if (!vma_migratable(vma)) { 538d8835445SYang Shi has_unmovable = true; 539a7f40cfeSYang Shi break; 540d8835445SYang Shi } 541a53190a4SYang Shi 542a53190a4SYang Shi /* 543a53190a4SYang Shi * Do not abort immediately since there may be 544a53190a4SYang Shi * temporary off LRU pages in the range. Still 545a53190a4SYang Shi * need migrate other LRU pages. 546a53190a4SYang Shi */ 547a53190a4SYang Shi if (migrate_page_add(page, qp->pagelist, flags)) 548a53190a4SYang Shi has_unmovable = true; 549a7f40cfeSYang Shi } else 550a7f40cfeSYang Shi break; 5516f4576e3SNaoya Horiguchi } 5523f088420SShijie Luo pte_unmap_unlock(mapped_pte, ptl); 5536f4576e3SNaoya Horiguchi cond_resched(); 554d8835445SYang Shi 555d8835445SYang Shi if (has_unmovable) 556d8835445SYang Shi return 1; 557d8835445SYang Shi 558a7f40cfeSYang Shi return addr != end ? -EIO : 0; 55991612e0dSHugh Dickins } 56091612e0dSHugh Dickins 5616f4576e3SNaoya Horiguchi static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask, 5626f4576e3SNaoya Horiguchi unsigned long addr, unsigned long end, 5636f4576e3SNaoya Horiguchi struct mm_walk *walk) 564e2d8cf40SNaoya Horiguchi { 565dcf17635SLi Xinhai int ret = 0; 566e2d8cf40SNaoya Horiguchi #ifdef CONFIG_HUGETLB_PAGE 5676f4576e3SNaoya Horiguchi struct queue_pages *qp = walk->private; 568dcf17635SLi Xinhai unsigned long flags = (qp->flags & MPOL_MF_VALID); 569e2d8cf40SNaoya Horiguchi struct page *page; 570cb900f41SKirill A. 
Shutemov spinlock_t *ptl; 571d4c54919SNaoya Horiguchi pte_t entry; 572e2d8cf40SNaoya Horiguchi 5736f4576e3SNaoya Horiguchi ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte); 5746f4576e3SNaoya Horiguchi entry = huge_ptep_get(pte); 575d4c54919SNaoya Horiguchi if (!pte_present(entry)) 576d4c54919SNaoya Horiguchi goto unlock; 577d4c54919SNaoya Horiguchi page = pte_page(entry); 57888aaa2a1SNaoya Horiguchi if (!queue_pages_required(page, qp)) 579e2d8cf40SNaoya Horiguchi goto unlock; 580dcf17635SLi Xinhai 581dcf17635SLi Xinhai if (flags == MPOL_MF_STRICT) { 582dcf17635SLi Xinhai /* 583dcf17635SLi Xinhai * STRICT alone means only detecting misplaced page and no 584dcf17635SLi Xinhai * need to further check other vma. 585dcf17635SLi Xinhai */ 586dcf17635SLi Xinhai ret = -EIO; 587dcf17635SLi Xinhai goto unlock; 588dcf17635SLi Xinhai } 589dcf17635SLi Xinhai 590dcf17635SLi Xinhai if (!vma_migratable(walk->vma)) { 591dcf17635SLi Xinhai /* 592dcf17635SLi Xinhai * Must be STRICT with MOVE*, otherwise .test_walk() have 593dcf17635SLi Xinhai * stopped walking current vma. 594dcf17635SLi Xinhai * Detecting misplaced page but allow migrating pages which 595dcf17635SLi Xinhai * have been queued. 596dcf17635SLi Xinhai */ 597dcf17635SLi Xinhai ret = 1; 598dcf17635SLi Xinhai goto unlock; 599dcf17635SLi Xinhai } 600dcf17635SLi Xinhai 601e2d8cf40SNaoya Horiguchi /* With MPOL_MF_MOVE, we migrate only unshared hugepage. */ 602e2d8cf40SNaoya Horiguchi if (flags & (MPOL_MF_MOVE_ALL) || 603dcf17635SLi Xinhai (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) { 6047ce82f4cSMiaohe Lin if (isolate_hugetlb(page, qp->pagelist) && 605dcf17635SLi Xinhai (flags & MPOL_MF_STRICT)) 606dcf17635SLi Xinhai /* 607dcf17635SLi Xinhai * Failed to isolate page but allow migrating pages 608dcf17635SLi Xinhai * which have been queued. 609dcf17635SLi Xinhai */ 610dcf17635SLi Xinhai ret = 1; 611dcf17635SLi Xinhai } 612e2d8cf40SNaoya Horiguchi unlock: 613cb900f41SKirill A. Shutemov spin_unlock(ptl); 614e2d8cf40SNaoya Horiguchi #else 615e2d8cf40SNaoya Horiguchi BUG(); 616e2d8cf40SNaoya Horiguchi #endif 617dcf17635SLi Xinhai return ret; 6181da177e4SLinus Torvalds } 6191da177e4SLinus Torvalds 6205877231fSAneesh Kumar K.V #ifdef CONFIG_NUMA_BALANCING 621b24f53a0SLee Schermerhorn /* 6224b10e7d5SMel Gorman * This is used to mark a range of virtual addresses to be inaccessible. 6234b10e7d5SMel Gorman * These are later cleared by a NUMA hinting fault. Depending on these 6244b10e7d5SMel Gorman * faults, pages may be migrated for better NUMA placement. 6254b10e7d5SMel Gorman * 6264b10e7d5SMel Gorman * This is assuming that NUMA faults are handled using PROT_NONE. If 6274b10e7d5SMel Gorman * an architecture makes a different choice, it will need further 6284b10e7d5SMel Gorman * changes to the core. 
629b24f53a0SLee Schermerhorn */ 6304b10e7d5SMel Gorman unsigned long change_prot_numa(struct vm_area_struct *vma, 6314b10e7d5SMel Gorman unsigned long addr, unsigned long end) 632b24f53a0SLee Schermerhorn { 6334a18419fSNadav Amit struct mmu_gather tlb; 6344b10e7d5SMel Gorman int nr_updated; 635b24f53a0SLee Schermerhorn 6364a18419fSNadav Amit tlb_gather_mmu(&tlb, vma->vm_mm); 6374a18419fSNadav Amit 6384a18419fSNadav Amit nr_updated = change_protection(&tlb, vma, addr, end, PAGE_NONE, 6394a18419fSNadav Amit MM_CP_PROT_NUMA); 64003c5a6e1SMel Gorman if (nr_updated) 64103c5a6e1SMel Gorman count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated); 642b24f53a0SLee Schermerhorn 6434a18419fSNadav Amit tlb_finish_mmu(&tlb); 6444a18419fSNadav Amit 6454b10e7d5SMel Gorman return nr_updated; 646b24f53a0SLee Schermerhorn } 647b24f53a0SLee Schermerhorn #else 648b24f53a0SLee Schermerhorn static unsigned long change_prot_numa(struct vm_area_struct *vma, 649b24f53a0SLee Schermerhorn unsigned long addr, unsigned long end) 650b24f53a0SLee Schermerhorn { 651b24f53a0SLee Schermerhorn return 0; 652b24f53a0SLee Schermerhorn } 6535877231fSAneesh Kumar K.V #endif /* CONFIG_NUMA_BALANCING */ 654b24f53a0SLee Schermerhorn 6556f4576e3SNaoya Horiguchi static int queue_pages_test_walk(unsigned long start, unsigned long end, 6566f4576e3SNaoya Horiguchi struct mm_walk *walk) 6571da177e4SLinus Torvalds { 65866850be5SLiam R. Howlett struct vm_area_struct *next, *vma = walk->vma; 6596f4576e3SNaoya Horiguchi struct queue_pages *qp = walk->private; 6605b952b3cSAndi Kleen unsigned long endvma = vma->vm_end; 6616f4576e3SNaoya Horiguchi unsigned long flags = qp->flags; 662dc9aa5b9SChristoph Lameter 663a18b3ac2SLi Xinhai /* range check first */ 664ce33135cSMiaohe Lin VM_BUG_ON_VMA(!range_in_vma(vma, start, end), vma); 665f18da660SLi Xinhai 666f18da660SLi Xinhai if (!qp->first) { 667f18da660SLi Xinhai qp->first = vma; 668f18da660SLi Xinhai if (!(flags & MPOL_MF_DISCONTIG_OK) && 669f18da660SLi Xinhai (qp->start < vma->vm_start)) 670f18da660SLi Xinhai /* hole at head side of range */ 671a18b3ac2SLi Xinhai return -EFAULT; 672a18b3ac2SLi Xinhai } 67366850be5SLiam R. Howlett next = find_vma(vma->vm_mm, vma->vm_end); 674f18da660SLi Xinhai if (!(flags & MPOL_MF_DISCONTIG_OK) && 675f18da660SLi Xinhai ((vma->vm_end < qp->end) && 67666850be5SLiam R. 
Howlett (!next || vma->vm_end < next->vm_start))) 677f18da660SLi Xinhai /* hole at middle or tail of range */ 678f18da660SLi Xinhai return -EFAULT; 679a18b3ac2SLi Xinhai 680a7f40cfeSYang Shi /* 681a7f40cfeSYang Shi * Need check MPOL_MF_STRICT to return -EIO if possible 682a7f40cfeSYang Shi * regardless of vma_migratable 683a7f40cfeSYang Shi */ 684a7f40cfeSYang Shi if (!vma_migratable(vma) && 685a7f40cfeSYang Shi !(flags & MPOL_MF_STRICT)) 68648684a65SNaoya Horiguchi return 1; 68748684a65SNaoya Horiguchi 6885b952b3cSAndi Kleen if (endvma > end) 6895b952b3cSAndi Kleen endvma = end; 690b24f53a0SLee Schermerhorn 691b24f53a0SLee Schermerhorn if (flags & MPOL_MF_LAZY) { 6922c0346a3SMel Gorman /* Similar to task_numa_work, skip inaccessible VMAs */ 6933122e80eSAnshuman Khandual if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) && 6944355c018SLiang Chen !(vma->vm_flags & VM_MIXEDMAP)) 695b24f53a0SLee Schermerhorn change_prot_numa(vma, start, endvma); 6966f4576e3SNaoya Horiguchi return 1; 697b24f53a0SLee Schermerhorn } 698b24f53a0SLee Schermerhorn 6996f4576e3SNaoya Horiguchi /* queue pages from current vma */ 700a7f40cfeSYang Shi if (flags & MPOL_MF_VALID) 7016f4576e3SNaoya Horiguchi return 0; 7026f4576e3SNaoya Horiguchi return 1; 7036f4576e3SNaoya Horiguchi } 704b24f53a0SLee Schermerhorn 7057b86ac33SChristoph Hellwig static const struct mm_walk_ops queue_pages_walk_ops = { 7067b86ac33SChristoph Hellwig .hugetlb_entry = queue_pages_hugetlb, 7077b86ac33SChristoph Hellwig .pmd_entry = queue_pages_pte_range, 7087b86ac33SChristoph Hellwig .test_walk = queue_pages_test_walk, 7097b86ac33SChristoph Hellwig }; 7107b86ac33SChristoph Hellwig 7116f4576e3SNaoya Horiguchi /* 7126f4576e3SNaoya Horiguchi * Walk through page tables and collect pages to be migrated. 7136f4576e3SNaoya Horiguchi * 7146f4576e3SNaoya Horiguchi * If pages found in a given range are on a set of nodes (determined by 7156f4576e3SNaoya Horiguchi * @nodes and @flags,) it's isolated and queued to the pagelist which is 716d8835445SYang Shi * passed via @private. 717d8835445SYang Shi * 718d8835445SYang Shi * queue_pages_range() has three possible return values: 719d8835445SYang Shi * 1 - there is unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were 720d8835445SYang Shi * specified. 721d8835445SYang Shi * 0 - queue pages successfully or no misplaced page. 722a85dfc30SYang Shi * errno - i.e. 
misplaced pages with MPOL_MF_STRICT specified (-EIO) or 723a85dfc30SYang Shi * memory range specified by nodemask and maxnode points outside 724a85dfc30SYang Shi * your accessible address space (-EFAULT) 7256f4576e3SNaoya Horiguchi */ 7266f4576e3SNaoya Horiguchi static int 7276f4576e3SNaoya Horiguchi queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end, 7286f4576e3SNaoya Horiguchi nodemask_t *nodes, unsigned long flags, 7296f4576e3SNaoya Horiguchi struct list_head *pagelist) 7306f4576e3SNaoya Horiguchi { 731f18da660SLi Xinhai int err; 7326f4576e3SNaoya Horiguchi struct queue_pages qp = { 7336f4576e3SNaoya Horiguchi .pagelist = pagelist, 7346f4576e3SNaoya Horiguchi .flags = flags, 7356f4576e3SNaoya Horiguchi .nmask = nodes, 736f18da660SLi Xinhai .start = start, 737f18da660SLi Xinhai .end = end, 738f18da660SLi Xinhai .first = NULL, 7396f4576e3SNaoya Horiguchi }; 7406f4576e3SNaoya Horiguchi 741f18da660SLi Xinhai err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp); 742f18da660SLi Xinhai 743f18da660SLi Xinhai if (!qp.first) 744f18da660SLi Xinhai /* whole range in hole */ 745f18da660SLi Xinhai err = -EFAULT; 746f18da660SLi Xinhai 747f18da660SLi Xinhai return err; 7481da177e4SLinus Torvalds } 7491da177e4SLinus Torvalds 750869833f2SKOSAKI Motohiro /* 751869833f2SKOSAKI Motohiro * Apply policy to a single VMA 752c1e8d7c6SMichel Lespinasse * This must be called with the mmap_lock held for writing. 753869833f2SKOSAKI Motohiro */ 754869833f2SKOSAKI Motohiro static int vma_replace_policy(struct vm_area_struct *vma, 755869833f2SKOSAKI Motohiro struct mempolicy *pol) 7568d34694cSKOSAKI Motohiro { 757869833f2SKOSAKI Motohiro int err; 758869833f2SKOSAKI Motohiro struct mempolicy *old; 759869833f2SKOSAKI Motohiro struct mempolicy *new; 7608d34694cSKOSAKI Motohiro 7618d34694cSKOSAKI Motohiro pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n", 7628d34694cSKOSAKI Motohiro vma->vm_start, vma->vm_end, vma->vm_pgoff, 7638d34694cSKOSAKI Motohiro vma->vm_ops, vma->vm_file, 7648d34694cSKOSAKI Motohiro vma->vm_ops ? vma->vm_ops->set_policy : NULL); 7658d34694cSKOSAKI Motohiro 766869833f2SKOSAKI Motohiro new = mpol_dup(pol); 767869833f2SKOSAKI Motohiro if (IS_ERR(new)) 768869833f2SKOSAKI Motohiro return PTR_ERR(new); 769869833f2SKOSAKI Motohiro 770869833f2SKOSAKI Motohiro if (vma->vm_ops && vma->vm_ops->set_policy) { 7718d34694cSKOSAKI Motohiro err = vma->vm_ops->set_policy(vma, new); 772869833f2SKOSAKI Motohiro if (err) 773869833f2SKOSAKI Motohiro goto err_out; 7748d34694cSKOSAKI Motohiro } 775869833f2SKOSAKI Motohiro 776869833f2SKOSAKI Motohiro old = vma->vm_policy; 777c1e8d7c6SMichel Lespinasse vma->vm_policy = new; /* protected by mmap_lock */ 778869833f2SKOSAKI Motohiro mpol_put(old); 779869833f2SKOSAKI Motohiro 780869833f2SKOSAKI Motohiro return 0; 781869833f2SKOSAKI Motohiro err_out: 782869833f2SKOSAKI Motohiro mpol_put(new); 7838d34694cSKOSAKI Motohiro return err; 7848d34694cSKOSAKI Motohiro } 7858d34694cSKOSAKI Motohiro 7861da177e4SLinus Torvalds /* Step 2: apply policy to a range and do splits. 
*/ 7879d8cebd4SKOSAKI Motohiro static int mbind_range(struct mm_struct *mm, unsigned long start, 7889d8cebd4SKOSAKI Motohiro unsigned long end, struct mempolicy *new_pol) 7891da177e4SLinus Torvalds { 790*7329e3ebSLiam Howlett MA_STATE(mas, &mm->mm_mt, start, start); 7919d8cebd4SKOSAKI Motohiro struct vm_area_struct *prev; 7929d8cebd4SKOSAKI Motohiro struct vm_area_struct *vma; 7939d8cebd4SKOSAKI Motohiro int err = 0; 794e26a5114SKOSAKI Motohiro pgoff_t pgoff; 7951da177e4SLinus Torvalds 796*7329e3ebSLiam Howlett prev = mas_prev(&mas, 0); 797*7329e3ebSLiam Howlett if (unlikely(!prev)) 798*7329e3ebSLiam Howlett mas_set(&mas, start); 799*7329e3ebSLiam Howlett 800*7329e3ebSLiam Howlett vma = mas_find(&mas, end - 1); 801*7329e3ebSLiam Howlett if (WARN_ON(!vma)) 802*7329e3ebSLiam Howlett return 0; 803*7329e3ebSLiam Howlett 804*7329e3ebSLiam Howlett if (start > vma->vm_start) 805*7329e3ebSLiam Howlett prev = vma; 8069d8cebd4SKOSAKI Motohiro 80766850be5SLiam R. Howlett for (; vma; vma = mas_next(&mas, end - 1)) { 80866850be5SLiam R. Howlett unsigned long vmstart = max(start, vma->vm_start); 80966850be5SLiam R. Howlett unsigned long vmend = min(end, vma->vm_end); 8109d8cebd4SKOSAKI Motohiro 811e26a5114SKOSAKI Motohiro if (mpol_equal(vma_policy(vma), new_pol)) 81266850be5SLiam R. Howlett goto next; 813e26a5114SKOSAKI Motohiro 814e26a5114SKOSAKI Motohiro pgoff = vma->vm_pgoff + 815e26a5114SKOSAKI Motohiro ((vmstart - vma->vm_start) >> PAGE_SHIFT); 8169d8cebd4SKOSAKI Motohiro prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags, 817e26a5114SKOSAKI Motohiro vma->anon_vma, vma->vm_file, pgoff, 8189a10064fSColin Cross new_pol, vma->vm_userfaultfd_ctx, 8195c26f6acSSuren Baghdasaryan anon_vma_name(vma)); 8209d8cebd4SKOSAKI Motohiro if (prev) { 82166850be5SLiam R. Howlett /* vma_merge() invalidated the mas */ 82266850be5SLiam R. Howlett mas_pause(&mas); 8239d8cebd4SKOSAKI Motohiro vma = prev; 8243964acd0SOleg Nesterov goto replace; 8251da177e4SLinus Torvalds } 8269d8cebd4SKOSAKI Motohiro if (vma->vm_start != vmstart) { 8279d8cebd4SKOSAKI Motohiro err = split_vma(vma->vm_mm, vma, vmstart, 1); 8289d8cebd4SKOSAKI Motohiro if (err) 8299d8cebd4SKOSAKI Motohiro goto out; 83066850be5SLiam R. Howlett /* split_vma() invalidated the mas */ 83166850be5SLiam R. Howlett mas_pause(&mas); 8329d8cebd4SKOSAKI Motohiro } 8339d8cebd4SKOSAKI Motohiro if (vma->vm_end != vmend) { 8349d8cebd4SKOSAKI Motohiro err = split_vma(vma->vm_mm, vma, vmend, 0); 8359d8cebd4SKOSAKI Motohiro if (err) 8369d8cebd4SKOSAKI Motohiro goto out; 83766850be5SLiam R. Howlett /* split_vma() invalidated the mas */ 83866850be5SLiam R. Howlett mas_pause(&mas); 8399d8cebd4SKOSAKI Motohiro } 8403964acd0SOleg Nesterov replace: 841869833f2SKOSAKI Motohiro err = vma_replace_policy(vma, new_pol); 8429d8cebd4SKOSAKI Motohiro if (err) 8439d8cebd4SKOSAKI Motohiro goto out; 84466850be5SLiam R. Howlett next: 84566850be5SLiam R. 
Howlett prev = vma; 8469d8cebd4SKOSAKI Motohiro } 8479d8cebd4SKOSAKI Motohiro 8489d8cebd4SKOSAKI Motohiro out: 8491da177e4SLinus Torvalds return err; 8501da177e4SLinus Torvalds } 8511da177e4SLinus Torvalds 8521da177e4SLinus Torvalds /* Set the process memory policy */ 853028fec41SDavid Rientjes static long do_set_mempolicy(unsigned short mode, unsigned short flags, 854028fec41SDavid Rientjes nodemask_t *nodes) 8551da177e4SLinus Torvalds { 85658568d2aSMiao Xie struct mempolicy *new, *old; 8574bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 85858568d2aSMiao Xie int ret; 8591da177e4SLinus Torvalds 8604bfc4495SKAMEZAWA Hiroyuki if (!scratch) 8614bfc4495SKAMEZAWA Hiroyuki return -ENOMEM; 862f4e53d91SLee Schermerhorn 8634bfc4495SKAMEZAWA Hiroyuki new = mpol_new(mode, flags, nodes); 8644bfc4495SKAMEZAWA Hiroyuki if (IS_ERR(new)) { 8654bfc4495SKAMEZAWA Hiroyuki ret = PTR_ERR(new); 8664bfc4495SKAMEZAWA Hiroyuki goto out; 8674bfc4495SKAMEZAWA Hiroyuki } 8682c7c3a7dSOleg Nesterov 86912c1dc8eSAbel Wu task_lock(current); 8704bfc4495SKAMEZAWA Hiroyuki ret = mpol_set_nodemask(new, nodes, scratch); 87158568d2aSMiao Xie if (ret) { 87212c1dc8eSAbel Wu task_unlock(current); 87358568d2aSMiao Xie mpol_put(new); 8744bfc4495SKAMEZAWA Hiroyuki goto out; 87558568d2aSMiao Xie } 87612c1dc8eSAbel Wu 87758568d2aSMiao Xie old = current->mempolicy; 8781da177e4SLinus Torvalds current->mempolicy = new; 87945816682SVlastimil Babka if (new && new->mode == MPOL_INTERLEAVE) 88045816682SVlastimil Babka current->il_prev = MAX_NUMNODES-1; 88158568d2aSMiao Xie task_unlock(current); 88258568d2aSMiao Xie mpol_put(old); 8834bfc4495SKAMEZAWA Hiroyuki ret = 0; 8844bfc4495SKAMEZAWA Hiroyuki out: 8854bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 8864bfc4495SKAMEZAWA Hiroyuki return ret; 8871da177e4SLinus Torvalds } 8881da177e4SLinus Torvalds 889bea904d5SLee Schermerhorn /* 890bea904d5SLee Schermerhorn * Return nodemask for policy for get_mempolicy() query 89158568d2aSMiao Xie * 89258568d2aSMiao Xie * Called with task's alloc_lock held 893bea904d5SLee Schermerhorn */ 894bea904d5SLee Schermerhorn static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes) 8951da177e4SLinus Torvalds { 896dfcd3c0dSAndi Kleen nodes_clear(*nodes); 897bea904d5SLee Schermerhorn if (p == &default_policy) 898bea904d5SLee Schermerhorn return; 899bea904d5SLee Schermerhorn 90045c4745aSLee Schermerhorn switch (p->mode) { 90119770b32SMel Gorman case MPOL_BIND: 9021da177e4SLinus Torvalds case MPOL_INTERLEAVE: 903269fbe72SBen Widawsky case MPOL_PREFERRED: 904b27abaccSDave Hansen case MPOL_PREFERRED_MANY: 905269fbe72SBen Widawsky *nodes = p->nodes; 9061da177e4SLinus Torvalds break; 9077858d7bcSFeng Tang case MPOL_LOCAL: 9087858d7bcSFeng Tang /* return empty node mask for local allocation */ 9097858d7bcSFeng Tang break; 9101da177e4SLinus Torvalds default: 9111da177e4SLinus Torvalds BUG(); 9121da177e4SLinus Torvalds } 9131da177e4SLinus Torvalds } 9141da177e4SLinus Torvalds 9153b9aadf7SAndrea Arcangeli static int lookup_node(struct mm_struct *mm, unsigned long addr) 9161da177e4SLinus Torvalds { 917ba841078SPeter Xu struct page *p = NULL; 918f728b9c4SJohn Hubbard int ret; 9191da177e4SLinus Torvalds 920f728b9c4SJohn Hubbard ret = get_user_pages_fast(addr & PAGE_MASK, 1, 0, &p); 921f728b9c4SJohn Hubbard if (ret > 0) { 922f728b9c4SJohn Hubbard ret = page_to_nid(p); 9231da177e4SLinus Torvalds put_page(p); 9241da177e4SLinus Torvalds } 925f728b9c4SJohn Hubbard return ret; 9261da177e4SLinus Torvalds } 9271da177e4SLinus Torvalds 9281da177e4SLinus 
Torvalds /* Retrieve NUMA policy */ 929dbcb0f19SAdrian Bunk static long do_get_mempolicy(int *policy, nodemask_t *nmask, 9301da177e4SLinus Torvalds unsigned long addr, unsigned long flags) 9311da177e4SLinus Torvalds { 9328bccd85fSChristoph Lameter int err; 9331da177e4SLinus Torvalds struct mm_struct *mm = current->mm; 9341da177e4SLinus Torvalds struct vm_area_struct *vma = NULL; 9353b9aadf7SAndrea Arcangeli struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL; 9361da177e4SLinus Torvalds 937754af6f5SLee Schermerhorn if (flags & 938754af6f5SLee Schermerhorn ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED)) 9391da177e4SLinus Torvalds return -EINVAL; 940754af6f5SLee Schermerhorn 941754af6f5SLee Schermerhorn if (flags & MPOL_F_MEMS_ALLOWED) { 942754af6f5SLee Schermerhorn if (flags & (MPOL_F_NODE|MPOL_F_ADDR)) 943754af6f5SLee Schermerhorn return -EINVAL; 944754af6f5SLee Schermerhorn *policy = 0; /* just so it's initialized */ 94558568d2aSMiao Xie task_lock(current); 946754af6f5SLee Schermerhorn *nmask = cpuset_current_mems_allowed; 94758568d2aSMiao Xie task_unlock(current); 948754af6f5SLee Schermerhorn return 0; 949754af6f5SLee Schermerhorn } 950754af6f5SLee Schermerhorn 9511da177e4SLinus Torvalds if (flags & MPOL_F_ADDR) { 952bea904d5SLee Schermerhorn /* 953bea904d5SLee Schermerhorn * Do NOT fall back to task policy if the 954bea904d5SLee Schermerhorn * vma/shared policy at addr is NULL. We 955bea904d5SLee Schermerhorn * want to return MPOL_DEFAULT in this case. 956bea904d5SLee Schermerhorn */ 957d8ed45c5SMichel Lespinasse mmap_read_lock(mm); 95833e3575cSLiam Howlett vma = vma_lookup(mm, addr); 9591da177e4SLinus Torvalds if (!vma) { 960d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); 9611da177e4SLinus Torvalds return -EFAULT; 9621da177e4SLinus Torvalds } 9631da177e4SLinus Torvalds if (vma->vm_ops && vma->vm_ops->get_policy) 9641da177e4SLinus Torvalds pol = vma->vm_ops->get_policy(vma, addr); 9651da177e4SLinus Torvalds else 9661da177e4SLinus Torvalds pol = vma->vm_policy; 9671da177e4SLinus Torvalds } else if (addr) 9681da177e4SLinus Torvalds return -EINVAL; 9691da177e4SLinus Torvalds 9701da177e4SLinus Torvalds if (!pol) 971bea904d5SLee Schermerhorn pol = &default_policy; /* indicates default behavior */ 9721da177e4SLinus Torvalds 9731da177e4SLinus Torvalds if (flags & MPOL_F_NODE) { 9741da177e4SLinus Torvalds if (flags & MPOL_F_ADDR) { 9753b9aadf7SAndrea Arcangeli /* 976f728b9c4SJohn Hubbard * Take a refcount on the mpol, because we are about to 977f728b9c4SJohn Hubbard * drop the mmap_lock, after which only "pol" remains 978f728b9c4SJohn Hubbard * valid, "vma" is stale. 9793b9aadf7SAndrea Arcangeli */ 9803b9aadf7SAndrea Arcangeli pol_refcount = pol; 9813b9aadf7SAndrea Arcangeli vma = NULL; 9823b9aadf7SAndrea Arcangeli mpol_get(pol); 983f728b9c4SJohn Hubbard mmap_read_unlock(mm); 9843b9aadf7SAndrea Arcangeli err = lookup_node(mm, addr); 9851da177e4SLinus Torvalds if (err < 0) 9861da177e4SLinus Torvalds goto out; 9878bccd85fSChristoph Lameter *policy = err; 9881da177e4SLinus Torvalds } else if (pol == current->mempolicy && 98945c4745aSLee Schermerhorn pol->mode == MPOL_INTERLEAVE) { 990269fbe72SBen Widawsky *policy = next_node_in(current->il_prev, pol->nodes); 9911da177e4SLinus Torvalds } else { 9921da177e4SLinus Torvalds err = -EINVAL; 9931da177e4SLinus Torvalds goto out; 9941da177e4SLinus Torvalds } 995bea904d5SLee Schermerhorn } else { 996bea904d5SLee Schermerhorn *policy = pol == &default_policy ? 
MPOL_DEFAULT : 997bea904d5SLee Schermerhorn pol->mode; 998d79df630SDavid Rientjes /* 999d79df630SDavid Rientjes * Internal mempolicy flags must be masked off before exposing 1000d79df630SDavid Rientjes * the policy to userspace. 1001d79df630SDavid Rientjes */ 1002d79df630SDavid Rientjes *policy |= (pol->flags & MPOL_MODE_FLAGS); 1003bea904d5SLee Schermerhorn } 10041da177e4SLinus Torvalds 10051da177e4SLinus Torvalds err = 0; 100658568d2aSMiao Xie if (nmask) { 1007c6b6ef8bSLee Schermerhorn if (mpol_store_user_nodemask(pol)) { 1008c6b6ef8bSLee Schermerhorn *nmask = pol->w.user_nodemask; 1009c6b6ef8bSLee Schermerhorn } else { 101058568d2aSMiao Xie task_lock(current); 1011bea904d5SLee Schermerhorn get_policy_nodemask(pol, nmask); 101258568d2aSMiao Xie task_unlock(current); 101358568d2aSMiao Xie } 1014c6b6ef8bSLee Schermerhorn } 10151da177e4SLinus Torvalds 10161da177e4SLinus Torvalds out: 101752cd3b07SLee Schermerhorn mpol_cond_put(pol); 10181da177e4SLinus Torvalds if (vma) 1019d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); 10203b9aadf7SAndrea Arcangeli if (pol_refcount) 10213b9aadf7SAndrea Arcangeli mpol_put(pol_refcount); 10221da177e4SLinus Torvalds return err; 10231da177e4SLinus Torvalds } 10241da177e4SLinus Torvalds 1025b20a3503SChristoph Lameter #ifdef CONFIG_MIGRATION 10268bccd85fSChristoph Lameter /* 1027c8633798SNaoya Horiguchi * page migration, thp tail pages can be passed. 10286ce3c4c0SChristoph Lameter */ 1029a53190a4SYang Shi static int migrate_page_add(struct page *page, struct list_head *pagelist, 1030fc301289SChristoph Lameter unsigned long flags) 10316ce3c4c0SChristoph Lameter { 1032c8633798SNaoya Horiguchi struct page *head = compound_head(page); 10336ce3c4c0SChristoph Lameter /* 1034fc301289SChristoph Lameter * Avoid migrating a page that is shared with others. 10356ce3c4c0SChristoph Lameter */ 1036c8633798SNaoya Horiguchi if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) { 1037c8633798SNaoya Horiguchi if (!isolate_lru_page(head)) { 1038c8633798SNaoya Horiguchi list_add_tail(&head->lru, pagelist); 1039c8633798SNaoya Horiguchi mod_node_page_state(page_pgdat(head), 10409de4f22aSHuang Ying NR_ISOLATED_ANON + page_is_file_lru(head), 10416c357848SMatthew Wilcox (Oracle) thp_nr_pages(head)); 1042a53190a4SYang Shi } else if (flags & MPOL_MF_STRICT) { 1043a53190a4SYang Shi /* 1044a53190a4SYang Shi * Non-movable page may reach here. And, there may be 1045a53190a4SYang Shi * temporary off LRU pages or non-LRU movable pages. 1046a53190a4SYang Shi * Treat them as unmovable pages since they can't be 1047a53190a4SYang Shi * isolated, so they can't be moved at the moment. It 1048a53190a4SYang Shi * should return -EIO for this case too. 1049a53190a4SYang Shi */ 1050a53190a4SYang Shi return -EIO; 105162695a84SNick Piggin } 105262695a84SNick Piggin } 1053a53190a4SYang Shi 1054a53190a4SYang Shi return 0; 10556ce3c4c0SChristoph Lameter } 10566ce3c4c0SChristoph Lameter 10576ce3c4c0SChristoph Lameter /* 10587e2ab150SChristoph Lameter * Migrate pages from one node to a target node. 10597e2ab150SChristoph Lameter * Returns error or the number of pages not migrated. 10607e2ab150SChristoph Lameter */ 1061dbcb0f19SAdrian Bunk static int migrate_to_node(struct mm_struct *mm, int source, int dest, 1062dbcb0f19SAdrian Bunk int flags) 10637e2ab150SChristoph Lameter { 10647e2ab150SChristoph Lameter nodemask_t nmask; 106566850be5SLiam R. 
Howlett struct vm_area_struct *vma; 10667e2ab150SChristoph Lameter LIST_HEAD(pagelist); 10677e2ab150SChristoph Lameter int err = 0; 1068a0976311SJoonsoo Kim struct migration_target_control mtc = { 1069a0976311SJoonsoo Kim .nid = dest, 1070a0976311SJoonsoo Kim .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 1071a0976311SJoonsoo Kim }; 10727e2ab150SChristoph Lameter 10737e2ab150SChristoph Lameter nodes_clear(nmask); 10747e2ab150SChristoph Lameter node_set(source, nmask); 10757e2ab150SChristoph Lameter 107608270807SMinchan Kim /* 107708270807SMinchan Kim * This does not "check" the range but isolates all pages that 107808270807SMinchan Kim * need migration. Between passing in the full user address 107908270807SMinchan Kim * space range and MPOL_MF_DISCONTIG_OK, this call can not fail. 108008270807SMinchan Kim */ 108166850be5SLiam R. Howlett vma = find_vma(mm, 0); 108208270807SMinchan Kim VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))); 108366850be5SLiam R. Howlett queue_pages_range(mm, vma->vm_start, mm->task_size, &nmask, 10847e2ab150SChristoph Lameter flags | MPOL_MF_DISCONTIG_OK, &pagelist); 10857e2ab150SChristoph Lameter 1086cf608ac1SMinchan Kim if (!list_empty(&pagelist)) { 1087a0976311SJoonsoo Kim err = migrate_pages(&pagelist, alloc_migration_target, NULL, 10885ac95884SYang Shi (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL); 1089cf608ac1SMinchan Kim if (err) 1090e2d8cf40SNaoya Horiguchi putback_movable_pages(&pagelist); 1091cf608ac1SMinchan Kim } 109295a402c3SChristoph Lameter 10937e2ab150SChristoph Lameter return err; 10947e2ab150SChristoph Lameter } 10957e2ab150SChristoph Lameter 10967e2ab150SChristoph Lameter /* 10977e2ab150SChristoph Lameter * Move pages between the two nodesets so as to preserve the physical 10987e2ab150SChristoph Lameter * layout as much as possible. 109939743889SChristoph Lameter * 110039743889SChristoph Lameter * Returns the number of page that could not be moved. 110139743889SChristoph Lameter */ 11020ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, 11030ce72d4fSAndrew Morton const nodemask_t *to, int flags) 110439743889SChristoph Lameter { 11057e2ab150SChristoph Lameter int busy = 0; 1106f555befdSJan Stancek int err = 0; 11077e2ab150SChristoph Lameter nodemask_t tmp; 110839743889SChristoph Lameter 1109361a2a22SMinchan Kim lru_cache_disable(); 11100aedadf9SChristoph Lameter 1111d8ed45c5SMichel Lespinasse mmap_read_lock(mm); 1112d4984711SChristoph Lameter 11137e2ab150SChristoph Lameter /* 11147e2ab150SChristoph Lameter * Find a 'source' bit set in 'tmp' whose corresponding 'dest' 11157e2ab150SChristoph Lameter * bit in 'to' is not also set in 'tmp'. Clear the found 'source' 11167e2ab150SChristoph Lameter * bit in 'tmp', and return that <source, dest> pair for migration. 11177e2ab150SChristoph Lameter * The pair of nodemasks 'to' and 'from' define the map. 11187e2ab150SChristoph Lameter * 11197e2ab150SChristoph Lameter * If no pair of bits is found that way, fallback to picking some 11207e2ab150SChristoph Lameter * pair of 'source' and 'dest' bits that are not the same. If the 11217e2ab150SChristoph Lameter * 'source' and 'dest' bits are the same, this represents a node 11227e2ab150SChristoph Lameter * that will be migrating to itself, so no pages need move. 
11237e2ab150SChristoph Lameter * 11247e2ab150SChristoph Lameter * If no bits are left in 'tmp', or if all remaining bits left 11257e2ab150SChristoph Lameter * in 'tmp' correspond to the same bit in 'to', return false 11267e2ab150SChristoph Lameter * (nothing left to migrate). 11277e2ab150SChristoph Lameter * 11287e2ab150SChristoph Lameter * This lets us pick a pair of nodes to migrate between, such that 11297e2ab150SChristoph Lameter * if possible the dest node is not already occupied by some other 11307e2ab150SChristoph Lameter * source node, minimizing the risk of overloading the memory on a 11317e2ab150SChristoph Lameter * node that would happen if we migrated incoming memory to a node 11327e2ab150SChristoph Lameter * before migrating outgoing memory source that same node. 11337e2ab150SChristoph Lameter * 11347e2ab150SChristoph Lameter * A single scan of tmp is sufficient. As we go, we remember the 11357e2ab150SChristoph Lameter * most recent <s, d> pair that moved (s != d). If we find a pair 11367e2ab150SChristoph Lameter * that not only moved, but what's better, moved to an empty slot 11377e2ab150SChristoph Lameter * (d is not set in tmp), then we break out then, with that pair. 1138ae0e47f0SJustin P. Mattock * Otherwise when we finish scanning from_tmp, we at least have the 11397e2ab150SChristoph Lameter * most recent <s, d> pair that moved. If we get all the way through 11407e2ab150SChristoph Lameter * the scan of tmp without finding any node that moved, much less 11417e2ab150SChristoph Lameter * moved to an empty node, then there is nothing left worth migrating. 11427e2ab150SChristoph Lameter */ 11437e2ab150SChristoph Lameter 11440ce72d4fSAndrew Morton tmp = *from; 11457e2ab150SChristoph Lameter while (!nodes_empty(tmp)) { 11467e2ab150SChristoph Lameter int s, d; 1147b76ac7e7SJianguo Wu int source = NUMA_NO_NODE; 11487e2ab150SChristoph Lameter int dest = 0; 11497e2ab150SChristoph Lameter 11507e2ab150SChristoph Lameter for_each_node_mask(s, tmp) { 11514a5b18ccSLarry Woodman 11524a5b18ccSLarry Woodman /* 11534a5b18ccSLarry Woodman * do_migrate_pages() tries to maintain the relative 11544a5b18ccSLarry Woodman * node relationship of the pages established between 11554a5b18ccSLarry Woodman * threads and memory areas. 11564a5b18ccSLarry Woodman * 11574a5b18ccSLarry Woodman * However if the number of source nodes is not equal to 11584a5b18ccSLarry Woodman * the number of destination nodes we can not preserve 11594a5b18ccSLarry Woodman * this node relative relationship. In that case, skip 11604a5b18ccSLarry Woodman * copying memory from a node that is in the destination 11614a5b18ccSLarry Woodman * mask. 11624a5b18ccSLarry Woodman * 11634a5b18ccSLarry Woodman * Example: [2,3,4] -> [3,4,5] moves everything. 11644a5b18ccSLarry Woodman * [0-7] - > [3,4,5] moves only 0,1,2,6,7. 11654a5b18ccSLarry Woodman */ 11664a5b18ccSLarry Woodman 11670ce72d4fSAndrew Morton if ((nodes_weight(*from) != nodes_weight(*to)) && 11680ce72d4fSAndrew Morton (node_isset(s, *to))) 11694a5b18ccSLarry Woodman continue; 11704a5b18ccSLarry Woodman 11710ce72d4fSAndrew Morton d = node_remap(s, *from, *to); 11727e2ab150SChristoph Lameter if (s == d) 11737e2ab150SChristoph Lameter continue; 11747e2ab150SChristoph Lameter 11757e2ab150SChristoph Lameter source = s; /* Node moved. Memorize */ 11767e2ab150SChristoph Lameter dest = d; 11777e2ab150SChristoph Lameter 11787e2ab150SChristoph Lameter /* dest not in remaining from nodes? 
*/ 11797e2ab150SChristoph Lameter if (!node_isset(dest, tmp)) 11807e2ab150SChristoph Lameter break; 11817e2ab150SChristoph Lameter } 1182b76ac7e7SJianguo Wu if (source == NUMA_NO_NODE) 11837e2ab150SChristoph Lameter break; 11847e2ab150SChristoph Lameter 11857e2ab150SChristoph Lameter node_clear(source, tmp); 11867e2ab150SChristoph Lameter err = migrate_to_node(mm, source, dest, flags); 11877e2ab150SChristoph Lameter if (err > 0) 11887e2ab150SChristoph Lameter busy += err; 11897e2ab150SChristoph Lameter if (err < 0) 11907e2ab150SChristoph Lameter break; 119139743889SChristoph Lameter } 1192d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); 1193d479960eSMinchan Kim 1194361a2a22SMinchan Kim lru_cache_enable(); 11957e2ab150SChristoph Lameter if (err < 0) 11967e2ab150SChristoph Lameter return err; 11977e2ab150SChristoph Lameter return busy; 1198b20a3503SChristoph Lameter 119939743889SChristoph Lameter } 120039743889SChristoph Lameter 12013ad33b24SLee Schermerhorn /* 12023ad33b24SLee Schermerhorn * Allocate a new page for page migration based on vma policy. 1203d05f0cdcSHugh Dickins * Start by assuming the page is mapped by the same vma as contains @start. 12043ad33b24SLee Schermerhorn * Search forward from there, if not. N.B., this assumes that the 12053ad33b24SLee Schermerhorn * list of pages handed to migrate_pages()--which is how we get here-- 12063ad33b24SLee Schermerhorn * is in virtual address order. 12073ad33b24SLee Schermerhorn */ 1208666feb21SMichal Hocko static struct page *new_page(struct page *page, unsigned long start) 120995a402c3SChristoph Lameter { 1210ec4858e0SMatthew Wilcox (Oracle) struct folio *dst, *src = page_folio(page); 1211d05f0cdcSHugh Dickins struct vm_area_struct *vma; 12123f649ab7SKees Cook unsigned long address; 121366850be5SLiam R. Howlett VMA_ITERATOR(vmi, current->mm, start); 1214ec4858e0SMatthew Wilcox (Oracle) gfp_t gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL; 121595a402c3SChristoph Lameter 121666850be5SLiam R. 
Howlett for_each_vma(vmi, vma) { 12173ad33b24SLee Schermerhorn address = page_address_in_vma(page, vma); 12183ad33b24SLee Schermerhorn if (address != -EFAULT) 12193ad33b24SLee Schermerhorn break; 12203ad33b24SLee Schermerhorn } 12213ad33b24SLee Schermerhorn 1222ec4858e0SMatthew Wilcox (Oracle) if (folio_test_hugetlb(src)) 1223ec4858e0SMatthew Wilcox (Oracle) return alloc_huge_page_vma(page_hstate(&src->page), 1224389c8178SMichal Hocko vma, address); 1225c8633798SNaoya Horiguchi 1226ec4858e0SMatthew Wilcox (Oracle) if (folio_test_large(src)) 1227ec4858e0SMatthew Wilcox (Oracle) gfp = GFP_TRANSHUGE; 1228ec4858e0SMatthew Wilcox (Oracle) 122911c731e8SWanpeng Li /* 1230ec4858e0SMatthew Wilcox (Oracle) * if !vma, vma_alloc_folio() will use task or system default policy 123111c731e8SWanpeng Li */ 1232ec4858e0SMatthew Wilcox (Oracle) dst = vma_alloc_folio(gfp, folio_order(src), vma, address, 1233ec4858e0SMatthew Wilcox (Oracle) folio_test_large(src)); 1234ec4858e0SMatthew Wilcox (Oracle) return &dst->page; 123595a402c3SChristoph Lameter } 1236b20a3503SChristoph Lameter #else 1237b20a3503SChristoph Lameter 1238a53190a4SYang Shi static int migrate_page_add(struct page *page, struct list_head *pagelist, 1239b20a3503SChristoph Lameter unsigned long flags) 1240b20a3503SChristoph Lameter { 1241a53190a4SYang Shi return -EIO; 1242b20a3503SChristoph Lameter } 1243b20a3503SChristoph Lameter 12440ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, 12450ce72d4fSAndrew Morton const nodemask_t *to, int flags) 1246b20a3503SChristoph Lameter { 1247b20a3503SChristoph Lameter return -ENOSYS; 1248b20a3503SChristoph Lameter } 124995a402c3SChristoph Lameter 1250666feb21SMichal Hocko static struct page *new_page(struct page *page, unsigned long start) 125195a402c3SChristoph Lameter { 125295a402c3SChristoph Lameter return NULL; 125395a402c3SChristoph Lameter } 1254b20a3503SChristoph Lameter #endif 1255b20a3503SChristoph Lameter 1256dbcb0f19SAdrian Bunk static long do_mbind(unsigned long start, unsigned long len, 1257028fec41SDavid Rientjes unsigned short mode, unsigned short mode_flags, 1258028fec41SDavid Rientjes nodemask_t *nmask, unsigned long flags) 12596ce3c4c0SChristoph Lameter { 12606ce3c4c0SChristoph Lameter struct mm_struct *mm = current->mm; 12616ce3c4c0SChristoph Lameter struct mempolicy *new; 12626ce3c4c0SChristoph Lameter unsigned long end; 12636ce3c4c0SChristoph Lameter int err; 1264d8835445SYang Shi int ret; 12656ce3c4c0SChristoph Lameter LIST_HEAD(pagelist); 12666ce3c4c0SChristoph Lameter 1267b24f53a0SLee Schermerhorn if (flags & ~(unsigned long)MPOL_MF_VALID) 12686ce3c4c0SChristoph Lameter return -EINVAL; 126974c00241SChristoph Lameter if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE)) 12706ce3c4c0SChristoph Lameter return -EPERM; 12716ce3c4c0SChristoph Lameter 12726ce3c4c0SChristoph Lameter if (start & ~PAGE_MASK) 12736ce3c4c0SChristoph Lameter return -EINVAL; 12746ce3c4c0SChristoph Lameter 12756ce3c4c0SChristoph Lameter if (mode == MPOL_DEFAULT) 12766ce3c4c0SChristoph Lameter flags &= ~MPOL_MF_STRICT; 12776ce3c4c0SChristoph Lameter 1278aaa31e05Sze zuo len = PAGE_ALIGN(len); 12796ce3c4c0SChristoph Lameter end = start + len; 12806ce3c4c0SChristoph Lameter 12816ce3c4c0SChristoph Lameter if (end < start) 12826ce3c4c0SChristoph Lameter return -EINVAL; 12836ce3c4c0SChristoph Lameter if (end == start) 12846ce3c4c0SChristoph Lameter return 0; 12856ce3c4c0SChristoph Lameter 1286028fec41SDavid Rientjes new = mpol_new(mode, mode_flags, nmask); 12876ce3c4c0SChristoph 
Lameter if (IS_ERR(new)) 12886ce3c4c0SChristoph Lameter return PTR_ERR(new); 12896ce3c4c0SChristoph Lameter 1290b24f53a0SLee Schermerhorn if (flags & MPOL_MF_LAZY) 1291b24f53a0SLee Schermerhorn new->flags |= MPOL_F_MOF; 1292b24f53a0SLee Schermerhorn 12936ce3c4c0SChristoph Lameter /* 12946ce3c4c0SChristoph Lameter * If we are using the default policy then operation 12956ce3c4c0SChristoph Lameter * on discontinuous address spaces is okay after all 12966ce3c4c0SChristoph Lameter */ 12976ce3c4c0SChristoph Lameter if (!new) 12986ce3c4c0SChristoph Lameter flags |= MPOL_MF_DISCONTIG_OK; 12996ce3c4c0SChristoph Lameter 1300028fec41SDavid Rientjes pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n", 1301028fec41SDavid Rientjes start, start + len, mode, mode_flags, 130200ef2d2fSDavid Rientjes nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE); 13036ce3c4c0SChristoph Lameter 13040aedadf9SChristoph Lameter if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 13050aedadf9SChristoph Lameter 1306361a2a22SMinchan Kim lru_cache_disable(); 13070aedadf9SChristoph Lameter } 13084bfc4495SKAMEZAWA Hiroyuki { 13094bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 13104bfc4495SKAMEZAWA Hiroyuki if (scratch) { 1311d8ed45c5SMichel Lespinasse mmap_write_lock(mm); 13124bfc4495SKAMEZAWA Hiroyuki err = mpol_set_nodemask(new, nmask, scratch); 13134bfc4495SKAMEZAWA Hiroyuki if (err) 1314d8ed45c5SMichel Lespinasse mmap_write_unlock(mm); 13154bfc4495SKAMEZAWA Hiroyuki } else 13164bfc4495SKAMEZAWA Hiroyuki err = -ENOMEM; 13174bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 13184bfc4495SKAMEZAWA Hiroyuki } 1319b05ca738SKOSAKI Motohiro if (err) 1320b05ca738SKOSAKI Motohiro goto mpol_out; 1321b05ca738SKOSAKI Motohiro 1322d8835445SYang Shi ret = queue_pages_range(mm, start, end, nmask, 13236ce3c4c0SChristoph Lameter flags | MPOL_MF_INVERT, &pagelist); 1324d8835445SYang Shi 1325d8835445SYang Shi if (ret < 0) { 1326a85dfc30SYang Shi err = ret; 1327d8835445SYang Shi goto up_out; 1328d8835445SYang Shi } 1329d8835445SYang Shi 13309d8cebd4SKOSAKI Motohiro err = mbind_range(mm, start, end, new); 13317e2ab150SChristoph Lameter 1332b24f53a0SLee Schermerhorn if (!err) { 1333b24f53a0SLee Schermerhorn int nr_failed = 0; 1334b24f53a0SLee Schermerhorn 1335cf608ac1SMinchan Kim if (!list_empty(&pagelist)) { 1336b24f53a0SLee Schermerhorn WARN_ON_ONCE(flags & MPOL_MF_LAZY); 1337d05f0cdcSHugh Dickins nr_failed = migrate_pages(&pagelist, new_page, NULL, 13385ac95884SYang Shi start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND, NULL); 1339cf608ac1SMinchan Kim if (nr_failed) 134074060e4dSNaoya Horiguchi putback_movable_pages(&pagelist); 1341cf608ac1SMinchan Kim } 13426ce3c4c0SChristoph Lameter 1343d8835445SYang Shi if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT))) 13446ce3c4c0SChristoph Lameter err = -EIO; 1345a85dfc30SYang Shi } else { 1346d8835445SYang Shi up_out: 1347a85dfc30SYang Shi if (!list_empty(&pagelist)) 1348a85dfc30SYang Shi putback_movable_pages(&pagelist); 1349a85dfc30SYang Shi } 1350a85dfc30SYang Shi 1351d8ed45c5SMichel Lespinasse mmap_write_unlock(mm); 1352b05ca738SKOSAKI Motohiro mpol_out: 1353f0be3d32SLee Schermerhorn mpol_put(new); 1354d479960eSMinchan Kim if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) 1355361a2a22SMinchan Kim lru_cache_enable(); 13566ce3c4c0SChristoph Lameter return err; 13576ce3c4c0SChristoph Lameter } 13586ce3c4c0SChristoph Lameter 135939743889SChristoph Lameter /* 13608bccd85fSChristoph Lameter * User space interface with variable sized bitmaps for nodelists. 
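 *
 * A hedged userspace sketch (editor's addition, not from the original
 * source) of how such a bitmap is typically handed in; the node number
 * and maxnode value are illustrative only:
 *
 *	unsigned long nodemask = 1UL << 1;		// node 1 only
 *	if (mbind(addr, len, MPOL_BIND, &nodemask,
 *		  sizeof(nodemask) * 8, 0))		// maxnode in bits
 *		perror("mbind");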
13618bccd85fSChristoph Lameter */ 1362e130242dSArnd Bergmann static int get_bitmap(unsigned long *mask, const unsigned long __user *nmask, 1363e130242dSArnd Bergmann unsigned long maxnode) 1364e130242dSArnd Bergmann { 1365e130242dSArnd Bergmann unsigned long nlongs = BITS_TO_LONGS(maxnode); 1366e130242dSArnd Bergmann int ret; 1367e130242dSArnd Bergmann 1368e130242dSArnd Bergmann if (in_compat_syscall()) 1369e130242dSArnd Bergmann ret = compat_get_bitmap(mask, 1370e130242dSArnd Bergmann (const compat_ulong_t __user *)nmask, 1371e130242dSArnd Bergmann maxnode); 1372e130242dSArnd Bergmann else 1373e130242dSArnd Bergmann ret = copy_from_user(mask, nmask, 1374e130242dSArnd Bergmann nlongs * sizeof(unsigned long)); 1375e130242dSArnd Bergmann 1376e130242dSArnd Bergmann if (ret) 1377e130242dSArnd Bergmann return -EFAULT; 1378e130242dSArnd Bergmann 1379e130242dSArnd Bergmann if (maxnode % BITS_PER_LONG) 1380e130242dSArnd Bergmann mask[nlongs - 1] &= (1UL << (maxnode % BITS_PER_LONG)) - 1; 1381e130242dSArnd Bergmann 1382e130242dSArnd Bergmann return 0; 1383e130242dSArnd Bergmann } 13848bccd85fSChristoph Lameter 13858bccd85fSChristoph Lameter /* Copy a node mask from user space. */ 138639743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask, 13878bccd85fSChristoph Lameter unsigned long maxnode) 13888bccd85fSChristoph Lameter { 13898bccd85fSChristoph Lameter --maxnode; 13908bccd85fSChristoph Lameter nodes_clear(*nodes); 13918bccd85fSChristoph Lameter if (maxnode == 0 || !nmask) 13928bccd85fSChristoph Lameter return 0; 1393a9c930baSAndi Kleen if (maxnode > PAGE_SIZE*BITS_PER_BYTE) 1394636f13c1SChris Wright return -EINVAL; 13958bccd85fSChristoph Lameter 139656521e7aSYisheng Xie /* 139756521e7aSYisheng Xie * When the user specified more nodes than supported just check 1398e130242dSArnd Bergmann * if the non supported part is all zero, one word at a time, 1399e130242dSArnd Bergmann * starting at the end. 
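 *
 * (Editor's example: with MAX_NUMNODES == 1024 and a userspace maxnode
 * of 2048, each long covering bits 1024..2047 is read back and must be
 * zero; only the low 1024 bits are then copied into *nodes.)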
140056521e7aSYisheng Xie */ 1401e130242dSArnd Bergmann while (maxnode > MAX_NUMNODES) { 1402e130242dSArnd Bergmann unsigned long bits = min_t(unsigned long, maxnode, BITS_PER_LONG); 1403e130242dSArnd Bergmann unsigned long t; 14048bccd85fSChristoph Lameter 1405000eca5dSTianyu Li if (get_bitmap(&t, &nmask[(maxnode - 1) / BITS_PER_LONG], bits)) 140656521e7aSYisheng Xie return -EFAULT; 1407e130242dSArnd Bergmann 1408e130242dSArnd Bergmann if (maxnode - bits >= MAX_NUMNODES) { 1409e130242dSArnd Bergmann maxnode -= bits; 1410e130242dSArnd Bergmann } else { 1411e130242dSArnd Bergmann maxnode = MAX_NUMNODES; 1412e130242dSArnd Bergmann t &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1); 1413e130242dSArnd Bergmann } 1414e130242dSArnd Bergmann if (t) 141556521e7aSYisheng Xie return -EINVAL; 141656521e7aSYisheng Xie } 141756521e7aSYisheng Xie 1418e130242dSArnd Bergmann return get_bitmap(nodes_addr(*nodes), nmask, maxnode); 14198bccd85fSChristoph Lameter } 14208bccd85fSChristoph Lameter 14218bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */ 14228bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode, 14238bccd85fSChristoph Lameter nodemask_t *nodes) 14248bccd85fSChristoph Lameter { 14258bccd85fSChristoph Lameter unsigned long copy = ALIGN(maxnode-1, 64) / 8; 1426050c17f2SRalph Campbell unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long); 1427e130242dSArnd Bergmann bool compat = in_compat_syscall(); 1428e130242dSArnd Bergmann 1429e130242dSArnd Bergmann if (compat) 1430e130242dSArnd Bergmann nbytes = BITS_TO_COMPAT_LONGS(nr_node_ids) * sizeof(compat_long_t); 14318bccd85fSChristoph Lameter 14328bccd85fSChristoph Lameter if (copy > nbytes) { 14338bccd85fSChristoph Lameter if (copy > PAGE_SIZE) 14348bccd85fSChristoph Lameter return -EINVAL; 14358bccd85fSChristoph Lameter if (clear_user((char __user *)mask + nbytes, copy - nbytes)) 14368bccd85fSChristoph Lameter return -EFAULT; 14378bccd85fSChristoph Lameter copy = nbytes; 1438e130242dSArnd Bergmann maxnode = nr_node_ids; 14398bccd85fSChristoph Lameter } 1440e130242dSArnd Bergmann 1441e130242dSArnd Bergmann if (compat) 1442e130242dSArnd Bergmann return compat_put_bitmap((compat_ulong_t __user *)mask, 1443e130242dSArnd Bergmann nodes_addr(*nodes), maxnode); 1444e130242dSArnd Bergmann 14458bccd85fSChristoph Lameter return copy_to_user(mask, nodes_addr(*nodes), copy) ? 
-EFAULT : 0; 14468bccd85fSChristoph Lameter } 14478bccd85fSChristoph Lameter 144895837924SFeng Tang /* Basic parameter sanity check used by both mbind() and set_mempolicy() */ 144995837924SFeng Tang static inline int sanitize_mpol_flags(int *mode, unsigned short *flags) 145095837924SFeng Tang { 145195837924SFeng Tang *flags = *mode & MPOL_MODE_FLAGS; 145295837924SFeng Tang *mode &= ~MPOL_MODE_FLAGS; 1453b27abaccSDave Hansen 1454a38a59fdSBen Widawsky if ((unsigned int)(*mode) >= MPOL_MAX) 145595837924SFeng Tang return -EINVAL; 145695837924SFeng Tang if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES)) 145795837924SFeng Tang return -EINVAL; 14586d2aec9eSEric Dumazet if (*flags & MPOL_F_NUMA_BALANCING) { 14596d2aec9eSEric Dumazet if (*mode != MPOL_BIND) 14606d2aec9eSEric Dumazet return -EINVAL; 14616d2aec9eSEric Dumazet *flags |= (MPOL_F_MOF | MPOL_F_MORON); 14626d2aec9eSEric Dumazet } 146395837924SFeng Tang return 0; 146495837924SFeng Tang } 146595837924SFeng Tang 1466e7dc9ad6SDominik Brodowski static long kernel_mbind(unsigned long start, unsigned long len, 1467e7dc9ad6SDominik Brodowski unsigned long mode, const unsigned long __user *nmask, 1468e7dc9ad6SDominik Brodowski unsigned long maxnode, unsigned int flags) 14698bccd85fSChristoph Lameter { 1470028fec41SDavid Rientjes unsigned short mode_flags; 147195837924SFeng Tang nodemask_t nodes; 147295837924SFeng Tang int lmode = mode; 147395837924SFeng Tang int err; 14748bccd85fSChristoph Lameter 1475057d3389SAndrey Konovalov start = untagged_addr(start); 147695837924SFeng Tang err = sanitize_mpol_flags(&lmode, &mode_flags); 147795837924SFeng Tang if (err) 147895837924SFeng Tang return err; 147995837924SFeng Tang 14808bccd85fSChristoph Lameter err = get_nodes(&nodes, nmask, maxnode); 14818bccd85fSChristoph Lameter if (err) 14828bccd85fSChristoph Lameter return err; 148395837924SFeng Tang 148495837924SFeng Tang return do_mbind(start, len, lmode, mode_flags, &nodes, flags); 14858bccd85fSChristoph Lameter } 14868bccd85fSChristoph Lameter 1487c6018b4bSAneesh Kumar K.V SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, len, 1488c6018b4bSAneesh Kumar K.V unsigned long, home_node, unsigned long, flags) 1489c6018b4bSAneesh Kumar K.V { 1490c6018b4bSAneesh Kumar K.V struct mm_struct *mm = current->mm; 1491c6018b4bSAneesh Kumar K.V struct vm_area_struct *vma; 1492c6018b4bSAneesh Kumar K.V struct mempolicy *new; 1493c6018b4bSAneesh Kumar K.V unsigned long vmstart; 1494c6018b4bSAneesh Kumar K.V unsigned long vmend; 1495c6018b4bSAneesh Kumar K.V unsigned long end; 1496c6018b4bSAneesh Kumar K.V int err = -ENOENT; 149766850be5SLiam R. Howlett VMA_ITERATOR(vmi, mm, start); 1498c6018b4bSAneesh Kumar K.V 1499c6018b4bSAneesh Kumar K.V start = untagged_addr(start); 1500c6018b4bSAneesh Kumar K.V if (start & ~PAGE_MASK) 1501c6018b4bSAneesh Kumar K.V return -EINVAL; 1502c6018b4bSAneesh Kumar K.V /* 1503c6018b4bSAneesh Kumar K.V * flags is used for future extension if any. 1504c6018b4bSAneesh Kumar K.V */ 1505c6018b4bSAneesh Kumar K.V if (flags != 0) 1506c6018b4bSAneesh Kumar K.V return -EINVAL; 1507c6018b4bSAneesh Kumar K.V 1508c6018b4bSAneesh Kumar K.V /* 1509c6018b4bSAneesh Kumar K.V * Check home_node is online to avoid accessing uninitialized 1510c6018b4bSAneesh Kumar K.V * NODE_DATA. 
1511c6018b4bSAneesh Kumar K.V */ 1512c6018b4bSAneesh Kumar K.V if (home_node >= MAX_NUMNODES || !node_online(home_node)) 1513c6018b4bSAneesh Kumar K.V return -EINVAL; 1514c6018b4bSAneesh Kumar K.V 1515aaa31e05Sze zuo len = PAGE_ALIGN(len); 1516c6018b4bSAneesh Kumar K.V end = start + len; 1517c6018b4bSAneesh Kumar K.V 1518c6018b4bSAneesh Kumar K.V if (end < start) 1519c6018b4bSAneesh Kumar K.V return -EINVAL; 1520c6018b4bSAneesh Kumar K.V if (end == start) 1521c6018b4bSAneesh Kumar K.V return 0; 1522c6018b4bSAneesh Kumar K.V mmap_write_lock(mm); 152366850be5SLiam R. Howlett for_each_vma_range(vmi, vma, end) { 1524c6018b4bSAneesh Kumar K.V vmstart = max(start, vma->vm_start); 1525c6018b4bSAneesh Kumar K.V vmend = min(end, vma->vm_end); 1526c6018b4bSAneesh Kumar K.V new = mpol_dup(vma_policy(vma)); 1527c6018b4bSAneesh Kumar K.V if (IS_ERR(new)) { 1528c6018b4bSAneesh Kumar K.V err = PTR_ERR(new); 1529c6018b4bSAneesh Kumar K.V break; 1530c6018b4bSAneesh Kumar K.V } 1531c6018b4bSAneesh Kumar K.V /* 1532c6018b4bSAneesh Kumar K.V * Only update home node if there is an existing vma policy 1533c6018b4bSAneesh Kumar K.V */ 1534c6018b4bSAneesh Kumar K.V if (!new) 1535c6018b4bSAneesh Kumar K.V continue; 1536c6018b4bSAneesh Kumar K.V 1537c6018b4bSAneesh Kumar K.V /* 1538c6018b4bSAneesh Kumar K.V * If any vma in the range got policy other than MPOL_BIND 1539c6018b4bSAneesh Kumar K.V * or MPOL_PREFERRED_MANY we return error. We don't reset 1540c6018b4bSAneesh Kumar K.V * the home node for vmas we already updated before. 1541c6018b4bSAneesh Kumar K.V */ 1542c6018b4bSAneesh Kumar K.V if (new->mode != MPOL_BIND && new->mode != MPOL_PREFERRED_MANY) { 1543c6018b4bSAneesh Kumar K.V err = -EOPNOTSUPP; 1544c6018b4bSAneesh Kumar K.V break; 1545c6018b4bSAneesh Kumar K.V } 1546c6018b4bSAneesh Kumar K.V 1547c6018b4bSAneesh Kumar K.V new->home_node = home_node; 1548c6018b4bSAneesh Kumar K.V err = mbind_range(mm, vmstart, vmend, new); 1549c6018b4bSAneesh Kumar K.V mpol_put(new); 1550c6018b4bSAneesh Kumar K.V if (err) 1551c6018b4bSAneesh Kumar K.V break; 1552c6018b4bSAneesh Kumar K.V } 1553c6018b4bSAneesh Kumar K.V mmap_write_unlock(mm); 1554c6018b4bSAneesh Kumar K.V return err; 1555c6018b4bSAneesh Kumar K.V } 1556c6018b4bSAneesh Kumar K.V 1557e7dc9ad6SDominik Brodowski SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len, 1558e7dc9ad6SDominik Brodowski unsigned long, mode, const unsigned long __user *, nmask, 1559e7dc9ad6SDominik Brodowski unsigned long, maxnode, unsigned int, flags) 1560e7dc9ad6SDominik Brodowski { 1561e7dc9ad6SDominik Brodowski return kernel_mbind(start, len, mode, nmask, maxnode, flags); 1562e7dc9ad6SDominik Brodowski } 1563e7dc9ad6SDominik Brodowski 15648bccd85fSChristoph Lameter /* Set the process memory policy */ 1565af03c4acSDominik Brodowski static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask, 1566af03c4acSDominik Brodowski unsigned long maxnode) 15678bccd85fSChristoph Lameter { 156895837924SFeng Tang unsigned short mode_flags; 15698bccd85fSChristoph Lameter nodemask_t nodes; 157095837924SFeng Tang int lmode = mode; 157195837924SFeng Tang int err; 15728bccd85fSChristoph Lameter 157395837924SFeng Tang err = sanitize_mpol_flags(&lmode, &mode_flags); 157495837924SFeng Tang if (err) 157595837924SFeng Tang return err; 157695837924SFeng Tang 15778bccd85fSChristoph Lameter err = get_nodes(&nodes, nmask, maxnode); 15788bccd85fSChristoph Lameter if (err) 15798bccd85fSChristoph Lameter return err; 158095837924SFeng Tang 158195837924SFeng Tang return 
do_set_mempolicy(lmode, mode_flags, &nodes); 15828bccd85fSChristoph Lameter } 15838bccd85fSChristoph Lameter 1584af03c4acSDominik Brodowski SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask, 1585af03c4acSDominik Brodowski unsigned long, maxnode) 1586af03c4acSDominik Brodowski { 1587af03c4acSDominik Brodowski return kernel_set_mempolicy(mode, nmask, maxnode); 1588af03c4acSDominik Brodowski } 1589af03c4acSDominik Brodowski 1590b6e9b0baSDominik Brodowski static int kernel_migrate_pages(pid_t pid, unsigned long maxnode, 1591b6e9b0baSDominik Brodowski const unsigned long __user *old_nodes, 1592b6e9b0baSDominik Brodowski const unsigned long __user *new_nodes) 159339743889SChristoph Lameter { 1594596d7cfaSKOSAKI Motohiro struct mm_struct *mm = NULL; 159539743889SChristoph Lameter struct task_struct *task; 159639743889SChristoph Lameter nodemask_t task_nodes; 159739743889SChristoph Lameter int err; 1598596d7cfaSKOSAKI Motohiro nodemask_t *old; 1599596d7cfaSKOSAKI Motohiro nodemask_t *new; 1600596d7cfaSKOSAKI Motohiro NODEMASK_SCRATCH(scratch); 160139743889SChristoph Lameter 1602596d7cfaSKOSAKI Motohiro if (!scratch) 1603596d7cfaSKOSAKI Motohiro return -ENOMEM; 160439743889SChristoph Lameter 1605596d7cfaSKOSAKI Motohiro old = &scratch->mask1; 1606596d7cfaSKOSAKI Motohiro new = &scratch->mask2; 1607596d7cfaSKOSAKI Motohiro 1608596d7cfaSKOSAKI Motohiro err = get_nodes(old, old_nodes, maxnode); 160939743889SChristoph Lameter if (err) 1610596d7cfaSKOSAKI Motohiro goto out; 1611596d7cfaSKOSAKI Motohiro 1612596d7cfaSKOSAKI Motohiro err = get_nodes(new, new_nodes, maxnode); 1613596d7cfaSKOSAKI Motohiro if (err) 1614596d7cfaSKOSAKI Motohiro goto out; 161539743889SChristoph Lameter 161639743889SChristoph Lameter /* Find the mm_struct */ 161755cfaa3cSZeng Zhaoming rcu_read_lock(); 1618228ebcbeSPavel Emelyanov task = pid ? find_task_by_vpid(pid) : current; 161939743889SChristoph Lameter if (!task) { 162055cfaa3cSZeng Zhaoming rcu_read_unlock(); 1621596d7cfaSKOSAKI Motohiro err = -ESRCH; 1622596d7cfaSKOSAKI Motohiro goto out; 162339743889SChristoph Lameter } 16243268c63eSChristoph Lameter get_task_struct(task); 162539743889SChristoph Lameter 1626596d7cfaSKOSAKI Motohiro err = -EINVAL; 162739743889SChristoph Lameter 162839743889SChristoph Lameter /* 162931367466SOtto Ebeling * Check if this process has the right to modify the specified process. 163031367466SOtto Ebeling * Use the regular "ptrace_may_access()" checks. 163139743889SChristoph Lameter */ 163231367466SOtto Ebeling if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) { 1633c69e8d9cSDavid Howells rcu_read_unlock(); 163439743889SChristoph Lameter err = -EPERM; 16353268c63eSChristoph Lameter goto out_put; 163639743889SChristoph Lameter } 1637c69e8d9cSDavid Howells rcu_read_unlock(); 163839743889SChristoph Lameter 163939743889SChristoph Lameter task_nodes = cpuset_mems_allowed(task); 164039743889SChristoph Lameter /* Is the user allowed to access the target nodes? 
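 * (Editor's note: the check below requires the destination nodes to be
 * a subset of the target task's cpuset unless the caller has
 * CAP_SYS_NICE.)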
*/ 1641596d7cfaSKOSAKI Motohiro if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) { 164239743889SChristoph Lameter err = -EPERM; 16433268c63eSChristoph Lameter goto out_put; 164439743889SChristoph Lameter } 164539743889SChristoph Lameter 16460486a38bSYisheng Xie task_nodes = cpuset_mems_allowed(current); 16470486a38bSYisheng Xie nodes_and(*new, *new, task_nodes); 16480486a38bSYisheng Xie if (nodes_empty(*new)) 16493268c63eSChristoph Lameter goto out_put; 16500486a38bSYisheng Xie 165186c3a764SDavid Quigley err = security_task_movememory(task); 165286c3a764SDavid Quigley if (err) 16533268c63eSChristoph Lameter goto out_put; 165486c3a764SDavid Quigley 16553268c63eSChristoph Lameter mm = get_task_mm(task); 16563268c63eSChristoph Lameter put_task_struct(task); 1657f2a9ef88SSasha Levin 1658f2a9ef88SSasha Levin if (!mm) { 1659f2a9ef88SSasha Levin err = -EINVAL; 1660f2a9ef88SSasha Levin goto out; 1661f2a9ef88SSasha Levin } 1662f2a9ef88SSasha Levin 1663596d7cfaSKOSAKI Motohiro err = do_migrate_pages(mm, old, new, 166474c00241SChristoph Lameter capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE); 16653268c63eSChristoph Lameter 166639743889SChristoph Lameter mmput(mm); 16673268c63eSChristoph Lameter out: 1668596d7cfaSKOSAKI Motohiro NODEMASK_SCRATCH_FREE(scratch); 1669596d7cfaSKOSAKI Motohiro 167039743889SChristoph Lameter return err; 16713268c63eSChristoph Lameter 16723268c63eSChristoph Lameter out_put: 16733268c63eSChristoph Lameter put_task_struct(task); 16743268c63eSChristoph Lameter goto out; 16753268c63eSChristoph Lameter 167639743889SChristoph Lameter } 167739743889SChristoph Lameter 1678b6e9b0baSDominik Brodowski SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode, 1679b6e9b0baSDominik Brodowski const unsigned long __user *, old_nodes, 1680b6e9b0baSDominik Brodowski const unsigned long __user *, new_nodes) 1681b6e9b0baSDominik Brodowski { 1682b6e9b0baSDominik Brodowski return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes); 1683b6e9b0baSDominik Brodowski } 1684b6e9b0baSDominik Brodowski 168539743889SChristoph Lameter 16868bccd85fSChristoph Lameter /* Retrieve NUMA policy */ 1687af03c4acSDominik Brodowski static int kernel_get_mempolicy(int __user *policy, 1688af03c4acSDominik Brodowski unsigned long __user *nmask, 1689af03c4acSDominik Brodowski unsigned long maxnode, 1690af03c4acSDominik Brodowski unsigned long addr, 1691af03c4acSDominik Brodowski unsigned long flags) 16928bccd85fSChristoph Lameter { 1693dbcb0f19SAdrian Bunk int err; 16943f649ab7SKees Cook int pval; 16958bccd85fSChristoph Lameter nodemask_t nodes; 16968bccd85fSChristoph Lameter 1697050c17f2SRalph Campbell if (nmask != NULL && maxnode < nr_node_ids) 16988bccd85fSChristoph Lameter return -EINVAL; 16998bccd85fSChristoph Lameter 17004605f057SWenchao Hao addr = untagged_addr(addr); 17014605f057SWenchao Hao 17028bccd85fSChristoph Lameter err = do_get_mempolicy(&pval, &nodes, addr, flags); 17038bccd85fSChristoph Lameter 17048bccd85fSChristoph Lameter if (err) 17058bccd85fSChristoph Lameter return err; 17068bccd85fSChristoph Lameter 17078bccd85fSChristoph Lameter if (policy && put_user(pval, policy)) 17088bccd85fSChristoph Lameter return -EFAULT; 17098bccd85fSChristoph Lameter 17108bccd85fSChristoph Lameter if (nmask) 17118bccd85fSChristoph Lameter err = copy_nodes_to_user(nmask, maxnode, &nodes); 17128bccd85fSChristoph Lameter 17138bccd85fSChristoph Lameter return err; 17148bccd85fSChristoph Lameter } 17158bccd85fSChristoph Lameter 1716af03c4acSDominik Brodowski SYSCALL_DEFINE5(get_mempolicy, 
int __user *, policy, 1717af03c4acSDominik Brodowski unsigned long __user *, nmask, unsigned long, maxnode, 1718af03c4acSDominik Brodowski unsigned long, addr, unsigned long, flags) 1719af03c4acSDominik Brodowski { 1720af03c4acSDominik Brodowski return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags); 1721af03c4acSDominik Brodowski } 1722af03c4acSDominik Brodowski 172320ca87f2SLi Xinhai bool vma_migratable(struct vm_area_struct *vma) 172420ca87f2SLi Xinhai { 172520ca87f2SLi Xinhai if (vma->vm_flags & (VM_IO | VM_PFNMAP)) 172620ca87f2SLi Xinhai return false; 172720ca87f2SLi Xinhai 172820ca87f2SLi Xinhai /* 172920ca87f2SLi Xinhai * DAX device mappings require predictable access latency, so avoid 173020ca87f2SLi Xinhai * incurring periodic faults. 173120ca87f2SLi Xinhai */ 173220ca87f2SLi Xinhai if (vma_is_dax(vma)) 173320ca87f2SLi Xinhai return false; 173420ca87f2SLi Xinhai 173520ca87f2SLi Xinhai if (is_vm_hugetlb_page(vma) && 173620ca87f2SLi Xinhai !hugepage_migration_supported(hstate_vma(vma))) 173720ca87f2SLi Xinhai return false; 173820ca87f2SLi Xinhai 173920ca87f2SLi Xinhai /* 174020ca87f2SLi Xinhai * Migration allocates pages in the highest zone. If we cannot 174120ca87f2SLi Xinhai * do so then migration (at least from node to node) is not 174220ca87f2SLi Xinhai * possible. 174320ca87f2SLi Xinhai */ 174420ca87f2SLi Xinhai if (vma->vm_file && 174520ca87f2SLi Xinhai gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping)) 174620ca87f2SLi Xinhai < policy_zone) 174720ca87f2SLi Xinhai return false; 174820ca87f2SLi Xinhai return true; 174920ca87f2SLi Xinhai } 175020ca87f2SLi Xinhai 175174d2c3a0SOleg Nesterov struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, 175274d2c3a0SOleg Nesterov unsigned long addr) 17531da177e4SLinus Torvalds { 17548d90274bSOleg Nesterov struct mempolicy *pol = NULL; 17551da177e4SLinus Torvalds 17561da177e4SLinus Torvalds if (vma) { 1757480eccf9SLee Schermerhorn if (vma->vm_ops && vma->vm_ops->get_policy) { 17588d90274bSOleg Nesterov pol = vma->vm_ops->get_policy(vma, addr); 175900442ad0SMel Gorman } else if (vma->vm_policy) { 17601da177e4SLinus Torvalds pol = vma->vm_policy; 176100442ad0SMel Gorman 176200442ad0SMel Gorman /* 176300442ad0SMel Gorman * shmem_alloc_page() passes MPOL_F_SHARED policy with 176400442ad0SMel Gorman * a pseudo vma whose vma->vm_ops=NULL. Take a reference 176500442ad0SMel Gorman * count on these policies which will be dropped by 176600442ad0SMel Gorman * mpol_cond_put() later 176700442ad0SMel Gorman */ 176800442ad0SMel Gorman if (mpol_needs_cond_ref(pol)) 176900442ad0SMel Gorman mpol_get(pol); 177000442ad0SMel Gorman } 17711da177e4SLinus Torvalds } 1772f15ca78eSOleg Nesterov 177374d2c3a0SOleg Nesterov return pol; 177474d2c3a0SOleg Nesterov } 177574d2c3a0SOleg Nesterov 177674d2c3a0SOleg Nesterov /* 1777dd6eecb9SOleg Nesterov * get_vma_policy(@vma, @addr) 177874d2c3a0SOleg Nesterov * @vma: virtual memory area whose policy is sought 177974d2c3a0SOleg Nesterov * @addr: address in @vma for shared policy lookup 178074d2c3a0SOleg Nesterov * 178174d2c3a0SOleg Nesterov * Returns effective policy for a VMA at specified address. 1782dd6eecb9SOleg Nesterov * Falls back to current->mempolicy or system default policy, as necessary. 178374d2c3a0SOleg Nesterov * Shared policies [those marked as MPOL_F_SHARED] require an extra reference 178474d2c3a0SOleg Nesterov * count--added by the get_policy() vm_op, as appropriate--to protect against 178574d2c3a0SOleg Nesterov * freeing by another task. 
It is the caller's responsibility to free the 178674d2c3a0SOleg Nesterov * extra reference for shared policies. 178774d2c3a0SOleg Nesterov */ 1788ac79f78dSDavid Rientjes static struct mempolicy *get_vma_policy(struct vm_area_struct *vma, 1789dd6eecb9SOleg Nesterov unsigned long addr) 179074d2c3a0SOleg Nesterov { 179174d2c3a0SOleg Nesterov struct mempolicy *pol = __get_vma_policy(vma, addr); 179274d2c3a0SOleg Nesterov 17938d90274bSOleg Nesterov if (!pol) 1794dd6eecb9SOleg Nesterov pol = get_task_policy(current); 17958d90274bSOleg Nesterov 17961da177e4SLinus Torvalds return pol; 17971da177e4SLinus Torvalds } 17981da177e4SLinus Torvalds 17996b6482bbSOleg Nesterov bool vma_policy_mof(struct vm_area_struct *vma) 1800fc314724SMel Gorman { 18016b6482bbSOleg Nesterov struct mempolicy *pol; 1802f15ca78eSOleg Nesterov 1803fc314724SMel Gorman if (vma->vm_ops && vma->vm_ops->get_policy) { 1804fc314724SMel Gorman bool ret = false; 1805fc314724SMel Gorman 1806fc314724SMel Gorman pol = vma->vm_ops->get_policy(vma, vma->vm_start); 1807fc314724SMel Gorman if (pol && (pol->flags & MPOL_F_MOF)) 1808fc314724SMel Gorman ret = true; 1809fc314724SMel Gorman mpol_cond_put(pol); 1810fc314724SMel Gorman 1811fc314724SMel Gorman return ret; 18128d90274bSOleg Nesterov } 18138d90274bSOleg Nesterov 1814fc314724SMel Gorman pol = vma->vm_policy; 18158d90274bSOleg Nesterov if (!pol) 18166b6482bbSOleg Nesterov pol = get_task_policy(current); 1817fc314724SMel Gorman 1818fc314724SMel Gorman return pol->flags & MPOL_F_MOF; 1819fc314724SMel Gorman } 1820fc314724SMel Gorman 1821d2226ebdSFeng Tang bool apply_policy_zone(struct mempolicy *policy, enum zone_type zone) 1822d3eb1570SLai Jiangshan { 1823d3eb1570SLai Jiangshan enum zone_type dynamic_policy_zone = policy_zone; 1824d3eb1570SLai Jiangshan 1825d3eb1570SLai Jiangshan BUG_ON(dynamic_policy_zone == ZONE_MOVABLE); 1826d3eb1570SLai Jiangshan 1827d3eb1570SLai Jiangshan /* 1828269fbe72SBen Widawsky * if policy->nodes has movable memory only, 1829d3eb1570SLai Jiangshan * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only. 1830d3eb1570SLai Jiangshan * 1831269fbe72SBen Widawsky * policy->nodes is intersect with node_states[N_MEMORY]. 1832f0953a1bSIngo Molnar * so if the following test fails, it implies 1833269fbe72SBen Widawsky * policy->nodes has movable memory only. 
1834d3eb1570SLai Jiangshan */ 1835269fbe72SBen Widawsky if (!nodes_intersects(policy->nodes, node_states[N_HIGH_MEMORY])) 1836d3eb1570SLai Jiangshan dynamic_policy_zone = ZONE_MOVABLE; 1837d3eb1570SLai Jiangshan 1838d3eb1570SLai Jiangshan return zone >= dynamic_policy_zone; 1839d3eb1570SLai Jiangshan } 1840d3eb1570SLai Jiangshan 184152cd3b07SLee Schermerhorn /* 184252cd3b07SLee Schermerhorn * Return a nodemask representing a mempolicy for filtering nodes for 184352cd3b07SLee Schermerhorn * page allocation 184452cd3b07SLee Schermerhorn */ 18458ca39e68SMuchun Song nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy) 184619770b32SMel Gorman { 1847b27abaccSDave Hansen int mode = policy->mode; 1848b27abaccSDave Hansen 184919770b32SMel Gorman /* Lower zones don't get a nodemask applied for MPOL_BIND */ 1850b27abaccSDave Hansen if (unlikely(mode == MPOL_BIND) && 1851d3eb1570SLai Jiangshan apply_policy_zone(policy, gfp_zone(gfp)) && 1852269fbe72SBen Widawsky cpuset_nodemask_valid_mems_allowed(&policy->nodes)) 1853269fbe72SBen Widawsky return &policy->nodes; 185419770b32SMel Gorman 1855b27abaccSDave Hansen if (mode == MPOL_PREFERRED_MANY) 1856b27abaccSDave Hansen return &policy->nodes; 1857b27abaccSDave Hansen 185819770b32SMel Gorman return NULL; 185919770b32SMel Gorman } 186019770b32SMel Gorman 1861b27abaccSDave Hansen /* 1862b27abaccSDave Hansen * Return the preferred node id for 'prefer' mempolicy, and return 1863b27abaccSDave Hansen * the given id for all other policies. 1864b27abaccSDave Hansen * 1865b27abaccSDave Hansen * policy_node() is always coupled with policy_nodemask(), which 1866b27abaccSDave Hansen * secures the nodemask limit for 'bind' and 'prefer-many' policy. 1867b27abaccSDave Hansen */ 1868f8fd5253SWei Yang static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd) 18691da177e4SLinus Torvalds { 18707858d7bcSFeng Tang if (policy->mode == MPOL_PREFERRED) { 1871269fbe72SBen Widawsky nd = first_node(policy->nodes); 18727858d7bcSFeng Tang } else { 187319770b32SMel Gorman /* 18746d840958SMichal Hocko * __GFP_THISNODE shouldn't even be used with the bind policy 18756d840958SMichal Hocko * because we might easily break the expectation to stay on the 18766d840958SMichal Hocko * requested node and not break the policy. 
187719770b32SMel Gorman */ 18786d840958SMichal Hocko WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE)); 18791da177e4SLinus Torvalds } 18806d840958SMichal Hocko 1881c6018b4bSAneesh Kumar K.V if ((policy->mode == MPOL_BIND || 1882c6018b4bSAneesh Kumar K.V policy->mode == MPOL_PREFERRED_MANY) && 1883c6018b4bSAneesh Kumar K.V policy->home_node != NUMA_NO_NODE) 1884c6018b4bSAneesh Kumar K.V return policy->home_node; 1885c6018b4bSAneesh Kumar K.V 188604ec6264SVlastimil Babka return nd; 18871da177e4SLinus Torvalds } 18881da177e4SLinus Torvalds 18891da177e4SLinus Torvalds /* Do dynamic interleaving for a process */ 18901da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy) 18911da177e4SLinus Torvalds { 189245816682SVlastimil Babka unsigned next; 18931da177e4SLinus Torvalds struct task_struct *me = current; 18941da177e4SLinus Torvalds 1895269fbe72SBen Widawsky next = next_node_in(me->il_prev, policy->nodes); 1896f5b087b5SDavid Rientjes if (next < MAX_NUMNODES) 189745816682SVlastimil Babka me->il_prev = next; 189845816682SVlastimil Babka return next; 18991da177e4SLinus Torvalds } 19001da177e4SLinus Torvalds 1901dc85da15SChristoph Lameter /* 1902dc85da15SChristoph Lameter * Depending on the memory policy provide a node from which to allocate the 1903dc85da15SChristoph Lameter * next slab entry. 1904dc85da15SChristoph Lameter */ 19052a389610SDavid Rientjes unsigned int mempolicy_slab_node(void) 1906dc85da15SChristoph Lameter { 1907e7b691b0SAndi Kleen struct mempolicy *policy; 19082a389610SDavid Rientjes int node = numa_mem_id(); 1909e7b691b0SAndi Kleen 191038b031ddSVasily Averin if (!in_task()) 19112a389610SDavid Rientjes return node; 1912e7b691b0SAndi Kleen 1913e7b691b0SAndi Kleen policy = current->mempolicy; 19147858d7bcSFeng Tang if (!policy) 19152a389610SDavid Rientjes return node; 1916765c4507SChristoph Lameter 1917bea904d5SLee Schermerhorn switch (policy->mode) { 1918bea904d5SLee Schermerhorn case MPOL_PREFERRED: 1919269fbe72SBen Widawsky return first_node(policy->nodes); 1920bea904d5SLee Schermerhorn 1921dc85da15SChristoph Lameter case MPOL_INTERLEAVE: 1922dc85da15SChristoph Lameter return interleave_nodes(policy); 1923dc85da15SChristoph Lameter 1924b27abaccSDave Hansen case MPOL_BIND: 1925b27abaccSDave Hansen case MPOL_PREFERRED_MANY: 1926b27abaccSDave Hansen { 1927c33d6c06SMel Gorman struct zoneref *z; 1928c33d6c06SMel Gorman 1929dc85da15SChristoph Lameter /* 1930dc85da15SChristoph Lameter * Follow bind policy behavior and start allocation at the 1931dc85da15SChristoph Lameter * first node. 1932dc85da15SChristoph Lameter */ 193319770b32SMel Gorman struct zonelist *zonelist; 193419770b32SMel Gorman enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL); 1935c9634cf0SAneesh Kumar K.V zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK]; 1936c33d6c06SMel Gorman z = first_zones_zonelist(zonelist, highest_zoneidx, 1937269fbe72SBen Widawsky &policy->nodes); 1938c1093b74SPavel Tatashin return z->zone ? zone_to_nid(z->zone) : node; 1939dd1a239fSMel Gorman } 19407858d7bcSFeng Tang case MPOL_LOCAL: 19417858d7bcSFeng Tang return node; 1942dc85da15SChristoph Lameter 1943dc85da15SChristoph Lameter default: 1944bea904d5SLee Schermerhorn BUG(); 1945dc85da15SChristoph Lameter } 1946dc85da15SChristoph Lameter } 1947dc85da15SChristoph Lameter 1948fee83b3aSAndrew Morton /* 1949fee83b3aSAndrew Morton * Do static interleaving for a VMA with known offset @n. 
Returns the n'th 1950269fbe72SBen Widawsky * node in pol->nodes (starting from n=0), wrapping around if n exceeds the 1951fee83b3aSAndrew Morton * number of present nodes. 1952fee83b3aSAndrew Morton */ 195398c70baaSLaurent Dufour static unsigned offset_il_node(struct mempolicy *pol, unsigned long n) 19541da177e4SLinus Torvalds { 1955276aeee1Syanghui nodemask_t nodemask = pol->nodes; 1956276aeee1Syanghui unsigned int target, nnodes; 1957fee83b3aSAndrew Morton int i; 1958fee83b3aSAndrew Morton int nid; 1959276aeee1Syanghui /* 1960276aeee1Syanghui * The barrier will stabilize the nodemask in a register or on 1961276aeee1Syanghui * the stack so that it will stop changing under the code. 1962276aeee1Syanghui * 1963276aeee1Syanghui * Between first_node() and next_node(), pol->nodes could be changed 1964276aeee1Syanghui * by other threads. So we put pol->nodes in a local stack. 1965276aeee1Syanghui */ 1966276aeee1Syanghui barrier(); 19671da177e4SLinus Torvalds 1968276aeee1Syanghui nnodes = nodes_weight(nodemask); 1969f5b087b5SDavid Rientjes if (!nnodes) 1970f5b087b5SDavid Rientjes return numa_node_id(); 1971fee83b3aSAndrew Morton target = (unsigned int)n % nnodes; 1972276aeee1Syanghui nid = first_node(nodemask); 1973fee83b3aSAndrew Morton for (i = 0; i < target; i++) 1974276aeee1Syanghui nid = next_node(nid, nodemask); 19751da177e4SLinus Torvalds return nid; 19761da177e4SLinus Torvalds } 19771da177e4SLinus Torvalds 19785da7ca86SChristoph Lameter /* Determine a node number for interleave */ 19795da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol, 19805da7ca86SChristoph Lameter struct vm_area_struct *vma, unsigned long addr, int shift) 19815da7ca86SChristoph Lameter { 19825da7ca86SChristoph Lameter if (vma) { 19835da7ca86SChristoph Lameter unsigned long off; 19845da7ca86SChristoph Lameter 19853b98b087SNishanth Aravamudan /* 19863b98b087SNishanth Aravamudan * for small pages, there is no difference between 19873b98b087SNishanth Aravamudan * shift and PAGE_SHIFT, so the bit-shift is safe. 19883b98b087SNishanth Aravamudan * for huge pages, since vm_pgoff is in units of small 19893b98b087SNishanth Aravamudan * pages, we need to shift off the always 0 bits to get 19903b98b087SNishanth Aravamudan * a useful offset. 
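 *
 * (Editor's worked example, assuming 4KiB base pages: for 2MiB huge
 * pages shift is 21, so the code below computes
 * off = (vm_pgoff >> 9) + ((addr - vm_start) >> 21), i.e. the offset
 * into the mapping measured in huge-page-sized units.)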
19913b98b087SNishanth Aravamudan */ 19923b98b087SNishanth Aravamudan BUG_ON(shift < PAGE_SHIFT); 19933b98b087SNishanth Aravamudan off = vma->vm_pgoff >> (shift - PAGE_SHIFT); 19945da7ca86SChristoph Lameter off += (addr - vma->vm_start) >> shift; 199598c70baaSLaurent Dufour return offset_il_node(pol, off); 19965da7ca86SChristoph Lameter } else 19975da7ca86SChristoph Lameter return interleave_nodes(pol); 19985da7ca86SChristoph Lameter } 19995da7ca86SChristoph Lameter 200000ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS 2001480eccf9SLee Schermerhorn /* 200204ec6264SVlastimil Babka * huge_node(@vma, @addr, @gfp_flags, @mpol) 2003b46e14acSFabian Frederick * @vma: virtual memory area whose policy is sought 2004b46e14acSFabian Frederick * @addr: address in @vma for shared policy lookup and interleave policy 2005b46e14acSFabian Frederick * @gfp_flags: for requested zone 2006b46e14acSFabian Frederick * @mpol: pointer to mempolicy pointer for reference counted mempolicy 2007b27abaccSDave Hansen * @nodemask: pointer to nodemask pointer for 'bind' and 'prefer-many' policy 2008480eccf9SLee Schermerhorn * 200904ec6264SVlastimil Babka * Returns a nid suitable for a huge page allocation and a pointer 201052cd3b07SLee Schermerhorn * to the struct mempolicy for conditional unref after allocation. 2011b27abaccSDave Hansen * If the effective policy is 'bind' or 'prefer-many', returns a pointer 2012b27abaccSDave Hansen * to the mempolicy's @nodemask for filtering the zonelist. 2013c0ff7453SMiao Xie * 2014d26914d1SMel Gorman * Must be protected by read_mems_allowed_begin() 2015480eccf9SLee Schermerhorn */ 201604ec6264SVlastimil Babka int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags, 201704ec6264SVlastimil Babka struct mempolicy **mpol, nodemask_t **nodemask) 20185da7ca86SChristoph Lameter { 201904ec6264SVlastimil Babka int nid; 2020b27abaccSDave Hansen int mode; 20215da7ca86SChristoph Lameter 2022dd6eecb9SOleg Nesterov *mpol = get_vma_policy(vma, addr); 2023b27abaccSDave Hansen *nodemask = NULL; 2024b27abaccSDave Hansen mode = (*mpol)->mode; 20255da7ca86SChristoph Lameter 2026b27abaccSDave Hansen if (unlikely(mode == MPOL_INTERLEAVE)) { 202704ec6264SVlastimil Babka nid = interleave_nid(*mpol, vma, addr, 202804ec6264SVlastimil Babka huge_page_shift(hstate_vma(vma))); 202952cd3b07SLee Schermerhorn } else { 203004ec6264SVlastimil Babka nid = policy_node(gfp_flags, *mpol, numa_node_id()); 2031b27abaccSDave Hansen if (mode == MPOL_BIND || mode == MPOL_PREFERRED_MANY) 2032269fbe72SBen Widawsky *nodemask = &(*mpol)->nodes; 2033480eccf9SLee Schermerhorn } 203404ec6264SVlastimil Babka return nid; 20355da7ca86SChristoph Lameter } 203606808b08SLee Schermerhorn 203706808b08SLee Schermerhorn /* 203806808b08SLee Schermerhorn * init_nodemask_of_mempolicy 203906808b08SLee Schermerhorn * 204006808b08SLee Schermerhorn * If the current task's mempolicy is "default" [NULL], return 'false' 204106808b08SLee Schermerhorn * to indicate default policy. Otherwise, extract the policy nodemask 204206808b08SLee Schermerhorn * for 'bind' or 'interleave' policy into the argument nodemask, or 204306808b08SLee Schermerhorn * initialize the argument nodemask to contain the single node for 204406808b08SLee Schermerhorn * 'preferred' or 'local' policy and return 'true' to indicate presence 204506808b08SLee Schermerhorn * of non-default mempolicy. 
204606808b08SLee Schermerhorn * 204706808b08SLee Schermerhorn * We don't bother with reference counting the mempolicy [mpol_get/put] 204806808b08SLee Schermerhorn * because the current task is examining it's own mempolicy and a task's 204906808b08SLee Schermerhorn * mempolicy is only ever changed by the task itself. 205006808b08SLee Schermerhorn * 205106808b08SLee Schermerhorn * N.B., it is the caller's responsibility to free a returned nodemask. 205206808b08SLee Schermerhorn */ 205306808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask) 205406808b08SLee Schermerhorn { 205506808b08SLee Schermerhorn struct mempolicy *mempolicy; 205606808b08SLee Schermerhorn 205706808b08SLee Schermerhorn if (!(mask && current->mempolicy)) 205806808b08SLee Schermerhorn return false; 205906808b08SLee Schermerhorn 2060c0ff7453SMiao Xie task_lock(current); 206106808b08SLee Schermerhorn mempolicy = current->mempolicy; 206206808b08SLee Schermerhorn switch (mempolicy->mode) { 206306808b08SLee Schermerhorn case MPOL_PREFERRED: 2064b27abaccSDave Hansen case MPOL_PREFERRED_MANY: 206506808b08SLee Schermerhorn case MPOL_BIND: 206606808b08SLee Schermerhorn case MPOL_INTERLEAVE: 2067269fbe72SBen Widawsky *mask = mempolicy->nodes; 206806808b08SLee Schermerhorn break; 206906808b08SLee Schermerhorn 20707858d7bcSFeng Tang case MPOL_LOCAL: 2071269fbe72SBen Widawsky init_nodemask_of_node(mask, numa_node_id()); 20727858d7bcSFeng Tang break; 20737858d7bcSFeng Tang 207406808b08SLee Schermerhorn default: 207506808b08SLee Schermerhorn BUG(); 207606808b08SLee Schermerhorn } 2077c0ff7453SMiao Xie task_unlock(current); 207806808b08SLee Schermerhorn 207906808b08SLee Schermerhorn return true; 208006808b08SLee Schermerhorn } 208100ac59adSChen, Kenneth W #endif 20825da7ca86SChristoph Lameter 20836f48d0ebSDavid Rientjes /* 2084b26e517aSFeng Tang * mempolicy_in_oom_domain 20856f48d0ebSDavid Rientjes * 2086b26e517aSFeng Tang * If tsk's mempolicy is "bind", check for intersection between mask and 2087b26e517aSFeng Tang * the policy nodemask. Otherwise, return true for all other policies 2088b26e517aSFeng Tang * including "interleave", as a tsk with "interleave" policy may have 2089b26e517aSFeng Tang * memory allocated from all nodes in system. 20906f48d0ebSDavid Rientjes * 20916f48d0ebSDavid Rientjes * Takes task_lock(tsk) to prevent freeing of its mempolicy. 20926f48d0ebSDavid Rientjes */ 2093b26e517aSFeng Tang bool mempolicy_in_oom_domain(struct task_struct *tsk, 20946f48d0ebSDavid Rientjes const nodemask_t *mask) 20956f48d0ebSDavid Rientjes { 20966f48d0ebSDavid Rientjes struct mempolicy *mempolicy; 20976f48d0ebSDavid Rientjes bool ret = true; 20986f48d0ebSDavid Rientjes 20996f48d0ebSDavid Rientjes if (!mask) 21006f48d0ebSDavid Rientjes return ret; 2101b26e517aSFeng Tang 21026f48d0ebSDavid Rientjes task_lock(tsk); 21036f48d0ebSDavid Rientjes mempolicy = tsk->mempolicy; 2104b26e517aSFeng Tang if (mempolicy && mempolicy->mode == MPOL_BIND) 2105269fbe72SBen Widawsky ret = nodes_intersects(mempolicy->nodes, *mask); 21066f48d0ebSDavid Rientjes task_unlock(tsk); 2107b26e517aSFeng Tang 21086f48d0ebSDavid Rientjes return ret; 21096f48d0ebSDavid Rientjes } 21106f48d0ebSDavid Rientjes 21111da177e4SLinus Torvalds /* Allocate a page in interleaved policy. 21121da177e4SLinus Torvalds Own path because it needs to do special accounting. 
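   (Editor's note: the special accounting is the NUMA_INTERLEAVE_HIT
   statistics update done below.)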
*/ 2113662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, 2114662f3a0bSAndi Kleen unsigned nid) 21151da177e4SLinus Torvalds { 21161da177e4SLinus Torvalds struct page *page; 21171da177e4SLinus Torvalds 211884172f4bSMatthew Wilcox (Oracle) page = __alloc_pages(gfp, order, nid, NULL); 21194518085eSKemi Wang /* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */ 21204518085eSKemi Wang if (!static_branch_likely(&vm_numa_stat_key)) 21214518085eSKemi Wang return page; 2122de55c8b2SAndrey Ryabinin if (page && page_to_nid(page) == nid) { 2123de55c8b2SAndrey Ryabinin preempt_disable(); 2124f19298b9SMel Gorman __count_numa_event(page_zone(page), NUMA_INTERLEAVE_HIT); 2125de55c8b2SAndrey Ryabinin preempt_enable(); 2126de55c8b2SAndrey Ryabinin } 21271da177e4SLinus Torvalds return page; 21281da177e4SLinus Torvalds } 21291da177e4SLinus Torvalds 21304c54d949SFeng Tang static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order, 21314c54d949SFeng Tang int nid, struct mempolicy *pol) 21324c54d949SFeng Tang { 21334c54d949SFeng Tang struct page *page; 21344c54d949SFeng Tang gfp_t preferred_gfp; 21354c54d949SFeng Tang 21364c54d949SFeng Tang /* 21374c54d949SFeng Tang * This is a two pass approach. The first pass will only try the 21384c54d949SFeng Tang * preferred nodes but skip the direct reclaim and allow the 21394c54d949SFeng Tang * allocation to fail, while the second pass will try all the 21404c54d949SFeng Tang * nodes in system. 21414c54d949SFeng Tang */ 21424c54d949SFeng Tang preferred_gfp = gfp | __GFP_NOWARN; 21434c54d949SFeng Tang preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL); 21444c54d949SFeng Tang page = __alloc_pages(preferred_gfp, order, nid, &pol->nodes); 21454c54d949SFeng Tang if (!page) 2146c0455116SAneesh Kumar K.V page = __alloc_pages(gfp, order, nid, NULL); 21474c54d949SFeng Tang 21484c54d949SFeng Tang return page; 21494c54d949SFeng Tang } 21504c54d949SFeng Tang 21511da177e4SLinus Torvalds /** 2152adf88aa8SMatthew Wilcox (Oracle) * vma_alloc_folio - Allocate a folio for a VMA. 2153eb350739SMatthew Wilcox (Oracle) * @gfp: GFP flags. 2154adf88aa8SMatthew Wilcox (Oracle) * @order: Order of the folio. 21551da177e4SLinus Torvalds * @vma: Pointer to VMA or NULL if not available. 2156eb350739SMatthew Wilcox (Oracle) * @addr: Virtual address of the allocation. Must be inside @vma. 2157eb350739SMatthew Wilcox (Oracle) * @hugepage: For hugepages try only the preferred node if possible. 21581da177e4SLinus Torvalds * 2159adf88aa8SMatthew Wilcox (Oracle) * Allocate a folio for a specific address in @vma, using the appropriate 2160eb350739SMatthew Wilcox (Oracle) * NUMA policy. When @vma is not NULL the caller must hold the mmap_lock 2161eb350739SMatthew Wilcox (Oracle) * of the mm_struct of the VMA to prevent it from going away. Should be 2162adf88aa8SMatthew Wilcox (Oracle) * used for all allocations for folios that will be mapped into user space. 2163eb350739SMatthew Wilcox (Oracle) * 2164adf88aa8SMatthew Wilcox (Oracle) * Return: The folio on success or NULL if allocation fails. 
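 *
 * A minimal, hypothetical call site (editor's sketch; locking and error
 * handling are the caller's responsibility, names are illustrative):
 *
 *	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
 *	if (!folio)
 *		return VM_FAULT_OOM;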
21651da177e4SLinus Torvalds */ 2166adf88aa8SMatthew Wilcox (Oracle) struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma, 2167be1a13ebSMichal Hocko unsigned long addr, bool hugepage) 21681da177e4SLinus Torvalds { 2169cc9a6c87SMel Gorman struct mempolicy *pol; 2170be1a13ebSMichal Hocko int node = numa_node_id(); 2171adf88aa8SMatthew Wilcox (Oracle) struct folio *folio; 217204ec6264SVlastimil Babka int preferred_nid; 2173be97a41bSVlastimil Babka nodemask_t *nmask; 21741da177e4SLinus Torvalds 2175dd6eecb9SOleg Nesterov pol = get_vma_policy(vma, addr); 2176cc9a6c87SMel Gorman 2177be97a41bSVlastimil Babka if (pol->mode == MPOL_INTERLEAVE) { 2178adf88aa8SMatthew Wilcox (Oracle) struct page *page; 21791da177e4SLinus Torvalds unsigned nid; 21805da7ca86SChristoph Lameter 21818eac563cSAndi Kleen nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order); 218252cd3b07SLee Schermerhorn mpol_cond_put(pol); 2183adf88aa8SMatthew Wilcox (Oracle) gfp |= __GFP_COMP; 21840bbbc0b3SAndrea Arcangeli page = alloc_page_interleave(gfp, order, nid); 2185adf88aa8SMatthew Wilcox (Oracle) if (page && order > 1) 2186adf88aa8SMatthew Wilcox (Oracle) prep_transhuge_page(page); 2187adf88aa8SMatthew Wilcox (Oracle) folio = (struct folio *)page; 2188be97a41bSVlastimil Babka goto out; 21891da177e4SLinus Torvalds } 21901da177e4SLinus Torvalds 21914c54d949SFeng Tang if (pol->mode == MPOL_PREFERRED_MANY) { 2192adf88aa8SMatthew Wilcox (Oracle) struct page *page; 2193adf88aa8SMatthew Wilcox (Oracle) 2194c0455116SAneesh Kumar K.V node = policy_node(gfp, pol, node); 2195adf88aa8SMatthew Wilcox (Oracle) gfp |= __GFP_COMP; 21964c54d949SFeng Tang page = alloc_pages_preferred_many(gfp, order, node, pol); 21974c54d949SFeng Tang mpol_cond_put(pol); 2198adf88aa8SMatthew Wilcox (Oracle) if (page && order > 1) 2199adf88aa8SMatthew Wilcox (Oracle) prep_transhuge_page(page); 2200adf88aa8SMatthew Wilcox (Oracle) folio = (struct folio *)page; 22014c54d949SFeng Tang goto out; 22024c54d949SFeng Tang } 22034c54d949SFeng Tang 220419deb769SDavid Rientjes if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) { 220519deb769SDavid Rientjes int hpage_node = node; 220619deb769SDavid Rientjes 220719deb769SDavid Rientjes /* 220819deb769SDavid Rientjes * For hugepage allocation and non-interleave policy which 220919deb769SDavid Rientjes * allows the current node (or other explicitly preferred 221019deb769SDavid Rientjes * node) we only try to allocate from the current/preferred 221119deb769SDavid Rientjes * node and don't fall back to other nodes, as the cost of 221219deb769SDavid Rientjes * remote accesses would likely offset THP benefits. 221319deb769SDavid Rientjes * 2214b27abaccSDave Hansen * If the policy is interleave or does not allow the current 221519deb769SDavid Rientjes * node in its nodemask, we allocate the standard way. 221619deb769SDavid Rientjes */ 22177858d7bcSFeng Tang if (pol->mode == MPOL_PREFERRED) 2218269fbe72SBen Widawsky hpage_node = first_node(pol->nodes); 221919deb769SDavid Rientjes 222019deb769SDavid Rientjes nmask = policy_nodemask(gfp, pol); 222119deb769SDavid Rientjes if (!nmask || node_isset(hpage_node, *nmask)) { 222219deb769SDavid Rientjes mpol_cond_put(pol); 2223cc638f32SVlastimil Babka /* 2224cc638f32SVlastimil Babka * First, try to allocate THP only on local node, but 2225cc638f32SVlastimil Babka * don't reclaim unnecessarily, just compact. 
2226cc638f32SVlastimil Babka */ 2227adf88aa8SMatthew Wilcox (Oracle) folio = __folio_alloc_node(gfp | __GFP_THISNODE | 2228adf88aa8SMatthew Wilcox (Oracle) __GFP_NORETRY, order, hpage_node); 222976e654ccSDavid Rientjes 223076e654ccSDavid Rientjes /* 223176e654ccSDavid Rientjes * If hugepage allocations are configured to always 223276e654ccSDavid Rientjes * synchronous compact or the vma has been madvised 223376e654ccSDavid Rientjes * to prefer hugepage backing, retry allowing remote 2234cc638f32SVlastimil Babka * memory with both reclaim and compact as well. 223576e654ccSDavid Rientjes */ 2236adf88aa8SMatthew Wilcox (Oracle) if (!folio && (gfp & __GFP_DIRECT_RECLAIM)) 2237adf88aa8SMatthew Wilcox (Oracle) folio = __folio_alloc(gfp, order, hpage_node, 2238adf88aa8SMatthew Wilcox (Oracle) nmask); 223976e654ccSDavid Rientjes 224019deb769SDavid Rientjes goto out; 224119deb769SDavid Rientjes } 224219deb769SDavid Rientjes } 224319deb769SDavid Rientjes 2244077fcf11SAneesh Kumar K.V nmask = policy_nodemask(gfp, pol); 224504ec6264SVlastimil Babka preferred_nid = policy_node(gfp, pol, node); 2246adf88aa8SMatthew Wilcox (Oracle) folio = __folio_alloc(gfp, order, preferred_nid, nmask); 2247d51e9894SVlastimil Babka mpol_cond_put(pol); 2248be97a41bSVlastimil Babka out: 2249f584b680SMatthew Wilcox (Oracle) return folio; 2250f584b680SMatthew Wilcox (Oracle) } 2251adf88aa8SMatthew Wilcox (Oracle) EXPORT_SYMBOL(vma_alloc_folio); 2252f584b680SMatthew Wilcox (Oracle) 22531da177e4SLinus Torvalds /** 2254d7f946d0SMatthew Wilcox (Oracle) * alloc_pages - Allocate pages. 22556421ec76SMatthew Wilcox (Oracle) * @gfp: GFP flags. 22566421ec76SMatthew Wilcox (Oracle) * @order: Power of two of number of pages to allocate. 22571da177e4SLinus Torvalds * 22586421ec76SMatthew Wilcox (Oracle) * Allocate 1 << @order contiguous pages. The physical address of the 22596421ec76SMatthew Wilcox (Oracle) * first page is naturally aligned (eg an order-3 allocation will be aligned 22606421ec76SMatthew Wilcox (Oracle) * to a multiple of 8 * PAGE_SIZE bytes). The NUMA policy of the current 22616421ec76SMatthew Wilcox (Oracle) * process is honoured when in process context. 22621da177e4SLinus Torvalds * 22636421ec76SMatthew Wilcox (Oracle) * Context: Can be called from any context, providing the appropriate GFP 22646421ec76SMatthew Wilcox (Oracle) * flags are used. 22656421ec76SMatthew Wilcox (Oracle) * Return: The page on success or NULL if allocation fails. 
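 *
 * For instance (editor's illustration):
 *
 *	page = alloc_pages(GFP_KERNEL, 2);	// four contiguous pages
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	__free_pages(page, 2);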
22661da177e4SLinus Torvalds */ 2267d7f946d0SMatthew Wilcox (Oracle) struct page *alloc_pages(gfp_t gfp, unsigned order) 22681da177e4SLinus Torvalds { 22698d90274bSOleg Nesterov struct mempolicy *pol = &default_policy; 2270c0ff7453SMiao Xie struct page *page; 22711da177e4SLinus Torvalds 22728d90274bSOleg Nesterov if (!in_interrupt() && !(gfp & __GFP_THISNODE)) 22738d90274bSOleg Nesterov pol = get_task_policy(current); 227452cd3b07SLee Schermerhorn 227552cd3b07SLee Schermerhorn /* 227652cd3b07SLee Schermerhorn * No reference counting needed for current->mempolicy 227752cd3b07SLee Schermerhorn * nor system default_policy 227852cd3b07SLee Schermerhorn */ 227945c4745aSLee Schermerhorn if (pol->mode == MPOL_INTERLEAVE) 2280c0ff7453SMiao Xie page = alloc_page_interleave(gfp, order, interleave_nodes(pol)); 22814c54d949SFeng Tang else if (pol->mode == MPOL_PREFERRED_MANY) 22824c54d949SFeng Tang page = alloc_pages_preferred_many(gfp, order, 2283c0455116SAneesh Kumar K.V policy_node(gfp, pol, numa_node_id()), pol); 2284c0ff7453SMiao Xie else 228584172f4bSMatthew Wilcox (Oracle) page = __alloc_pages(gfp, order, 228604ec6264SVlastimil Babka policy_node(gfp, pol, numa_node_id()), 22875c4b4be3SAndi Kleen policy_nodemask(gfp, pol)); 2288cc9a6c87SMel Gorman 2289c0ff7453SMiao Xie return page; 22901da177e4SLinus Torvalds } 2291d7f946d0SMatthew Wilcox (Oracle) EXPORT_SYMBOL(alloc_pages); 22921da177e4SLinus Torvalds 2293cc09cb13SMatthew Wilcox (Oracle) struct folio *folio_alloc(gfp_t gfp, unsigned order) 2294cc09cb13SMatthew Wilcox (Oracle) { 2295cc09cb13SMatthew Wilcox (Oracle) struct page *page = alloc_pages(gfp | __GFP_COMP, order); 2296cc09cb13SMatthew Wilcox (Oracle) 2297cc09cb13SMatthew Wilcox (Oracle) if (page && order > 1) 2298cc09cb13SMatthew Wilcox (Oracle) prep_transhuge_page(page); 2299cc09cb13SMatthew Wilcox (Oracle) return (struct folio *)page; 2300cc09cb13SMatthew Wilcox (Oracle) } 2301cc09cb13SMatthew Wilcox (Oracle) EXPORT_SYMBOL(folio_alloc); 2302cc09cb13SMatthew Wilcox (Oracle) 2303c00b6b96SChen Wandun static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp, 2304c00b6b96SChen Wandun struct mempolicy *pol, unsigned long nr_pages, 2305c00b6b96SChen Wandun struct page **page_array) 2306c00b6b96SChen Wandun { 2307c00b6b96SChen Wandun int nodes; 2308c00b6b96SChen Wandun unsigned long nr_pages_per_node; 2309c00b6b96SChen Wandun int delta; 2310c00b6b96SChen Wandun int i; 2311c00b6b96SChen Wandun unsigned long nr_allocated; 2312c00b6b96SChen Wandun unsigned long total_allocated = 0; 2313c00b6b96SChen Wandun 2314c00b6b96SChen Wandun nodes = nodes_weight(pol->nodes); 2315c00b6b96SChen Wandun nr_pages_per_node = nr_pages / nodes; 2316c00b6b96SChen Wandun delta = nr_pages - nodes * nr_pages_per_node; 2317c00b6b96SChen Wandun 2318c00b6b96SChen Wandun for (i = 0; i < nodes; i++) { 2319c00b6b96SChen Wandun if (delta) { 2320c00b6b96SChen Wandun nr_allocated = __alloc_pages_bulk(gfp, 2321c00b6b96SChen Wandun interleave_nodes(pol), NULL, 2322c00b6b96SChen Wandun nr_pages_per_node + 1, NULL, 2323c00b6b96SChen Wandun page_array); 2324c00b6b96SChen Wandun delta--; 2325c00b6b96SChen Wandun } else { 2326c00b6b96SChen Wandun nr_allocated = __alloc_pages_bulk(gfp, 2327c00b6b96SChen Wandun interleave_nodes(pol), NULL, 2328c00b6b96SChen Wandun nr_pages_per_node, NULL, page_array); 2329c00b6b96SChen Wandun } 2330c00b6b96SChen Wandun 2331c00b6b96SChen Wandun page_array += nr_allocated; 2332c00b6b96SChen Wandun total_allocated += nr_allocated; 2333c00b6b96SChen Wandun } 2334c00b6b96SChen Wandun 2335c00b6b96SChen 
Wandun return total_allocated; 2336c00b6b96SChen Wandun } 2337c00b6b96SChen Wandun 2338c00b6b96SChen Wandun static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid, 2339c00b6b96SChen Wandun struct mempolicy *pol, unsigned long nr_pages, 2340c00b6b96SChen Wandun struct page **page_array) 2341c00b6b96SChen Wandun { 2342c00b6b96SChen Wandun gfp_t preferred_gfp; 2343c00b6b96SChen Wandun unsigned long nr_allocated = 0; 2344c00b6b96SChen Wandun 2345c00b6b96SChen Wandun preferred_gfp = gfp | __GFP_NOWARN; 2346c00b6b96SChen Wandun preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL); 2347c00b6b96SChen Wandun 2348c00b6b96SChen Wandun nr_allocated = __alloc_pages_bulk(preferred_gfp, nid, &pol->nodes, 2349c00b6b96SChen Wandun nr_pages, NULL, page_array); 2350c00b6b96SChen Wandun 2351c00b6b96SChen Wandun if (nr_allocated < nr_pages) 2352c00b6b96SChen Wandun nr_allocated += __alloc_pages_bulk(gfp, numa_node_id(), NULL, 2353c00b6b96SChen Wandun nr_pages - nr_allocated, NULL, 2354c00b6b96SChen Wandun page_array + nr_allocated); 2355c00b6b96SChen Wandun return nr_allocated; 2356c00b6b96SChen Wandun } 2357c00b6b96SChen Wandun 2358c00b6b96SChen Wandun /* alloc pages bulk and mempolicy should be considered at the 2359c00b6b96SChen Wandun * same time in some situation such as vmalloc. 2360c00b6b96SChen Wandun * 2361c00b6b96SChen Wandun * It can accelerate memory allocation especially interleaving 2362c00b6b96SChen Wandun * allocate memory. 2363c00b6b96SChen Wandun */ 2364c00b6b96SChen Wandun unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp, 2365c00b6b96SChen Wandun unsigned long nr_pages, struct page **page_array) 2366c00b6b96SChen Wandun { 2367c00b6b96SChen Wandun struct mempolicy *pol = &default_policy; 2368c00b6b96SChen Wandun 2369c00b6b96SChen Wandun if (!in_interrupt() && !(gfp & __GFP_THISNODE)) 2370c00b6b96SChen Wandun pol = get_task_policy(current); 2371c00b6b96SChen Wandun 2372c00b6b96SChen Wandun if (pol->mode == MPOL_INTERLEAVE) 2373c00b6b96SChen Wandun return alloc_pages_bulk_array_interleave(gfp, pol, 2374c00b6b96SChen Wandun nr_pages, page_array); 2375c00b6b96SChen Wandun 2376c00b6b96SChen Wandun if (pol->mode == MPOL_PREFERRED_MANY) 2377c00b6b96SChen Wandun return alloc_pages_bulk_array_preferred_many(gfp, 2378c00b6b96SChen Wandun numa_node_id(), pol, nr_pages, page_array); 2379c00b6b96SChen Wandun 2380c00b6b96SChen Wandun return __alloc_pages_bulk(gfp, policy_node(gfp, pol, numa_node_id()), 2381c00b6b96SChen Wandun policy_nodemask(gfp, pol), nr_pages, NULL, 2382c00b6b96SChen Wandun page_array); 2383c00b6b96SChen Wandun } 2384c00b6b96SChen Wandun 2385ef0855d3SOleg Nesterov int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst) 2386ef0855d3SOleg Nesterov { 2387ef0855d3SOleg Nesterov struct mempolicy *pol = mpol_dup(vma_policy(src)); 2388ef0855d3SOleg Nesterov 2389ef0855d3SOleg Nesterov if (IS_ERR(pol)) 2390ef0855d3SOleg Nesterov return PTR_ERR(pol); 2391ef0855d3SOleg Nesterov dst->vm_policy = pol; 2392ef0855d3SOleg Nesterov return 0; 2393ef0855d3SOleg Nesterov } 2394ef0855d3SOleg Nesterov 23954225399aSPaul Jackson /* 2396846a16bfSLee Schermerhorn * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it 23974225399aSPaul Jackson * rebinds the mempolicy its copying by calling mpol_rebind_policy() 23984225399aSPaul Jackson * with the mems_allowed returned by cpuset_mems_allowed(). This 23994225399aSPaul Jackson * keeps mempolicies cpuset relative after its cpuset moves. 
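 *
 * Illustrative use of the duplication path (a sketch drawn from
 * vma_dup_policy() above, not a quote of any other caller):
 *
 *	struct mempolicy *copy = mpol_dup(vma_policy(src));
 *	if (IS_ERR(copy))
 *		return PTR_ERR(copy);
 *	dst->vm_policy = copy;
 *
 * It is the mpol_dup() call that may observe the rebind in progress
 * and contextualize the copy as described here.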
See 24004225399aSPaul Jackson * further kernel/cpuset.c update_nodemask(). 2401708c1bbcSMiao Xie * 2402708c1bbcSMiao Xie * current's mempolicy may be rebinded by the other task(the task that changes 2403708c1bbcSMiao Xie * cpuset's mems), so we needn't do rebind work for current task. 24044225399aSPaul Jackson */ 24054225399aSPaul Jackson 2406846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */ 2407846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old) 24081da177e4SLinus Torvalds { 24091da177e4SLinus Torvalds struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL); 24101da177e4SLinus Torvalds 24111da177e4SLinus Torvalds if (!new) 24121da177e4SLinus Torvalds return ERR_PTR(-ENOMEM); 2413708c1bbcSMiao Xie 2414708c1bbcSMiao Xie /* task's mempolicy is protected by alloc_lock */ 2415708c1bbcSMiao Xie if (old == current->mempolicy) { 2416708c1bbcSMiao Xie task_lock(current); 2417708c1bbcSMiao Xie *new = *old; 2418708c1bbcSMiao Xie task_unlock(current); 2419708c1bbcSMiao Xie } else 2420708c1bbcSMiao Xie *new = *old; 2421708c1bbcSMiao Xie 24224225399aSPaul Jackson if (current_cpuset_is_being_rebound()) { 24234225399aSPaul Jackson nodemask_t mems = cpuset_mems_allowed(current); 2424213980c0SVlastimil Babka mpol_rebind_policy(new, &mems); 24254225399aSPaul Jackson } 24261da177e4SLinus Torvalds atomic_set(&new->refcnt, 1); 24271da177e4SLinus Torvalds return new; 24281da177e4SLinus Torvalds } 24291da177e4SLinus Torvalds 24301da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */ 2431fcfb4dccSKOSAKI Motohiro bool __mpol_equal(struct mempolicy *a, struct mempolicy *b) 24321da177e4SLinus Torvalds { 24331da177e4SLinus Torvalds if (!a || !b) 2434fcfb4dccSKOSAKI Motohiro return false; 243545c4745aSLee Schermerhorn if (a->mode != b->mode) 2436fcfb4dccSKOSAKI Motohiro return false; 243719800502SBob Liu if (a->flags != b->flags) 2438fcfb4dccSKOSAKI Motohiro return false; 2439c6018b4bSAneesh Kumar K.V if (a->home_node != b->home_node) 2440c6018b4bSAneesh Kumar K.V return false; 244119800502SBob Liu if (mpol_store_user_nodemask(a)) 244219800502SBob Liu if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask)) 2443fcfb4dccSKOSAKI Motohiro return false; 244419800502SBob Liu 244545c4745aSLee Schermerhorn switch (a->mode) { 244619770b32SMel Gorman case MPOL_BIND: 24471da177e4SLinus Torvalds case MPOL_INTERLEAVE: 24481da177e4SLinus Torvalds case MPOL_PREFERRED: 2449b27abaccSDave Hansen case MPOL_PREFERRED_MANY: 2450269fbe72SBen Widawsky return !!nodes_equal(a->nodes, b->nodes); 24517858d7bcSFeng Tang case MPOL_LOCAL: 24527858d7bcSFeng Tang return true; 24531da177e4SLinus Torvalds default: 24541da177e4SLinus Torvalds BUG(); 2455fcfb4dccSKOSAKI Motohiro return false; 24561da177e4SLinus Torvalds } 24571da177e4SLinus Torvalds } 24581da177e4SLinus Torvalds 24591da177e4SLinus Torvalds /* 24601da177e4SLinus Torvalds * Shared memory backing store policy support. 24611da177e4SLinus Torvalds * 24621da177e4SLinus Torvalds * Remember policies even when nobody has shared memory mapped. 24631da177e4SLinus Torvalds * The policies are kept in Red-Black tree linked from the inode. 24644a8c7bb5SNathan Zimmer * They are protected by the sp->lock rwlock, which should be held 24651da177e4SLinus Torvalds * for any accesses to the tree. 24661da177e4SLinus Torvalds */ 24671da177e4SLinus Torvalds 24684a8c7bb5SNathan Zimmer /* 24694a8c7bb5SNathan Zimmer * lookup first element intersecting start-end. 
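 * For example (illustrative): with non-overlapping ranges [0,4) and
 * [4,8) in the tree, sp_lookup(sp, 2, 6) intersects both but returns
 * the [0,4) node, i.e. the intersecting range with the lowest start.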
Caller holds sp->lock for 24704a8c7bb5SNathan Zimmer * reading or for writing 24714a8c7bb5SNathan Zimmer */ 24721da177e4SLinus Torvalds static struct sp_node * 24731da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end) 24741da177e4SLinus Torvalds { 24751da177e4SLinus Torvalds struct rb_node *n = sp->root.rb_node; 24761da177e4SLinus Torvalds 24771da177e4SLinus Torvalds while (n) { 24781da177e4SLinus Torvalds struct sp_node *p = rb_entry(n, struct sp_node, nd); 24791da177e4SLinus Torvalds 24801da177e4SLinus Torvalds if (start >= p->end) 24811da177e4SLinus Torvalds n = n->rb_right; 24821da177e4SLinus Torvalds else if (end <= p->start) 24831da177e4SLinus Torvalds n = n->rb_left; 24841da177e4SLinus Torvalds else 24851da177e4SLinus Torvalds break; 24861da177e4SLinus Torvalds } 24871da177e4SLinus Torvalds if (!n) 24881da177e4SLinus Torvalds return NULL; 24891da177e4SLinus Torvalds for (;;) { 24901da177e4SLinus Torvalds struct sp_node *w = NULL; 24911da177e4SLinus Torvalds struct rb_node *prev = rb_prev(n); 24921da177e4SLinus Torvalds if (!prev) 24931da177e4SLinus Torvalds break; 24941da177e4SLinus Torvalds w = rb_entry(prev, struct sp_node, nd); 24951da177e4SLinus Torvalds if (w->end <= start) 24961da177e4SLinus Torvalds break; 24971da177e4SLinus Torvalds n = prev; 24981da177e4SLinus Torvalds } 24991da177e4SLinus Torvalds return rb_entry(n, struct sp_node, nd); 25001da177e4SLinus Torvalds } 25011da177e4SLinus Torvalds 25024a8c7bb5SNathan Zimmer /* 25034a8c7bb5SNathan Zimmer * Insert a new shared policy into the list. Caller holds sp->lock for 25044a8c7bb5SNathan Zimmer * writing. 25054a8c7bb5SNathan Zimmer */ 25061da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new) 25071da177e4SLinus Torvalds { 25081da177e4SLinus Torvalds struct rb_node **p = &sp->root.rb_node; 25091da177e4SLinus Torvalds struct rb_node *parent = NULL; 25101da177e4SLinus Torvalds struct sp_node *nd; 25111da177e4SLinus Torvalds 25121da177e4SLinus Torvalds while (*p) { 25131da177e4SLinus Torvalds parent = *p; 25141da177e4SLinus Torvalds nd = rb_entry(parent, struct sp_node, nd); 25151da177e4SLinus Torvalds if (new->start < nd->start) 25161da177e4SLinus Torvalds p = &(*p)->rb_left; 25171da177e4SLinus Torvalds else if (new->end > nd->end) 25181da177e4SLinus Torvalds p = &(*p)->rb_right; 25191da177e4SLinus Torvalds else 25201da177e4SLinus Torvalds BUG(); 25211da177e4SLinus Torvalds } 25221da177e4SLinus Torvalds rb_link_node(&new->nd, parent, p); 25231da177e4SLinus Torvalds rb_insert_color(&new->nd, &sp->root); 2524140d5a49SPaul Mundt pr_debug("inserting %lx-%lx: %d\n", new->start, new->end, 252545c4745aSLee Schermerhorn new->policy ? 
new->policy->mode : 0); 25261da177e4SLinus Torvalds } 25271da177e4SLinus Torvalds 25281da177e4SLinus Torvalds /* Find shared policy intersecting idx */ 25291da177e4SLinus Torvalds struct mempolicy * 25301da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx) 25311da177e4SLinus Torvalds { 25321da177e4SLinus Torvalds struct mempolicy *pol = NULL; 25331da177e4SLinus Torvalds struct sp_node *sn; 25341da177e4SLinus Torvalds 25351da177e4SLinus Torvalds if (!sp->root.rb_node) 25361da177e4SLinus Torvalds return NULL; 25374a8c7bb5SNathan Zimmer read_lock(&sp->lock); 25381da177e4SLinus Torvalds sn = sp_lookup(sp, idx, idx+1); 25391da177e4SLinus Torvalds if (sn) { 25401da177e4SLinus Torvalds mpol_get(sn->policy); 25411da177e4SLinus Torvalds pol = sn->policy; 25421da177e4SLinus Torvalds } 25434a8c7bb5SNathan Zimmer read_unlock(&sp->lock); 25441da177e4SLinus Torvalds return pol; 25451da177e4SLinus Torvalds } 25461da177e4SLinus Torvalds 254763f74ca2SKOSAKI Motohiro static void sp_free(struct sp_node *n) 254863f74ca2SKOSAKI Motohiro { 254963f74ca2SKOSAKI Motohiro mpol_put(n->policy); 255063f74ca2SKOSAKI Motohiro kmem_cache_free(sn_cache, n); 255163f74ca2SKOSAKI Motohiro } 255263f74ca2SKOSAKI Motohiro 2553771fb4d8SLee Schermerhorn /** 2554771fb4d8SLee Schermerhorn * mpol_misplaced - check whether current page node is valid in policy 2555771fb4d8SLee Schermerhorn * 2556b46e14acSFabian Frederick * @page: page to be checked 2557b46e14acSFabian Frederick * @vma: vm area where page mapped 2558b46e14acSFabian Frederick * @addr: virtual address where page mapped 2559771fb4d8SLee Schermerhorn * 2560771fb4d8SLee Schermerhorn * Lookup current policy node id for vma,addr and "compare to" page's 25615f076944SMatthew Wilcox (Oracle) * node id. Policy determination "mimics" alloc_page_vma(). 2562771fb4d8SLee Schermerhorn * Called from fault path where we know the vma and faulting address. 25635f076944SMatthew Wilcox (Oracle) * 2564062db293SBaolin Wang * Return: NUMA_NO_NODE if the page is in a node that is valid for this 2565062db293SBaolin Wang * policy, or a suitable node ID to allocate a replacement page from. 
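 *
 * For example (an illustrative sketch, assuming the policy has
 * MPOL_F_MOF set): under an interleave policy over nodes 0-1 whose
 * faulting offset maps to node 0, a page currently on node 3 yields a
 * return value of 0, while a page already on node 0 yields
 * NUMA_NO_NODE.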
2566771fb4d8SLee Schermerhorn */ 2567771fb4d8SLee Schermerhorn int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr) 2568771fb4d8SLee Schermerhorn { 2569771fb4d8SLee Schermerhorn struct mempolicy *pol; 2570c33d6c06SMel Gorman struct zoneref *z; 2571771fb4d8SLee Schermerhorn int curnid = page_to_nid(page); 2572771fb4d8SLee Schermerhorn unsigned long pgoff; 257390572890SPeter Zijlstra int thiscpu = raw_smp_processor_id(); 257490572890SPeter Zijlstra int thisnid = cpu_to_node(thiscpu); 257598fa15f3SAnshuman Khandual int polnid = NUMA_NO_NODE; 2576062db293SBaolin Wang int ret = NUMA_NO_NODE; 2577771fb4d8SLee Schermerhorn 2578dd6eecb9SOleg Nesterov pol = get_vma_policy(vma, addr); 2579771fb4d8SLee Schermerhorn if (!(pol->flags & MPOL_F_MOF)) 2580771fb4d8SLee Schermerhorn goto out; 2581771fb4d8SLee Schermerhorn 2582771fb4d8SLee Schermerhorn switch (pol->mode) { 2583771fb4d8SLee Schermerhorn case MPOL_INTERLEAVE: 2584771fb4d8SLee Schermerhorn pgoff = vma->vm_pgoff; 2585771fb4d8SLee Schermerhorn pgoff += (addr - vma->vm_start) >> PAGE_SHIFT; 258698c70baaSLaurent Dufour polnid = offset_il_node(pol, pgoff); 2587771fb4d8SLee Schermerhorn break; 2588771fb4d8SLee Schermerhorn 2589771fb4d8SLee Schermerhorn case MPOL_PREFERRED: 2590b27abaccSDave Hansen if (node_isset(curnid, pol->nodes)) 2591b27abaccSDave Hansen goto out; 2592269fbe72SBen Widawsky polnid = first_node(pol->nodes); 2593771fb4d8SLee Schermerhorn break; 2594771fb4d8SLee Schermerhorn 25957858d7bcSFeng Tang case MPOL_LOCAL: 25967858d7bcSFeng Tang polnid = numa_node_id(); 25977858d7bcSFeng Tang break; 25987858d7bcSFeng Tang 2599771fb4d8SLee Schermerhorn case MPOL_BIND: 2600bda420b9SHuang Ying /* Optimize placement among multiple nodes via NUMA balancing */ 2601bda420b9SHuang Ying if (pol->flags & MPOL_F_MORON) { 2602269fbe72SBen Widawsky if (node_isset(thisnid, pol->nodes)) 2603bda420b9SHuang Ying break; 2604bda420b9SHuang Ying goto out; 2605bda420b9SHuang Ying } 2606b27abaccSDave Hansen fallthrough; 2607c33d6c06SMel Gorman 2608b27abaccSDave Hansen case MPOL_PREFERRED_MANY: 2609771fb4d8SLee Schermerhorn /* 2610771fb4d8SLee Schermerhorn * use current page if in policy nodemask, 2611771fb4d8SLee Schermerhorn * else select nearest allowed node, if any. 2612771fb4d8SLee Schermerhorn * If no allowed nodes, use current [!misplaced]. 
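 * (Illustrative, for a policy without MPOL_F_MORON: with
 * pol->nodes = 0-1 and the page currently on node 3, the page is
 * treated as misplaced and polnid is taken from the first zone of the
 * local zonelist that lies within nodes 0-1.)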
2613771fb4d8SLee Schermerhorn */ 2614269fbe72SBen Widawsky if (node_isset(curnid, pol->nodes)) 2615771fb4d8SLee Schermerhorn goto out; 2616c33d6c06SMel Gorman z = first_zones_zonelist( 2617771fb4d8SLee Schermerhorn node_zonelist(numa_node_id(), GFP_HIGHUSER), 2618771fb4d8SLee Schermerhorn gfp_zone(GFP_HIGHUSER), 2619269fbe72SBen Widawsky &pol->nodes); 2620c1093b74SPavel Tatashin polnid = zone_to_nid(z->zone); 2621771fb4d8SLee Schermerhorn break; 2622771fb4d8SLee Schermerhorn 2623771fb4d8SLee Schermerhorn default: 2624771fb4d8SLee Schermerhorn BUG(); 2625771fb4d8SLee Schermerhorn } 26265606e387SMel Gorman 26275606e387SMel Gorman /* Migrate the page towards the node whose CPU is referencing it */ 2628e42c8ff2SMel Gorman if (pol->flags & MPOL_F_MORON) { 262990572890SPeter Zijlstra polnid = thisnid; 26305606e387SMel Gorman 263110f39042SRik van Riel if (!should_numa_migrate_memory(current, page, curnid, thiscpu)) 2632de1c9ce6SRik van Riel goto out; 2633de1c9ce6SRik van Riel } 2634e42c8ff2SMel Gorman 2635771fb4d8SLee Schermerhorn if (curnid != polnid) 2636771fb4d8SLee Schermerhorn ret = polnid; 2637771fb4d8SLee Schermerhorn out: 2638771fb4d8SLee Schermerhorn mpol_cond_put(pol); 2639771fb4d8SLee Schermerhorn 2640771fb4d8SLee Schermerhorn return ret; 2641771fb4d8SLee Schermerhorn } 2642771fb4d8SLee Schermerhorn 2643c11600e4SDavid Rientjes /* 2644c11600e4SDavid Rientjes * Drop the (possibly final) reference to task->mempolicy. It needs to be 2645c11600e4SDavid Rientjes * dropped after task->mempolicy is set to NULL so that any allocation done as 2646c11600e4SDavid Rientjes * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed 2647c11600e4SDavid Rientjes * policy. 2648c11600e4SDavid Rientjes */ 2649c11600e4SDavid Rientjes void mpol_put_task_policy(struct task_struct *task) 2650c11600e4SDavid Rientjes { 2651c11600e4SDavid Rientjes struct mempolicy *pol; 2652c11600e4SDavid Rientjes 2653c11600e4SDavid Rientjes task_lock(task); 2654c11600e4SDavid Rientjes pol = task->mempolicy; 2655c11600e4SDavid Rientjes task->mempolicy = NULL; 2656c11600e4SDavid Rientjes task_unlock(task); 2657c11600e4SDavid Rientjes mpol_put(pol); 2658c11600e4SDavid Rientjes } 2659c11600e4SDavid Rientjes 26601da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n) 26611da177e4SLinus Torvalds { 2662140d5a49SPaul Mundt pr_debug("deleting %lx-l%lx\n", n->start, n->end); 26631da177e4SLinus Torvalds rb_erase(&n->nd, &sp->root); 266463f74ca2SKOSAKI Motohiro sp_free(n); 26651da177e4SLinus Torvalds } 26661da177e4SLinus Torvalds 266742288fe3SMel Gorman static void sp_node_init(struct sp_node *node, unsigned long start, 266842288fe3SMel Gorman unsigned long end, struct mempolicy *pol) 266942288fe3SMel Gorman { 267042288fe3SMel Gorman node->start = start; 267142288fe3SMel Gorman node->end = end; 267242288fe3SMel Gorman node->policy = pol; 267342288fe3SMel Gorman } 267442288fe3SMel Gorman 2675dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end, 2676dbcb0f19SAdrian Bunk struct mempolicy *pol) 26771da177e4SLinus Torvalds { 2678869833f2SKOSAKI Motohiro struct sp_node *n; 2679869833f2SKOSAKI Motohiro struct mempolicy *newpol; 26801da177e4SLinus Torvalds 2681869833f2SKOSAKI Motohiro n = kmem_cache_alloc(sn_cache, GFP_KERNEL); 26821da177e4SLinus Torvalds if (!n) 26831da177e4SLinus Torvalds return NULL; 2684869833f2SKOSAKI Motohiro 2685869833f2SKOSAKI Motohiro newpol = mpol_dup(pol); 2686869833f2SKOSAKI Motohiro if (IS_ERR(newpol)) { 2687869833f2SKOSAKI 
Motohiro kmem_cache_free(sn_cache, n); 2688869833f2SKOSAKI Motohiro return NULL; 2689869833f2SKOSAKI Motohiro } 2690869833f2SKOSAKI Motohiro newpol->flags |= MPOL_F_SHARED; 269142288fe3SMel Gorman sp_node_init(n, start, end, newpol); 2692869833f2SKOSAKI Motohiro 26931da177e4SLinus Torvalds return n; 26941da177e4SLinus Torvalds } 26951da177e4SLinus Torvalds 26961da177e4SLinus Torvalds /* Replace a policy range. */ 26971da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start, 26981da177e4SLinus Torvalds unsigned long end, struct sp_node *new) 26991da177e4SLinus Torvalds { 2700b22d127aSMel Gorman struct sp_node *n; 270142288fe3SMel Gorman struct sp_node *n_new = NULL; 270242288fe3SMel Gorman struct mempolicy *mpol_new = NULL; 2703b22d127aSMel Gorman int ret = 0; 27041da177e4SLinus Torvalds 270542288fe3SMel Gorman restart: 27064a8c7bb5SNathan Zimmer write_lock(&sp->lock); 27071da177e4SLinus Torvalds n = sp_lookup(sp, start, end); 27081da177e4SLinus Torvalds /* Take care of old policies in the same range. */ 27091da177e4SLinus Torvalds while (n && n->start < end) { 27101da177e4SLinus Torvalds struct rb_node *next = rb_next(&n->nd); 27111da177e4SLinus Torvalds if (n->start >= start) { 27121da177e4SLinus Torvalds if (n->end <= end) 27131da177e4SLinus Torvalds sp_delete(sp, n); 27141da177e4SLinus Torvalds else 27151da177e4SLinus Torvalds n->start = end; 27161da177e4SLinus Torvalds } else { 27171da177e4SLinus Torvalds /* Old policy spanning whole new range. */ 27181da177e4SLinus Torvalds if (n->end > end) { 271942288fe3SMel Gorman if (!n_new) 272042288fe3SMel Gorman goto alloc_new; 272142288fe3SMel Gorman 272242288fe3SMel Gorman *mpol_new = *n->policy; 272342288fe3SMel Gorman atomic_set(&mpol_new->refcnt, 1); 27247880639cSKOSAKI Motohiro sp_node_init(n_new, end, n->end, mpol_new); 27251da177e4SLinus Torvalds n->end = start; 27265ca39575SHillf Danton sp_insert(sp, n_new); 272742288fe3SMel Gorman n_new = NULL; 272842288fe3SMel Gorman mpol_new = NULL; 27291da177e4SLinus Torvalds break; 27301da177e4SLinus Torvalds } else 27311da177e4SLinus Torvalds n->end = start; 27321da177e4SLinus Torvalds } 27331da177e4SLinus Torvalds if (!next) 27341da177e4SLinus Torvalds break; 27351da177e4SLinus Torvalds n = rb_entry(next, struct sp_node, nd); 27361da177e4SLinus Torvalds } 27371da177e4SLinus Torvalds if (new) 27381da177e4SLinus Torvalds sp_insert(sp, new); 27394a8c7bb5SNathan Zimmer write_unlock(&sp->lock); 274042288fe3SMel Gorman ret = 0; 274142288fe3SMel Gorman 274242288fe3SMel Gorman err_out: 274342288fe3SMel Gorman if (mpol_new) 274442288fe3SMel Gorman mpol_put(mpol_new); 274542288fe3SMel Gorman if (n_new) 274642288fe3SMel Gorman kmem_cache_free(sn_cache, n_new); 274742288fe3SMel Gorman 2748b22d127aSMel Gorman return ret; 274942288fe3SMel Gorman 275042288fe3SMel Gorman alloc_new: 27514a8c7bb5SNathan Zimmer write_unlock(&sp->lock); 275242288fe3SMel Gorman ret = -ENOMEM; 275342288fe3SMel Gorman n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL); 275442288fe3SMel Gorman if (!n_new) 275542288fe3SMel Gorman goto err_out; 275642288fe3SMel Gorman mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL); 275742288fe3SMel Gorman if (!mpol_new) 275842288fe3SMel Gorman goto err_out; 27594ad09955SMiaohe Lin atomic_set(&mpol_new->refcnt, 1); 276042288fe3SMel Gorman goto restart; 27611da177e4SLinus Torvalds } 27621da177e4SLinus Torvalds 276371fe804bSLee Schermerhorn /** 276471fe804bSLee Schermerhorn * mpol_shared_policy_init - initialize shared policy for inode 276571fe804bSLee 
Schermerhorn * @sp: pointer to inode shared policy 276671fe804bSLee Schermerhorn * @mpol: struct mempolicy to install 276771fe804bSLee Schermerhorn * 276871fe804bSLee Schermerhorn * Install non-NULL @mpol in inode's shared policy rb-tree. 276971fe804bSLee Schermerhorn * On entry, the current task has a reference on a non-NULL @mpol. 277071fe804bSLee Schermerhorn * This must be released on exit. 27714bfc4495SKAMEZAWA Hiroyuki * This is called at get_inode() calls and we can use GFP_KERNEL. 277271fe804bSLee Schermerhorn */ 277371fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) 27747339ff83SRobin Holt { 277558568d2aSMiao Xie int ret; 277658568d2aSMiao Xie 277771fe804bSLee Schermerhorn sp->root = RB_ROOT; /* empty tree == default mempolicy */ 27784a8c7bb5SNathan Zimmer rwlock_init(&sp->lock); 27797339ff83SRobin Holt 278071fe804bSLee Schermerhorn if (mpol) { 27817339ff83SRobin Holt struct vm_area_struct pvma; 278271fe804bSLee Schermerhorn struct mempolicy *new; 27834bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 27847339ff83SRobin Holt 27854bfc4495SKAMEZAWA Hiroyuki if (!scratch) 27865c0c1654SLee Schermerhorn goto put_mpol; 278771fe804bSLee Schermerhorn /* contextualize the tmpfs mount point mempolicy */ 278871fe804bSLee Schermerhorn new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask); 278915d77835SLee Schermerhorn if (IS_ERR(new)) 27900cae3457SDan Carpenter goto free_scratch; /* no valid nodemask intersection */ 279158568d2aSMiao Xie 279258568d2aSMiao Xie task_lock(current); 27934bfc4495SKAMEZAWA Hiroyuki ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch); 279458568d2aSMiao Xie task_unlock(current); 279515d77835SLee Schermerhorn if (ret) 27965c0c1654SLee Schermerhorn goto put_new; 279771fe804bSLee Schermerhorn 279871fe804bSLee Schermerhorn /* Create pseudo-vma that contains just the policy */ 27992c4541e2SKirill A. Shutemov vma_init(&pvma, NULL); 280071fe804bSLee Schermerhorn pvma.vm_end = TASK_SIZE; /* policy covers entire file */ 280171fe804bSLee Schermerhorn mpol_set_shared_policy(sp, &pvma, new); /* adds ref */ 280215d77835SLee Schermerhorn 28035c0c1654SLee Schermerhorn put_new: 280471fe804bSLee Schermerhorn mpol_put(new); /* drop initial ref */ 28050cae3457SDan Carpenter free_scratch: 28064bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 28075c0c1654SLee Schermerhorn put_mpol: 28085c0c1654SLee Schermerhorn mpol_put(mpol); /* drop our incoming ref on sb mpol */ 28097339ff83SRobin Holt } 28107339ff83SRobin Holt } 28117339ff83SRobin Holt 28121da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info, 28131da177e4SLinus Torvalds struct vm_area_struct *vma, struct mempolicy *npol) 28141da177e4SLinus Torvalds { 28151da177e4SLinus Torvalds int err; 28161da177e4SLinus Torvalds struct sp_node *new = NULL; 28171da177e4SLinus Torvalds unsigned long sz = vma_pages(vma); 28181da177e4SLinus Torvalds 2819028fec41SDavid Rientjes pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n", 28201da177e4SLinus Torvalds vma->vm_pgoff, 282145c4745aSLee Schermerhorn sz, npol ? npol->mode : -1, 2822028fec41SDavid Rientjes npol ? npol->flags : -1, 2823269fbe72SBen Widawsky npol ? 
nodes_addr(npol->nodes)[0] : NUMA_NO_NODE); 28241da177e4SLinus Torvalds 28251da177e4SLinus Torvalds if (npol) { 28261da177e4SLinus Torvalds new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol); 28271da177e4SLinus Torvalds if (!new) 28281da177e4SLinus Torvalds return -ENOMEM; 28291da177e4SLinus Torvalds } 28301da177e4SLinus Torvalds err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new); 28311da177e4SLinus Torvalds if (err && new) 283263f74ca2SKOSAKI Motohiro sp_free(new); 28331da177e4SLinus Torvalds return err; 28341da177e4SLinus Torvalds } 28351da177e4SLinus Torvalds 28361da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */ 28371da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p) 28381da177e4SLinus Torvalds { 28391da177e4SLinus Torvalds struct sp_node *n; 28401da177e4SLinus Torvalds struct rb_node *next; 28411da177e4SLinus Torvalds 28421da177e4SLinus Torvalds if (!p->root.rb_node) 28431da177e4SLinus Torvalds return; 28444a8c7bb5SNathan Zimmer write_lock(&p->lock); 28451da177e4SLinus Torvalds next = rb_first(&p->root); 28461da177e4SLinus Torvalds while (next) { 28471da177e4SLinus Torvalds n = rb_entry(next, struct sp_node, nd); 28481da177e4SLinus Torvalds next = rb_next(&n->nd); 284963f74ca2SKOSAKI Motohiro sp_delete(p, n); 28501da177e4SLinus Torvalds } 28514a8c7bb5SNathan Zimmer write_unlock(&p->lock); 28521da177e4SLinus Torvalds } 28531da177e4SLinus Torvalds 28541a687c2eSMel Gorman #ifdef CONFIG_NUMA_BALANCING 2855c297663cSMel Gorman static int __initdata numabalancing_override; 28561a687c2eSMel Gorman 28571a687c2eSMel Gorman static void __init check_numabalancing_enable(void) 28581a687c2eSMel Gorman { 28591a687c2eSMel Gorman bool numabalancing_default = false; 28601a687c2eSMel Gorman 28611a687c2eSMel Gorman if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED)) 28621a687c2eSMel Gorman numabalancing_default = true; 28631a687c2eSMel Gorman 2864c297663cSMel Gorman /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */ 2865c297663cSMel Gorman if (numabalancing_override) 2866c297663cSMel Gorman set_numabalancing_state(numabalancing_override == 1); 2867c297663cSMel Gorman 2868b0dc2b9bSMel Gorman if (num_online_nodes() > 1 && !numabalancing_override) { 2869756a025fSJoe Perches pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n", 2870c297663cSMel Gorman numabalancing_default ? 
"Enabling" : "Disabling"); 28711a687c2eSMel Gorman set_numabalancing_state(numabalancing_default); 28721a687c2eSMel Gorman } 28731a687c2eSMel Gorman } 28741a687c2eSMel Gorman 28751a687c2eSMel Gorman static int __init setup_numabalancing(char *str) 28761a687c2eSMel Gorman { 28771a687c2eSMel Gorman int ret = 0; 28781a687c2eSMel Gorman if (!str) 28791a687c2eSMel Gorman goto out; 28801a687c2eSMel Gorman 28811a687c2eSMel Gorman if (!strcmp(str, "enable")) { 2882c297663cSMel Gorman numabalancing_override = 1; 28831a687c2eSMel Gorman ret = 1; 28841a687c2eSMel Gorman } else if (!strcmp(str, "disable")) { 2885c297663cSMel Gorman numabalancing_override = -1; 28861a687c2eSMel Gorman ret = 1; 28871a687c2eSMel Gorman } 28881a687c2eSMel Gorman out: 28891a687c2eSMel Gorman if (!ret) 28904a404beaSAndrew Morton pr_warn("Unable to parse numa_balancing=\n"); 28911a687c2eSMel Gorman 28921a687c2eSMel Gorman return ret; 28931a687c2eSMel Gorman } 28941a687c2eSMel Gorman __setup("numa_balancing=", setup_numabalancing); 28951a687c2eSMel Gorman #else 28961a687c2eSMel Gorman static inline void __init check_numabalancing_enable(void) 28971a687c2eSMel Gorman { 28981a687c2eSMel Gorman } 28991a687c2eSMel Gorman #endif /* CONFIG_NUMA_BALANCING */ 29001a687c2eSMel Gorman 29011da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */ 29021da177e4SLinus Torvalds void __init numa_policy_init(void) 29031da177e4SLinus Torvalds { 2904b71636e2SPaul Mundt nodemask_t interleave_nodes; 2905b71636e2SPaul Mundt unsigned long largest = 0; 2906b71636e2SPaul Mundt int nid, prefer = 0; 2907b71636e2SPaul Mundt 29081da177e4SLinus Torvalds policy_cache = kmem_cache_create("numa_policy", 29091da177e4SLinus Torvalds sizeof(struct mempolicy), 291020c2df83SPaul Mundt 0, SLAB_PANIC, NULL); 29111da177e4SLinus Torvalds 29121da177e4SLinus Torvalds sn_cache = kmem_cache_create("shared_policy_node", 29131da177e4SLinus Torvalds sizeof(struct sp_node), 291420c2df83SPaul Mundt 0, SLAB_PANIC, NULL); 29151da177e4SLinus Torvalds 29165606e387SMel Gorman for_each_node(nid) { 29175606e387SMel Gorman preferred_node_policy[nid] = (struct mempolicy) { 29185606e387SMel Gorman .refcnt = ATOMIC_INIT(1), 29195606e387SMel Gorman .mode = MPOL_PREFERRED, 29205606e387SMel Gorman .flags = MPOL_F_MOF | MPOL_F_MORON, 2921269fbe72SBen Widawsky .nodes = nodemask_of_node(nid), 29225606e387SMel Gorman }; 29235606e387SMel Gorman } 29245606e387SMel Gorman 2925b71636e2SPaul Mundt /* 2926b71636e2SPaul Mundt * Set interleaving policy for system init. Interleaving is only 2927b71636e2SPaul Mundt * enabled across suitably sized nodes (default is >= 16MB), or 2928b71636e2SPaul Mundt * fall back to the largest node if they're all smaller. 2929b71636e2SPaul Mundt */ 2930b71636e2SPaul Mundt nodes_clear(interleave_nodes); 293101f13bd6SLai Jiangshan for_each_node_state(nid, N_MEMORY) { 2932b71636e2SPaul Mundt unsigned long total_pages = node_present_pages(nid); 29331da177e4SLinus Torvalds 2934b71636e2SPaul Mundt /* Preserve the largest node */ 2935b71636e2SPaul Mundt if (largest < total_pages) { 2936b71636e2SPaul Mundt largest = total_pages; 2937b71636e2SPaul Mundt prefer = nid; 2938b71636e2SPaul Mundt } 2939b71636e2SPaul Mundt 2940b71636e2SPaul Mundt /* Interleave this node? 
*/ 2941b71636e2SPaul Mundt if ((total_pages << PAGE_SHIFT) >= (16 << 20)) 2942b71636e2SPaul Mundt node_set(nid, interleave_nodes); 2943b71636e2SPaul Mundt } 2944b71636e2SPaul Mundt 2945b71636e2SPaul Mundt /* All too small, use the largest */ 2946b71636e2SPaul Mundt if (unlikely(nodes_empty(interleave_nodes))) 2947b71636e2SPaul Mundt node_set(prefer, interleave_nodes); 2948b71636e2SPaul Mundt 2949028fec41SDavid Rientjes if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes)) 2950b1de0d13SMitchel Humpherys pr_err("%s: interleaving failed\n", __func__); 29511a687c2eSMel Gorman 29521a687c2eSMel Gorman check_numabalancing_enable(); 29531da177e4SLinus Torvalds } 29541da177e4SLinus Torvalds 29558bccd85fSChristoph Lameter /* Reset policy of current process to default */ 29561da177e4SLinus Torvalds void numa_default_policy(void) 29571da177e4SLinus Torvalds { 2958028fec41SDavid Rientjes do_set_mempolicy(MPOL_DEFAULT, 0, NULL); 29591da177e4SLinus Torvalds } 296068860ec1SPaul Jackson 29614225399aSPaul Jackson /* 2962095f1fc4SLee Schermerhorn * Parse and format mempolicy from/to strings 2963095f1fc4SLee Schermerhorn */ 2964095f1fc4SLee Schermerhorn 2965345ace9cSLee Schermerhorn static const char * const policy_modes[] = 2966345ace9cSLee Schermerhorn { 2967345ace9cSLee Schermerhorn [MPOL_DEFAULT] = "default", 2968345ace9cSLee Schermerhorn [MPOL_PREFERRED] = "prefer", 2969345ace9cSLee Schermerhorn [MPOL_BIND] = "bind", 2970345ace9cSLee Schermerhorn [MPOL_INTERLEAVE] = "interleave", 2971d3a71033SLee Schermerhorn [MPOL_LOCAL] = "local", 2972b27abaccSDave Hansen [MPOL_PREFERRED_MANY] = "prefer (many)", 2973345ace9cSLee Schermerhorn }; 29741a75a6c8SChristoph Lameter 2975095f1fc4SLee Schermerhorn 2976095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS 2977095f1fc4SLee Schermerhorn /** 2978f2a07f40SHugh Dickins * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option. 2979095f1fc4SLee Schermerhorn * @str: string containing mempolicy to parse 298071fe804bSLee Schermerhorn * @mpol: pointer to struct mempolicy pointer, returned on success. 
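 *
 * Illustrative inputs in the format spelled out below (a sketch, not
 * an exhaustive list): "interleave:0-3", "prefer=static:2",
 * "bind=relative:0,1" and "local".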
2981095f1fc4SLee Schermerhorn * 2982095f1fc4SLee Schermerhorn * Format of input: 2983095f1fc4SLee Schermerhorn * <mode>[=<flags>][:<nodelist>] 2984095f1fc4SLee Schermerhorn * 2985dad5b023SRandy Dunlap * Return: %0 on success, else %1 2986095f1fc4SLee Schermerhorn */ 2987a7a88b23SHugh Dickins int mpol_parse_str(char *str, struct mempolicy **mpol) 2988095f1fc4SLee Schermerhorn { 298971fe804bSLee Schermerhorn struct mempolicy *new = NULL; 2990f2a07f40SHugh Dickins unsigned short mode_flags; 299171fe804bSLee Schermerhorn nodemask_t nodes; 2992095f1fc4SLee Schermerhorn char *nodelist = strchr(str, ':'); 2993095f1fc4SLee Schermerhorn char *flags = strchr(str, '='); 2994dedf2c73Szhong jiang int err = 1, mode; 2995095f1fc4SLee Schermerhorn 2996c7a91bc7SDan Carpenter if (flags) 2997c7a91bc7SDan Carpenter *flags++ = '\0'; /* terminate mode string */ 2998c7a91bc7SDan Carpenter 2999095f1fc4SLee Schermerhorn if (nodelist) { 3000095f1fc4SLee Schermerhorn /* NUL-terminate mode or flags string */ 3001095f1fc4SLee Schermerhorn *nodelist++ = '\0'; 300271fe804bSLee Schermerhorn if (nodelist_parse(nodelist, nodes)) 3003095f1fc4SLee Schermerhorn goto out; 300401f13bd6SLai Jiangshan if (!nodes_subset(nodes, node_states[N_MEMORY])) 3005095f1fc4SLee Schermerhorn goto out; 300671fe804bSLee Schermerhorn } else 300771fe804bSLee Schermerhorn nodes_clear(nodes); 300871fe804bSLee Schermerhorn 3009dedf2c73Szhong jiang mode = match_string(policy_modes, MPOL_MAX, str); 3010dedf2c73Szhong jiang if (mode < 0) 3011095f1fc4SLee Schermerhorn goto out; 3012095f1fc4SLee Schermerhorn 301371fe804bSLee Schermerhorn switch (mode) { 3014095f1fc4SLee Schermerhorn case MPOL_PREFERRED: 301571fe804bSLee Schermerhorn /* 3016aa9f7d51SRandy Dunlap * Insist on a nodelist of one node only, although later 3017aa9f7d51SRandy Dunlap * we use first_node(nodes) to grab a single node, so here 3018aa9f7d51SRandy Dunlap * nodelist (or nodes) cannot be empty. 
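 * Illustrative behaviour of this check: "prefer:2" is accepted and
 * selects node 2, "prefer:0-3" is rejected because the nodelist is
 * not a single plain node number, and a bare "prefer" with no
 * nodelist falls through and is turned into MPOL_LOCAL further below.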
301971fe804bSLee Schermerhorn */ 3020095f1fc4SLee Schermerhorn if (nodelist) { 3021095f1fc4SLee Schermerhorn char *rest = nodelist; 3022095f1fc4SLee Schermerhorn while (isdigit(*rest)) 3023095f1fc4SLee Schermerhorn rest++; 3024926f2ae0SKOSAKI Motohiro if (*rest) 3025926f2ae0SKOSAKI Motohiro goto out; 3026aa9f7d51SRandy Dunlap if (nodes_empty(nodes)) 3027aa9f7d51SRandy Dunlap goto out; 3028095f1fc4SLee Schermerhorn } 3029095f1fc4SLee Schermerhorn break; 3030095f1fc4SLee Schermerhorn case MPOL_INTERLEAVE: 3031095f1fc4SLee Schermerhorn /* 3032095f1fc4SLee Schermerhorn * Default to online nodes with memory if no nodelist 3033095f1fc4SLee Schermerhorn */ 3034095f1fc4SLee Schermerhorn if (!nodelist) 303501f13bd6SLai Jiangshan nodes = node_states[N_MEMORY]; 30363f226aa1SLee Schermerhorn break; 303771fe804bSLee Schermerhorn case MPOL_LOCAL: 30383f226aa1SLee Schermerhorn /* 303971fe804bSLee Schermerhorn * Don't allow a nodelist; mpol_new() checks flags 30403f226aa1SLee Schermerhorn */ 304171fe804bSLee Schermerhorn if (nodelist) 30423f226aa1SLee Schermerhorn goto out; 30433f226aa1SLee Schermerhorn break; 3044413b43deSRavikiran G Thirumalai case MPOL_DEFAULT: 3045413b43deSRavikiran G Thirumalai /* 3046413b43deSRavikiran G Thirumalai * Insist on a empty nodelist 3047413b43deSRavikiran G Thirumalai */ 3048413b43deSRavikiran G Thirumalai if (!nodelist) 3049413b43deSRavikiran G Thirumalai err = 0; 3050413b43deSRavikiran G Thirumalai goto out; 3051b27abaccSDave Hansen case MPOL_PREFERRED_MANY: 3052d69b2e63SKOSAKI Motohiro case MPOL_BIND: 305371fe804bSLee Schermerhorn /* 3054d69b2e63SKOSAKI Motohiro * Insist on a nodelist 305571fe804bSLee Schermerhorn */ 3056d69b2e63SKOSAKI Motohiro if (!nodelist) 3057d69b2e63SKOSAKI Motohiro goto out; 3058095f1fc4SLee Schermerhorn } 3059095f1fc4SLee Schermerhorn 306071fe804bSLee Schermerhorn mode_flags = 0; 3061095f1fc4SLee Schermerhorn if (flags) { 3062095f1fc4SLee Schermerhorn /* 3063095f1fc4SLee Schermerhorn * Currently, we only support two mutually exclusive 3064095f1fc4SLee Schermerhorn * mode flags. 3065095f1fc4SLee Schermerhorn */ 3066095f1fc4SLee Schermerhorn if (!strcmp(flags, "static")) 306771fe804bSLee Schermerhorn mode_flags |= MPOL_F_STATIC_NODES; 3068095f1fc4SLee Schermerhorn else if (!strcmp(flags, "relative")) 306971fe804bSLee Schermerhorn mode_flags |= MPOL_F_RELATIVE_NODES; 3070095f1fc4SLee Schermerhorn else 3071926f2ae0SKOSAKI Motohiro goto out; 3072095f1fc4SLee Schermerhorn } 307371fe804bSLee Schermerhorn 307471fe804bSLee Schermerhorn new = mpol_new(mode, mode_flags, &nodes); 307571fe804bSLee Schermerhorn if (IS_ERR(new)) 3076926f2ae0SKOSAKI Motohiro goto out; 3077926f2ae0SKOSAKI Motohiro 3078f2a07f40SHugh Dickins /* 3079f2a07f40SHugh Dickins * Save nodes for mpol_to_str() to show the tmpfs mount options 3080f2a07f40SHugh Dickins * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo. 3081f2a07f40SHugh Dickins */ 3082269fbe72SBen Widawsky if (mode != MPOL_PREFERRED) { 3083269fbe72SBen Widawsky new->nodes = nodes; 3084269fbe72SBen Widawsky } else if (nodelist) { 3085269fbe72SBen Widawsky nodes_clear(new->nodes); 3086269fbe72SBen Widawsky node_set(first_node(nodes), new->nodes); 3087269fbe72SBen Widawsky } else { 30887858d7bcSFeng Tang new->mode = MPOL_LOCAL; 3089269fbe72SBen Widawsky } 3090f2a07f40SHugh Dickins 3091f2a07f40SHugh Dickins /* 3092f2a07f40SHugh Dickins * Save nodes for contextualization: this will be used to "clone" 3093f2a07f40SHugh Dickins * the mempolicy in a specific context [cpuset] at a later time. 
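 * (Illustrative: a tmpfs mount option parsed as "bind:0-3" records
 * nodes 0-3 in w.user_nodemask here, and it is this saved mask that
 * mpol_shared_policy_init() above feeds back into mpol_new() when the
 * mount's policy is instantiated for an inode.)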
3094f2a07f40SHugh Dickins */ 3095e17f74afSLee Schermerhorn new->w.user_nodemask = nodes; 3096f2a07f40SHugh Dickins 3097926f2ae0SKOSAKI Motohiro err = 0; 309871fe804bSLee Schermerhorn 3099095f1fc4SLee Schermerhorn out: 3100095f1fc4SLee Schermerhorn /* Restore string for error message */ 3101095f1fc4SLee Schermerhorn if (nodelist) 3102095f1fc4SLee Schermerhorn *--nodelist = ':'; 3103095f1fc4SLee Schermerhorn if (flags) 3104095f1fc4SLee Schermerhorn *--flags = '='; 310571fe804bSLee Schermerhorn if (!err) 310671fe804bSLee Schermerhorn *mpol = new; 3107095f1fc4SLee Schermerhorn return err; 3108095f1fc4SLee Schermerhorn } 3109095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */ 3110095f1fc4SLee Schermerhorn 311171fe804bSLee Schermerhorn /** 311271fe804bSLee Schermerhorn * mpol_to_str - format a mempolicy structure for printing 311371fe804bSLee Schermerhorn * @buffer: to contain formatted mempolicy string 311471fe804bSLee Schermerhorn * @maxlen: length of @buffer 311571fe804bSLee Schermerhorn * @pol: pointer to mempolicy to be formatted 311671fe804bSLee Schermerhorn * 3117948927eeSDavid Rientjes * Convert @pol into a string. If @buffer is too short, truncate the string. 3118948927eeSDavid Rientjes * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the 3119948927eeSDavid Rientjes * longest flag, "relative", and to display at least a few node ids. 31201a75a6c8SChristoph Lameter */ 3121948927eeSDavid Rientjes void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol) 31221a75a6c8SChristoph Lameter { 31231a75a6c8SChristoph Lameter char *p = buffer; 3124948927eeSDavid Rientjes nodemask_t nodes = NODE_MASK_NONE; 3125948927eeSDavid Rientjes unsigned short mode = MPOL_DEFAULT; 3126948927eeSDavid Rientjes unsigned short flags = 0; 31271a75a6c8SChristoph Lameter 31288790c71aSDavid Rientjes if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) { 3129bea904d5SLee Schermerhorn mode = pol->mode; 3130948927eeSDavid Rientjes flags = pol->flags; 3131948927eeSDavid Rientjes } 3132bea904d5SLee Schermerhorn 31331a75a6c8SChristoph Lameter switch (mode) { 31341a75a6c8SChristoph Lameter case MPOL_DEFAULT: 31357858d7bcSFeng Tang case MPOL_LOCAL: 31361a75a6c8SChristoph Lameter break; 31371a75a6c8SChristoph Lameter case MPOL_PREFERRED: 3138b27abaccSDave Hansen case MPOL_PREFERRED_MANY: 31391a75a6c8SChristoph Lameter case MPOL_BIND: 31401a75a6c8SChristoph Lameter case MPOL_INTERLEAVE: 3141269fbe72SBen Widawsky nodes = pol->nodes; 31421a75a6c8SChristoph Lameter break; 31431a75a6c8SChristoph Lameter default: 3144948927eeSDavid Rientjes WARN_ON_ONCE(1); 3145948927eeSDavid Rientjes snprintf(p, maxlen, "unknown"); 3146948927eeSDavid Rientjes return; 31471a75a6c8SChristoph Lameter } 31481a75a6c8SChristoph Lameter 3149b7a9f420SDavid Rientjes p += snprintf(p, maxlen, "%s", policy_modes[mode]); 31501a75a6c8SChristoph Lameter 3151fc36b8d3SLee Schermerhorn if (flags & MPOL_MODE_FLAGS) { 3152948927eeSDavid Rientjes p += snprintf(p, buffer + maxlen - p, "="); 3153f5b087b5SDavid Rientjes 31542291990aSLee Schermerhorn /* 31552291990aSLee Schermerhorn * Currently, the only defined flags are mutually exclusive 31562291990aSLee Schermerhorn */ 3157f5b087b5SDavid Rientjes if (flags & MPOL_F_STATIC_NODES) 31582291990aSLee Schermerhorn p += snprintf(p, buffer + maxlen - p, "static"); 31592291990aSLee Schermerhorn else if (flags & MPOL_F_RELATIVE_NODES) 31602291990aSLee Schermerhorn p += snprintf(p, buffer + maxlen - p, "relative"); 3161f5b087b5SDavid Rientjes } 3162f5b087b5SDavid Rientjes 
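/*
 * Illustrative output at this point (a sketch, not from the original
 * comments): an interleave policy over nodes 0-3 carrying
 * MPOL_F_STATIC_NODES has produced "interleave=static" so far, and the
 * nodelist appended below completes it as "interleave=static:0-3".
 */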
31639e763e0fSTejun Heo if (!nodes_empty(nodes)) 31649e763e0fSTejun Heo p += scnprintf(p, buffer + maxlen - p, ":%*pbl", 31659e763e0fSTejun Heo nodemask_pr_args(&nodes)); 31661a75a6c8SChristoph Lameter } 3167
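/*
 * Minimal usage sketch for mpol_to_str() (illustrative only; the
 * function below is hypothetical and not part of this file). It
 * formats the calling task's policy much like /proc/<pid>/numa_maps
 * reports per-VMA policies, assuming the buffer is large enough for
 * the longest mode plus flags (>= 32 bytes as recommended above).
 */
static void example_print_task_mempolicy(void)
{
	char buf[64];

	mpol_to_str(buf, sizeof(buf), get_task_policy(current));
	pr_info("task mempolicy: %s\n", buf);
}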