// SPDX-License-Identifier: GPL-2.0-only
/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 *
 * NUMA policy allows the user to give hints about which node(s) memory
 * should be allocated on.
 *
 * Supports the following policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave	Allocate memory interleaved over a set of nodes,
 *		with normal fallback if it fails.
 *		For VMA based allocations this interleaves based on the
 *		offset into the backing object or offset into the mapping
 *		for anonymous memory. For process policy a per-process
 *		counter is used.
 *
 * bind		Only allocate memory on a specific set of nodes,
 *		no fallback.
 *		FIXME: memory is allocated starting with the first node
 *		to the last. It would be better if bind would truly restrict
 *		the allocation to memory nodes instead.
 *
 * preferred	Try a specific node first before normal fallback.
 *		As a special case NUMA_NO_NODE here means do the allocation
 *		on the node local to the current CPU. This is normally
 *		identical to default, but useful to set in a VMA when you
 *		have a non-default process policy.
 *
 * preferred many	Try a set of nodes first before normal fallback. This is
 *		similar to preferred without the special case.
 *
 * default	Allocate on the local node first, or when on a VMA
 *		use the process policy. This is what Linux always did
 *		in a NUMA-aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non-interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem, kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
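/*
 * A minimal userspace sketch (not part of this file) of how the policy
 * modes above are selected through the set_mempolicy(2) and mbind(2)
 * system calls. It assumes libnuma's <numaif.h> declarations (build with
 * -lnuma) and a machine with at least two online NUMA nodes, 0 and 1.
 *
 *	#include <numaif.h>
 *	#include <sys/mman.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned long interleave_nodes = (1UL << 0) | (1UL << 1);
 *		unsigned long bind_nodes = 1UL << 0;
 *		unsigned long maxnode = 8 * sizeof(unsigned long);
 *		size_t len = 4UL << 20;		// 4 MiB mapping
 *		void *buf;
 *		int mode;
 *
 *		// Process policy: interleave new allocations over nodes 0 and 1.
 *		if (set_mempolicy(MPOL_INTERLEAVE, &interleave_nodes, maxnode))
 *			perror("set_mempolicy");
 *
 *		buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *		if (buf == MAP_FAILED)
 *			return 1;
 *
 *		// VMA policy: faults in this mapping must be satisfied from
 *		// node 0 only (no fallback), overriding the process policy.
 *		if (mbind(buf, len, MPOL_BIND, &bind_nodes, maxnode, 0))
 *			perror("mbind");
 *
 *		// Query the effective policy of the mapping: reports MPOL_BIND.
 *		get_mempolicy(&mode, NULL, 0, buf, MPOL_F_ADDR);
 *
 *		// Back to the default (local) process policy.
 *		set_mempolicy(MPOL_DEFAULT, NULL, 0);
 *		return 0;
 *	}
 */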
/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/pagewalk.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/ptrace.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>
#include <linux/swapops.h>

#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <linux/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT       (MPOL_MF_INTERNAL << 1)	/* Invert check for nodemask */
static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

/**
 * numa_map_to_online_node - Find closest online node
 * @node: Node id to start the search
 *
 * Look up the next closest node by distance if @node is not online.
 *
 * Return: this @node if it is online, otherwise the closest node by distance
 */
int numa_map_to_online_node(int node)
{
	int min_dist = INT_MAX, dist, n, min_node;

	if (node == NUMA_NO_NODE || node_online(node))
		return node;

	min_node = node;
	for_each_online_node(n) {
		dist = node_distance(node, n);
		if (dist < min_dist) {
			min_dist = dist;
			min_node = n;
		}
	}

	return min_node;
}
EXPORT_SYMBOL_GPL(numa_map_to_online_node);

struct mempolicy *get_task_policy(struct task_struct *p)
{
	struct mempolicy *pol = p->mempolicy;
	int node;

	if (pol)
		return pol;

	node = numa_node_id();
	if (node != NUMA_NO_NODE) {
		pol = &preferred_node_policy[node];
		/* preferred_node_policy is not initialised early in boot */
		if (pol->mode)
			return pol;
	}

	return &default_policy;
}

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
} mpol_ops[MPOL_MAX];

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
Rientjes const nodemask_t *rel) 1924c50bc01SDavid Rientjes { 1934c50bc01SDavid Rientjes nodemask_t tmp; 1944c50bc01SDavid Rientjes nodes_fold(tmp, *orig, nodes_weight(*rel)); 1954c50bc01SDavid Rientjes nodes_onto(*ret, tmp, *rel); 196f5b087b5SDavid Rientjes } 197f5b087b5SDavid Rientjes 198be897d48SFeng Tang static int mpol_new_nodemask(struct mempolicy *pol, const nodemask_t *nodes) 19937012946SDavid Rientjes { 20037012946SDavid Rientjes if (nodes_empty(*nodes)) 20137012946SDavid Rientjes return -EINVAL; 202269fbe72SBen Widawsky pol->nodes = *nodes; 20337012946SDavid Rientjes return 0; 20437012946SDavid Rientjes } 20537012946SDavid Rientjes 20637012946SDavid Rientjes static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes) 20737012946SDavid Rientjes { 2087858d7bcSFeng Tang if (nodes_empty(*nodes)) 2097858d7bcSFeng Tang return -EINVAL; 210269fbe72SBen Widawsky 211269fbe72SBen Widawsky nodes_clear(pol->nodes); 212269fbe72SBen Widawsky node_set(first_node(*nodes), pol->nodes); 21337012946SDavid Rientjes return 0; 21437012946SDavid Rientjes } 21537012946SDavid Rientjes 21658568d2aSMiao Xie /* 21758568d2aSMiao Xie * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if 21858568d2aSMiao Xie * any, for the new policy. mpol_new() has already validated the nodes 2197858d7bcSFeng Tang * parameter with respect to the policy mode and flags. 22058568d2aSMiao Xie * 22158568d2aSMiao Xie * Must be called holding task's alloc_lock to protect task's mems_allowed 222c1e8d7c6SMichel Lespinasse * and mempolicy. May also be called holding the mmap_lock for write. 22358568d2aSMiao Xie */ 2244bfc4495SKAMEZAWA Hiroyuki static int mpol_set_nodemask(struct mempolicy *pol, 2254bfc4495SKAMEZAWA Hiroyuki const nodemask_t *nodes, struct nodemask_scratch *nsc) 22658568d2aSMiao Xie { 22758568d2aSMiao Xie int ret; 22858568d2aSMiao Xie 2297858d7bcSFeng Tang /* 2307858d7bcSFeng Tang * Default (pol==NULL) resp. local memory policies are not a 2317858d7bcSFeng Tang * subject of any remapping. They also do not need any special 2327858d7bcSFeng Tang * constructor. 2337858d7bcSFeng Tang */ 2347858d7bcSFeng Tang if (!pol || pol->mode == MPOL_LOCAL) 23558568d2aSMiao Xie return 0; 2367858d7bcSFeng Tang 23701f13bd6SLai Jiangshan /* Check N_MEMORY */ 2384bfc4495SKAMEZAWA Hiroyuki nodes_and(nsc->mask1, 23901f13bd6SLai Jiangshan cpuset_current_mems_allowed, node_states[N_MEMORY]); 24058568d2aSMiao Xie 24158568d2aSMiao Xie VM_BUG_ON(!nodes); 2427858d7bcSFeng Tang 24358568d2aSMiao Xie if (pol->flags & MPOL_F_RELATIVE_NODES) 2444bfc4495SKAMEZAWA Hiroyuki mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1); 24558568d2aSMiao Xie else 2464bfc4495SKAMEZAWA Hiroyuki nodes_and(nsc->mask2, *nodes, nsc->mask1); 2474bfc4495SKAMEZAWA Hiroyuki 24858568d2aSMiao Xie if (mpol_store_user_nodemask(pol)) 24958568d2aSMiao Xie pol->w.user_nodemask = *nodes; 25058568d2aSMiao Xie else 2517858d7bcSFeng Tang pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed; 25258568d2aSMiao Xie 2534bfc4495SKAMEZAWA Hiroyuki ret = mpol_ops[pol->mode].create(pol, &nsc->mask2); 25458568d2aSMiao Xie return ret; 25558568d2aSMiao Xie } 25658568d2aSMiao Xie 25758568d2aSMiao Xie /* 25858568d2aSMiao Xie * This function just creates a new policy, does some check and simple 25958568d2aSMiao Xie * initialization. You must invoke mpol_set_nodemask() to set nodes. 
26058568d2aSMiao Xie */ 261028fec41SDavid Rientjes static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags, 262028fec41SDavid Rientjes nodemask_t *nodes) 2631da177e4SLinus Torvalds { 2641da177e4SLinus Torvalds struct mempolicy *policy; 2651da177e4SLinus Torvalds 266028fec41SDavid Rientjes pr_debug("setting mode %d flags %d nodes[0] %lx\n", 26700ef2d2fSDavid Rientjes mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE); 268140d5a49SPaul Mundt 2693e1f0645SDavid Rientjes if (mode == MPOL_DEFAULT) { 2703e1f0645SDavid Rientjes if (nodes && !nodes_empty(*nodes)) 27137012946SDavid Rientjes return ERR_PTR(-EINVAL); 272d3a71033SLee Schermerhorn return NULL; 27337012946SDavid Rientjes } 2743e1f0645SDavid Rientjes VM_BUG_ON(!nodes); 2753e1f0645SDavid Rientjes 2763e1f0645SDavid Rientjes /* 2773e1f0645SDavid Rientjes * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or 2783e1f0645SDavid Rientjes * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation). 2793e1f0645SDavid Rientjes * All other modes require a valid pointer to a non-empty nodemask. 2803e1f0645SDavid Rientjes */ 2813e1f0645SDavid Rientjes if (mode == MPOL_PREFERRED) { 2823e1f0645SDavid Rientjes if (nodes_empty(*nodes)) { 2833e1f0645SDavid Rientjes if (((flags & MPOL_F_STATIC_NODES) || 2843e1f0645SDavid Rientjes (flags & MPOL_F_RELATIVE_NODES))) 2853e1f0645SDavid Rientjes return ERR_PTR(-EINVAL); 2867858d7bcSFeng Tang 2877858d7bcSFeng Tang mode = MPOL_LOCAL; 2883e1f0645SDavid Rientjes } 289479e2802SPeter Zijlstra } else if (mode == MPOL_LOCAL) { 2908d303e44SPiotr Kwapulinski if (!nodes_empty(*nodes) || 2918d303e44SPiotr Kwapulinski (flags & MPOL_F_STATIC_NODES) || 2928d303e44SPiotr Kwapulinski (flags & MPOL_F_RELATIVE_NODES)) 293479e2802SPeter Zijlstra return ERR_PTR(-EINVAL); 2943e1f0645SDavid Rientjes } else if (nodes_empty(*nodes)) 2953e1f0645SDavid Rientjes return ERR_PTR(-EINVAL); 2961da177e4SLinus Torvalds policy = kmem_cache_alloc(policy_cache, GFP_KERNEL); 2971da177e4SLinus Torvalds if (!policy) 2981da177e4SLinus Torvalds return ERR_PTR(-ENOMEM); 2991da177e4SLinus Torvalds atomic_set(&policy->refcnt, 1); 30045c4745aSLee Schermerhorn policy->mode = mode; 30137012946SDavid Rientjes policy->flags = flags; 302c6018b4bSAneesh Kumar K.V policy->home_node = NUMA_NO_NODE; 3033e1f0645SDavid Rientjes 30437012946SDavid Rientjes return policy; 30537012946SDavid Rientjes } 30637012946SDavid Rientjes 30752cd3b07SLee Schermerhorn /* Slow path of a mpol destructor. 
*/ 30852cd3b07SLee Schermerhorn void __mpol_put(struct mempolicy *p) 30952cd3b07SLee Schermerhorn { 31052cd3b07SLee Schermerhorn if (!atomic_dec_and_test(&p->refcnt)) 31152cd3b07SLee Schermerhorn return; 31252cd3b07SLee Schermerhorn kmem_cache_free(policy_cache, p); 31352cd3b07SLee Schermerhorn } 31452cd3b07SLee Schermerhorn 315213980c0SVlastimil Babka static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes) 31637012946SDavid Rientjes { 31737012946SDavid Rientjes } 31837012946SDavid Rientjes 319213980c0SVlastimil Babka static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes) 3201d0d2680SDavid Rientjes { 3211d0d2680SDavid Rientjes nodemask_t tmp; 3221d0d2680SDavid Rientjes 32337012946SDavid Rientjes if (pol->flags & MPOL_F_STATIC_NODES) 32437012946SDavid Rientjes nodes_and(tmp, pol->w.user_nodemask, *nodes); 32537012946SDavid Rientjes else if (pol->flags & MPOL_F_RELATIVE_NODES) 32637012946SDavid Rientjes mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes); 3271d0d2680SDavid Rientjes else { 328269fbe72SBen Widawsky nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed, 329213980c0SVlastimil Babka *nodes); 33029b190faSzhong jiang pol->w.cpuset_mems_allowed = *nodes; 3311d0d2680SDavid Rientjes } 33237012946SDavid Rientjes 333708c1bbcSMiao Xie if (nodes_empty(tmp)) 334708c1bbcSMiao Xie tmp = *nodes; 335708c1bbcSMiao Xie 336269fbe72SBen Widawsky pol->nodes = tmp; 33737012946SDavid Rientjes } 33837012946SDavid Rientjes 33937012946SDavid Rientjes static void mpol_rebind_preferred(struct mempolicy *pol, 340213980c0SVlastimil Babka const nodemask_t *nodes) 34137012946SDavid Rientjes { 34237012946SDavid Rientjes pol->w.cpuset_mems_allowed = *nodes; 3431d0d2680SDavid Rientjes } 34437012946SDavid Rientjes 345708c1bbcSMiao Xie /* 346708c1bbcSMiao Xie * mpol_rebind_policy - Migrate a policy to a different set of nodes 347708c1bbcSMiao Xie * 348c1e8d7c6SMichel Lespinasse * Per-vma policies are protected by mmap_lock. Allocations using per-task 349213980c0SVlastimil Babka * policies are protected by task->mems_allowed_seq to prevent a premature 350213980c0SVlastimil Babka * OOM/allocation failure due to parallel nodemask modification. 351708c1bbcSMiao Xie */ 352213980c0SVlastimil Babka static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask) 35337012946SDavid Rientjes { 354018160adSWang Cheng if (!pol || pol->mode == MPOL_LOCAL) 35537012946SDavid Rientjes return; 3567858d7bcSFeng Tang if (!mpol_store_user_nodemask(pol) && 35737012946SDavid Rientjes nodes_equal(pol->w.cpuset_mems_allowed, *newmask)) 35837012946SDavid Rientjes return; 359708c1bbcSMiao Xie 360213980c0SVlastimil Babka mpol_ops[pol->mode].rebind(pol, newmask); 3611d0d2680SDavid Rientjes } 3621d0d2680SDavid Rientjes 3631d0d2680SDavid Rientjes /* 3641d0d2680SDavid Rientjes * Wrapper for mpol_rebind_policy() that just requires task 3651d0d2680SDavid Rientjes * pointer, and updates task mempolicy. 36658568d2aSMiao Xie * 36758568d2aSMiao Xie * Called with task's alloc_lock held. 3681d0d2680SDavid Rientjes */ 3691d0d2680SDavid Rientjes 370213980c0SVlastimil Babka void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new) 3711d0d2680SDavid Rientjes { 372213980c0SVlastimil Babka mpol_rebind_policy(tsk->mempolicy, new); 3731d0d2680SDavid Rientjes } 3741d0d2680SDavid Rientjes 3751d0d2680SDavid Rientjes /* 3761d0d2680SDavid Rientjes * Rebind each vma in mm to new nodemask. 3771d0d2680SDavid Rientjes * 378c1e8d7c6SMichel Lespinasse * Call holding a reference to mm. 
Takes mm->mmap_lock during call. 3791d0d2680SDavid Rientjes */ 3801d0d2680SDavid Rientjes 3811d0d2680SDavid Rientjes void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new) 3821d0d2680SDavid Rientjes { 3831d0d2680SDavid Rientjes struct vm_area_struct *vma; 38466850be5SLiam R. Howlett VMA_ITERATOR(vmi, mm, 0); 3851d0d2680SDavid Rientjes 386d8ed45c5SMichel Lespinasse mmap_write_lock(mm); 3876c21e066SJann Horn for_each_vma(vmi, vma) { 3886c21e066SJann Horn vma_start_write(vma); 389213980c0SVlastimil Babka mpol_rebind_policy(vma->vm_policy, new); 3906c21e066SJann Horn } 391d8ed45c5SMichel Lespinasse mmap_write_unlock(mm); 3921d0d2680SDavid Rientjes } 3931d0d2680SDavid Rientjes 39437012946SDavid Rientjes static const struct mempolicy_operations mpol_ops[MPOL_MAX] = { 39537012946SDavid Rientjes [MPOL_DEFAULT] = { 39637012946SDavid Rientjes .rebind = mpol_rebind_default, 39737012946SDavid Rientjes }, 39837012946SDavid Rientjes [MPOL_INTERLEAVE] = { 399be897d48SFeng Tang .create = mpol_new_nodemask, 40037012946SDavid Rientjes .rebind = mpol_rebind_nodemask, 40137012946SDavid Rientjes }, 40237012946SDavid Rientjes [MPOL_PREFERRED] = { 40337012946SDavid Rientjes .create = mpol_new_preferred, 40437012946SDavid Rientjes .rebind = mpol_rebind_preferred, 40537012946SDavid Rientjes }, 40637012946SDavid Rientjes [MPOL_BIND] = { 407be897d48SFeng Tang .create = mpol_new_nodemask, 40837012946SDavid Rientjes .rebind = mpol_rebind_nodemask, 40937012946SDavid Rientjes }, 4107858d7bcSFeng Tang [MPOL_LOCAL] = { 4117858d7bcSFeng Tang .rebind = mpol_rebind_default, 4127858d7bcSFeng Tang }, 413b27abaccSDave Hansen [MPOL_PREFERRED_MANY] = { 414be897d48SFeng Tang .create = mpol_new_nodemask, 415b27abaccSDave Hansen .rebind = mpol_rebind_preferred, 416b27abaccSDave Hansen }, 41737012946SDavid Rientjes }; 41837012946SDavid Rientjes 4194a64981dSVishal Moola (Oracle) static int migrate_folio_add(struct folio *folio, struct list_head *foliolist, 420fc301289SChristoph Lameter unsigned long flags); 4211a75a6c8SChristoph Lameter 4226f4576e3SNaoya Horiguchi struct queue_pages { 4236f4576e3SNaoya Horiguchi struct list_head *pagelist; 4246f4576e3SNaoya Horiguchi unsigned long flags; 4256f4576e3SNaoya Horiguchi nodemask_t *nmask; 426f18da660SLi Xinhai unsigned long start; 427f18da660SLi Xinhai unsigned long end; 428f18da660SLi Xinhai struct vm_area_struct *first; 4296f4576e3SNaoya Horiguchi }; 4306f4576e3SNaoya Horiguchi 43198094945SNaoya Horiguchi /* 432d451b89dSVishal Moola (Oracle) * Check if the folio's nid is in qp->nmask. 43388aaa2a1SNaoya Horiguchi * 43488aaa2a1SNaoya Horiguchi * If MPOL_MF_INVERT is set in qp->flags, check if the nid is 43588aaa2a1SNaoya Horiguchi * in the invert of qp->nmask. 43688aaa2a1SNaoya Horiguchi */ 437d451b89dSVishal Moola (Oracle) static inline bool queue_folio_required(struct folio *folio, 43888aaa2a1SNaoya Horiguchi struct queue_pages *qp) 43988aaa2a1SNaoya Horiguchi { 440d451b89dSVishal Moola (Oracle) int nid = folio_nid(folio); 44188aaa2a1SNaoya Horiguchi unsigned long flags = qp->flags; 44288aaa2a1SNaoya Horiguchi 44388aaa2a1SNaoya Horiguchi return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT); 44488aaa2a1SNaoya Horiguchi } 44588aaa2a1SNaoya Horiguchi 446a7f40cfeSYang Shi /* 447de1f5055SVishal Moola (Oracle) * queue_folios_pmd() has three possible return values: 448de1f5055SVishal Moola (Oracle) * 0 - folios are placed on the right node or queued successfully, or 449e5947d23SYang Shi * special page is met, i.e. huge zero page. 
450de1f5055SVishal Moola (Oracle) * 1 - there is unmovable folio, and MPOL_MF_MOVE* & MPOL_MF_STRICT were 451d8835445SYang Shi * specified. 452d8835445SYang Shi * -EIO - is migration entry or only MPOL_MF_STRICT was specified and an 453de1f5055SVishal Moola (Oracle) * existing folio was already on a node that does not follow the 454d8835445SYang Shi * policy. 455a7f40cfeSYang Shi */ 456de1f5055SVishal Moola (Oracle) static int queue_folios_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr, 457c8633798SNaoya Horiguchi unsigned long end, struct mm_walk *walk) 458959a7e13SJules Irenge __releases(ptl) 459c8633798SNaoya Horiguchi { 460c8633798SNaoya Horiguchi int ret = 0; 461de1f5055SVishal Moola (Oracle) struct folio *folio; 462c8633798SNaoya Horiguchi struct queue_pages *qp = walk->private; 463c8633798SNaoya Horiguchi unsigned long flags; 464c8633798SNaoya Horiguchi 465c8633798SNaoya Horiguchi if (unlikely(is_pmd_migration_entry(*pmd))) { 466a7f40cfeSYang Shi ret = -EIO; 467c8633798SNaoya Horiguchi goto unlock; 468c8633798SNaoya Horiguchi } 469de1f5055SVishal Moola (Oracle) folio = pfn_folio(pmd_pfn(*pmd)); 470de1f5055SVishal Moola (Oracle) if (is_huge_zero_page(&folio->page)) { 471e5947d23SYang Shi walk->action = ACTION_CONTINUE; 4726d97cf88SMiaohe Lin goto unlock; 473c8633798SNaoya Horiguchi } 474d451b89dSVishal Moola (Oracle) if (!queue_folio_required(folio, qp)) 475c8633798SNaoya Horiguchi goto unlock; 476c8633798SNaoya Horiguchi 477c8633798SNaoya Horiguchi flags = qp->flags; 478de1f5055SVishal Moola (Oracle) /* go to folio migration */ 479a7f40cfeSYang Shi if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 480a53190a4SYang Shi if (!vma_migratable(walk->vma) || 4814a64981dSVishal Moola (Oracle) migrate_folio_add(folio, qp->pagelist, flags)) { 482d8835445SYang Shi ret = 1; 483a7f40cfeSYang Shi goto unlock; 484a7f40cfeSYang Shi } 485a7f40cfeSYang Shi } else 486a7f40cfeSYang Shi ret = -EIO; 487c8633798SNaoya Horiguchi unlock: 488c8633798SNaoya Horiguchi spin_unlock(ptl); 489c8633798SNaoya Horiguchi return ret; 490c8633798SNaoya Horiguchi } 491c8633798SNaoya Horiguchi 49288aaa2a1SNaoya Horiguchi /* 49398094945SNaoya Horiguchi * Scan through pages checking if pages follow certain conditions, 49498094945SNaoya Horiguchi * and move them to the pagelist if they do. 495d8835445SYang Shi * 4963dae02bbSVishal Moola (Oracle) * queue_folios_pte_range() has three possible return values: 4973dae02bbSVishal Moola (Oracle) * 0 - folios are placed on the right node or queued successfully, or 498e5947d23SYang Shi * special page is met, i.e. zero page. 4993dae02bbSVishal Moola (Oracle) * 1 - there is unmovable folio, and MPOL_MF_MOVE* & MPOL_MF_STRICT were 500d8835445SYang Shi * specified. 5013dae02bbSVishal Moola (Oracle) * -EIO - only MPOL_MF_STRICT was specified and an existing folio was already 502d8835445SYang Shi * on a node that does not follow the policy. 
50398094945SNaoya Horiguchi */ 5043dae02bbSVishal Moola (Oracle) static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr, 5056f4576e3SNaoya Horiguchi unsigned long end, struct mm_walk *walk) 5061da177e4SLinus Torvalds { 5076f4576e3SNaoya Horiguchi struct vm_area_struct *vma = walk->vma; 5083dae02bbSVishal Moola (Oracle) struct folio *folio; 5096f4576e3SNaoya Horiguchi struct queue_pages *qp = walk->private; 5106f4576e3SNaoya Horiguchi unsigned long flags = qp->flags; 511d8835445SYang Shi bool has_unmovable = false; 5123f088420SShijie Luo pte_t *pte, *mapped_pte; 513c33c7948SRyan Roberts pte_t ptent; 514705e87c0SHugh Dickins spinlock_t *ptl; 515941150a3SHugh Dickins 516c8633798SNaoya Horiguchi ptl = pmd_trans_huge_lock(pmd, vma); 517bc78b5edSMiaohe Lin if (ptl) 518de1f5055SVishal Moola (Oracle) return queue_folios_pmd(pmd, ptl, addr, end, walk); 51991612e0dSHugh Dickins 5203f088420SShijie Luo mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); 5217780d040SHugh Dickins if (!pte) { 5227780d040SHugh Dickins walk->action = ACTION_AGAIN; 5237780d040SHugh Dickins return 0; 5247780d040SHugh Dickins } 5256f4576e3SNaoya Horiguchi for (; addr != end; pte++, addr += PAGE_SIZE) { 526c33c7948SRyan Roberts ptent = ptep_get(pte); 527c33c7948SRyan Roberts if (!pte_present(ptent)) 52891612e0dSHugh Dickins continue; 529c33c7948SRyan Roberts folio = vm_normal_folio(vma, addr, ptent); 5303dae02bbSVishal Moola (Oracle) if (!folio || folio_is_zone_device(folio)) 53191612e0dSHugh Dickins continue; 532053837fcSNick Piggin /* 5333dae02bbSVishal Moola (Oracle) * vm_normal_folio() filters out zero pages, but there might 5343dae02bbSVishal Moola (Oracle) * still be reserved folios to skip, perhaps in a VDSO. 535053837fcSNick Piggin */ 5363dae02bbSVishal Moola (Oracle) if (folio_test_reserved(folio)) 537f4598c8bSChristoph Lameter continue; 538d451b89dSVishal Moola (Oracle) if (!queue_folio_required(folio, qp)) 53938e35860SChristoph Lameter continue; 540a7f40cfeSYang Shi if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 541d8835445SYang Shi /* MPOL_MF_STRICT must be specified if we get here */ 542d8835445SYang Shi if (!vma_migratable(vma)) { 543d8835445SYang Shi has_unmovable = true; 544a7f40cfeSYang Shi break; 545d8835445SYang Shi } 546a53190a4SYang Shi 547a53190a4SYang Shi /* 548a53190a4SYang Shi * Do not abort immediately since there may be 549a53190a4SYang Shi * temporary off LRU pages in the range. Still 550a53190a4SYang Shi * need migrate other LRU pages. 551a53190a4SYang Shi */ 5524a64981dSVishal Moola (Oracle) if (migrate_folio_add(folio, qp->pagelist, flags)) 553a53190a4SYang Shi has_unmovable = true; 554a7f40cfeSYang Shi } else 555a7f40cfeSYang Shi break; 5566f4576e3SNaoya Horiguchi } 5573f088420SShijie Luo pte_unmap_unlock(mapped_pte, ptl); 5586f4576e3SNaoya Horiguchi cond_resched(); 559d8835445SYang Shi 560d8835445SYang Shi if (has_unmovable) 561d8835445SYang Shi return 1; 562d8835445SYang Shi 563a7f40cfeSYang Shi return addr != end ? 
-EIO : 0; 56491612e0dSHugh Dickins } 56591612e0dSHugh Dickins 5660a2c1e81SVishal Moola (Oracle) static int queue_folios_hugetlb(pte_t *pte, unsigned long hmask, 5676f4576e3SNaoya Horiguchi unsigned long addr, unsigned long end, 5686f4576e3SNaoya Horiguchi struct mm_walk *walk) 569e2d8cf40SNaoya Horiguchi { 570dcf17635SLi Xinhai int ret = 0; 571e2d8cf40SNaoya Horiguchi #ifdef CONFIG_HUGETLB_PAGE 5726f4576e3SNaoya Horiguchi struct queue_pages *qp = walk->private; 573dcf17635SLi Xinhai unsigned long flags = (qp->flags & MPOL_MF_VALID); 5740a2c1e81SVishal Moola (Oracle) struct folio *folio; 575cb900f41SKirill A. Shutemov spinlock_t *ptl; 576d4c54919SNaoya Horiguchi pte_t entry; 577e2d8cf40SNaoya Horiguchi 5786f4576e3SNaoya Horiguchi ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte); 5796f4576e3SNaoya Horiguchi entry = huge_ptep_get(pte); 580d4c54919SNaoya Horiguchi if (!pte_present(entry)) 581d4c54919SNaoya Horiguchi goto unlock; 5820a2c1e81SVishal Moola (Oracle) folio = pfn_folio(pte_pfn(entry)); 583d451b89dSVishal Moola (Oracle) if (!queue_folio_required(folio, qp)) 584e2d8cf40SNaoya Horiguchi goto unlock; 585dcf17635SLi Xinhai 586dcf17635SLi Xinhai if (flags == MPOL_MF_STRICT) { 587dcf17635SLi Xinhai /* 5880a2c1e81SVishal Moola (Oracle) * STRICT alone means only detecting misplaced folio and no 589dcf17635SLi Xinhai * need to further check other vma. 590dcf17635SLi Xinhai */ 591dcf17635SLi Xinhai ret = -EIO; 592dcf17635SLi Xinhai goto unlock; 593dcf17635SLi Xinhai } 594dcf17635SLi Xinhai 595dcf17635SLi Xinhai if (!vma_migratable(walk->vma)) { 596dcf17635SLi Xinhai /* 597dcf17635SLi Xinhai * Must be STRICT with MOVE*, otherwise .test_walk() have 598dcf17635SLi Xinhai * stopped walking current vma. 5990a2c1e81SVishal Moola (Oracle) * Detecting misplaced folio but allow migrating folios which 600dcf17635SLi Xinhai * have been queued. 601dcf17635SLi Xinhai */ 602dcf17635SLi Xinhai ret = 1; 603dcf17635SLi Xinhai goto unlock; 604dcf17635SLi Xinhai } 605dcf17635SLi Xinhai 6060a2c1e81SVishal Moola (Oracle) /* 6070a2c1e81SVishal Moola (Oracle) * With MPOL_MF_MOVE, we try to migrate only unshared folios. If it 6080a2c1e81SVishal Moola (Oracle) * is shared it is likely not worth migrating. 6090a2c1e81SVishal Moola (Oracle) * 6100a2c1e81SVishal Moola (Oracle) * To check if the folio is shared, ideally we want to make sure 6110a2c1e81SVishal Moola (Oracle) * every page is mapped to the same process. Doing that is very 6120a2c1e81SVishal Moola (Oracle) * expensive, so check the estimated mapcount of the folio instead. 6130a2c1e81SVishal Moola (Oracle) */ 614e2d8cf40SNaoya Horiguchi if (flags & (MPOL_MF_MOVE_ALL) || 6150a2c1e81SVishal Moola (Oracle) (flags & MPOL_MF_MOVE && folio_estimated_sharers(folio) == 1 && 61673bdf65eSMike Kravetz !hugetlb_pmd_shared(pte))) { 6179747b9e9SBaolin Wang if (!isolate_hugetlb(folio, qp->pagelist) && 618dcf17635SLi Xinhai (flags & MPOL_MF_STRICT)) 619dcf17635SLi Xinhai /* 6200a2c1e81SVishal Moola (Oracle) * Failed to isolate folio but allow migrating pages 621dcf17635SLi Xinhai * which have been queued. 622dcf17635SLi Xinhai */ 623dcf17635SLi Xinhai ret = 1; 624dcf17635SLi Xinhai } 625e2d8cf40SNaoya Horiguchi unlock: 626cb900f41SKirill A. 
Shutemov spin_unlock(ptl); 627e2d8cf40SNaoya Horiguchi #else 628e2d8cf40SNaoya Horiguchi BUG(); 629e2d8cf40SNaoya Horiguchi #endif 630dcf17635SLi Xinhai return ret; 6311da177e4SLinus Torvalds } 6321da177e4SLinus Torvalds 6335877231fSAneesh Kumar K.V #ifdef CONFIG_NUMA_BALANCING 634b24f53a0SLee Schermerhorn /* 6354b10e7d5SMel Gorman * This is used to mark a range of virtual addresses to be inaccessible. 6364b10e7d5SMel Gorman * These are later cleared by a NUMA hinting fault. Depending on these 6374b10e7d5SMel Gorman * faults, pages may be migrated for better NUMA placement. 6384b10e7d5SMel Gorman * 6394b10e7d5SMel Gorman * This is assuming that NUMA faults are handled using PROT_NONE. If 6404b10e7d5SMel Gorman * an architecture makes a different choice, it will need further 6414b10e7d5SMel Gorman * changes to the core. 642b24f53a0SLee Schermerhorn */ 6434b10e7d5SMel Gorman unsigned long change_prot_numa(struct vm_area_struct *vma, 6444b10e7d5SMel Gorman unsigned long addr, unsigned long end) 645b24f53a0SLee Schermerhorn { 6464a18419fSNadav Amit struct mmu_gather tlb; 647a79390f5SPeter Xu long nr_updated; 648b24f53a0SLee Schermerhorn 6494a18419fSNadav Amit tlb_gather_mmu(&tlb, vma->vm_mm); 6504a18419fSNadav Amit 6511ef488edSDavid Hildenbrand nr_updated = change_protection(&tlb, vma, addr, end, MM_CP_PROT_NUMA); 652d1751118SPeter Xu if (nr_updated > 0) 65303c5a6e1SMel Gorman count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated); 654b24f53a0SLee Schermerhorn 6554a18419fSNadav Amit tlb_finish_mmu(&tlb); 6564a18419fSNadav Amit 6574b10e7d5SMel Gorman return nr_updated; 658b24f53a0SLee Schermerhorn } 659b24f53a0SLee Schermerhorn #else 660b24f53a0SLee Schermerhorn static unsigned long change_prot_numa(struct vm_area_struct *vma, 661b24f53a0SLee Schermerhorn unsigned long addr, unsigned long end) 662b24f53a0SLee Schermerhorn { 663b24f53a0SLee Schermerhorn return 0; 664b24f53a0SLee Schermerhorn } 6655877231fSAneesh Kumar K.V #endif /* CONFIG_NUMA_BALANCING */ 666b24f53a0SLee Schermerhorn 6676f4576e3SNaoya Horiguchi static int queue_pages_test_walk(unsigned long start, unsigned long end, 6686f4576e3SNaoya Horiguchi struct mm_walk *walk) 6691da177e4SLinus Torvalds { 67066850be5SLiam R. Howlett struct vm_area_struct *next, *vma = walk->vma; 6716f4576e3SNaoya Horiguchi struct queue_pages *qp = walk->private; 6725b952b3cSAndi Kleen unsigned long endvma = vma->vm_end; 6736f4576e3SNaoya Horiguchi unsigned long flags = qp->flags; 674dc9aa5b9SChristoph Lameter 675a18b3ac2SLi Xinhai /* range check first */ 676ce33135cSMiaohe Lin VM_BUG_ON_VMA(!range_in_vma(vma, start, end), vma); 677f18da660SLi Xinhai 678f18da660SLi Xinhai if (!qp->first) { 679f18da660SLi Xinhai qp->first = vma; 680f18da660SLi Xinhai if (!(flags & MPOL_MF_DISCONTIG_OK) && 681f18da660SLi Xinhai (qp->start < vma->vm_start)) 682f18da660SLi Xinhai /* hole at head side of range */ 683a18b3ac2SLi Xinhai return -EFAULT; 684a18b3ac2SLi Xinhai } 68566850be5SLiam R. Howlett next = find_vma(vma->vm_mm, vma->vm_end); 686f18da660SLi Xinhai if (!(flags & MPOL_MF_DISCONTIG_OK) && 687f18da660SLi Xinhai ((vma->vm_end < qp->end) && 68866850be5SLiam R. 
Howlett (!next || vma->vm_end < next->vm_start))) 689f18da660SLi Xinhai /* hole at middle or tail of range */ 690f18da660SLi Xinhai return -EFAULT; 691a18b3ac2SLi Xinhai 692a7f40cfeSYang Shi /* 693a7f40cfeSYang Shi * Need check MPOL_MF_STRICT to return -EIO if possible 694a7f40cfeSYang Shi * regardless of vma_migratable 695a7f40cfeSYang Shi */ 696a7f40cfeSYang Shi if (!vma_migratable(vma) && 697a7f40cfeSYang Shi !(flags & MPOL_MF_STRICT)) 69848684a65SNaoya Horiguchi return 1; 69948684a65SNaoya Horiguchi 7005b952b3cSAndi Kleen if (endvma > end) 7015b952b3cSAndi Kleen endvma = end; 702b24f53a0SLee Schermerhorn 703b24f53a0SLee Schermerhorn if (flags & MPOL_MF_LAZY) { 7042c0346a3SMel Gorman /* Similar to task_numa_work, skip inaccessible VMAs */ 7053122e80eSAnshuman Khandual if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) && 7064355c018SLiang Chen !(vma->vm_flags & VM_MIXEDMAP)) 707b24f53a0SLee Schermerhorn change_prot_numa(vma, start, endvma); 7086f4576e3SNaoya Horiguchi return 1; 709b24f53a0SLee Schermerhorn } 710b24f53a0SLee Schermerhorn 7116f4576e3SNaoya Horiguchi /* queue pages from current vma */ 712a7f40cfeSYang Shi if (flags & MPOL_MF_VALID) 7136f4576e3SNaoya Horiguchi return 0; 7146f4576e3SNaoya Horiguchi return 1; 7156f4576e3SNaoya Horiguchi } 716b24f53a0SLee Schermerhorn 7177b86ac33SChristoph Hellwig static const struct mm_walk_ops queue_pages_walk_ops = { 7180a2c1e81SVishal Moola (Oracle) .hugetlb_entry = queue_folios_hugetlb, 7193dae02bbSVishal Moola (Oracle) .pmd_entry = queue_folios_pte_range, 7207b86ac33SChristoph Hellwig .test_walk = queue_pages_test_walk, 721*49b06385SSuren Baghdasaryan .walk_lock = PGWALK_RDLOCK, 722*49b06385SSuren Baghdasaryan }; 723*49b06385SSuren Baghdasaryan 724*49b06385SSuren Baghdasaryan static const struct mm_walk_ops queue_pages_lock_vma_walk_ops = { 725*49b06385SSuren Baghdasaryan .hugetlb_entry = queue_folios_hugetlb, 726*49b06385SSuren Baghdasaryan .pmd_entry = queue_folios_pte_range, 727*49b06385SSuren Baghdasaryan .test_walk = queue_pages_test_walk, 728*49b06385SSuren Baghdasaryan .walk_lock = PGWALK_WRLOCK, 7297b86ac33SChristoph Hellwig }; 7307b86ac33SChristoph Hellwig 7316f4576e3SNaoya Horiguchi /* 7326f4576e3SNaoya Horiguchi * Walk through page tables and collect pages to be migrated. 7336f4576e3SNaoya Horiguchi * 7346f4576e3SNaoya Horiguchi * If pages found in a given range are on a set of nodes (determined by 7356f4576e3SNaoya Horiguchi * @nodes and @flags,) it's isolated and queued to the pagelist which is 736d8835445SYang Shi * passed via @private. 737d8835445SYang Shi * 738d8835445SYang Shi * queue_pages_range() has three possible return values: 739d8835445SYang Shi * 1 - there is unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were 740d8835445SYang Shi * specified. 741d8835445SYang Shi * 0 - queue pages successfully or no misplaced page. 742a85dfc30SYang Shi * errno - i.e. 
misplaced pages with MPOL_MF_STRICT specified (-EIO) or 743a85dfc30SYang Shi * memory range specified by nodemask and maxnode points outside 744a85dfc30SYang Shi * your accessible address space (-EFAULT) 7456f4576e3SNaoya Horiguchi */ 7466f4576e3SNaoya Horiguchi static int 7476f4576e3SNaoya Horiguchi queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end, 7486f4576e3SNaoya Horiguchi nodemask_t *nodes, unsigned long flags, 749*49b06385SSuren Baghdasaryan struct list_head *pagelist, bool lock_vma) 7506f4576e3SNaoya Horiguchi { 751f18da660SLi Xinhai int err; 7526f4576e3SNaoya Horiguchi struct queue_pages qp = { 7536f4576e3SNaoya Horiguchi .pagelist = pagelist, 7546f4576e3SNaoya Horiguchi .flags = flags, 7556f4576e3SNaoya Horiguchi .nmask = nodes, 756f18da660SLi Xinhai .start = start, 757f18da660SLi Xinhai .end = end, 758f18da660SLi Xinhai .first = NULL, 7596f4576e3SNaoya Horiguchi }; 760*49b06385SSuren Baghdasaryan const struct mm_walk_ops *ops = lock_vma ? 761*49b06385SSuren Baghdasaryan &queue_pages_lock_vma_walk_ops : &queue_pages_walk_ops; 7626f4576e3SNaoya Horiguchi 763*49b06385SSuren Baghdasaryan err = walk_page_range(mm, start, end, ops, &qp); 764f18da660SLi Xinhai 765f18da660SLi Xinhai if (!qp.first) 766f18da660SLi Xinhai /* whole range in hole */ 767f18da660SLi Xinhai err = -EFAULT; 768f18da660SLi Xinhai 769f18da660SLi Xinhai return err; 7701da177e4SLinus Torvalds } 7711da177e4SLinus Torvalds 772869833f2SKOSAKI Motohiro /* 773869833f2SKOSAKI Motohiro * Apply policy to a single VMA 774c1e8d7c6SMichel Lespinasse * This must be called with the mmap_lock held for writing. 775869833f2SKOSAKI Motohiro */ 776869833f2SKOSAKI Motohiro static int vma_replace_policy(struct vm_area_struct *vma, 777869833f2SKOSAKI Motohiro struct mempolicy *pol) 7788d34694cSKOSAKI Motohiro { 779869833f2SKOSAKI Motohiro int err; 780869833f2SKOSAKI Motohiro struct mempolicy *old; 781869833f2SKOSAKI Motohiro struct mempolicy *new; 7828d34694cSKOSAKI Motohiro 7836c21e066SJann Horn vma_assert_write_locked(vma); 7846c21e066SJann Horn 7858d34694cSKOSAKI Motohiro pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n", 7868d34694cSKOSAKI Motohiro vma->vm_start, vma->vm_end, vma->vm_pgoff, 7878d34694cSKOSAKI Motohiro vma->vm_ops, vma->vm_file, 7888d34694cSKOSAKI Motohiro vma->vm_ops ? vma->vm_ops->set_policy : NULL); 7898d34694cSKOSAKI Motohiro 790869833f2SKOSAKI Motohiro new = mpol_dup(pol); 791869833f2SKOSAKI Motohiro if (IS_ERR(new)) 792869833f2SKOSAKI Motohiro return PTR_ERR(new); 793869833f2SKOSAKI Motohiro 794869833f2SKOSAKI Motohiro if (vma->vm_ops && vma->vm_ops->set_policy) { 7958d34694cSKOSAKI Motohiro err = vma->vm_ops->set_policy(vma, new); 796869833f2SKOSAKI Motohiro if (err) 797869833f2SKOSAKI Motohiro goto err_out; 7988d34694cSKOSAKI Motohiro } 799869833f2SKOSAKI Motohiro 800869833f2SKOSAKI Motohiro old = vma->vm_policy; 801c1e8d7c6SMichel Lespinasse vma->vm_policy = new; /* protected by mmap_lock */ 802869833f2SKOSAKI Motohiro mpol_put(old); 803869833f2SKOSAKI Motohiro 804869833f2SKOSAKI Motohiro return 0; 805869833f2SKOSAKI Motohiro err_out: 806869833f2SKOSAKI Motohiro mpol_put(new); 8078d34694cSKOSAKI Motohiro return err; 8088d34694cSKOSAKI Motohiro } 8098d34694cSKOSAKI Motohiro 810f4e9e0e6SLiam R. Howlett /* Split or merge the VMA (if required) and apply the new policy */ 811f4e9e0e6SLiam R. Howlett static int mbind_range(struct vma_iterator *vmi, struct vm_area_struct *vma, 812f4e9e0e6SLiam R. 
Howlett struct vm_area_struct **prev, unsigned long start, 8139d8cebd4SKOSAKI Motohiro unsigned long end, struct mempolicy *new_pol) 8141da177e4SLinus Torvalds { 815f4e9e0e6SLiam R. Howlett struct vm_area_struct *merged; 816f4e9e0e6SLiam R. Howlett unsigned long vmstart, vmend; 817e26a5114SKOSAKI Motohiro pgoff_t pgoff; 818f4e9e0e6SLiam R. Howlett int err; 8191da177e4SLinus Torvalds 820f4e9e0e6SLiam R. Howlett vmend = min(end, vma->vm_end); 821f4e9e0e6SLiam R. Howlett if (start > vma->vm_start) { 822f4e9e0e6SLiam R. Howlett *prev = vma; 823f4e9e0e6SLiam R. Howlett vmstart = start; 824f4e9e0e6SLiam R. Howlett } else { 825f4e9e0e6SLiam R. Howlett vmstart = vma->vm_start; 826f4e9e0e6SLiam R. Howlett } 8279d8cebd4SKOSAKI Motohiro 82800ca0f2eSLorenzo Stoakes if (mpol_equal(vma_policy(vma), new_pol)) { 82900ca0f2eSLorenzo Stoakes *prev = vma; 830f4e9e0e6SLiam R. Howlett return 0; 83100ca0f2eSLorenzo Stoakes } 832e26a5114SKOSAKI Motohiro 833f4e9e0e6SLiam R. Howlett pgoff = vma->vm_pgoff + ((vmstart - vma->vm_start) >> PAGE_SHIFT); 834f4e9e0e6SLiam R. Howlett merged = vma_merge(vmi, vma->vm_mm, *prev, vmstart, vmend, vma->vm_flags, 835f4e9e0e6SLiam R. Howlett vma->anon_vma, vma->vm_file, pgoff, new_pol, 836f4e9e0e6SLiam R. Howlett vma->vm_userfaultfd_ctx, anon_vma_name(vma)); 837f4e9e0e6SLiam R. Howlett if (merged) { 838f4e9e0e6SLiam R. Howlett *prev = merged; 839f4e9e0e6SLiam R. Howlett return vma_replace_policy(merged, new_pol); 8401da177e4SLinus Torvalds } 841f4e9e0e6SLiam R. Howlett 8429d8cebd4SKOSAKI Motohiro if (vma->vm_start != vmstart) { 843f4e9e0e6SLiam R. Howlett err = split_vma(vmi, vma, vmstart, 1); 8449d8cebd4SKOSAKI Motohiro if (err) 8451da177e4SLinus Torvalds return err; 8461da177e4SLinus Torvalds } 8471da177e4SLinus Torvalds 848f4e9e0e6SLiam R. Howlett if (vma->vm_end != vmend) { 849f4e9e0e6SLiam R. Howlett err = split_vma(vmi, vma, vmend, 0); 850f4e9e0e6SLiam R. Howlett if (err) 851f4e9e0e6SLiam R. Howlett return err; 852f4e9e0e6SLiam R. Howlett } 853f4e9e0e6SLiam R. Howlett 854f4e9e0e6SLiam R. Howlett *prev = vma; 855f4e9e0e6SLiam R. Howlett return vma_replace_policy(vma, new_pol); 856f4e9e0e6SLiam R. Howlett } 857f4e9e0e6SLiam R. 
Howlett 8581da177e4SLinus Torvalds /* Set the process memory policy */ 859028fec41SDavid Rientjes static long do_set_mempolicy(unsigned short mode, unsigned short flags, 860028fec41SDavid Rientjes nodemask_t *nodes) 8611da177e4SLinus Torvalds { 86258568d2aSMiao Xie struct mempolicy *new, *old; 8634bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 86458568d2aSMiao Xie int ret; 8651da177e4SLinus Torvalds 8664bfc4495SKAMEZAWA Hiroyuki if (!scratch) 8674bfc4495SKAMEZAWA Hiroyuki return -ENOMEM; 868f4e53d91SLee Schermerhorn 8694bfc4495SKAMEZAWA Hiroyuki new = mpol_new(mode, flags, nodes); 8704bfc4495SKAMEZAWA Hiroyuki if (IS_ERR(new)) { 8714bfc4495SKAMEZAWA Hiroyuki ret = PTR_ERR(new); 8724bfc4495SKAMEZAWA Hiroyuki goto out; 8734bfc4495SKAMEZAWA Hiroyuki } 8742c7c3a7dSOleg Nesterov 87512c1dc8eSAbel Wu task_lock(current); 8764bfc4495SKAMEZAWA Hiroyuki ret = mpol_set_nodemask(new, nodes, scratch); 87758568d2aSMiao Xie if (ret) { 87812c1dc8eSAbel Wu task_unlock(current); 87958568d2aSMiao Xie mpol_put(new); 8804bfc4495SKAMEZAWA Hiroyuki goto out; 88158568d2aSMiao Xie } 88212c1dc8eSAbel Wu 88358568d2aSMiao Xie old = current->mempolicy; 8841da177e4SLinus Torvalds current->mempolicy = new; 88545816682SVlastimil Babka if (new && new->mode == MPOL_INTERLEAVE) 88645816682SVlastimil Babka current->il_prev = MAX_NUMNODES-1; 88758568d2aSMiao Xie task_unlock(current); 88858568d2aSMiao Xie mpol_put(old); 8894bfc4495SKAMEZAWA Hiroyuki ret = 0; 8904bfc4495SKAMEZAWA Hiroyuki out: 8914bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 8924bfc4495SKAMEZAWA Hiroyuki return ret; 8931da177e4SLinus Torvalds } 8941da177e4SLinus Torvalds 895bea904d5SLee Schermerhorn /* 896bea904d5SLee Schermerhorn * Return nodemask for policy for get_mempolicy() query 89758568d2aSMiao Xie * 89858568d2aSMiao Xie * Called with task's alloc_lock held 899bea904d5SLee Schermerhorn */ 900bea904d5SLee Schermerhorn static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes) 9011da177e4SLinus Torvalds { 902dfcd3c0dSAndi Kleen nodes_clear(*nodes); 903bea904d5SLee Schermerhorn if (p == &default_policy) 904bea904d5SLee Schermerhorn return; 905bea904d5SLee Schermerhorn 90645c4745aSLee Schermerhorn switch (p->mode) { 90719770b32SMel Gorman case MPOL_BIND: 9081da177e4SLinus Torvalds case MPOL_INTERLEAVE: 909269fbe72SBen Widawsky case MPOL_PREFERRED: 910b27abaccSDave Hansen case MPOL_PREFERRED_MANY: 911269fbe72SBen Widawsky *nodes = p->nodes; 9121da177e4SLinus Torvalds break; 9137858d7bcSFeng Tang case MPOL_LOCAL: 9147858d7bcSFeng Tang /* return empty node mask for local allocation */ 9157858d7bcSFeng Tang break; 9161da177e4SLinus Torvalds default: 9171da177e4SLinus Torvalds BUG(); 9181da177e4SLinus Torvalds } 9191da177e4SLinus Torvalds } 9201da177e4SLinus Torvalds 9213b9aadf7SAndrea Arcangeli static int lookup_node(struct mm_struct *mm, unsigned long addr) 9221da177e4SLinus Torvalds { 923ba841078SPeter Xu struct page *p = NULL; 924f728b9c4SJohn Hubbard int ret; 9251da177e4SLinus Torvalds 926f728b9c4SJohn Hubbard ret = get_user_pages_fast(addr & PAGE_MASK, 1, 0, &p); 927f728b9c4SJohn Hubbard if (ret > 0) { 928f728b9c4SJohn Hubbard ret = page_to_nid(p); 9291da177e4SLinus Torvalds put_page(p); 9301da177e4SLinus Torvalds } 931f728b9c4SJohn Hubbard return ret; 9321da177e4SLinus Torvalds } 9331da177e4SLinus Torvalds 9341da177e4SLinus Torvalds /* Retrieve NUMA policy */ 935dbcb0f19SAdrian Bunk static long do_get_mempolicy(int *policy, nodemask_t *nmask, 9361da177e4SLinus Torvalds unsigned long addr, unsigned long flags) 
9371da177e4SLinus Torvalds { 9388bccd85fSChristoph Lameter int err; 9391da177e4SLinus Torvalds struct mm_struct *mm = current->mm; 9401da177e4SLinus Torvalds struct vm_area_struct *vma = NULL; 9413b9aadf7SAndrea Arcangeli struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL; 9421da177e4SLinus Torvalds 943754af6f5SLee Schermerhorn if (flags & 944754af6f5SLee Schermerhorn ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED)) 9451da177e4SLinus Torvalds return -EINVAL; 946754af6f5SLee Schermerhorn 947754af6f5SLee Schermerhorn if (flags & MPOL_F_MEMS_ALLOWED) { 948754af6f5SLee Schermerhorn if (flags & (MPOL_F_NODE|MPOL_F_ADDR)) 949754af6f5SLee Schermerhorn return -EINVAL; 950754af6f5SLee Schermerhorn *policy = 0; /* just so it's initialized */ 95158568d2aSMiao Xie task_lock(current); 952754af6f5SLee Schermerhorn *nmask = cpuset_current_mems_allowed; 95358568d2aSMiao Xie task_unlock(current); 954754af6f5SLee Schermerhorn return 0; 955754af6f5SLee Schermerhorn } 956754af6f5SLee Schermerhorn 9571da177e4SLinus Torvalds if (flags & MPOL_F_ADDR) { 958bea904d5SLee Schermerhorn /* 959bea904d5SLee Schermerhorn * Do NOT fall back to task policy if the 960bea904d5SLee Schermerhorn * vma/shared policy at addr is NULL. We 961bea904d5SLee Schermerhorn * want to return MPOL_DEFAULT in this case. 962bea904d5SLee Schermerhorn */ 963d8ed45c5SMichel Lespinasse mmap_read_lock(mm); 96433e3575cSLiam Howlett vma = vma_lookup(mm, addr); 9651da177e4SLinus Torvalds if (!vma) { 966d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); 9671da177e4SLinus Torvalds return -EFAULT; 9681da177e4SLinus Torvalds } 9691da177e4SLinus Torvalds if (vma->vm_ops && vma->vm_ops->get_policy) 9701da177e4SLinus Torvalds pol = vma->vm_ops->get_policy(vma, addr); 9711da177e4SLinus Torvalds else 9721da177e4SLinus Torvalds pol = vma->vm_policy; 9731da177e4SLinus Torvalds } else if (addr) 9741da177e4SLinus Torvalds return -EINVAL; 9751da177e4SLinus Torvalds 9761da177e4SLinus Torvalds if (!pol) 977bea904d5SLee Schermerhorn pol = &default_policy; /* indicates default behavior */ 9781da177e4SLinus Torvalds 9791da177e4SLinus Torvalds if (flags & MPOL_F_NODE) { 9801da177e4SLinus Torvalds if (flags & MPOL_F_ADDR) { 9813b9aadf7SAndrea Arcangeli /* 982f728b9c4SJohn Hubbard * Take a refcount on the mpol, because we are about to 983f728b9c4SJohn Hubbard * drop the mmap_lock, after which only "pol" remains 984f728b9c4SJohn Hubbard * valid, "vma" is stale. 9853b9aadf7SAndrea Arcangeli */ 9863b9aadf7SAndrea Arcangeli pol_refcount = pol; 9873b9aadf7SAndrea Arcangeli vma = NULL; 9883b9aadf7SAndrea Arcangeli mpol_get(pol); 989f728b9c4SJohn Hubbard mmap_read_unlock(mm); 9903b9aadf7SAndrea Arcangeli err = lookup_node(mm, addr); 9911da177e4SLinus Torvalds if (err < 0) 9921da177e4SLinus Torvalds goto out; 9938bccd85fSChristoph Lameter *policy = err; 9941da177e4SLinus Torvalds } else if (pol == current->mempolicy && 99545c4745aSLee Schermerhorn pol->mode == MPOL_INTERLEAVE) { 996269fbe72SBen Widawsky *policy = next_node_in(current->il_prev, pol->nodes); 9971da177e4SLinus Torvalds } else { 9981da177e4SLinus Torvalds err = -EINVAL; 9991da177e4SLinus Torvalds goto out; 10001da177e4SLinus Torvalds } 1001bea904d5SLee Schermerhorn } else { 1002bea904d5SLee Schermerhorn *policy = pol == &default_policy ? MPOL_DEFAULT : 1003bea904d5SLee Schermerhorn pol->mode; 1004d79df630SDavid Rientjes /* 1005d79df630SDavid Rientjes * Internal mempolicy flags must be masked off before exposing 1006d79df630SDavid Rientjes * the policy to userspace. 
1007d79df630SDavid Rientjes */ 1008d79df630SDavid Rientjes *policy |= (pol->flags & MPOL_MODE_FLAGS); 1009bea904d5SLee Schermerhorn } 10101da177e4SLinus Torvalds 10111da177e4SLinus Torvalds err = 0; 101258568d2aSMiao Xie if (nmask) { 1013c6b6ef8bSLee Schermerhorn if (mpol_store_user_nodemask(pol)) { 1014c6b6ef8bSLee Schermerhorn *nmask = pol->w.user_nodemask; 1015c6b6ef8bSLee Schermerhorn } else { 101658568d2aSMiao Xie task_lock(current); 1017bea904d5SLee Schermerhorn get_policy_nodemask(pol, nmask); 101858568d2aSMiao Xie task_unlock(current); 101958568d2aSMiao Xie } 1020c6b6ef8bSLee Schermerhorn } 10211da177e4SLinus Torvalds 10221da177e4SLinus Torvalds out: 102352cd3b07SLee Schermerhorn mpol_cond_put(pol); 10241da177e4SLinus Torvalds if (vma) 1025d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); 10263b9aadf7SAndrea Arcangeli if (pol_refcount) 10273b9aadf7SAndrea Arcangeli mpol_put(pol_refcount); 10281da177e4SLinus Torvalds return err; 10291da177e4SLinus Torvalds } 10301da177e4SLinus Torvalds 1031b20a3503SChristoph Lameter #ifdef CONFIG_MIGRATION 10324a64981dSVishal Moola (Oracle) static int migrate_folio_add(struct folio *folio, struct list_head *foliolist, 1033fc301289SChristoph Lameter unsigned long flags) 10346ce3c4c0SChristoph Lameter { 10356ce3c4c0SChristoph Lameter /* 10364a64981dSVishal Moola (Oracle) * We try to migrate only unshared folios. If it is shared it 10374a64981dSVishal Moola (Oracle) * is likely not worth migrating. 10384a64981dSVishal Moola (Oracle) * 10394a64981dSVishal Moola (Oracle) * To check if the folio is shared, ideally we want to make sure 10404a64981dSVishal Moola (Oracle) * every page is mapped to the same process. Doing that is very 10414a64981dSVishal Moola (Oracle) * expensive, so check the estimated mapcount of the folio instead. 10426ce3c4c0SChristoph Lameter */ 10434a64981dSVishal Moola (Oracle) if ((flags & MPOL_MF_MOVE_ALL) || folio_estimated_sharers(folio) == 1) { 1044be2d5756SBaolin Wang if (folio_isolate_lru(folio)) { 10454a64981dSVishal Moola (Oracle) list_add_tail(&folio->lru, foliolist); 10464a64981dSVishal Moola (Oracle) node_stat_mod_folio(folio, 10474a64981dSVishal Moola (Oracle) NR_ISOLATED_ANON + folio_is_file_lru(folio), 10484a64981dSVishal Moola (Oracle) folio_nr_pages(folio)); 1049a53190a4SYang Shi } else if (flags & MPOL_MF_STRICT) { 1050a53190a4SYang Shi /* 10514a64981dSVishal Moola (Oracle) * Non-movable folio may reach here. And, there may be 10524a64981dSVishal Moola (Oracle) * temporary off LRU folios or non-LRU movable folios. 10534a64981dSVishal Moola (Oracle) * Treat them as unmovable folios since they can't be 1054a53190a4SYang Shi * isolated, so they can't be moved at the moment. It 1055a53190a4SYang Shi * should return -EIO for this case too. 1056a53190a4SYang Shi */ 1057a53190a4SYang Shi return -EIO; 105862695a84SNick Piggin } 105962695a84SNick Piggin } 1060a53190a4SYang Shi 1061a53190a4SYang Shi return 0; 10626ce3c4c0SChristoph Lameter } 10636ce3c4c0SChristoph Lameter 10646ce3c4c0SChristoph Lameter /* 10657e2ab150SChristoph Lameter * Migrate pages from one node to a target node. 10667e2ab150SChristoph Lameter * Returns error or the number of pages not migrated. 10677e2ab150SChristoph Lameter */ 1068dbcb0f19SAdrian Bunk static int migrate_to_node(struct mm_struct *mm, int source, int dest, 1069dbcb0f19SAdrian Bunk int flags) 10707e2ab150SChristoph Lameter { 10717e2ab150SChristoph Lameter nodemask_t nmask; 107266850be5SLiam R. 
Howlett struct vm_area_struct *vma; 10737e2ab150SChristoph Lameter LIST_HEAD(pagelist); 10747e2ab150SChristoph Lameter int err = 0; 1075a0976311SJoonsoo Kim struct migration_target_control mtc = { 1076a0976311SJoonsoo Kim .nid = dest, 1077a0976311SJoonsoo Kim .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 1078a0976311SJoonsoo Kim }; 10797e2ab150SChristoph Lameter 10807e2ab150SChristoph Lameter nodes_clear(nmask); 10817e2ab150SChristoph Lameter node_set(source, nmask); 10827e2ab150SChristoph Lameter 108308270807SMinchan Kim /* 108408270807SMinchan Kim * This does not "check" the range but isolates all pages that 108508270807SMinchan Kim * need migration. Between passing in the full user address 108608270807SMinchan Kim * space range and MPOL_MF_DISCONTIG_OK, this call can not fail. 108708270807SMinchan Kim */ 108866850be5SLiam R. Howlett vma = find_vma(mm, 0); 108908270807SMinchan Kim VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))); 109066850be5SLiam R. Howlett queue_pages_range(mm, vma->vm_start, mm->task_size, &nmask, 1091*49b06385SSuren Baghdasaryan flags | MPOL_MF_DISCONTIG_OK, &pagelist, false); 10927e2ab150SChristoph Lameter 1093cf608ac1SMinchan Kim if (!list_empty(&pagelist)) { 1094a0976311SJoonsoo Kim err = migrate_pages(&pagelist, alloc_migration_target, NULL, 10955ac95884SYang Shi (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL); 1096cf608ac1SMinchan Kim if (err) 1097e2d8cf40SNaoya Horiguchi putback_movable_pages(&pagelist); 1098cf608ac1SMinchan Kim } 109995a402c3SChristoph Lameter 11007e2ab150SChristoph Lameter return err; 11017e2ab150SChristoph Lameter } 11027e2ab150SChristoph Lameter 11037e2ab150SChristoph Lameter /* 11047e2ab150SChristoph Lameter * Move pages between the two nodesets so as to preserve the physical 11057e2ab150SChristoph Lameter * layout as much as possible. 110639743889SChristoph Lameter * 110739743889SChristoph Lameter * Returns the number of page that could not be moved. 110839743889SChristoph Lameter */ 11090ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, 11100ce72d4fSAndrew Morton const nodemask_t *to, int flags) 111139743889SChristoph Lameter { 11127e2ab150SChristoph Lameter int busy = 0; 1113f555befdSJan Stancek int err = 0; 11147e2ab150SChristoph Lameter nodemask_t tmp; 111539743889SChristoph Lameter 1116361a2a22SMinchan Kim lru_cache_disable(); 11170aedadf9SChristoph Lameter 1118d8ed45c5SMichel Lespinasse mmap_read_lock(mm); 1119d4984711SChristoph Lameter 11207e2ab150SChristoph Lameter /* 11217e2ab150SChristoph Lameter * Find a 'source' bit set in 'tmp' whose corresponding 'dest' 11227e2ab150SChristoph Lameter * bit in 'to' is not also set in 'tmp'. Clear the found 'source' 11237e2ab150SChristoph Lameter * bit in 'tmp', and return that <source, dest> pair for migration. 11247e2ab150SChristoph Lameter * The pair of nodemasks 'to' and 'from' define the map. 11257e2ab150SChristoph Lameter * 11267e2ab150SChristoph Lameter * If no pair of bits is found that way, fallback to picking some 11277e2ab150SChristoph Lameter * pair of 'source' and 'dest' bits that are not the same. If the 11287e2ab150SChristoph Lameter * 'source' and 'dest' bits are the same, this represents a node 11297e2ab150SChristoph Lameter * that will be migrating to itself, so no pages need move. 
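 *
 * Example: from = {0,1} and to = {2,3}. node_remap() maps source 0 to
 * dest 2 and source 1 to dest 3; both pairs have s != d and each dest is
 * not yet set in tmp, so the scan breaks out first with <0, 2> and, on
 * the next pass, with <1, 3>.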
11307e2ab150SChristoph Lameter * 11317e2ab150SChristoph Lameter * If no bits are left in 'tmp', or if all remaining bits left 11327e2ab150SChristoph Lameter * in 'tmp' correspond to the same bit in 'to', return false 11337e2ab150SChristoph Lameter * (nothing left to migrate). 11347e2ab150SChristoph Lameter * 11357e2ab150SChristoph Lameter * This lets us pick a pair of nodes to migrate between, such that 11367e2ab150SChristoph Lameter * if possible the dest node is not already occupied by some other 11377e2ab150SChristoph Lameter * source node, minimizing the risk of overloading the memory on a 11387e2ab150SChristoph Lameter * node that would happen if we migrated incoming memory to a node 11397e2ab150SChristoph Lameter * before migrating outgoing memory source that same node. 11407e2ab150SChristoph Lameter * 11417e2ab150SChristoph Lameter * A single scan of tmp is sufficient. As we go, we remember the 11427e2ab150SChristoph Lameter * most recent <s, d> pair that moved (s != d). If we find a pair 11437e2ab150SChristoph Lameter * that not only moved, but what's better, moved to an empty slot 11447e2ab150SChristoph Lameter * (d is not set in tmp), then we break out then, with that pair. 1145ae0e47f0SJustin P. Mattock * Otherwise when we finish scanning from_tmp, we at least have the 11467e2ab150SChristoph Lameter * most recent <s, d> pair that moved. If we get all the way through 11477e2ab150SChristoph Lameter * the scan of tmp without finding any node that moved, much less 11487e2ab150SChristoph Lameter * moved to an empty node, then there is nothing left worth migrating. 11497e2ab150SChristoph Lameter */ 11507e2ab150SChristoph Lameter 11510ce72d4fSAndrew Morton tmp = *from; 11527e2ab150SChristoph Lameter while (!nodes_empty(tmp)) { 11537e2ab150SChristoph Lameter int s, d; 1154b76ac7e7SJianguo Wu int source = NUMA_NO_NODE; 11557e2ab150SChristoph Lameter int dest = 0; 11567e2ab150SChristoph Lameter 11577e2ab150SChristoph Lameter for_each_node_mask(s, tmp) { 11584a5b18ccSLarry Woodman 11594a5b18ccSLarry Woodman /* 11604a5b18ccSLarry Woodman * do_migrate_pages() tries to maintain the relative 11614a5b18ccSLarry Woodman * node relationship of the pages established between 11624a5b18ccSLarry Woodman * threads and memory areas. 11634a5b18ccSLarry Woodman * 11644a5b18ccSLarry Woodman * However if the number of source nodes is not equal to 11654a5b18ccSLarry Woodman * the number of destination nodes we can not preserve 11664a5b18ccSLarry Woodman * this node relative relationship. In that case, skip 11674a5b18ccSLarry Woodman * copying memory from a node that is in the destination 11684a5b18ccSLarry Woodman * mask. 11694a5b18ccSLarry Woodman * 11704a5b18ccSLarry Woodman * Example: [2,3,4] -> [3,4,5] moves everything. 11714a5b18ccSLarry Woodman * [0-7] - > [3,4,5] moves only 0,1,2,6,7. 11724a5b18ccSLarry Woodman */ 11734a5b18ccSLarry Woodman 11740ce72d4fSAndrew Morton if ((nodes_weight(*from) != nodes_weight(*to)) && 11750ce72d4fSAndrew Morton (node_isset(s, *to))) 11764a5b18ccSLarry Woodman continue; 11774a5b18ccSLarry Woodman 11780ce72d4fSAndrew Morton d = node_remap(s, *from, *to); 11797e2ab150SChristoph Lameter if (s == d) 11807e2ab150SChristoph Lameter continue; 11817e2ab150SChristoph Lameter 11827e2ab150SChristoph Lameter source = s; /* Node moved. Memorize */ 11837e2ab150SChristoph Lameter dest = d; 11847e2ab150SChristoph Lameter 11857e2ab150SChristoph Lameter /* dest not in remaining from nodes? 
*/ 11867e2ab150SChristoph Lameter if (!node_isset(dest, tmp)) 11877e2ab150SChristoph Lameter break; 11887e2ab150SChristoph Lameter } 1189b76ac7e7SJianguo Wu if (source == NUMA_NO_NODE) 11907e2ab150SChristoph Lameter break; 11917e2ab150SChristoph Lameter 11927e2ab150SChristoph Lameter node_clear(source, tmp); 11937e2ab150SChristoph Lameter err = migrate_to_node(mm, source, dest, flags); 11947e2ab150SChristoph Lameter if (err > 0) 11957e2ab150SChristoph Lameter busy += err; 11967e2ab150SChristoph Lameter if (err < 0) 11977e2ab150SChristoph Lameter break; 119839743889SChristoph Lameter } 1199d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); 1200d479960eSMinchan Kim 1201361a2a22SMinchan Kim lru_cache_enable(); 12027e2ab150SChristoph Lameter if (err < 0) 12037e2ab150SChristoph Lameter return err; 12047e2ab150SChristoph Lameter return busy; 1205b20a3503SChristoph Lameter 120639743889SChristoph Lameter } 120739743889SChristoph Lameter 12083ad33b24SLee Schermerhorn /* 12093ad33b24SLee Schermerhorn * Allocate a new page for page migration based on vma policy. 1210d05f0cdcSHugh Dickins * Start by assuming the page is mapped by the same vma as contains @start. 12113ad33b24SLee Schermerhorn * Search forward from there, if not. N.B., this assumes that the 12123ad33b24SLee Schermerhorn * list of pages handed to migrate_pages()--which is how we get here-- 12133ad33b24SLee Schermerhorn * is in virtual address order. 12143ad33b24SLee Schermerhorn */ 12154e096ae1SMatthew Wilcox (Oracle) static struct folio *new_folio(struct folio *src, unsigned long start) 121695a402c3SChristoph Lameter { 1217d05f0cdcSHugh Dickins struct vm_area_struct *vma; 12183f649ab7SKees Cook unsigned long address; 121966850be5SLiam R. Howlett VMA_ITERATOR(vmi, current->mm, start); 1220ec4858e0SMatthew Wilcox (Oracle) gfp_t gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL; 122195a402c3SChristoph Lameter 122266850be5SLiam R. 
Howlett for_each_vma(vmi, vma) { 12234e096ae1SMatthew Wilcox (Oracle) address = page_address_in_vma(&src->page, vma); 12243ad33b24SLee Schermerhorn if (address != -EFAULT) 12253ad33b24SLee Schermerhorn break; 12263ad33b24SLee Schermerhorn } 12273ad33b24SLee Schermerhorn 1228d0ce0e47SSidhartha Kumar if (folio_test_hugetlb(src)) { 12294e096ae1SMatthew Wilcox (Oracle) return alloc_hugetlb_folio_vma(folio_hstate(src), 1230389c8178SMichal Hocko vma, address); 1231d0ce0e47SSidhartha Kumar } 1232c8633798SNaoya Horiguchi 1233ec4858e0SMatthew Wilcox (Oracle) if (folio_test_large(src)) 1234ec4858e0SMatthew Wilcox (Oracle) gfp = GFP_TRANSHUGE; 1235ec4858e0SMatthew Wilcox (Oracle) 123611c731e8SWanpeng Li /* 1237ec4858e0SMatthew Wilcox (Oracle) * if !vma, vma_alloc_folio() will use task or system default policy 123811c731e8SWanpeng Li */ 12394e096ae1SMatthew Wilcox (Oracle) return vma_alloc_folio(gfp, folio_order(src), vma, address, 1240ec4858e0SMatthew Wilcox (Oracle) folio_test_large(src)); 124195a402c3SChristoph Lameter } 1242b20a3503SChristoph Lameter #else 1243b20a3503SChristoph Lameter 12444a64981dSVishal Moola (Oracle) static int migrate_folio_add(struct folio *folio, struct list_head *foliolist, 1245b20a3503SChristoph Lameter unsigned long flags) 1246b20a3503SChristoph Lameter { 1247a53190a4SYang Shi return -EIO; 1248b20a3503SChristoph Lameter } 1249b20a3503SChristoph Lameter 12500ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, 12510ce72d4fSAndrew Morton const nodemask_t *to, int flags) 1252b20a3503SChristoph Lameter { 1253b20a3503SChristoph Lameter return -ENOSYS; 1254b20a3503SChristoph Lameter } 125595a402c3SChristoph Lameter 12564e096ae1SMatthew Wilcox (Oracle) static struct folio *new_folio(struct folio *src, unsigned long start) 125795a402c3SChristoph Lameter { 125895a402c3SChristoph Lameter return NULL; 125995a402c3SChristoph Lameter } 1260b20a3503SChristoph Lameter #endif 1261b20a3503SChristoph Lameter 1262dbcb0f19SAdrian Bunk static long do_mbind(unsigned long start, unsigned long len, 1263028fec41SDavid Rientjes unsigned short mode, unsigned short mode_flags, 1264028fec41SDavid Rientjes nodemask_t *nmask, unsigned long flags) 12656ce3c4c0SChristoph Lameter { 12666ce3c4c0SChristoph Lameter struct mm_struct *mm = current->mm; 1267f4e9e0e6SLiam R. Howlett struct vm_area_struct *vma, *prev; 1268f4e9e0e6SLiam R. 
Howlett struct vma_iterator vmi; 12696ce3c4c0SChristoph Lameter struct mempolicy *new; 12706ce3c4c0SChristoph Lameter unsigned long end; 12716ce3c4c0SChristoph Lameter int err; 1272d8835445SYang Shi int ret; 12736ce3c4c0SChristoph Lameter LIST_HEAD(pagelist); 12746ce3c4c0SChristoph Lameter 1275b24f53a0SLee Schermerhorn if (flags & ~(unsigned long)MPOL_MF_VALID) 12766ce3c4c0SChristoph Lameter return -EINVAL; 127774c00241SChristoph Lameter if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE)) 12786ce3c4c0SChristoph Lameter return -EPERM; 12796ce3c4c0SChristoph Lameter 12806ce3c4c0SChristoph Lameter if (start & ~PAGE_MASK) 12816ce3c4c0SChristoph Lameter return -EINVAL; 12826ce3c4c0SChristoph Lameter 12836ce3c4c0SChristoph Lameter if (mode == MPOL_DEFAULT) 12846ce3c4c0SChristoph Lameter flags &= ~MPOL_MF_STRICT; 12856ce3c4c0SChristoph Lameter 1286aaa31e05Sze zuo len = PAGE_ALIGN(len); 12876ce3c4c0SChristoph Lameter end = start + len; 12886ce3c4c0SChristoph Lameter 12896ce3c4c0SChristoph Lameter if (end < start) 12906ce3c4c0SChristoph Lameter return -EINVAL; 12916ce3c4c0SChristoph Lameter if (end == start) 12926ce3c4c0SChristoph Lameter return 0; 12936ce3c4c0SChristoph Lameter 1294028fec41SDavid Rientjes new = mpol_new(mode, mode_flags, nmask); 12956ce3c4c0SChristoph Lameter if (IS_ERR(new)) 12966ce3c4c0SChristoph Lameter return PTR_ERR(new); 12976ce3c4c0SChristoph Lameter 1298b24f53a0SLee Schermerhorn if (flags & MPOL_MF_LAZY) 1299b24f53a0SLee Schermerhorn new->flags |= MPOL_F_MOF; 1300b24f53a0SLee Schermerhorn 13016ce3c4c0SChristoph Lameter /* 13026ce3c4c0SChristoph Lameter * If we are using the default policy then operation 13036ce3c4c0SChristoph Lameter * on discontinuous address spaces is okay after all 13046ce3c4c0SChristoph Lameter */ 13056ce3c4c0SChristoph Lameter if (!new) 13066ce3c4c0SChristoph Lameter flags |= MPOL_MF_DISCONTIG_OK; 13076ce3c4c0SChristoph Lameter 1308028fec41SDavid Rientjes pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n", 1309028fec41SDavid Rientjes start, start + len, mode, mode_flags, 131000ef2d2fSDavid Rientjes nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE); 13116ce3c4c0SChristoph Lameter 13120aedadf9SChristoph Lameter if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 13130aedadf9SChristoph Lameter 1314361a2a22SMinchan Kim lru_cache_disable(); 13150aedadf9SChristoph Lameter } 13164bfc4495SKAMEZAWA Hiroyuki { 13174bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 13184bfc4495SKAMEZAWA Hiroyuki if (scratch) { 1319d8ed45c5SMichel Lespinasse mmap_write_lock(mm); 13204bfc4495SKAMEZAWA Hiroyuki err = mpol_set_nodemask(new, nmask, scratch); 13214bfc4495SKAMEZAWA Hiroyuki if (err) 1322d8ed45c5SMichel Lespinasse mmap_write_unlock(mm); 13234bfc4495SKAMEZAWA Hiroyuki } else 13244bfc4495SKAMEZAWA Hiroyuki err = -ENOMEM; 13254bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 13264bfc4495SKAMEZAWA Hiroyuki } 1327b05ca738SKOSAKI Motohiro if (err) 1328b05ca738SKOSAKI Motohiro goto mpol_out; 1329b05ca738SKOSAKI Motohiro 13306c21e066SJann Horn /* 13316c21e066SJann Horn * Lock the VMAs before scanning for pages to migrate, to ensure we don't 13326c21e066SJann Horn * miss a concurrently inserted page. 13336c21e066SJann Horn */ 1334d8835445SYang Shi ret = queue_pages_range(mm, start, end, nmask, 1335*49b06385SSuren Baghdasaryan flags | MPOL_MF_INVERT, &pagelist, true); 1336d8835445SYang Shi 1337d8835445SYang Shi if (ret < 0) { 1338a85dfc30SYang Shi err = ret; 1339d8835445SYang Shi goto up_out; 1340d8835445SYang Shi } 1341d8835445SYang Shi 1342f4e9e0e6SLiam R. 
Howlett vma_iter_init(&vmi, mm, start); 1343f4e9e0e6SLiam R. Howlett prev = vma_prev(&vmi); 1344f4e9e0e6SLiam R. Howlett for_each_vma_range(vmi, vma, end) { 1345f4e9e0e6SLiam R. Howlett err = mbind_range(&vmi, vma, &prev, start, end, new); 1346f4e9e0e6SLiam R. Howlett if (err) 1347f4e9e0e6SLiam R. Howlett break; 1348f4e9e0e6SLiam R. Howlett } 13497e2ab150SChristoph Lameter 1350b24f53a0SLee Schermerhorn if (!err) { 1351b24f53a0SLee Schermerhorn int nr_failed = 0; 1352b24f53a0SLee Schermerhorn 1353cf608ac1SMinchan Kim if (!list_empty(&pagelist)) { 1354b24f53a0SLee Schermerhorn WARN_ON_ONCE(flags & MPOL_MF_LAZY); 13554e096ae1SMatthew Wilcox (Oracle) nr_failed = migrate_pages(&pagelist, new_folio, NULL, 13565ac95884SYang Shi start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND, NULL); 1357cf608ac1SMinchan Kim if (nr_failed) 135874060e4dSNaoya Horiguchi putback_movable_pages(&pagelist); 1359cf608ac1SMinchan Kim } 13606ce3c4c0SChristoph Lameter 1361d8835445SYang Shi if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT))) 13626ce3c4c0SChristoph Lameter err = -EIO; 1363a85dfc30SYang Shi } else { 1364d8835445SYang Shi up_out: 1365a85dfc30SYang Shi if (!list_empty(&pagelist)) 1366a85dfc30SYang Shi putback_movable_pages(&pagelist); 1367a85dfc30SYang Shi } 1368a85dfc30SYang Shi 1369d8ed45c5SMichel Lespinasse mmap_write_unlock(mm); 1370b05ca738SKOSAKI Motohiro mpol_out: 1371f0be3d32SLee Schermerhorn mpol_put(new); 1372d479960eSMinchan Kim if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) 1373361a2a22SMinchan Kim lru_cache_enable(); 13746ce3c4c0SChristoph Lameter return err; 13756ce3c4c0SChristoph Lameter } 13766ce3c4c0SChristoph Lameter 137739743889SChristoph Lameter /* 13788bccd85fSChristoph Lameter * User space interface with variable sized bitmaps for nodelists. 13798bccd85fSChristoph Lameter */ 1380e130242dSArnd Bergmann static int get_bitmap(unsigned long *mask, const unsigned long __user *nmask, 1381e130242dSArnd Bergmann unsigned long maxnode) 1382e130242dSArnd Bergmann { 1383e130242dSArnd Bergmann unsigned long nlongs = BITS_TO_LONGS(maxnode); 1384e130242dSArnd Bergmann int ret; 1385e130242dSArnd Bergmann 1386e130242dSArnd Bergmann if (in_compat_syscall()) 1387e130242dSArnd Bergmann ret = compat_get_bitmap(mask, 1388e130242dSArnd Bergmann (const compat_ulong_t __user *)nmask, 1389e130242dSArnd Bergmann maxnode); 1390e130242dSArnd Bergmann else 1391e130242dSArnd Bergmann ret = copy_from_user(mask, nmask, 1392e130242dSArnd Bergmann nlongs * sizeof(unsigned long)); 1393e130242dSArnd Bergmann 1394e130242dSArnd Bergmann if (ret) 1395e130242dSArnd Bergmann return -EFAULT; 1396e130242dSArnd Bergmann 1397e130242dSArnd Bergmann if (maxnode % BITS_PER_LONG) 1398e130242dSArnd Bergmann mask[nlongs - 1] &= (1UL << (maxnode % BITS_PER_LONG)) - 1; 1399e130242dSArnd Bergmann 1400e130242dSArnd Bergmann return 0; 1401e130242dSArnd Bergmann } 14028bccd85fSChristoph Lameter 14038bccd85fSChristoph Lameter /* Copy a node mask from user space. 
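 *
 * A nodemask longer than MAX_NUMNODES (up to PAGE_SIZE * 8 bits) is
 * accepted as long as every bit beyond MAX_NUMNODES is clear; otherwise
 * -EINVAL is returned by the word-at-a-time check below.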
*/ 140439743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask, 14058bccd85fSChristoph Lameter unsigned long maxnode) 14068bccd85fSChristoph Lameter { 14078bccd85fSChristoph Lameter --maxnode; 14088bccd85fSChristoph Lameter nodes_clear(*nodes); 14098bccd85fSChristoph Lameter if (maxnode == 0 || !nmask) 14108bccd85fSChristoph Lameter return 0; 1411a9c930baSAndi Kleen if (maxnode > PAGE_SIZE*BITS_PER_BYTE) 1412636f13c1SChris Wright return -EINVAL; 14138bccd85fSChristoph Lameter 141456521e7aSYisheng Xie /* 141556521e7aSYisheng Xie * When the user specified more nodes than supported just check 1416e130242dSArnd Bergmann * if the non supported part is all zero, one word at a time, 1417e130242dSArnd Bergmann * starting at the end. 141856521e7aSYisheng Xie */ 1419e130242dSArnd Bergmann while (maxnode > MAX_NUMNODES) { 1420e130242dSArnd Bergmann unsigned long bits = min_t(unsigned long, maxnode, BITS_PER_LONG); 1421e130242dSArnd Bergmann unsigned long t; 14228bccd85fSChristoph Lameter 1423000eca5dSTianyu Li if (get_bitmap(&t, &nmask[(maxnode - 1) / BITS_PER_LONG], bits)) 142456521e7aSYisheng Xie return -EFAULT; 1425e130242dSArnd Bergmann 1426e130242dSArnd Bergmann if (maxnode - bits >= MAX_NUMNODES) { 1427e130242dSArnd Bergmann maxnode -= bits; 1428e130242dSArnd Bergmann } else { 1429e130242dSArnd Bergmann maxnode = MAX_NUMNODES; 1430e130242dSArnd Bergmann t &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1); 1431e130242dSArnd Bergmann } 1432e130242dSArnd Bergmann if (t) 143356521e7aSYisheng Xie return -EINVAL; 143456521e7aSYisheng Xie } 143556521e7aSYisheng Xie 1436e130242dSArnd Bergmann return get_bitmap(nodes_addr(*nodes), nmask, maxnode); 14378bccd85fSChristoph Lameter } 14388bccd85fSChristoph Lameter 14398bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */ 14408bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode, 14418bccd85fSChristoph Lameter nodemask_t *nodes) 14428bccd85fSChristoph Lameter { 14438bccd85fSChristoph Lameter unsigned long copy = ALIGN(maxnode-1, 64) / 8; 1444050c17f2SRalph Campbell unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long); 1445e130242dSArnd Bergmann bool compat = in_compat_syscall(); 1446e130242dSArnd Bergmann 1447e130242dSArnd Bergmann if (compat) 1448e130242dSArnd Bergmann nbytes = BITS_TO_COMPAT_LONGS(nr_node_ids) * sizeof(compat_long_t); 14498bccd85fSChristoph Lameter 14508bccd85fSChristoph Lameter if (copy > nbytes) { 14518bccd85fSChristoph Lameter if (copy > PAGE_SIZE) 14528bccd85fSChristoph Lameter return -EINVAL; 14538bccd85fSChristoph Lameter if (clear_user((char __user *)mask + nbytes, copy - nbytes)) 14548bccd85fSChristoph Lameter return -EFAULT; 14558bccd85fSChristoph Lameter copy = nbytes; 1456e130242dSArnd Bergmann maxnode = nr_node_ids; 14578bccd85fSChristoph Lameter } 1458e130242dSArnd Bergmann 1459e130242dSArnd Bergmann if (compat) 1460e130242dSArnd Bergmann return compat_put_bitmap((compat_ulong_t __user *)mask, 1461e130242dSArnd Bergmann nodes_addr(*nodes), maxnode); 1462e130242dSArnd Bergmann 14638bccd85fSChristoph Lameter return copy_to_user(mask, nodes_addr(*nodes), copy) ? 
-EFAULT : 0; 14648bccd85fSChristoph Lameter } 14658bccd85fSChristoph Lameter 146695837924SFeng Tang /* Basic parameter sanity check used by both mbind() and set_mempolicy() */ 146795837924SFeng Tang static inline int sanitize_mpol_flags(int *mode, unsigned short *flags) 146895837924SFeng Tang { 146995837924SFeng Tang *flags = *mode & MPOL_MODE_FLAGS; 147095837924SFeng Tang *mode &= ~MPOL_MODE_FLAGS; 1471b27abaccSDave Hansen 1472a38a59fdSBen Widawsky if ((unsigned int)(*mode) >= MPOL_MAX) 147395837924SFeng Tang return -EINVAL; 147495837924SFeng Tang if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES)) 147595837924SFeng Tang return -EINVAL; 14766d2aec9eSEric Dumazet if (*flags & MPOL_F_NUMA_BALANCING) { 14776d2aec9eSEric Dumazet if (*mode != MPOL_BIND) 14786d2aec9eSEric Dumazet return -EINVAL; 14796d2aec9eSEric Dumazet *flags |= (MPOL_F_MOF | MPOL_F_MORON); 14806d2aec9eSEric Dumazet } 148195837924SFeng Tang return 0; 148295837924SFeng Tang } 148395837924SFeng Tang 1484e7dc9ad6SDominik Brodowski static long kernel_mbind(unsigned long start, unsigned long len, 1485e7dc9ad6SDominik Brodowski unsigned long mode, const unsigned long __user *nmask, 1486e7dc9ad6SDominik Brodowski unsigned long maxnode, unsigned int flags) 14878bccd85fSChristoph Lameter { 1488028fec41SDavid Rientjes unsigned short mode_flags; 148995837924SFeng Tang nodemask_t nodes; 149095837924SFeng Tang int lmode = mode; 149195837924SFeng Tang int err; 14928bccd85fSChristoph Lameter 1493057d3389SAndrey Konovalov start = untagged_addr(start); 149495837924SFeng Tang err = sanitize_mpol_flags(&lmode, &mode_flags); 149595837924SFeng Tang if (err) 149695837924SFeng Tang return err; 149795837924SFeng Tang 14988bccd85fSChristoph Lameter err = get_nodes(&nodes, nmask, maxnode); 14998bccd85fSChristoph Lameter if (err) 15008bccd85fSChristoph Lameter return err; 150195837924SFeng Tang 150295837924SFeng Tang return do_mbind(start, len, lmode, mode_flags, &nodes, flags); 15038bccd85fSChristoph Lameter } 15048bccd85fSChristoph Lameter 1505c6018b4bSAneesh Kumar K.V SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, len, 1506c6018b4bSAneesh Kumar K.V unsigned long, home_node, unsigned long, flags) 1507c6018b4bSAneesh Kumar K.V { 1508c6018b4bSAneesh Kumar K.V struct mm_struct *mm = current->mm; 1509f4e9e0e6SLiam R. Howlett struct vm_area_struct *vma, *prev; 1510e976936cSMichal Hocko struct mempolicy *new, *old; 1511c6018b4bSAneesh Kumar K.V unsigned long end; 1512c6018b4bSAneesh Kumar K.V int err = -ENOENT; 151366850be5SLiam R. Howlett VMA_ITERATOR(vmi, mm, start); 1514c6018b4bSAneesh Kumar K.V 1515c6018b4bSAneesh Kumar K.V start = untagged_addr(start); 1516c6018b4bSAneesh Kumar K.V if (start & ~PAGE_MASK) 1517c6018b4bSAneesh Kumar K.V return -EINVAL; 1518c6018b4bSAneesh Kumar K.V /* 1519c6018b4bSAneesh Kumar K.V * flags is used for future extension if any. 1520c6018b4bSAneesh Kumar K.V */ 1521c6018b4bSAneesh Kumar K.V if (flags != 0) 1522c6018b4bSAneesh Kumar K.V return -EINVAL; 1523c6018b4bSAneesh Kumar K.V 1524c6018b4bSAneesh Kumar K.V /* 1525c6018b4bSAneesh Kumar K.V * Check home_node is online to avoid accessing uninitialized 1526c6018b4bSAneesh Kumar K.V * NODE_DATA. 
1527c6018b4bSAneesh Kumar K.V */ 1528c6018b4bSAneesh Kumar K.V if (home_node >= MAX_NUMNODES || !node_online(home_node)) 1529c6018b4bSAneesh Kumar K.V return -EINVAL; 1530c6018b4bSAneesh Kumar K.V 1531aaa31e05Sze zuo len = PAGE_ALIGN(len); 1532c6018b4bSAneesh Kumar K.V end = start + len; 1533c6018b4bSAneesh Kumar K.V 1534c6018b4bSAneesh Kumar K.V if (end < start) 1535c6018b4bSAneesh Kumar K.V return -EINVAL; 1536c6018b4bSAneesh Kumar K.V if (end == start) 1537c6018b4bSAneesh Kumar K.V return 0; 1538c6018b4bSAneesh Kumar K.V mmap_write_lock(mm); 1539f4e9e0e6SLiam R. Howlett prev = vma_prev(&vmi); 154066850be5SLiam R. Howlett for_each_vma_range(vmi, vma, end) { 1541c6018b4bSAneesh Kumar K.V /* 1542c6018b4bSAneesh Kumar K.V * If any vma in the range got policy other than MPOL_BIND 1543c6018b4bSAneesh Kumar K.V * or MPOL_PREFERRED_MANY we return error. We don't reset 1544c6018b4bSAneesh Kumar K.V * the home node for vmas we already updated before. 1545c6018b4bSAneesh Kumar K.V */ 1546e976936cSMichal Hocko old = vma_policy(vma); 1547e976936cSMichal Hocko if (!old) 1548e976936cSMichal Hocko continue; 1549e976936cSMichal Hocko if (old->mode != MPOL_BIND && old->mode != MPOL_PREFERRED_MANY) { 1550c6018b4bSAneesh Kumar K.V err = -EOPNOTSUPP; 1551c6018b4bSAneesh Kumar K.V break; 1552c6018b4bSAneesh Kumar K.V } 1553e976936cSMichal Hocko new = mpol_dup(old); 1554e976936cSMichal Hocko if (IS_ERR(new)) { 1555e976936cSMichal Hocko err = PTR_ERR(new); 1556e976936cSMichal Hocko break; 1557e976936cSMichal Hocko } 1558c6018b4bSAneesh Kumar K.V 15596c21e066SJann Horn vma_start_write(vma); 1560c6018b4bSAneesh Kumar K.V new->home_node = home_node; 1561f4e9e0e6SLiam R. Howlett err = mbind_range(&vmi, vma, &prev, start, end, new); 1562c6018b4bSAneesh Kumar K.V mpol_put(new); 1563c6018b4bSAneesh Kumar K.V if (err) 1564c6018b4bSAneesh Kumar K.V break; 1565c6018b4bSAneesh Kumar K.V } 1566c6018b4bSAneesh Kumar K.V mmap_write_unlock(mm); 1567c6018b4bSAneesh Kumar K.V return err; 1568c6018b4bSAneesh Kumar K.V } 1569c6018b4bSAneesh Kumar K.V 1570e7dc9ad6SDominik Brodowski SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len, 1571e7dc9ad6SDominik Brodowski unsigned long, mode, const unsigned long __user *, nmask, 1572e7dc9ad6SDominik Brodowski unsigned long, maxnode, unsigned int, flags) 1573e7dc9ad6SDominik Brodowski { 1574e7dc9ad6SDominik Brodowski return kernel_mbind(start, len, mode, nmask, maxnode, flags); 1575e7dc9ad6SDominik Brodowski } 1576e7dc9ad6SDominik Brodowski 15778bccd85fSChristoph Lameter /* Set the process memory policy */ 1578af03c4acSDominik Brodowski static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask, 1579af03c4acSDominik Brodowski unsigned long maxnode) 15808bccd85fSChristoph Lameter { 158195837924SFeng Tang unsigned short mode_flags; 15828bccd85fSChristoph Lameter nodemask_t nodes; 158395837924SFeng Tang int lmode = mode; 158495837924SFeng Tang int err; 15858bccd85fSChristoph Lameter 158695837924SFeng Tang err = sanitize_mpol_flags(&lmode, &mode_flags); 158795837924SFeng Tang if (err) 158895837924SFeng Tang return err; 158995837924SFeng Tang 15908bccd85fSChristoph Lameter err = get_nodes(&nodes, nmask, maxnode); 15918bccd85fSChristoph Lameter if (err) 15928bccd85fSChristoph Lameter return err; 159395837924SFeng Tang 159495837924SFeng Tang return do_set_mempolicy(lmode, mode_flags, &nodes); 15958bccd85fSChristoph Lameter } 15968bccd85fSChristoph Lameter 1597af03c4acSDominik Brodowski SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask, 
1598af03c4acSDominik Brodowski unsigned long, maxnode) 1599af03c4acSDominik Brodowski { 1600af03c4acSDominik Brodowski return kernel_set_mempolicy(mode, nmask, maxnode); 1601af03c4acSDominik Brodowski } 1602af03c4acSDominik Brodowski 1603b6e9b0baSDominik Brodowski static int kernel_migrate_pages(pid_t pid, unsigned long maxnode, 1604b6e9b0baSDominik Brodowski const unsigned long __user *old_nodes, 1605b6e9b0baSDominik Brodowski const unsigned long __user *new_nodes) 160639743889SChristoph Lameter { 1607596d7cfaSKOSAKI Motohiro struct mm_struct *mm = NULL; 160839743889SChristoph Lameter struct task_struct *task; 160939743889SChristoph Lameter nodemask_t task_nodes; 161039743889SChristoph Lameter int err; 1611596d7cfaSKOSAKI Motohiro nodemask_t *old; 1612596d7cfaSKOSAKI Motohiro nodemask_t *new; 1613596d7cfaSKOSAKI Motohiro NODEMASK_SCRATCH(scratch); 161439743889SChristoph Lameter 1615596d7cfaSKOSAKI Motohiro if (!scratch) 1616596d7cfaSKOSAKI Motohiro return -ENOMEM; 161739743889SChristoph Lameter 1618596d7cfaSKOSAKI Motohiro old = &scratch->mask1; 1619596d7cfaSKOSAKI Motohiro new = &scratch->mask2; 1620596d7cfaSKOSAKI Motohiro 1621596d7cfaSKOSAKI Motohiro err = get_nodes(old, old_nodes, maxnode); 162239743889SChristoph Lameter if (err) 1623596d7cfaSKOSAKI Motohiro goto out; 1624596d7cfaSKOSAKI Motohiro 1625596d7cfaSKOSAKI Motohiro err = get_nodes(new, new_nodes, maxnode); 1626596d7cfaSKOSAKI Motohiro if (err) 1627596d7cfaSKOSAKI Motohiro goto out; 162839743889SChristoph Lameter 162939743889SChristoph Lameter /* Find the mm_struct */ 163055cfaa3cSZeng Zhaoming rcu_read_lock(); 1631228ebcbeSPavel Emelyanov task = pid ? find_task_by_vpid(pid) : current; 163239743889SChristoph Lameter if (!task) { 163355cfaa3cSZeng Zhaoming rcu_read_unlock(); 1634596d7cfaSKOSAKI Motohiro err = -ESRCH; 1635596d7cfaSKOSAKI Motohiro goto out; 163639743889SChristoph Lameter } 16373268c63eSChristoph Lameter get_task_struct(task); 163839743889SChristoph Lameter 1639596d7cfaSKOSAKI Motohiro err = -EINVAL; 164039743889SChristoph Lameter 164139743889SChristoph Lameter /* 164231367466SOtto Ebeling * Check if this process has the right to modify the specified process. 164331367466SOtto Ebeling * Use the regular "ptrace_may_access()" checks. 164439743889SChristoph Lameter */ 164531367466SOtto Ebeling if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) { 1646c69e8d9cSDavid Howells rcu_read_unlock(); 164739743889SChristoph Lameter err = -EPERM; 16483268c63eSChristoph Lameter goto out_put; 164939743889SChristoph Lameter } 1650c69e8d9cSDavid Howells rcu_read_unlock(); 165139743889SChristoph Lameter 165239743889SChristoph Lameter task_nodes = cpuset_mems_allowed(task); 165339743889SChristoph Lameter /* Is the user allowed to access the target nodes? 
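 *
 * Without CAP_SYS_NICE the caller may only move the target's pages to
 * nodes that are already allowed by the target task's cpuset.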
*/ 1654596d7cfaSKOSAKI Motohiro if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) { 165539743889SChristoph Lameter err = -EPERM; 16563268c63eSChristoph Lameter goto out_put; 165739743889SChristoph Lameter } 165839743889SChristoph Lameter 16590486a38bSYisheng Xie task_nodes = cpuset_mems_allowed(current); 16600486a38bSYisheng Xie nodes_and(*new, *new, task_nodes); 16610486a38bSYisheng Xie if (nodes_empty(*new)) 16623268c63eSChristoph Lameter goto out_put; 16630486a38bSYisheng Xie 166486c3a764SDavid Quigley err = security_task_movememory(task); 166586c3a764SDavid Quigley if (err) 16663268c63eSChristoph Lameter goto out_put; 166786c3a764SDavid Quigley 16683268c63eSChristoph Lameter mm = get_task_mm(task); 16693268c63eSChristoph Lameter put_task_struct(task); 1670f2a9ef88SSasha Levin 1671f2a9ef88SSasha Levin if (!mm) { 1672f2a9ef88SSasha Levin err = -EINVAL; 1673f2a9ef88SSasha Levin goto out; 1674f2a9ef88SSasha Levin } 1675f2a9ef88SSasha Levin 1676596d7cfaSKOSAKI Motohiro err = do_migrate_pages(mm, old, new, 167774c00241SChristoph Lameter capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE); 16783268c63eSChristoph Lameter 167939743889SChristoph Lameter mmput(mm); 16803268c63eSChristoph Lameter out: 1681596d7cfaSKOSAKI Motohiro NODEMASK_SCRATCH_FREE(scratch); 1682596d7cfaSKOSAKI Motohiro 168339743889SChristoph Lameter return err; 16843268c63eSChristoph Lameter 16853268c63eSChristoph Lameter out_put: 16863268c63eSChristoph Lameter put_task_struct(task); 16873268c63eSChristoph Lameter goto out; 16883268c63eSChristoph Lameter 168939743889SChristoph Lameter } 169039743889SChristoph Lameter 1691b6e9b0baSDominik Brodowski SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode, 1692b6e9b0baSDominik Brodowski const unsigned long __user *, old_nodes, 1693b6e9b0baSDominik Brodowski const unsigned long __user *, new_nodes) 1694b6e9b0baSDominik Brodowski { 1695b6e9b0baSDominik Brodowski return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes); 1696b6e9b0baSDominik Brodowski } 1697b6e9b0baSDominik Brodowski 169839743889SChristoph Lameter 16998bccd85fSChristoph Lameter /* Retrieve NUMA policy */ 1700af03c4acSDominik Brodowski static int kernel_get_mempolicy(int __user *policy, 1701af03c4acSDominik Brodowski unsigned long __user *nmask, 1702af03c4acSDominik Brodowski unsigned long maxnode, 1703af03c4acSDominik Brodowski unsigned long addr, 1704af03c4acSDominik Brodowski unsigned long flags) 17058bccd85fSChristoph Lameter { 1706dbcb0f19SAdrian Bunk int err; 17073f649ab7SKees Cook int pval; 17088bccd85fSChristoph Lameter nodemask_t nodes; 17098bccd85fSChristoph Lameter 1710050c17f2SRalph Campbell if (nmask != NULL && maxnode < nr_node_ids) 17118bccd85fSChristoph Lameter return -EINVAL; 17128bccd85fSChristoph Lameter 17134605f057SWenchao Hao addr = untagged_addr(addr); 17144605f057SWenchao Hao 17158bccd85fSChristoph Lameter err = do_get_mempolicy(&pval, &nodes, addr, flags); 17168bccd85fSChristoph Lameter 17178bccd85fSChristoph Lameter if (err) 17188bccd85fSChristoph Lameter return err; 17198bccd85fSChristoph Lameter 17208bccd85fSChristoph Lameter if (policy && put_user(pval, policy)) 17218bccd85fSChristoph Lameter return -EFAULT; 17228bccd85fSChristoph Lameter 17238bccd85fSChristoph Lameter if (nmask) 17248bccd85fSChristoph Lameter err = copy_nodes_to_user(nmask, maxnode, &nodes); 17258bccd85fSChristoph Lameter 17268bccd85fSChristoph Lameter return err; 17278bccd85fSChristoph Lameter } 17288bccd85fSChristoph Lameter 1729af03c4acSDominik Brodowski SYSCALL_DEFINE5(get_mempolicy, 
int __user *, policy, 1730af03c4acSDominik Brodowski unsigned long __user *, nmask, unsigned long, maxnode, 1731af03c4acSDominik Brodowski unsigned long, addr, unsigned long, flags) 1732af03c4acSDominik Brodowski { 1733af03c4acSDominik Brodowski return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags); 1734af03c4acSDominik Brodowski } 1735af03c4acSDominik Brodowski 173620ca87f2SLi Xinhai bool vma_migratable(struct vm_area_struct *vma) 173720ca87f2SLi Xinhai { 173820ca87f2SLi Xinhai if (vma->vm_flags & (VM_IO | VM_PFNMAP)) 173920ca87f2SLi Xinhai return false; 174020ca87f2SLi Xinhai 174120ca87f2SLi Xinhai /* 174220ca87f2SLi Xinhai * DAX device mappings require predictable access latency, so avoid 174320ca87f2SLi Xinhai * incurring periodic faults. 174420ca87f2SLi Xinhai */ 174520ca87f2SLi Xinhai if (vma_is_dax(vma)) 174620ca87f2SLi Xinhai return false; 174720ca87f2SLi Xinhai 174820ca87f2SLi Xinhai if (is_vm_hugetlb_page(vma) && 174920ca87f2SLi Xinhai !hugepage_migration_supported(hstate_vma(vma))) 175020ca87f2SLi Xinhai return false; 175120ca87f2SLi Xinhai 175220ca87f2SLi Xinhai /* 175320ca87f2SLi Xinhai * Migration allocates pages in the highest zone. If we cannot 175420ca87f2SLi Xinhai * do so then migration (at least from node to node) is not 175520ca87f2SLi Xinhai * possible. 175620ca87f2SLi Xinhai */ 175720ca87f2SLi Xinhai if (vma->vm_file && 175820ca87f2SLi Xinhai gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping)) 175920ca87f2SLi Xinhai < policy_zone) 176020ca87f2SLi Xinhai return false; 176120ca87f2SLi Xinhai return true; 176220ca87f2SLi Xinhai } 176320ca87f2SLi Xinhai 176474d2c3a0SOleg Nesterov struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, 176574d2c3a0SOleg Nesterov unsigned long addr) 17661da177e4SLinus Torvalds { 17678d90274bSOleg Nesterov struct mempolicy *pol = NULL; 17681da177e4SLinus Torvalds 17691da177e4SLinus Torvalds if (vma) { 1770480eccf9SLee Schermerhorn if (vma->vm_ops && vma->vm_ops->get_policy) { 17718d90274bSOleg Nesterov pol = vma->vm_ops->get_policy(vma, addr); 177200442ad0SMel Gorman } else if (vma->vm_policy) { 17731da177e4SLinus Torvalds pol = vma->vm_policy; 177400442ad0SMel Gorman 177500442ad0SMel Gorman /* 177600442ad0SMel Gorman * shmem_alloc_page() passes MPOL_F_SHARED policy with 177700442ad0SMel Gorman * a pseudo vma whose vma->vm_ops=NULL. Take a reference 177800442ad0SMel Gorman * count on these policies which will be dropped by 177900442ad0SMel Gorman * mpol_cond_put() later 178000442ad0SMel Gorman */ 178100442ad0SMel Gorman if (mpol_needs_cond_ref(pol)) 178200442ad0SMel Gorman mpol_get(pol); 178300442ad0SMel Gorman } 17841da177e4SLinus Torvalds } 1785f15ca78eSOleg Nesterov 178674d2c3a0SOleg Nesterov return pol; 178774d2c3a0SOleg Nesterov } 178874d2c3a0SOleg Nesterov 178974d2c3a0SOleg Nesterov /* 1790dd6eecb9SOleg Nesterov * get_vma_policy(@vma, @addr) 179174d2c3a0SOleg Nesterov * @vma: virtual memory area whose policy is sought 179274d2c3a0SOleg Nesterov * @addr: address in @vma for shared policy lookup 179374d2c3a0SOleg Nesterov * 179474d2c3a0SOleg Nesterov * Returns effective policy for a VMA at specified address. 1795dd6eecb9SOleg Nesterov * Falls back to current->mempolicy or system default policy, as necessary. 179674d2c3a0SOleg Nesterov * Shared policies [those marked as MPOL_F_SHARED] require an extra reference 179774d2c3a0SOleg Nesterov * count--added by the get_policy() vm_op, as appropriate--to protect against 179874d2c3a0SOleg Nesterov * freeing by another task. 
It is the caller's responsibility to free the 179974d2c3a0SOleg Nesterov * extra reference for shared policies. 180074d2c3a0SOleg Nesterov */ 1801ac79f78dSDavid Rientjes static struct mempolicy *get_vma_policy(struct vm_area_struct *vma, 1802dd6eecb9SOleg Nesterov unsigned long addr) 180374d2c3a0SOleg Nesterov { 180474d2c3a0SOleg Nesterov struct mempolicy *pol = __get_vma_policy(vma, addr); 180574d2c3a0SOleg Nesterov 18068d90274bSOleg Nesterov if (!pol) 1807dd6eecb9SOleg Nesterov pol = get_task_policy(current); 18088d90274bSOleg Nesterov 18091da177e4SLinus Torvalds return pol; 18101da177e4SLinus Torvalds } 18111da177e4SLinus Torvalds 18126b6482bbSOleg Nesterov bool vma_policy_mof(struct vm_area_struct *vma) 1813fc314724SMel Gorman { 18146b6482bbSOleg Nesterov struct mempolicy *pol; 1815f15ca78eSOleg Nesterov 1816fc314724SMel Gorman if (vma->vm_ops && vma->vm_ops->get_policy) { 1817fc314724SMel Gorman bool ret = false; 1818fc314724SMel Gorman 1819fc314724SMel Gorman pol = vma->vm_ops->get_policy(vma, vma->vm_start); 1820fc314724SMel Gorman if (pol && (pol->flags & MPOL_F_MOF)) 1821fc314724SMel Gorman ret = true; 1822fc314724SMel Gorman mpol_cond_put(pol); 1823fc314724SMel Gorman 1824fc314724SMel Gorman return ret; 18258d90274bSOleg Nesterov } 18268d90274bSOleg Nesterov 1827fc314724SMel Gorman pol = vma->vm_policy; 18288d90274bSOleg Nesterov if (!pol) 18296b6482bbSOleg Nesterov pol = get_task_policy(current); 1830fc314724SMel Gorman 1831fc314724SMel Gorman return pol->flags & MPOL_F_MOF; 1832fc314724SMel Gorman } 1833fc314724SMel Gorman 1834d2226ebdSFeng Tang bool apply_policy_zone(struct mempolicy *policy, enum zone_type zone) 1835d3eb1570SLai Jiangshan { 1836d3eb1570SLai Jiangshan enum zone_type dynamic_policy_zone = policy_zone; 1837d3eb1570SLai Jiangshan 1838d3eb1570SLai Jiangshan BUG_ON(dynamic_policy_zone == ZONE_MOVABLE); 1839d3eb1570SLai Jiangshan 1840d3eb1570SLai Jiangshan /* 1841269fbe72SBen Widawsky * if policy->nodes has movable memory only, 1842d3eb1570SLai Jiangshan * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only. 1843d3eb1570SLai Jiangshan * 1844269fbe72SBen Widawsky * policy->nodes is intersect with node_states[N_MEMORY]. 1845f0953a1bSIngo Molnar * so if the following test fails, it implies 1846269fbe72SBen Widawsky * policy->nodes has movable memory only. 
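 *
 * In that case dynamic_policy_zone is raised to ZONE_MOVABLE below, so
 * only ZONE_MOVABLE allocations end up honouring the policy.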
1847d3eb1570SLai Jiangshan */ 1848269fbe72SBen Widawsky if (!nodes_intersects(policy->nodes, node_states[N_HIGH_MEMORY])) 1849d3eb1570SLai Jiangshan dynamic_policy_zone = ZONE_MOVABLE; 1850d3eb1570SLai Jiangshan 1851d3eb1570SLai Jiangshan return zone >= dynamic_policy_zone; 1852d3eb1570SLai Jiangshan } 1853d3eb1570SLai Jiangshan 185452cd3b07SLee Schermerhorn /* 185552cd3b07SLee Schermerhorn * Return a nodemask representing a mempolicy for filtering nodes for 185652cd3b07SLee Schermerhorn * page allocation 185752cd3b07SLee Schermerhorn */ 18588ca39e68SMuchun Song nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy) 185919770b32SMel Gorman { 1860b27abaccSDave Hansen int mode = policy->mode; 1861b27abaccSDave Hansen 186219770b32SMel Gorman /* Lower zones don't get a nodemask applied for MPOL_BIND */ 1863b27abaccSDave Hansen if (unlikely(mode == MPOL_BIND) && 1864d3eb1570SLai Jiangshan apply_policy_zone(policy, gfp_zone(gfp)) && 1865269fbe72SBen Widawsky cpuset_nodemask_valid_mems_allowed(&policy->nodes)) 1866269fbe72SBen Widawsky return &policy->nodes; 186719770b32SMel Gorman 1868b27abaccSDave Hansen if (mode == MPOL_PREFERRED_MANY) 1869b27abaccSDave Hansen return &policy->nodes; 1870b27abaccSDave Hansen 187119770b32SMel Gorman return NULL; 187219770b32SMel Gorman } 187319770b32SMel Gorman 1874b27abaccSDave Hansen /* 1875b27abaccSDave Hansen * Return the preferred node id for 'prefer' mempolicy, and return 1876b27abaccSDave Hansen * the given id for all other policies. 1877b27abaccSDave Hansen * 1878b27abaccSDave Hansen * policy_node() is always coupled with policy_nodemask(), which 1879b27abaccSDave Hansen * secures the nodemask limit for 'bind' and 'prefer-many' policy. 1880b27abaccSDave Hansen */ 1881f8fd5253SWei Yang static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd) 18821da177e4SLinus Torvalds { 18837858d7bcSFeng Tang if (policy->mode == MPOL_PREFERRED) { 1884269fbe72SBen Widawsky nd = first_node(policy->nodes); 18857858d7bcSFeng Tang } else { 188619770b32SMel Gorman /* 18876d840958SMichal Hocko * __GFP_THISNODE shouldn't even be used with the bind policy 18886d840958SMichal Hocko * because we might easily break the expectation to stay on the 18896d840958SMichal Hocko * requested node and not break the policy. 
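 *
 * The WARN_ON_ONCE() below flags such callers at runtime.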
189019770b32SMel Gorman */ 18916d840958SMichal Hocko WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE)); 18921da177e4SLinus Torvalds } 18936d840958SMichal Hocko 1894c6018b4bSAneesh Kumar K.V if ((policy->mode == MPOL_BIND || 1895c6018b4bSAneesh Kumar K.V policy->mode == MPOL_PREFERRED_MANY) && 1896c6018b4bSAneesh Kumar K.V policy->home_node != NUMA_NO_NODE) 1897c6018b4bSAneesh Kumar K.V return policy->home_node; 1898c6018b4bSAneesh Kumar K.V 189904ec6264SVlastimil Babka return nd; 19001da177e4SLinus Torvalds } 19011da177e4SLinus Torvalds 19021da177e4SLinus Torvalds /* Do dynamic interleaving for a process */ 19031da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy) 19041da177e4SLinus Torvalds { 190545816682SVlastimil Babka unsigned next; 19061da177e4SLinus Torvalds struct task_struct *me = current; 19071da177e4SLinus Torvalds 1908269fbe72SBen Widawsky next = next_node_in(me->il_prev, policy->nodes); 1909f5b087b5SDavid Rientjes if (next < MAX_NUMNODES) 191045816682SVlastimil Babka me->il_prev = next; 191145816682SVlastimil Babka return next; 19121da177e4SLinus Torvalds } 19131da177e4SLinus Torvalds 1914dc85da15SChristoph Lameter /* 1915dc85da15SChristoph Lameter * Depending on the memory policy provide a node from which to allocate the 1916dc85da15SChristoph Lameter * next slab entry. 1917dc85da15SChristoph Lameter */ 19182a389610SDavid Rientjes unsigned int mempolicy_slab_node(void) 1919dc85da15SChristoph Lameter { 1920e7b691b0SAndi Kleen struct mempolicy *policy; 19212a389610SDavid Rientjes int node = numa_mem_id(); 1922e7b691b0SAndi Kleen 192338b031ddSVasily Averin if (!in_task()) 19242a389610SDavid Rientjes return node; 1925e7b691b0SAndi Kleen 1926e7b691b0SAndi Kleen policy = current->mempolicy; 19277858d7bcSFeng Tang if (!policy) 19282a389610SDavid Rientjes return node; 1929765c4507SChristoph Lameter 1930bea904d5SLee Schermerhorn switch (policy->mode) { 1931bea904d5SLee Schermerhorn case MPOL_PREFERRED: 1932269fbe72SBen Widawsky return first_node(policy->nodes); 1933bea904d5SLee Schermerhorn 1934dc85da15SChristoph Lameter case MPOL_INTERLEAVE: 1935dc85da15SChristoph Lameter return interleave_nodes(policy); 1936dc85da15SChristoph Lameter 1937b27abaccSDave Hansen case MPOL_BIND: 1938b27abaccSDave Hansen case MPOL_PREFERRED_MANY: 1939b27abaccSDave Hansen { 1940c33d6c06SMel Gorman struct zoneref *z; 1941c33d6c06SMel Gorman 1942dc85da15SChristoph Lameter /* 1943dc85da15SChristoph Lameter * Follow bind policy behavior and start allocation at the 1944dc85da15SChristoph Lameter * first node. 1945dc85da15SChristoph Lameter */ 194619770b32SMel Gorman struct zonelist *zonelist; 194719770b32SMel Gorman enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL); 1948c9634cf0SAneesh Kumar K.V zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK]; 1949c33d6c06SMel Gorman z = first_zones_zonelist(zonelist, highest_zoneidx, 1950269fbe72SBen Widawsky &policy->nodes); 1951c1093b74SPavel Tatashin return z->zone ? zone_to_nid(z->zone) : node; 1952dd1a239fSMel Gorman } 19537858d7bcSFeng Tang case MPOL_LOCAL: 19547858d7bcSFeng Tang return node; 1955dc85da15SChristoph Lameter 1956dc85da15SChristoph Lameter default: 1957bea904d5SLee Schermerhorn BUG(); 1958dc85da15SChristoph Lameter } 1959dc85da15SChristoph Lameter } 1960dc85da15SChristoph Lameter 1961fee83b3aSAndrew Morton /* 1962fee83b3aSAndrew Morton * Do static interleaving for a VMA with known offset @n. 
Returns the n'th 1963269fbe72SBen Widawsky * node in pol->nodes (starting from n=0), wrapping around if n exceeds the 1964fee83b3aSAndrew Morton * number of present nodes. 1965fee83b3aSAndrew Morton */ 196698c70baaSLaurent Dufour static unsigned offset_il_node(struct mempolicy *pol, unsigned long n) 19671da177e4SLinus Torvalds { 1968276aeee1Syanghui nodemask_t nodemask = pol->nodes; 1969276aeee1Syanghui unsigned int target, nnodes; 1970fee83b3aSAndrew Morton int i; 1971fee83b3aSAndrew Morton int nid; 1972276aeee1Syanghui /* 1973276aeee1Syanghui * The barrier will stabilize the nodemask in a register or on 1974276aeee1Syanghui * the stack so that it will stop changing under the code. 1975276aeee1Syanghui * 1976276aeee1Syanghui * Between first_node() and next_node(), pol->nodes could be changed 1977276aeee1Syanghui * by other threads. So we put pol->nodes in a local stack. 1978276aeee1Syanghui */ 1979276aeee1Syanghui barrier(); 19801da177e4SLinus Torvalds 1981276aeee1Syanghui nnodes = nodes_weight(nodemask); 1982f5b087b5SDavid Rientjes if (!nnodes) 1983f5b087b5SDavid Rientjes return numa_node_id(); 1984fee83b3aSAndrew Morton target = (unsigned int)n % nnodes; 1985276aeee1Syanghui nid = first_node(nodemask); 1986fee83b3aSAndrew Morton for (i = 0; i < target; i++) 1987276aeee1Syanghui nid = next_node(nid, nodemask); 19881da177e4SLinus Torvalds return nid; 19891da177e4SLinus Torvalds } 19901da177e4SLinus Torvalds 19915da7ca86SChristoph Lameter /* Determine a node number for interleave */ 19925da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol, 19935da7ca86SChristoph Lameter struct vm_area_struct *vma, unsigned long addr, int shift) 19945da7ca86SChristoph Lameter { 19955da7ca86SChristoph Lameter if (vma) { 19965da7ca86SChristoph Lameter unsigned long off; 19975da7ca86SChristoph Lameter 19983b98b087SNishanth Aravamudan /* 19993b98b087SNishanth Aravamudan * for small pages, there is no difference between 20003b98b087SNishanth Aravamudan * shift and PAGE_SHIFT, so the bit-shift is safe. 20013b98b087SNishanth Aravamudan * for huge pages, since vm_pgoff is in units of small 20023b98b087SNishanth Aravamudan * pages, we need to shift off the always 0 bits to get 20033b98b087SNishanth Aravamudan * a useful offset. 
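 *
 * For example, with 2MB huge pages (shift == 21, PAGE_SHIFT == 12) the
 * offset becomes (vm_pgoff >> 9) + ((addr - vm_start) >> 21), i.e. an
 * index in units of whole huge pages.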
20043b98b087SNishanth Aravamudan */ 20053b98b087SNishanth Aravamudan BUG_ON(shift < PAGE_SHIFT); 20063b98b087SNishanth Aravamudan off = vma->vm_pgoff >> (shift - PAGE_SHIFT); 20075da7ca86SChristoph Lameter off += (addr - vma->vm_start) >> shift; 200898c70baaSLaurent Dufour return offset_il_node(pol, off); 20095da7ca86SChristoph Lameter } else 20105da7ca86SChristoph Lameter return interleave_nodes(pol); 20115da7ca86SChristoph Lameter } 20125da7ca86SChristoph Lameter 201300ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS 2014480eccf9SLee Schermerhorn /* 201504ec6264SVlastimil Babka * huge_node(@vma, @addr, @gfp_flags, @mpol) 2016b46e14acSFabian Frederick * @vma: virtual memory area whose policy is sought 2017b46e14acSFabian Frederick * @addr: address in @vma for shared policy lookup and interleave policy 2018b46e14acSFabian Frederick * @gfp_flags: for requested zone 2019b46e14acSFabian Frederick * @mpol: pointer to mempolicy pointer for reference counted mempolicy 2020b27abaccSDave Hansen * @nodemask: pointer to nodemask pointer for 'bind' and 'prefer-many' policy 2021480eccf9SLee Schermerhorn * 202204ec6264SVlastimil Babka * Returns a nid suitable for a huge page allocation and a pointer 202352cd3b07SLee Schermerhorn * to the struct mempolicy for conditional unref after allocation. 2024b27abaccSDave Hansen * If the effective policy is 'bind' or 'prefer-many', returns a pointer 2025b27abaccSDave Hansen * to the mempolicy's @nodemask for filtering the zonelist. 2026c0ff7453SMiao Xie * 2027d26914d1SMel Gorman * Must be protected by read_mems_allowed_begin() 2028480eccf9SLee Schermerhorn */ 202904ec6264SVlastimil Babka int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags, 203004ec6264SVlastimil Babka struct mempolicy **mpol, nodemask_t **nodemask) 20315da7ca86SChristoph Lameter { 203204ec6264SVlastimil Babka int nid; 2033b27abaccSDave Hansen int mode; 20345da7ca86SChristoph Lameter 2035dd6eecb9SOleg Nesterov *mpol = get_vma_policy(vma, addr); 2036b27abaccSDave Hansen *nodemask = NULL; 2037b27abaccSDave Hansen mode = (*mpol)->mode; 20385da7ca86SChristoph Lameter 2039b27abaccSDave Hansen if (unlikely(mode == MPOL_INTERLEAVE)) { 204004ec6264SVlastimil Babka nid = interleave_nid(*mpol, vma, addr, 204104ec6264SVlastimil Babka huge_page_shift(hstate_vma(vma))); 204252cd3b07SLee Schermerhorn } else { 204304ec6264SVlastimil Babka nid = policy_node(gfp_flags, *mpol, numa_node_id()); 2044b27abaccSDave Hansen if (mode == MPOL_BIND || mode == MPOL_PREFERRED_MANY) 2045269fbe72SBen Widawsky *nodemask = &(*mpol)->nodes; 2046480eccf9SLee Schermerhorn } 204704ec6264SVlastimil Babka return nid; 20485da7ca86SChristoph Lameter } 204906808b08SLee Schermerhorn 205006808b08SLee Schermerhorn /* 205106808b08SLee Schermerhorn * init_nodemask_of_mempolicy 205206808b08SLee Schermerhorn * 205306808b08SLee Schermerhorn * If the current task's mempolicy is "default" [NULL], return 'false' 205406808b08SLee Schermerhorn * to indicate default policy. Otherwise, extract the policy nodemask 205506808b08SLee Schermerhorn * for 'bind' or 'interleave' policy into the argument nodemask, or 205606808b08SLee Schermerhorn * initialize the argument nodemask to contain the single node for 205706808b08SLee Schermerhorn * 'preferred' or 'local' policy and return 'true' to indicate presence 205806808b08SLee Schermerhorn * of non-default mempolicy. 
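 *
 * One user of this helper is the hugetlb code, which spreads persistent
 * huge pages over the returned nodemask when "nr_hugepages_mempolicy"
 * is written.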
205906808b08SLee Schermerhorn * 206006808b08SLee Schermerhorn * We don't bother with reference counting the mempolicy [mpol_get/put] 206106808b08SLee Schermerhorn * because the current task is examining it's own mempolicy and a task's 206206808b08SLee Schermerhorn * mempolicy is only ever changed by the task itself. 206306808b08SLee Schermerhorn * 206406808b08SLee Schermerhorn * N.B., it is the caller's responsibility to free a returned nodemask. 206506808b08SLee Schermerhorn */ 206606808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask) 206706808b08SLee Schermerhorn { 206806808b08SLee Schermerhorn struct mempolicy *mempolicy; 206906808b08SLee Schermerhorn 207006808b08SLee Schermerhorn if (!(mask && current->mempolicy)) 207106808b08SLee Schermerhorn return false; 207206808b08SLee Schermerhorn 2073c0ff7453SMiao Xie task_lock(current); 207406808b08SLee Schermerhorn mempolicy = current->mempolicy; 207506808b08SLee Schermerhorn switch (mempolicy->mode) { 207606808b08SLee Schermerhorn case MPOL_PREFERRED: 2077b27abaccSDave Hansen case MPOL_PREFERRED_MANY: 207806808b08SLee Schermerhorn case MPOL_BIND: 207906808b08SLee Schermerhorn case MPOL_INTERLEAVE: 2080269fbe72SBen Widawsky *mask = mempolicy->nodes; 208106808b08SLee Schermerhorn break; 208206808b08SLee Schermerhorn 20837858d7bcSFeng Tang case MPOL_LOCAL: 2084269fbe72SBen Widawsky init_nodemask_of_node(mask, numa_node_id()); 20857858d7bcSFeng Tang break; 20867858d7bcSFeng Tang 208706808b08SLee Schermerhorn default: 208806808b08SLee Schermerhorn BUG(); 208906808b08SLee Schermerhorn } 2090c0ff7453SMiao Xie task_unlock(current); 209106808b08SLee Schermerhorn 209206808b08SLee Schermerhorn return true; 209306808b08SLee Schermerhorn } 209400ac59adSChen, Kenneth W #endif 20955da7ca86SChristoph Lameter 20966f48d0ebSDavid Rientjes /* 2097b26e517aSFeng Tang * mempolicy_in_oom_domain 20986f48d0ebSDavid Rientjes * 2099b26e517aSFeng Tang * If tsk's mempolicy is "bind", check for intersection between mask and 2100b26e517aSFeng Tang * the policy nodemask. Otherwise, return true for all other policies 2101b26e517aSFeng Tang * including "interleave", as a tsk with "interleave" policy may have 2102b26e517aSFeng Tang * memory allocated from all nodes in system. 21036f48d0ebSDavid Rientjes * 21046f48d0ebSDavid Rientjes * Takes task_lock(tsk) to prevent freeing of its mempolicy. 21056f48d0ebSDavid Rientjes */ 2106b26e517aSFeng Tang bool mempolicy_in_oom_domain(struct task_struct *tsk, 21076f48d0ebSDavid Rientjes const nodemask_t *mask) 21086f48d0ebSDavid Rientjes { 21096f48d0ebSDavid Rientjes struct mempolicy *mempolicy; 21106f48d0ebSDavid Rientjes bool ret = true; 21116f48d0ebSDavid Rientjes 21126f48d0ebSDavid Rientjes if (!mask) 21136f48d0ebSDavid Rientjes return ret; 2114b26e517aSFeng Tang 21156f48d0ebSDavid Rientjes task_lock(tsk); 21166f48d0ebSDavid Rientjes mempolicy = tsk->mempolicy; 2117b26e517aSFeng Tang if (mempolicy && mempolicy->mode == MPOL_BIND) 2118269fbe72SBen Widawsky ret = nodes_intersects(mempolicy->nodes, *mask); 21196f48d0ebSDavid Rientjes task_unlock(tsk); 2120b26e517aSFeng Tang 21216f48d0ebSDavid Rientjes return ret; 21226f48d0ebSDavid Rientjes } 21236f48d0ebSDavid Rientjes 21241da177e4SLinus Torvalds /* Allocate a page in interleaved policy. 21251da177e4SLinus Torvalds Own path because it needs to do special accounting. 
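   The special accounting is the NUMA_INTERLEAVE_HIT event, counted below
   when the page really was allocated on the requested node.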
*/ 2126662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, 2127662f3a0bSAndi Kleen unsigned nid) 21281da177e4SLinus Torvalds { 21291da177e4SLinus Torvalds struct page *page; 21301da177e4SLinus Torvalds 213184172f4bSMatthew Wilcox (Oracle) page = __alloc_pages(gfp, order, nid, NULL); 21324518085eSKemi Wang /* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */ 21334518085eSKemi Wang if (!static_branch_likely(&vm_numa_stat_key)) 21344518085eSKemi Wang return page; 2135de55c8b2SAndrey Ryabinin if (page && page_to_nid(page) == nid) { 2136de55c8b2SAndrey Ryabinin preempt_disable(); 2137f19298b9SMel Gorman __count_numa_event(page_zone(page), NUMA_INTERLEAVE_HIT); 2138de55c8b2SAndrey Ryabinin preempt_enable(); 2139de55c8b2SAndrey Ryabinin } 21401da177e4SLinus Torvalds return page; 21411da177e4SLinus Torvalds } 21421da177e4SLinus Torvalds 21434c54d949SFeng Tang static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order, 21444c54d949SFeng Tang int nid, struct mempolicy *pol) 21454c54d949SFeng Tang { 21464c54d949SFeng Tang struct page *page; 21474c54d949SFeng Tang gfp_t preferred_gfp; 21484c54d949SFeng Tang 21494c54d949SFeng Tang /* 21504c54d949SFeng Tang * This is a two pass approach. The first pass will only try the 21514c54d949SFeng Tang * preferred nodes but skip the direct reclaim and allow the 21524c54d949SFeng Tang * allocation to fail, while the second pass will try all the 21534c54d949SFeng Tang * nodes in system. 21544c54d949SFeng Tang */ 21554c54d949SFeng Tang preferred_gfp = gfp | __GFP_NOWARN; 21564c54d949SFeng Tang preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL); 21574c54d949SFeng Tang page = __alloc_pages(preferred_gfp, order, nid, &pol->nodes); 21584c54d949SFeng Tang if (!page) 2159c0455116SAneesh Kumar K.V page = __alloc_pages(gfp, order, nid, NULL); 21604c54d949SFeng Tang 21614c54d949SFeng Tang return page; 21624c54d949SFeng Tang } 21634c54d949SFeng Tang 21641da177e4SLinus Torvalds /** 2165adf88aa8SMatthew Wilcox (Oracle) * vma_alloc_folio - Allocate a folio for a VMA. 2166eb350739SMatthew Wilcox (Oracle) * @gfp: GFP flags. 2167adf88aa8SMatthew Wilcox (Oracle) * @order: Order of the folio. 21681da177e4SLinus Torvalds * @vma: Pointer to VMA or NULL if not available. 2169eb350739SMatthew Wilcox (Oracle) * @addr: Virtual address of the allocation. Must be inside @vma. 2170eb350739SMatthew Wilcox (Oracle) * @hugepage: For hugepages try only the preferred node if possible. 21711da177e4SLinus Torvalds * 2172adf88aa8SMatthew Wilcox (Oracle) * Allocate a folio for a specific address in @vma, using the appropriate 2173eb350739SMatthew Wilcox (Oracle) * NUMA policy. When @vma is not NULL the caller must hold the mmap_lock 2174eb350739SMatthew Wilcox (Oracle) * of the mm_struct of the VMA to prevent it from going away. Should be 2175adf88aa8SMatthew Wilcox (Oracle) * used for all allocations for folios that will be mapped into user space. 2176eb350739SMatthew Wilcox (Oracle) * 2177adf88aa8SMatthew Wilcox (Oracle) * Return: The folio on success or NULL if allocation fails. 
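 *
 * A typical caller in an anonymous fault path looks roughly like this
 * (illustrative sketch, not a specific in-tree call site):
 *
 *	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
 *	if (!folio)
 *		return VM_FAULT_OOM;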
21781da177e4SLinus Torvalds */ 2179adf88aa8SMatthew Wilcox (Oracle) struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma, 2180be1a13ebSMichal Hocko unsigned long addr, bool hugepage) 21811da177e4SLinus Torvalds { 2182cc9a6c87SMel Gorman struct mempolicy *pol; 2183be1a13ebSMichal Hocko int node = numa_node_id(); 2184adf88aa8SMatthew Wilcox (Oracle) struct folio *folio; 218504ec6264SVlastimil Babka int preferred_nid; 2186be97a41bSVlastimil Babka nodemask_t *nmask; 21871da177e4SLinus Torvalds 2188dd6eecb9SOleg Nesterov pol = get_vma_policy(vma, addr); 2189cc9a6c87SMel Gorman 2190be97a41bSVlastimil Babka if (pol->mode == MPOL_INTERLEAVE) { 2191adf88aa8SMatthew Wilcox (Oracle) struct page *page; 21921da177e4SLinus Torvalds unsigned nid; 21935da7ca86SChristoph Lameter 21948eac563cSAndi Kleen nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order); 219552cd3b07SLee Schermerhorn mpol_cond_put(pol); 2196adf88aa8SMatthew Wilcox (Oracle) gfp |= __GFP_COMP; 21970bbbc0b3SAndrea Arcangeli page = alloc_page_interleave(gfp, order, nid); 2198adf88aa8SMatthew Wilcox (Oracle) if (page && order > 1) 2199adf88aa8SMatthew Wilcox (Oracle) prep_transhuge_page(page); 2200adf88aa8SMatthew Wilcox (Oracle) folio = (struct folio *)page; 2201be97a41bSVlastimil Babka goto out; 22021da177e4SLinus Torvalds } 22031da177e4SLinus Torvalds 22044c54d949SFeng Tang if (pol->mode == MPOL_PREFERRED_MANY) { 2205adf88aa8SMatthew Wilcox (Oracle) struct page *page; 2206adf88aa8SMatthew Wilcox (Oracle) 2207c0455116SAneesh Kumar K.V node = policy_node(gfp, pol, node); 2208adf88aa8SMatthew Wilcox (Oracle) gfp |= __GFP_COMP; 22094c54d949SFeng Tang page = alloc_pages_preferred_many(gfp, order, node, pol); 22104c54d949SFeng Tang mpol_cond_put(pol); 2211adf88aa8SMatthew Wilcox (Oracle) if (page && order > 1) 2212adf88aa8SMatthew Wilcox (Oracle) prep_transhuge_page(page); 2213adf88aa8SMatthew Wilcox (Oracle) folio = (struct folio *)page; 22144c54d949SFeng Tang goto out; 22154c54d949SFeng Tang } 22164c54d949SFeng Tang 221719deb769SDavid Rientjes if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) { 221819deb769SDavid Rientjes int hpage_node = node; 221919deb769SDavid Rientjes 222019deb769SDavid Rientjes /* 222119deb769SDavid Rientjes * For hugepage allocation and non-interleave policy which 222219deb769SDavid Rientjes * allows the current node (or other explicitly preferred 222319deb769SDavid Rientjes * node) we only try to allocate from the current/preferred 222419deb769SDavid Rientjes * node and don't fall back to other nodes, as the cost of 222519deb769SDavid Rientjes * remote accesses would likely offset THP benefits. 222619deb769SDavid Rientjes * 2227b27abaccSDave Hansen * If the policy is interleave or does not allow the current 222819deb769SDavid Rientjes * node in its nodemask, we allocate the standard way. 222919deb769SDavid Rientjes */ 22307858d7bcSFeng Tang if (pol->mode == MPOL_PREFERRED) 2231269fbe72SBen Widawsky hpage_node = first_node(pol->nodes); 223219deb769SDavid Rientjes 223319deb769SDavid Rientjes nmask = policy_nodemask(gfp, pol); 223419deb769SDavid Rientjes if (!nmask || node_isset(hpage_node, *nmask)) { 223519deb769SDavid Rientjes mpol_cond_put(pol); 2236cc638f32SVlastimil Babka /* 2237cc638f32SVlastimil Babka * First, try to allocate THP only on local node, but 2238cc638f32SVlastimil Babka * don't reclaim unnecessarily, just compact. 
2239cc638f32SVlastimil Babka */ 2240adf88aa8SMatthew Wilcox (Oracle) folio = __folio_alloc_node(gfp | __GFP_THISNODE | 2241adf88aa8SMatthew Wilcox (Oracle) __GFP_NORETRY, order, hpage_node); 224276e654ccSDavid Rientjes 224376e654ccSDavid Rientjes /* 224476e654ccSDavid Rientjes * If hugepage allocations are configured to always 224576e654ccSDavid Rientjes * synchronous compact or the vma has been madvised 224676e654ccSDavid Rientjes * to prefer hugepage backing, retry allowing remote 2247cc638f32SVlastimil Babka * memory with both reclaim and compact as well. 224876e654ccSDavid Rientjes */ 2249adf88aa8SMatthew Wilcox (Oracle) if (!folio && (gfp & __GFP_DIRECT_RECLAIM)) 2250adf88aa8SMatthew Wilcox (Oracle) folio = __folio_alloc(gfp, order, hpage_node, 2251adf88aa8SMatthew Wilcox (Oracle) nmask); 225276e654ccSDavid Rientjes 225319deb769SDavid Rientjes goto out; 225419deb769SDavid Rientjes } 225519deb769SDavid Rientjes } 225619deb769SDavid Rientjes 2257077fcf11SAneesh Kumar K.V nmask = policy_nodemask(gfp, pol); 225804ec6264SVlastimil Babka preferred_nid = policy_node(gfp, pol, node); 2259adf88aa8SMatthew Wilcox (Oracle) folio = __folio_alloc(gfp, order, preferred_nid, nmask); 2260d51e9894SVlastimil Babka mpol_cond_put(pol); 2261be97a41bSVlastimil Babka out: 2262f584b680SMatthew Wilcox (Oracle) return folio; 2263f584b680SMatthew Wilcox (Oracle) } 2264adf88aa8SMatthew Wilcox (Oracle) EXPORT_SYMBOL(vma_alloc_folio); 2265f584b680SMatthew Wilcox (Oracle) 22661da177e4SLinus Torvalds /** 2267d7f946d0SMatthew Wilcox (Oracle) * alloc_pages - Allocate pages. 22686421ec76SMatthew Wilcox (Oracle) * @gfp: GFP flags. 22696421ec76SMatthew Wilcox (Oracle) * @order: Power of two of number of pages to allocate. 22701da177e4SLinus Torvalds * 22716421ec76SMatthew Wilcox (Oracle) * Allocate 1 << @order contiguous pages. The physical address of the 22726421ec76SMatthew Wilcox (Oracle) * first page is naturally aligned (eg an order-3 allocation will be aligned 22736421ec76SMatthew Wilcox (Oracle) * to a multiple of 8 * PAGE_SIZE bytes). The NUMA policy of the current 22746421ec76SMatthew Wilcox (Oracle) * process is honoured when in process context. 22751da177e4SLinus Torvalds * 22766421ec76SMatthew Wilcox (Oracle) * Context: Can be called from any context, providing the appropriate GFP 22776421ec76SMatthew Wilcox (Oracle) * flags are used. 22786421ec76SMatthew Wilcox (Oracle) * Return: The page on success or NULL if allocation fails. 
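 *
 * Illustrative usage sketch (not taken from this file):
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *
 *	if (!page)
 *		return -ENOMEM;
 *	(use the 4 contiguous, naturally aligned pages ...)
 *	__free_pages(page, 2);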
22791da177e4SLinus Torvalds */ 2280d7f946d0SMatthew Wilcox (Oracle) struct page *alloc_pages(gfp_t gfp, unsigned order) 22811da177e4SLinus Torvalds { 22828d90274bSOleg Nesterov struct mempolicy *pol = &default_policy; 2283c0ff7453SMiao Xie struct page *page; 22841da177e4SLinus Torvalds 22858d90274bSOleg Nesterov if (!in_interrupt() && !(gfp & __GFP_THISNODE)) 22868d90274bSOleg Nesterov pol = get_task_policy(current); 228752cd3b07SLee Schermerhorn 228852cd3b07SLee Schermerhorn /* 228952cd3b07SLee Schermerhorn * No reference counting needed for current->mempolicy 229052cd3b07SLee Schermerhorn * nor system default_policy 229152cd3b07SLee Schermerhorn */ 229245c4745aSLee Schermerhorn if (pol->mode == MPOL_INTERLEAVE) 2293c0ff7453SMiao Xie page = alloc_page_interleave(gfp, order, interleave_nodes(pol)); 22944c54d949SFeng Tang else if (pol->mode == MPOL_PREFERRED_MANY) 22954c54d949SFeng Tang page = alloc_pages_preferred_many(gfp, order, 2296c0455116SAneesh Kumar K.V policy_node(gfp, pol, numa_node_id()), pol); 2297c0ff7453SMiao Xie else 229884172f4bSMatthew Wilcox (Oracle) page = __alloc_pages(gfp, order, 229904ec6264SVlastimil Babka policy_node(gfp, pol, numa_node_id()), 23005c4b4be3SAndi Kleen policy_nodemask(gfp, pol)); 2301cc9a6c87SMel Gorman 2302c0ff7453SMiao Xie return page; 23031da177e4SLinus Torvalds } 2304d7f946d0SMatthew Wilcox (Oracle) EXPORT_SYMBOL(alloc_pages); 23051da177e4SLinus Torvalds 2306cc09cb13SMatthew Wilcox (Oracle) struct folio *folio_alloc(gfp_t gfp, unsigned order) 2307cc09cb13SMatthew Wilcox (Oracle) { 2308cc09cb13SMatthew Wilcox (Oracle) struct page *page = alloc_pages(gfp | __GFP_COMP, order); 2309cc09cb13SMatthew Wilcox (Oracle) 2310cc09cb13SMatthew Wilcox (Oracle) if (page && order > 1) 2311cc09cb13SMatthew Wilcox (Oracle) prep_transhuge_page(page); 2312cc09cb13SMatthew Wilcox (Oracle) return (struct folio *)page; 2313cc09cb13SMatthew Wilcox (Oracle) } 2314cc09cb13SMatthew Wilcox (Oracle) EXPORT_SYMBOL(folio_alloc); 2315cc09cb13SMatthew Wilcox (Oracle) 2316c00b6b96SChen Wandun static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp, 2317c00b6b96SChen Wandun struct mempolicy *pol, unsigned long nr_pages, 2318c00b6b96SChen Wandun struct page **page_array) 2319c00b6b96SChen Wandun { 2320c00b6b96SChen Wandun int nodes; 2321c00b6b96SChen Wandun unsigned long nr_pages_per_node; 2322c00b6b96SChen Wandun int delta; 2323c00b6b96SChen Wandun int i; 2324c00b6b96SChen Wandun unsigned long nr_allocated; 2325c00b6b96SChen Wandun unsigned long total_allocated = 0; 2326c00b6b96SChen Wandun 2327c00b6b96SChen Wandun nodes = nodes_weight(pol->nodes); 2328c00b6b96SChen Wandun nr_pages_per_node = nr_pages / nodes; 2329c00b6b96SChen Wandun delta = nr_pages - nodes * nr_pages_per_node; 2330c00b6b96SChen Wandun 2331c00b6b96SChen Wandun for (i = 0; i < nodes; i++) { 2332c00b6b96SChen Wandun if (delta) { 2333c00b6b96SChen Wandun nr_allocated = __alloc_pages_bulk(gfp, 2334c00b6b96SChen Wandun interleave_nodes(pol), NULL, 2335c00b6b96SChen Wandun nr_pages_per_node + 1, NULL, 2336c00b6b96SChen Wandun page_array); 2337c00b6b96SChen Wandun delta--; 2338c00b6b96SChen Wandun } else { 2339c00b6b96SChen Wandun nr_allocated = __alloc_pages_bulk(gfp, 2340c00b6b96SChen Wandun interleave_nodes(pol), NULL, 2341c00b6b96SChen Wandun nr_pages_per_node, NULL, page_array); 2342c00b6b96SChen Wandun } 2343c00b6b96SChen Wandun 2344c00b6b96SChen Wandun page_array += nr_allocated; 2345c00b6b96SChen Wandun total_allocated += nr_allocated; 2346c00b6b96SChen Wandun } 2347c00b6b96SChen Wandun 2348c00b6b96SChen 
Wandun return total_allocated; 2349c00b6b96SChen Wandun } 2350c00b6b96SChen Wandun 2351c00b6b96SChen Wandun static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid, 2352c00b6b96SChen Wandun struct mempolicy *pol, unsigned long nr_pages, 2353c00b6b96SChen Wandun struct page **page_array) 2354c00b6b96SChen Wandun { 2355c00b6b96SChen Wandun gfp_t preferred_gfp; 2356c00b6b96SChen Wandun unsigned long nr_allocated = 0; 2357c00b6b96SChen Wandun 2358c00b6b96SChen Wandun preferred_gfp = gfp | __GFP_NOWARN; 2359c00b6b96SChen Wandun preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL); 2360c00b6b96SChen Wandun 2361c00b6b96SChen Wandun nr_allocated = __alloc_pages_bulk(preferred_gfp, nid, &pol->nodes, 2362c00b6b96SChen Wandun nr_pages, NULL, page_array); 2363c00b6b96SChen Wandun 2364c00b6b96SChen Wandun if (nr_allocated < nr_pages) 2365c00b6b96SChen Wandun nr_allocated += __alloc_pages_bulk(gfp, numa_node_id(), NULL, 2366c00b6b96SChen Wandun nr_pages - nr_allocated, NULL, 2367c00b6b96SChen Wandun page_array + nr_allocated); 2368c00b6b96SChen Wandun return nr_allocated; 2369c00b6b96SChen Wandun } 2370c00b6b96SChen Wandun 2371c00b6b96SChen Wandun /* alloc pages bulk and mempolicy should be considered at the 2372c00b6b96SChen Wandun * same time in some situation such as vmalloc. 2373c00b6b96SChen Wandun * 2374c00b6b96SChen Wandun * It can accelerate memory allocation especially interleaving 2375c00b6b96SChen Wandun * allocate memory. 2376c00b6b96SChen Wandun */ 2377c00b6b96SChen Wandun unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp, 2378c00b6b96SChen Wandun unsigned long nr_pages, struct page **page_array) 2379c00b6b96SChen Wandun { 2380c00b6b96SChen Wandun struct mempolicy *pol = &default_policy; 2381c00b6b96SChen Wandun 2382c00b6b96SChen Wandun if (!in_interrupt() && !(gfp & __GFP_THISNODE)) 2383c00b6b96SChen Wandun pol = get_task_policy(current); 2384c00b6b96SChen Wandun 2385c00b6b96SChen Wandun if (pol->mode == MPOL_INTERLEAVE) 2386c00b6b96SChen Wandun return alloc_pages_bulk_array_interleave(gfp, pol, 2387c00b6b96SChen Wandun nr_pages, page_array); 2388c00b6b96SChen Wandun 2389c00b6b96SChen Wandun if (pol->mode == MPOL_PREFERRED_MANY) 2390c00b6b96SChen Wandun return alloc_pages_bulk_array_preferred_many(gfp, 2391c00b6b96SChen Wandun numa_node_id(), pol, nr_pages, page_array); 2392c00b6b96SChen Wandun 2393c00b6b96SChen Wandun return __alloc_pages_bulk(gfp, policy_node(gfp, pol, numa_node_id()), 2394c00b6b96SChen Wandun policy_nodemask(gfp, pol), nr_pages, NULL, 2395c00b6b96SChen Wandun page_array); 2396c00b6b96SChen Wandun } 2397c00b6b96SChen Wandun 2398ef0855d3SOleg Nesterov int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst) 2399ef0855d3SOleg Nesterov { 2400ef0855d3SOleg Nesterov struct mempolicy *pol = mpol_dup(vma_policy(src)); 2401ef0855d3SOleg Nesterov 2402ef0855d3SOleg Nesterov if (IS_ERR(pol)) 2403ef0855d3SOleg Nesterov return PTR_ERR(pol); 2404ef0855d3SOleg Nesterov dst->vm_policy = pol; 2405ef0855d3SOleg Nesterov return 0; 2406ef0855d3SOleg Nesterov } 2407ef0855d3SOleg Nesterov 24084225399aSPaul Jackson /* 2409846a16bfSLee Schermerhorn * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it 24104225399aSPaul Jackson * rebinds the mempolicy its copying by calling mpol_rebind_policy() 24114225399aSPaul Jackson * with the mems_allowed returned by cpuset_mems_allowed(). This 24124225399aSPaul Jackson * keeps mempolicies cpuset relative after its cpuset moves. 
See 24134225399aSPaul Jackson * further kernel/cpuset.c update_nodemask(). 2414708c1bbcSMiao Xie * 2415708c1bbcSMiao Xie * current's mempolicy may be rebinded by the other task(the task that changes 2416708c1bbcSMiao Xie * cpuset's mems), so we needn't do rebind work for current task. 24174225399aSPaul Jackson */ 24184225399aSPaul Jackson 2419846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */ 2420846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old) 24211da177e4SLinus Torvalds { 24221da177e4SLinus Torvalds struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL); 24231da177e4SLinus Torvalds 24241da177e4SLinus Torvalds if (!new) 24251da177e4SLinus Torvalds return ERR_PTR(-ENOMEM); 2426708c1bbcSMiao Xie 2427708c1bbcSMiao Xie /* task's mempolicy is protected by alloc_lock */ 2428708c1bbcSMiao Xie if (old == current->mempolicy) { 2429708c1bbcSMiao Xie task_lock(current); 2430708c1bbcSMiao Xie *new = *old; 2431708c1bbcSMiao Xie task_unlock(current); 2432708c1bbcSMiao Xie } else 2433708c1bbcSMiao Xie *new = *old; 2434708c1bbcSMiao Xie 24354225399aSPaul Jackson if (current_cpuset_is_being_rebound()) { 24364225399aSPaul Jackson nodemask_t mems = cpuset_mems_allowed(current); 2437213980c0SVlastimil Babka mpol_rebind_policy(new, &mems); 24384225399aSPaul Jackson } 24391da177e4SLinus Torvalds atomic_set(&new->refcnt, 1); 24401da177e4SLinus Torvalds return new; 24411da177e4SLinus Torvalds } 24421da177e4SLinus Torvalds 24431da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */ 2444fcfb4dccSKOSAKI Motohiro bool __mpol_equal(struct mempolicy *a, struct mempolicy *b) 24451da177e4SLinus Torvalds { 24461da177e4SLinus Torvalds if (!a || !b) 2447fcfb4dccSKOSAKI Motohiro return false; 244845c4745aSLee Schermerhorn if (a->mode != b->mode) 2449fcfb4dccSKOSAKI Motohiro return false; 245019800502SBob Liu if (a->flags != b->flags) 2451fcfb4dccSKOSAKI Motohiro return false; 2452c6018b4bSAneesh Kumar K.V if (a->home_node != b->home_node) 2453c6018b4bSAneesh Kumar K.V return false; 245419800502SBob Liu if (mpol_store_user_nodemask(a)) 245519800502SBob Liu if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask)) 2456fcfb4dccSKOSAKI Motohiro return false; 245719800502SBob Liu 245845c4745aSLee Schermerhorn switch (a->mode) { 245919770b32SMel Gorman case MPOL_BIND: 24601da177e4SLinus Torvalds case MPOL_INTERLEAVE: 24611da177e4SLinus Torvalds case MPOL_PREFERRED: 2462b27abaccSDave Hansen case MPOL_PREFERRED_MANY: 2463269fbe72SBen Widawsky return !!nodes_equal(a->nodes, b->nodes); 24647858d7bcSFeng Tang case MPOL_LOCAL: 24657858d7bcSFeng Tang return true; 24661da177e4SLinus Torvalds default: 24671da177e4SLinus Torvalds BUG(); 2468fcfb4dccSKOSAKI Motohiro return false; 24691da177e4SLinus Torvalds } 24701da177e4SLinus Torvalds } 24711da177e4SLinus Torvalds 24721da177e4SLinus Torvalds /* 24731da177e4SLinus Torvalds * Shared memory backing store policy support. 24741da177e4SLinus Torvalds * 24751da177e4SLinus Torvalds * Remember policies even when nobody has shared memory mapped. 24761da177e4SLinus Torvalds * The policies are kept in Red-Black tree linked from the inode. 24774a8c7bb5SNathan Zimmer * They are protected by the sp->lock rwlock, which should be held 24781da177e4SLinus Torvalds * for any accesses to the tree. 24791da177e4SLinus Torvalds */ 24801da177e4SLinus Torvalds 24814a8c7bb5SNathan Zimmer /* 24824a8c7bb5SNathan Zimmer * lookup first element intersecting start-end. 
Caller holds sp->lock for 24834a8c7bb5SNathan Zimmer * reading or for writing 24844a8c7bb5SNathan Zimmer */ 24851da177e4SLinus Torvalds static struct sp_node * 24861da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end) 24871da177e4SLinus Torvalds { 24881da177e4SLinus Torvalds struct rb_node *n = sp->root.rb_node; 24891da177e4SLinus Torvalds 24901da177e4SLinus Torvalds while (n) { 24911da177e4SLinus Torvalds struct sp_node *p = rb_entry(n, struct sp_node, nd); 24921da177e4SLinus Torvalds 24931da177e4SLinus Torvalds if (start >= p->end) 24941da177e4SLinus Torvalds n = n->rb_right; 24951da177e4SLinus Torvalds else if (end <= p->start) 24961da177e4SLinus Torvalds n = n->rb_left; 24971da177e4SLinus Torvalds else 24981da177e4SLinus Torvalds break; 24991da177e4SLinus Torvalds } 25001da177e4SLinus Torvalds if (!n) 25011da177e4SLinus Torvalds return NULL; 25021da177e4SLinus Torvalds for (;;) { 25031da177e4SLinus Torvalds struct sp_node *w = NULL; 25041da177e4SLinus Torvalds struct rb_node *prev = rb_prev(n); 25051da177e4SLinus Torvalds if (!prev) 25061da177e4SLinus Torvalds break; 25071da177e4SLinus Torvalds w = rb_entry(prev, struct sp_node, nd); 25081da177e4SLinus Torvalds if (w->end <= start) 25091da177e4SLinus Torvalds break; 25101da177e4SLinus Torvalds n = prev; 25111da177e4SLinus Torvalds } 25121da177e4SLinus Torvalds return rb_entry(n, struct sp_node, nd); 25131da177e4SLinus Torvalds } 25141da177e4SLinus Torvalds 25154a8c7bb5SNathan Zimmer /* 25164a8c7bb5SNathan Zimmer * Insert a new shared policy into the list. Caller holds sp->lock for 25174a8c7bb5SNathan Zimmer * writing. 25184a8c7bb5SNathan Zimmer */ 25191da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new) 25201da177e4SLinus Torvalds { 25211da177e4SLinus Torvalds struct rb_node **p = &sp->root.rb_node; 25221da177e4SLinus Torvalds struct rb_node *parent = NULL; 25231da177e4SLinus Torvalds struct sp_node *nd; 25241da177e4SLinus Torvalds 25251da177e4SLinus Torvalds while (*p) { 25261da177e4SLinus Torvalds parent = *p; 25271da177e4SLinus Torvalds nd = rb_entry(parent, struct sp_node, nd); 25281da177e4SLinus Torvalds if (new->start < nd->start) 25291da177e4SLinus Torvalds p = &(*p)->rb_left; 25301da177e4SLinus Torvalds else if (new->end > nd->end) 25311da177e4SLinus Torvalds p = &(*p)->rb_right; 25321da177e4SLinus Torvalds else 25331da177e4SLinus Torvalds BUG(); 25341da177e4SLinus Torvalds } 25351da177e4SLinus Torvalds rb_link_node(&new->nd, parent, p); 25361da177e4SLinus Torvalds rb_insert_color(&new->nd, &sp->root); 2537140d5a49SPaul Mundt pr_debug("inserting %lx-%lx: %d\n", new->start, new->end, 253845c4745aSLee Schermerhorn new->policy ? 
new->policy->mode : 0); 25391da177e4SLinus Torvalds } 25401da177e4SLinus Torvalds 25411da177e4SLinus Torvalds /* Find shared policy intersecting idx */ 25421da177e4SLinus Torvalds struct mempolicy * 25431da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx) 25441da177e4SLinus Torvalds { 25451da177e4SLinus Torvalds struct mempolicy *pol = NULL; 25461da177e4SLinus Torvalds struct sp_node *sn; 25471da177e4SLinus Torvalds 25481da177e4SLinus Torvalds if (!sp->root.rb_node) 25491da177e4SLinus Torvalds return NULL; 25504a8c7bb5SNathan Zimmer read_lock(&sp->lock); 25511da177e4SLinus Torvalds sn = sp_lookup(sp, idx, idx+1); 25521da177e4SLinus Torvalds if (sn) { 25531da177e4SLinus Torvalds mpol_get(sn->policy); 25541da177e4SLinus Torvalds pol = sn->policy; 25551da177e4SLinus Torvalds } 25564a8c7bb5SNathan Zimmer read_unlock(&sp->lock); 25571da177e4SLinus Torvalds return pol; 25581da177e4SLinus Torvalds } 25591da177e4SLinus Torvalds 256063f74ca2SKOSAKI Motohiro static void sp_free(struct sp_node *n) 256163f74ca2SKOSAKI Motohiro { 256263f74ca2SKOSAKI Motohiro mpol_put(n->policy); 256363f74ca2SKOSAKI Motohiro kmem_cache_free(sn_cache, n); 256463f74ca2SKOSAKI Motohiro } 256563f74ca2SKOSAKI Motohiro 2566771fb4d8SLee Schermerhorn /** 2567771fb4d8SLee Schermerhorn * mpol_misplaced - check whether current page node is valid in policy 2568771fb4d8SLee Schermerhorn * 2569b46e14acSFabian Frederick * @page: page to be checked 2570b46e14acSFabian Frederick * @vma: vm area where page mapped 2571b46e14acSFabian Frederick * @addr: virtual address where page mapped 2572771fb4d8SLee Schermerhorn * 2573771fb4d8SLee Schermerhorn * Lookup current policy node id for vma,addr and "compare to" page's 25745f076944SMatthew Wilcox (Oracle) * node id. Policy determination "mimics" alloc_page_vma(). 2575771fb4d8SLee Schermerhorn * Called from fault path where we know the vma and faulting address. 25765f076944SMatthew Wilcox (Oracle) * 2577062db293SBaolin Wang * Return: NUMA_NO_NODE if the page is in a node that is valid for this 2578062db293SBaolin Wang * policy, or a suitable node ID to allocate a replacement page from. 
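 *
 * Illustrative caller pattern (sketch only; the real NUMA hinting fault path
 * in mm/memory.c does additional bookkeeping):
 *
 *	target_nid = mpol_misplaced(page, vma, addr);
 *	if (target_nid != NUMA_NO_NODE)
 *		(migrate the page towards target_nid, e.g. via
 *		 migrate_misplaced_page())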
2579771fb4d8SLee Schermerhorn */ 2580771fb4d8SLee Schermerhorn int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr) 2581771fb4d8SLee Schermerhorn { 2582771fb4d8SLee Schermerhorn struct mempolicy *pol; 2583c33d6c06SMel Gorman struct zoneref *z; 2584771fb4d8SLee Schermerhorn int curnid = page_to_nid(page); 2585771fb4d8SLee Schermerhorn unsigned long pgoff; 258690572890SPeter Zijlstra int thiscpu = raw_smp_processor_id(); 258790572890SPeter Zijlstra int thisnid = cpu_to_node(thiscpu); 258898fa15f3SAnshuman Khandual int polnid = NUMA_NO_NODE; 2589062db293SBaolin Wang int ret = NUMA_NO_NODE; 2590771fb4d8SLee Schermerhorn 2591dd6eecb9SOleg Nesterov pol = get_vma_policy(vma, addr); 2592771fb4d8SLee Schermerhorn if (!(pol->flags & MPOL_F_MOF)) 2593771fb4d8SLee Schermerhorn goto out; 2594771fb4d8SLee Schermerhorn 2595771fb4d8SLee Schermerhorn switch (pol->mode) { 2596771fb4d8SLee Schermerhorn case MPOL_INTERLEAVE: 2597771fb4d8SLee Schermerhorn pgoff = vma->vm_pgoff; 2598771fb4d8SLee Schermerhorn pgoff += (addr - vma->vm_start) >> PAGE_SHIFT; 259998c70baaSLaurent Dufour polnid = offset_il_node(pol, pgoff); 2600771fb4d8SLee Schermerhorn break; 2601771fb4d8SLee Schermerhorn 2602771fb4d8SLee Schermerhorn case MPOL_PREFERRED: 2603b27abaccSDave Hansen if (node_isset(curnid, pol->nodes)) 2604b27abaccSDave Hansen goto out; 2605269fbe72SBen Widawsky polnid = first_node(pol->nodes); 2606771fb4d8SLee Schermerhorn break; 2607771fb4d8SLee Schermerhorn 26087858d7bcSFeng Tang case MPOL_LOCAL: 26097858d7bcSFeng Tang polnid = numa_node_id(); 26107858d7bcSFeng Tang break; 26117858d7bcSFeng Tang 2612771fb4d8SLee Schermerhorn case MPOL_BIND: 2613bda420b9SHuang Ying /* Optimize placement among multiple nodes via NUMA balancing */ 2614bda420b9SHuang Ying if (pol->flags & MPOL_F_MORON) { 2615269fbe72SBen Widawsky if (node_isset(thisnid, pol->nodes)) 2616bda420b9SHuang Ying break; 2617bda420b9SHuang Ying goto out; 2618bda420b9SHuang Ying } 2619b27abaccSDave Hansen fallthrough; 2620c33d6c06SMel Gorman 2621b27abaccSDave Hansen case MPOL_PREFERRED_MANY: 2622771fb4d8SLee Schermerhorn /* 2623771fb4d8SLee Schermerhorn * use current page if in policy nodemask, 2624771fb4d8SLee Schermerhorn * else select nearest allowed node, if any. 2625771fb4d8SLee Schermerhorn * If no allowed nodes, use current [!misplaced]. 
2626771fb4d8SLee Schermerhorn */ 2627269fbe72SBen Widawsky if (node_isset(curnid, pol->nodes)) 2628771fb4d8SLee Schermerhorn goto out; 2629c33d6c06SMel Gorman z = first_zones_zonelist( 2630771fb4d8SLee Schermerhorn node_zonelist(numa_node_id(), GFP_HIGHUSER), 2631771fb4d8SLee Schermerhorn gfp_zone(GFP_HIGHUSER), 2632269fbe72SBen Widawsky &pol->nodes); 2633c1093b74SPavel Tatashin polnid = zone_to_nid(z->zone); 2634771fb4d8SLee Schermerhorn break; 2635771fb4d8SLee Schermerhorn 2636771fb4d8SLee Schermerhorn default: 2637771fb4d8SLee Schermerhorn BUG(); 2638771fb4d8SLee Schermerhorn } 26395606e387SMel Gorman 26405606e387SMel Gorman /* Migrate the page towards the node whose CPU is referencing it */ 2641e42c8ff2SMel Gorman if (pol->flags & MPOL_F_MORON) { 264290572890SPeter Zijlstra polnid = thisnid; 26435606e387SMel Gorman 264410f39042SRik van Riel if (!should_numa_migrate_memory(current, page, curnid, thiscpu)) 2645de1c9ce6SRik van Riel goto out; 2646de1c9ce6SRik van Riel } 2647e42c8ff2SMel Gorman 2648771fb4d8SLee Schermerhorn if (curnid != polnid) 2649771fb4d8SLee Schermerhorn ret = polnid; 2650771fb4d8SLee Schermerhorn out: 2651771fb4d8SLee Schermerhorn mpol_cond_put(pol); 2652771fb4d8SLee Schermerhorn 2653771fb4d8SLee Schermerhorn return ret; 2654771fb4d8SLee Schermerhorn } 2655771fb4d8SLee Schermerhorn 2656c11600e4SDavid Rientjes /* 2657c11600e4SDavid Rientjes * Drop the (possibly final) reference to task->mempolicy. It needs to be 2658c11600e4SDavid Rientjes * dropped after task->mempolicy is set to NULL so that any allocation done as 2659c11600e4SDavid Rientjes * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed 2660c11600e4SDavid Rientjes * policy. 2661c11600e4SDavid Rientjes */ 2662c11600e4SDavid Rientjes void mpol_put_task_policy(struct task_struct *task) 2663c11600e4SDavid Rientjes { 2664c11600e4SDavid Rientjes struct mempolicy *pol; 2665c11600e4SDavid Rientjes 2666c11600e4SDavid Rientjes task_lock(task); 2667c11600e4SDavid Rientjes pol = task->mempolicy; 2668c11600e4SDavid Rientjes task->mempolicy = NULL; 2669c11600e4SDavid Rientjes task_unlock(task); 2670c11600e4SDavid Rientjes mpol_put(pol); 2671c11600e4SDavid Rientjes } 2672c11600e4SDavid Rientjes 26731da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n) 26741da177e4SLinus Torvalds { 2675140d5a49SPaul Mundt pr_debug("deleting %lx-l%lx\n", n->start, n->end); 26761da177e4SLinus Torvalds rb_erase(&n->nd, &sp->root); 267763f74ca2SKOSAKI Motohiro sp_free(n); 26781da177e4SLinus Torvalds } 26791da177e4SLinus Torvalds 268042288fe3SMel Gorman static void sp_node_init(struct sp_node *node, unsigned long start, 268142288fe3SMel Gorman unsigned long end, struct mempolicy *pol) 268242288fe3SMel Gorman { 268342288fe3SMel Gorman node->start = start; 268442288fe3SMel Gorman node->end = end; 268542288fe3SMel Gorman node->policy = pol; 268642288fe3SMel Gorman } 268742288fe3SMel Gorman 2688dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end, 2689dbcb0f19SAdrian Bunk struct mempolicy *pol) 26901da177e4SLinus Torvalds { 2691869833f2SKOSAKI Motohiro struct sp_node *n; 2692869833f2SKOSAKI Motohiro struct mempolicy *newpol; 26931da177e4SLinus Torvalds 2694869833f2SKOSAKI Motohiro n = kmem_cache_alloc(sn_cache, GFP_KERNEL); 26951da177e4SLinus Torvalds if (!n) 26961da177e4SLinus Torvalds return NULL; 2697869833f2SKOSAKI Motohiro 2698869833f2SKOSAKI Motohiro newpol = mpol_dup(pol); 2699869833f2SKOSAKI Motohiro if (IS_ERR(newpol)) { 2700869833f2SKOSAKI 
Motohiro kmem_cache_free(sn_cache, n); 2701869833f2SKOSAKI Motohiro return NULL; 2702869833f2SKOSAKI Motohiro } 2703869833f2SKOSAKI Motohiro newpol->flags |= MPOL_F_SHARED; 270442288fe3SMel Gorman sp_node_init(n, start, end, newpol); 2705869833f2SKOSAKI Motohiro 27061da177e4SLinus Torvalds return n; 27071da177e4SLinus Torvalds } 27081da177e4SLinus Torvalds 27091da177e4SLinus Torvalds /* Replace a policy range. */ 27101da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start, 27111da177e4SLinus Torvalds unsigned long end, struct sp_node *new) 27121da177e4SLinus Torvalds { 2713b22d127aSMel Gorman struct sp_node *n; 271442288fe3SMel Gorman struct sp_node *n_new = NULL; 271542288fe3SMel Gorman struct mempolicy *mpol_new = NULL; 2716b22d127aSMel Gorman int ret = 0; 27171da177e4SLinus Torvalds 271842288fe3SMel Gorman restart: 27194a8c7bb5SNathan Zimmer write_lock(&sp->lock); 27201da177e4SLinus Torvalds n = sp_lookup(sp, start, end); 27211da177e4SLinus Torvalds /* Take care of old policies in the same range. */ 27221da177e4SLinus Torvalds while (n && n->start < end) { 27231da177e4SLinus Torvalds struct rb_node *next = rb_next(&n->nd); 27241da177e4SLinus Torvalds if (n->start >= start) { 27251da177e4SLinus Torvalds if (n->end <= end) 27261da177e4SLinus Torvalds sp_delete(sp, n); 27271da177e4SLinus Torvalds else 27281da177e4SLinus Torvalds n->start = end; 27291da177e4SLinus Torvalds } else { 27301da177e4SLinus Torvalds /* Old policy spanning whole new range. */ 27311da177e4SLinus Torvalds if (n->end > end) { 273242288fe3SMel Gorman if (!n_new) 273342288fe3SMel Gorman goto alloc_new; 273442288fe3SMel Gorman 273542288fe3SMel Gorman *mpol_new = *n->policy; 273642288fe3SMel Gorman atomic_set(&mpol_new->refcnt, 1); 27377880639cSKOSAKI Motohiro sp_node_init(n_new, end, n->end, mpol_new); 27381da177e4SLinus Torvalds n->end = start; 27395ca39575SHillf Danton sp_insert(sp, n_new); 274042288fe3SMel Gorman n_new = NULL; 274142288fe3SMel Gorman mpol_new = NULL; 27421da177e4SLinus Torvalds break; 27431da177e4SLinus Torvalds } else 27441da177e4SLinus Torvalds n->end = start; 27451da177e4SLinus Torvalds } 27461da177e4SLinus Torvalds if (!next) 27471da177e4SLinus Torvalds break; 27481da177e4SLinus Torvalds n = rb_entry(next, struct sp_node, nd); 27491da177e4SLinus Torvalds } 27501da177e4SLinus Torvalds if (new) 27511da177e4SLinus Torvalds sp_insert(sp, new); 27524a8c7bb5SNathan Zimmer write_unlock(&sp->lock); 275342288fe3SMel Gorman ret = 0; 275442288fe3SMel Gorman 275542288fe3SMel Gorman err_out: 275642288fe3SMel Gorman if (mpol_new) 275742288fe3SMel Gorman mpol_put(mpol_new); 275842288fe3SMel Gorman if (n_new) 275942288fe3SMel Gorman kmem_cache_free(sn_cache, n_new); 276042288fe3SMel Gorman 2761b22d127aSMel Gorman return ret; 276242288fe3SMel Gorman 276342288fe3SMel Gorman alloc_new: 27644a8c7bb5SNathan Zimmer write_unlock(&sp->lock); 276542288fe3SMel Gorman ret = -ENOMEM; 276642288fe3SMel Gorman n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL); 276742288fe3SMel Gorman if (!n_new) 276842288fe3SMel Gorman goto err_out; 276942288fe3SMel Gorman mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL); 277042288fe3SMel Gorman if (!mpol_new) 277142288fe3SMel Gorman goto err_out; 27724ad09955SMiaohe Lin atomic_set(&mpol_new->refcnt, 1); 277342288fe3SMel Gorman goto restart; 27741da177e4SLinus Torvalds } 27751da177e4SLinus Torvalds 277671fe804bSLee Schermerhorn /** 277771fe804bSLee Schermerhorn * mpol_shared_policy_init - initialize shared policy for inode 277871fe804bSLee 
Schermerhorn * @sp: pointer to inode shared policy 277971fe804bSLee Schermerhorn * @mpol: struct mempolicy to install 278071fe804bSLee Schermerhorn * 278171fe804bSLee Schermerhorn * Install non-NULL @mpol in inode's shared policy rb-tree. 278271fe804bSLee Schermerhorn * On entry, the current task has a reference on a non-NULL @mpol. 278371fe804bSLee Schermerhorn * This must be released on exit. 27844bfc4495SKAMEZAWA Hiroyuki * This is called at get_inode() calls and we can use GFP_KERNEL. 278571fe804bSLee Schermerhorn */ 278671fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) 27877339ff83SRobin Holt { 278858568d2aSMiao Xie int ret; 278958568d2aSMiao Xie 279071fe804bSLee Schermerhorn sp->root = RB_ROOT; /* empty tree == default mempolicy */ 27914a8c7bb5SNathan Zimmer rwlock_init(&sp->lock); 27927339ff83SRobin Holt 279371fe804bSLee Schermerhorn if (mpol) { 27947339ff83SRobin Holt struct vm_area_struct pvma; 279571fe804bSLee Schermerhorn struct mempolicy *new; 27964bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 27977339ff83SRobin Holt 27984bfc4495SKAMEZAWA Hiroyuki if (!scratch) 27995c0c1654SLee Schermerhorn goto put_mpol; 280071fe804bSLee Schermerhorn /* contextualize the tmpfs mount point mempolicy */ 280171fe804bSLee Schermerhorn new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask); 280215d77835SLee Schermerhorn if (IS_ERR(new)) 28030cae3457SDan Carpenter goto free_scratch; /* no valid nodemask intersection */ 280458568d2aSMiao Xie 280558568d2aSMiao Xie task_lock(current); 28064bfc4495SKAMEZAWA Hiroyuki ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch); 280758568d2aSMiao Xie task_unlock(current); 280815d77835SLee Schermerhorn if (ret) 28095c0c1654SLee Schermerhorn goto put_new; 281071fe804bSLee Schermerhorn 281171fe804bSLee Schermerhorn /* Create pseudo-vma that contains just the policy */ 28122c4541e2SKirill A. Shutemov vma_init(&pvma, NULL); 281371fe804bSLee Schermerhorn pvma.vm_end = TASK_SIZE; /* policy covers entire file */ 281471fe804bSLee Schermerhorn mpol_set_shared_policy(sp, &pvma, new); /* adds ref */ 281515d77835SLee Schermerhorn 28165c0c1654SLee Schermerhorn put_new: 281771fe804bSLee Schermerhorn mpol_put(new); /* drop initial ref */ 28180cae3457SDan Carpenter free_scratch: 28194bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 28205c0c1654SLee Schermerhorn put_mpol: 28215c0c1654SLee Schermerhorn mpol_put(mpol); /* drop our incoming ref on sb mpol */ 28227339ff83SRobin Holt } 28237339ff83SRobin Holt } 28247339ff83SRobin Holt 28251da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info, 28261da177e4SLinus Torvalds struct vm_area_struct *vma, struct mempolicy *npol) 28271da177e4SLinus Torvalds { 28281da177e4SLinus Torvalds int err; 28291da177e4SLinus Torvalds struct sp_node *new = NULL; 28301da177e4SLinus Torvalds unsigned long sz = vma_pages(vma); 28311da177e4SLinus Torvalds 2832028fec41SDavid Rientjes pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n", 28331da177e4SLinus Torvalds vma->vm_pgoff, 283445c4745aSLee Schermerhorn sz, npol ? npol->mode : -1, 2835028fec41SDavid Rientjes npol ? npol->flags : -1, 2836269fbe72SBen Widawsky npol ? 
nodes_addr(npol->nodes)[0] : NUMA_NO_NODE); 28371da177e4SLinus Torvalds 28381da177e4SLinus Torvalds if (npol) { 28391da177e4SLinus Torvalds new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol); 28401da177e4SLinus Torvalds if (!new) 28411da177e4SLinus Torvalds return -ENOMEM; 28421da177e4SLinus Torvalds } 28431da177e4SLinus Torvalds err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new); 28441da177e4SLinus Torvalds if (err && new) 284563f74ca2SKOSAKI Motohiro sp_free(new); 28461da177e4SLinus Torvalds return err; 28471da177e4SLinus Torvalds } 28481da177e4SLinus Torvalds 28491da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */ 28501da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p) 28511da177e4SLinus Torvalds { 28521da177e4SLinus Torvalds struct sp_node *n; 28531da177e4SLinus Torvalds struct rb_node *next; 28541da177e4SLinus Torvalds 28551da177e4SLinus Torvalds if (!p->root.rb_node) 28561da177e4SLinus Torvalds return; 28574a8c7bb5SNathan Zimmer write_lock(&p->lock); 28581da177e4SLinus Torvalds next = rb_first(&p->root); 28591da177e4SLinus Torvalds while (next) { 28601da177e4SLinus Torvalds n = rb_entry(next, struct sp_node, nd); 28611da177e4SLinus Torvalds next = rb_next(&n->nd); 286263f74ca2SKOSAKI Motohiro sp_delete(p, n); 28631da177e4SLinus Torvalds } 28644a8c7bb5SNathan Zimmer write_unlock(&p->lock); 28651da177e4SLinus Torvalds } 28661da177e4SLinus Torvalds 28671a687c2eSMel Gorman #ifdef CONFIG_NUMA_BALANCING 2868c297663cSMel Gorman static int __initdata numabalancing_override; 28691a687c2eSMel Gorman 28701a687c2eSMel Gorman static void __init check_numabalancing_enable(void) 28711a687c2eSMel Gorman { 28721a687c2eSMel Gorman bool numabalancing_default = false; 28731a687c2eSMel Gorman 28741a687c2eSMel Gorman if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED)) 28751a687c2eSMel Gorman numabalancing_default = true; 28761a687c2eSMel Gorman 2877c297663cSMel Gorman /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */ 2878c297663cSMel Gorman if (numabalancing_override) 2879c297663cSMel Gorman set_numabalancing_state(numabalancing_override == 1); 2880c297663cSMel Gorman 2881b0dc2b9bSMel Gorman if (num_online_nodes() > 1 && !numabalancing_override) { 2882756a025fSJoe Perches pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n", 2883c297663cSMel Gorman numabalancing_default ? 
"Enabling" : "Disabling"); 28841a687c2eSMel Gorman set_numabalancing_state(numabalancing_default); 28851a687c2eSMel Gorman } 28861a687c2eSMel Gorman } 28871a687c2eSMel Gorman 28881a687c2eSMel Gorman static int __init setup_numabalancing(char *str) 28891a687c2eSMel Gorman { 28901a687c2eSMel Gorman int ret = 0; 28911a687c2eSMel Gorman if (!str) 28921a687c2eSMel Gorman goto out; 28931a687c2eSMel Gorman 28941a687c2eSMel Gorman if (!strcmp(str, "enable")) { 2895c297663cSMel Gorman numabalancing_override = 1; 28961a687c2eSMel Gorman ret = 1; 28971a687c2eSMel Gorman } else if (!strcmp(str, "disable")) { 2898c297663cSMel Gorman numabalancing_override = -1; 28991a687c2eSMel Gorman ret = 1; 29001a687c2eSMel Gorman } 29011a687c2eSMel Gorman out: 29021a687c2eSMel Gorman if (!ret) 29034a404beaSAndrew Morton pr_warn("Unable to parse numa_balancing=\n"); 29041a687c2eSMel Gorman 29051a687c2eSMel Gorman return ret; 29061a687c2eSMel Gorman } 29071a687c2eSMel Gorman __setup("numa_balancing=", setup_numabalancing); 29081a687c2eSMel Gorman #else 29091a687c2eSMel Gorman static inline void __init check_numabalancing_enable(void) 29101a687c2eSMel Gorman { 29111a687c2eSMel Gorman } 29121a687c2eSMel Gorman #endif /* CONFIG_NUMA_BALANCING */ 29131a687c2eSMel Gorman 29141da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */ 29151da177e4SLinus Torvalds void __init numa_policy_init(void) 29161da177e4SLinus Torvalds { 2917b71636e2SPaul Mundt nodemask_t interleave_nodes; 2918b71636e2SPaul Mundt unsigned long largest = 0; 2919b71636e2SPaul Mundt int nid, prefer = 0; 2920b71636e2SPaul Mundt 29211da177e4SLinus Torvalds policy_cache = kmem_cache_create("numa_policy", 29221da177e4SLinus Torvalds sizeof(struct mempolicy), 292320c2df83SPaul Mundt 0, SLAB_PANIC, NULL); 29241da177e4SLinus Torvalds 29251da177e4SLinus Torvalds sn_cache = kmem_cache_create("shared_policy_node", 29261da177e4SLinus Torvalds sizeof(struct sp_node), 292720c2df83SPaul Mundt 0, SLAB_PANIC, NULL); 29281da177e4SLinus Torvalds 29295606e387SMel Gorman for_each_node(nid) { 29305606e387SMel Gorman preferred_node_policy[nid] = (struct mempolicy) { 29315606e387SMel Gorman .refcnt = ATOMIC_INIT(1), 29325606e387SMel Gorman .mode = MPOL_PREFERRED, 29335606e387SMel Gorman .flags = MPOL_F_MOF | MPOL_F_MORON, 2934269fbe72SBen Widawsky .nodes = nodemask_of_node(nid), 29355606e387SMel Gorman }; 29365606e387SMel Gorman } 29375606e387SMel Gorman 2938b71636e2SPaul Mundt /* 2939b71636e2SPaul Mundt * Set interleaving policy for system init. Interleaving is only 2940b71636e2SPaul Mundt * enabled across suitably sized nodes (default is >= 16MB), or 2941b71636e2SPaul Mundt * fall back to the largest node if they're all smaller. 2942b71636e2SPaul Mundt */ 2943b71636e2SPaul Mundt nodes_clear(interleave_nodes); 294401f13bd6SLai Jiangshan for_each_node_state(nid, N_MEMORY) { 2945b71636e2SPaul Mundt unsigned long total_pages = node_present_pages(nid); 29461da177e4SLinus Torvalds 2947b71636e2SPaul Mundt /* Preserve the largest node */ 2948b71636e2SPaul Mundt if (largest < total_pages) { 2949b71636e2SPaul Mundt largest = total_pages; 2950b71636e2SPaul Mundt prefer = nid; 2951b71636e2SPaul Mundt } 2952b71636e2SPaul Mundt 2953b71636e2SPaul Mundt /* Interleave this node? 
*/ 2954b71636e2SPaul Mundt if ((total_pages << PAGE_SHIFT) >= (16 << 20)) 2955b71636e2SPaul Mundt node_set(nid, interleave_nodes); 2956b71636e2SPaul Mundt } 2957b71636e2SPaul Mundt 2958b71636e2SPaul Mundt /* All too small, use the largest */ 2959b71636e2SPaul Mundt if (unlikely(nodes_empty(interleave_nodes))) 2960b71636e2SPaul Mundt node_set(prefer, interleave_nodes); 2961b71636e2SPaul Mundt 2962028fec41SDavid Rientjes if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes)) 2963b1de0d13SMitchel Humpherys pr_err("%s: interleaving failed\n", __func__); 29641a687c2eSMel Gorman 29651a687c2eSMel Gorman check_numabalancing_enable(); 29661da177e4SLinus Torvalds } 29671da177e4SLinus Torvalds 29688bccd85fSChristoph Lameter /* Reset policy of current process to default */ 29691da177e4SLinus Torvalds void numa_default_policy(void) 29701da177e4SLinus Torvalds { 2971028fec41SDavid Rientjes do_set_mempolicy(MPOL_DEFAULT, 0, NULL); 29721da177e4SLinus Torvalds } 297368860ec1SPaul Jackson 29744225399aSPaul Jackson /* 2975095f1fc4SLee Schermerhorn * Parse and format mempolicy from/to strings 2976095f1fc4SLee Schermerhorn */ 2977095f1fc4SLee Schermerhorn 2978345ace9cSLee Schermerhorn static const char * const policy_modes[] = 2979345ace9cSLee Schermerhorn { 2980345ace9cSLee Schermerhorn [MPOL_DEFAULT] = "default", 2981345ace9cSLee Schermerhorn [MPOL_PREFERRED] = "prefer", 2982345ace9cSLee Schermerhorn [MPOL_BIND] = "bind", 2983345ace9cSLee Schermerhorn [MPOL_INTERLEAVE] = "interleave", 2984d3a71033SLee Schermerhorn [MPOL_LOCAL] = "local", 2985b27abaccSDave Hansen [MPOL_PREFERRED_MANY] = "prefer (many)", 2986345ace9cSLee Schermerhorn }; 29871a75a6c8SChristoph Lameter 2988095f1fc4SLee Schermerhorn 2989095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS 2990095f1fc4SLee Schermerhorn /** 2991f2a07f40SHugh Dickins * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option. 2992095f1fc4SLee Schermerhorn * @str: string containing mempolicy to parse 299371fe804bSLee Schermerhorn * @mpol: pointer to struct mempolicy pointer, returned on success. 
2994095f1fc4SLee Schermerhorn * 2995095f1fc4SLee Schermerhorn * Format of input: 2996095f1fc4SLee Schermerhorn * <mode>[=<flags>][:<nodelist>] 2997095f1fc4SLee Schermerhorn * 2998dad5b023SRandy Dunlap * Return: %0 on success, else %1 2999095f1fc4SLee Schermerhorn */ 3000a7a88b23SHugh Dickins int mpol_parse_str(char *str, struct mempolicy **mpol) 3001095f1fc4SLee Schermerhorn { 300271fe804bSLee Schermerhorn struct mempolicy *new = NULL; 3003f2a07f40SHugh Dickins unsigned short mode_flags; 300471fe804bSLee Schermerhorn nodemask_t nodes; 3005095f1fc4SLee Schermerhorn char *nodelist = strchr(str, ':'); 3006095f1fc4SLee Schermerhorn char *flags = strchr(str, '='); 3007dedf2c73Szhong jiang int err = 1, mode; 3008095f1fc4SLee Schermerhorn 3009c7a91bc7SDan Carpenter if (flags) 3010c7a91bc7SDan Carpenter *flags++ = '\0'; /* terminate mode string */ 3011c7a91bc7SDan Carpenter 3012095f1fc4SLee Schermerhorn if (nodelist) { 3013095f1fc4SLee Schermerhorn /* NUL-terminate mode or flags string */ 3014095f1fc4SLee Schermerhorn *nodelist++ = '\0'; 301571fe804bSLee Schermerhorn if (nodelist_parse(nodelist, nodes)) 3016095f1fc4SLee Schermerhorn goto out; 301701f13bd6SLai Jiangshan if (!nodes_subset(nodes, node_states[N_MEMORY])) 3018095f1fc4SLee Schermerhorn goto out; 301971fe804bSLee Schermerhorn } else 302071fe804bSLee Schermerhorn nodes_clear(nodes); 302171fe804bSLee Schermerhorn 3022dedf2c73Szhong jiang mode = match_string(policy_modes, MPOL_MAX, str); 3023dedf2c73Szhong jiang if (mode < 0) 3024095f1fc4SLee Schermerhorn goto out; 3025095f1fc4SLee Schermerhorn 302671fe804bSLee Schermerhorn switch (mode) { 3027095f1fc4SLee Schermerhorn case MPOL_PREFERRED: 302871fe804bSLee Schermerhorn /* 3029aa9f7d51SRandy Dunlap * Insist on a nodelist of one node only, although later 3030aa9f7d51SRandy Dunlap * we use first_node(nodes) to grab a single node, so here 3031aa9f7d51SRandy Dunlap * nodelist (or nodes) cannot be empty. 
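 * For example, a tmpfs mount with mpol=prefer:2 reaches this case with
 * str == "prefer" and nodelist == "2".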
303271fe804bSLee Schermerhorn */ 3033095f1fc4SLee Schermerhorn if (nodelist) { 3034095f1fc4SLee Schermerhorn char *rest = nodelist; 3035095f1fc4SLee Schermerhorn while (isdigit(*rest)) 3036095f1fc4SLee Schermerhorn rest++; 3037926f2ae0SKOSAKI Motohiro if (*rest) 3038926f2ae0SKOSAKI Motohiro goto out; 3039aa9f7d51SRandy Dunlap if (nodes_empty(nodes)) 3040aa9f7d51SRandy Dunlap goto out; 3041095f1fc4SLee Schermerhorn } 3042095f1fc4SLee Schermerhorn break; 3043095f1fc4SLee Schermerhorn case MPOL_INTERLEAVE: 3044095f1fc4SLee Schermerhorn /* 3045095f1fc4SLee Schermerhorn * Default to online nodes with memory if no nodelist 3046095f1fc4SLee Schermerhorn */ 3047095f1fc4SLee Schermerhorn if (!nodelist) 304801f13bd6SLai Jiangshan nodes = node_states[N_MEMORY]; 30493f226aa1SLee Schermerhorn break; 305071fe804bSLee Schermerhorn case MPOL_LOCAL: 30513f226aa1SLee Schermerhorn /* 305271fe804bSLee Schermerhorn * Don't allow a nodelist; mpol_new() checks flags 30533f226aa1SLee Schermerhorn */ 305471fe804bSLee Schermerhorn if (nodelist) 30553f226aa1SLee Schermerhorn goto out; 30563f226aa1SLee Schermerhorn break; 3057413b43deSRavikiran G Thirumalai case MPOL_DEFAULT: 3058413b43deSRavikiran G Thirumalai /* 3059413b43deSRavikiran G Thirumalai * Insist on a empty nodelist 3060413b43deSRavikiran G Thirumalai */ 3061413b43deSRavikiran G Thirumalai if (!nodelist) 3062413b43deSRavikiran G Thirumalai err = 0; 3063413b43deSRavikiran G Thirumalai goto out; 3064b27abaccSDave Hansen case MPOL_PREFERRED_MANY: 3065d69b2e63SKOSAKI Motohiro case MPOL_BIND: 306671fe804bSLee Schermerhorn /* 3067d69b2e63SKOSAKI Motohiro * Insist on a nodelist 306871fe804bSLee Schermerhorn */ 3069d69b2e63SKOSAKI Motohiro if (!nodelist) 3070d69b2e63SKOSAKI Motohiro goto out; 3071095f1fc4SLee Schermerhorn } 3072095f1fc4SLee Schermerhorn 307371fe804bSLee Schermerhorn mode_flags = 0; 3074095f1fc4SLee Schermerhorn if (flags) { 3075095f1fc4SLee Schermerhorn /* 3076095f1fc4SLee Schermerhorn * Currently, we only support two mutually exclusive 3077095f1fc4SLee Schermerhorn * mode flags. 3078095f1fc4SLee Schermerhorn */ 3079095f1fc4SLee Schermerhorn if (!strcmp(flags, "static")) 308071fe804bSLee Schermerhorn mode_flags |= MPOL_F_STATIC_NODES; 3081095f1fc4SLee Schermerhorn else if (!strcmp(flags, "relative")) 308271fe804bSLee Schermerhorn mode_flags |= MPOL_F_RELATIVE_NODES; 3083095f1fc4SLee Schermerhorn else 3084926f2ae0SKOSAKI Motohiro goto out; 3085095f1fc4SLee Schermerhorn } 308671fe804bSLee Schermerhorn 308771fe804bSLee Schermerhorn new = mpol_new(mode, mode_flags, &nodes); 308871fe804bSLee Schermerhorn if (IS_ERR(new)) 3089926f2ae0SKOSAKI Motohiro goto out; 3090926f2ae0SKOSAKI Motohiro 3091f2a07f40SHugh Dickins /* 3092f2a07f40SHugh Dickins * Save nodes for mpol_to_str() to show the tmpfs mount options 3093f2a07f40SHugh Dickins * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo. 3094f2a07f40SHugh Dickins */ 3095269fbe72SBen Widawsky if (mode != MPOL_PREFERRED) { 3096269fbe72SBen Widawsky new->nodes = nodes; 3097269fbe72SBen Widawsky } else if (nodelist) { 3098269fbe72SBen Widawsky nodes_clear(new->nodes); 3099269fbe72SBen Widawsky node_set(first_node(nodes), new->nodes); 3100269fbe72SBen Widawsky } else { 31017858d7bcSFeng Tang new->mode = MPOL_LOCAL; 3102269fbe72SBen Widawsky } 3103f2a07f40SHugh Dickins 3104f2a07f40SHugh Dickins /* 3105f2a07f40SHugh Dickins * Save nodes for contextualization: this will be used to "clone" 3106f2a07f40SHugh Dickins * the mempolicy in a specific context [cpuset] at a later time. 
3107f2a07f40SHugh Dickins */ 3108e17f74afSLee Schermerhorn new->w.user_nodemask = nodes; 3109f2a07f40SHugh Dickins 3110926f2ae0SKOSAKI Motohiro err = 0; 311171fe804bSLee Schermerhorn 3112095f1fc4SLee Schermerhorn out: 3113095f1fc4SLee Schermerhorn /* Restore string for error message */ 3114095f1fc4SLee Schermerhorn if (nodelist) 3115095f1fc4SLee Schermerhorn *--nodelist = ':'; 3116095f1fc4SLee Schermerhorn if (flags) 3117095f1fc4SLee Schermerhorn *--flags = '='; 311871fe804bSLee Schermerhorn if (!err) 311971fe804bSLee Schermerhorn *mpol = new; 3120095f1fc4SLee Schermerhorn return err; 3121095f1fc4SLee Schermerhorn } 3122095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */ 3123095f1fc4SLee Schermerhorn 312471fe804bSLee Schermerhorn /** 312571fe804bSLee Schermerhorn * mpol_to_str - format a mempolicy structure for printing 312671fe804bSLee Schermerhorn * @buffer: to contain formatted mempolicy string 312771fe804bSLee Schermerhorn * @maxlen: length of @buffer 312871fe804bSLee Schermerhorn * @pol: pointer to mempolicy to be formatted 312971fe804bSLee Schermerhorn * 3130948927eeSDavid Rientjes * Convert @pol into a string. If @buffer is too short, truncate the string. 3131948927eeSDavid Rientjes * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the 3132948927eeSDavid Rientjes * longest flag, "relative", and to display at least a few node ids. 31331a75a6c8SChristoph Lameter */ 3134948927eeSDavid Rientjes void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol) 31351a75a6c8SChristoph Lameter { 31361a75a6c8SChristoph Lameter char *p = buffer; 3137948927eeSDavid Rientjes nodemask_t nodes = NODE_MASK_NONE; 3138948927eeSDavid Rientjes unsigned short mode = MPOL_DEFAULT; 3139948927eeSDavid Rientjes unsigned short flags = 0; 31401a75a6c8SChristoph Lameter 31418790c71aSDavid Rientjes if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) { 3142bea904d5SLee Schermerhorn mode = pol->mode; 3143948927eeSDavid Rientjes flags = pol->flags; 3144948927eeSDavid Rientjes } 3145bea904d5SLee Schermerhorn 31461a75a6c8SChristoph Lameter switch (mode) { 31471a75a6c8SChristoph Lameter case MPOL_DEFAULT: 31487858d7bcSFeng Tang case MPOL_LOCAL: 31491a75a6c8SChristoph Lameter break; 31501a75a6c8SChristoph Lameter case MPOL_PREFERRED: 3151b27abaccSDave Hansen case MPOL_PREFERRED_MANY: 31521a75a6c8SChristoph Lameter case MPOL_BIND: 31531a75a6c8SChristoph Lameter case MPOL_INTERLEAVE: 3154269fbe72SBen Widawsky nodes = pol->nodes; 31551a75a6c8SChristoph Lameter break; 31561a75a6c8SChristoph Lameter default: 3157948927eeSDavid Rientjes WARN_ON_ONCE(1); 3158948927eeSDavid Rientjes snprintf(p, maxlen, "unknown"); 3159948927eeSDavid Rientjes return; 31601a75a6c8SChristoph Lameter } 31611a75a6c8SChristoph Lameter 3162b7a9f420SDavid Rientjes p += snprintf(p, maxlen, "%s", policy_modes[mode]); 31631a75a6c8SChristoph Lameter 3164fc36b8d3SLee Schermerhorn if (flags & MPOL_MODE_FLAGS) { 3165948927eeSDavid Rientjes p += snprintf(p, buffer + maxlen - p, "="); 3166f5b087b5SDavid Rientjes 31672291990aSLee Schermerhorn /* 31682291990aSLee Schermerhorn * Currently, the only defined flags are mutually exclusive 31692291990aSLee Schermerhorn */ 3170f5b087b5SDavid Rientjes if (flags & MPOL_F_STATIC_NODES) 31712291990aSLee Schermerhorn p += snprintf(p, buffer + maxlen - p, "static"); 31722291990aSLee Schermerhorn else if (flags & MPOL_F_RELATIVE_NODES) 31732291990aSLee Schermerhorn p += snprintf(p, buffer + maxlen - p, "relative"); 3174f5b087b5SDavid Rientjes } 3175f5b087b5SDavid Rientjes 
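	/* Finally append the nodelist, if any, in bitmap-list form, e.g. ":0-3,8" */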
31769e763e0fSTejun Heo if (!nodes_empty(nodes)) 31779e763e0fSTejun Heo p += scnprintf(p, buffer + maxlen - p, ":%*pbl", 31789e763e0fSTejun Heo nodemask_pr_args(&nodes)); 31791a75a6c8SChristoph Lameter } 3180
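/*
 * Illustrative sketch of how a tmpfs "mpol=" mount option string round-trips
 * through mpol_parse_str() and mpol_to_str(). Assumes CONFIG_TMPFS and a
 * writable copy of the option string, since mpol_parse_str() modifies it in
 * place while parsing (it restores the separators before returning):
 *
 *	char str[] = "interleave=static:0-3";
 *	struct mempolicy *mpol = NULL;
 *	char buf[64];
 *
 *	if (!mpol_parse_str(str, &mpol)) {
 *		mpol_to_str(buf, sizeof(buf), mpol);
 *		pr_info("parsed mempolicy: %s\n", buf);
 *		mpol_put(mpol);
 *	}
 */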