// SPDX-License-Identifier: GPL-2.0-only
/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a per-process
 *                counter is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead.
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case NUMA_NO_NODE here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non default
 *                process policy.
 *
 * preferred many Try a set of nodes first before normal fallback. This is
 *                similar to preferred without the special case.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non-interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem, kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
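/*
 * Userspace view (illustrative sketch, not part of this file): the modes
 * above are requested via set_mempolicy(2), e.g. interleaving a task's
 * future allocations across nodes 0 and 1:
 *
 *	unsigned long nodemask = (1UL << 0) | (1UL << 1);
 *
 *	if (set_mempolicy(MPOL_INTERLEAVE, &nodemask, sizeof(nodemask) * 8))
 *		perror("set_mempolicy");
 *
 * mbind(2) applies the same modes to an address range as a VMA policy.
 */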
/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/pagewalk.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/ptrace.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>
#include <linux/swapops.h>

#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <linux/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT       (MPOL_MF_INTERNAL << 1)	/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;
/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

/**
 * numa_map_to_online_node - Find closest online node
 * @node: Node id to start the search
 *
 * Lookup the next closest node by distance if @node is not online.
 *
 * Return: this @node if it is online, otherwise the closest node by distance
 */
int numa_map_to_online_node(int node)
{
	int min_dist = INT_MAX, dist, n, min_node;

	if (node == NUMA_NO_NODE || node_online(node))
		return node;

	min_node = node;
	for_each_online_node(n) {
		dist = node_distance(node, n);
		if (dist < min_dist) {
			min_dist = dist;
			min_node = n;
		}
	}

	return min_node;
}
EXPORT_SYMBOL_GPL(numa_map_to_online_node);

struct mempolicy *get_task_policy(struct task_struct *p)
{
	struct mempolicy *pol = p->mempolicy;
	int node;

	if (pol)
		return pol;

	node = numa_node_id();
	if (node != NUMA_NO_NODE) {
		pol = &preferred_node_policy[node];
		/* preferred_node_policy is not initialised early in boot */
		if (pol->mode)
			return pol;
	}

	return &default_policy;
}

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
} mpol_ops[MPOL_MAX];

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}
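/*
 * For example (illustrative): with MPOL_F_RELATIVE_NODES, the user's
 * nodemask names positions within the allowed set rather than absolute
 * node ids, and positions wrap modulo the allowed set's weight.  A user
 * mask of {0,1} mapped onto an allowed set of {4,5,6} thus yields {4,5}.
 */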
static int mpol_new_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;

	nodes_clear(pol->nodes);
	node_set(first_node(*nodes), pol->nodes);
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy.  mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy.  May also be called holding the mmap_lock for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/*
	 * Default (pol==NULL) and local memory policies are not subject
	 * to any remapping.  They also do not need any special
	 * constructor.
	 */
	if (!pol || pol->mode == MPOL_LOCAL)
		return 0;

	/* Check N_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_MEMORY]);

	VM_BUG_ON(!nodes);

	if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
	else
		nodes_and(nsc->mask2, *nodes, nsc->mask1);

	if (mpol_store_user_nodemask(pol))
		pol->w.user_nodemask = *nodes;
	else
		pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed;

	ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	return ret;
}
/*
 * This function just creates a new policy, does some checks and simple
 * initialization.  You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);

			mode = MPOL_LOCAL;
		}
	} else if (mode == MPOL_LOCAL) {
		if (!nodes_empty(*nodes) ||
		    (flags & MPOL_F_STATIC_NODES) ||
		    (flags & MPOL_F_RELATIVE_NODES))
			return ERR_PTR(-EINVAL);
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;
	policy->home_node = NUMA_NO_NODE;

	return policy;
}
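/*
 * Typical construction sequence (sketch; do_set_mempolicy() below is the
 * canonical caller):
 *
 *	new = mpol_new(mode, flags, nodes);
 *	if (IS_ERR(new))
 *		return PTR_ERR(new);
 *	task_lock(current);
 *	ret = mpol_set_nodemask(new, nodes, scratch);
 *	task_unlock(current);
 */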
/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
{
}

static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed,
			    *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}

	if (nodes_empty(tmp))
		tmp = *nodes;

	pol->nodes = tmp;
}

static void mpol_rebind_preferred(struct mempolicy *pol,
						const nodemask_t *nodes)
{
	pol->w.cpuset_mems_allowed = *nodes;
}

/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * Per-vma policies are protected by mmap_lock. Allocations using per-task
 * policies are protected by task->mems_allowed_seq to prevent a premature
 * OOM/allocation failure due to parallel nodemask modification.
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
{
	if (!pol || pol->mode == MPOL_LOCAL)
		return;
	if (!mpol_store_user_nodemask(pol) &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;

	mpol_ops[pol->mode].rebind(pol, newmask);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */

void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
{
	mpol_rebind_policy(tsk->mempolicy, new);
}
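/*
 * Worked example (illustrative): a policy created over nodes {0,1} whose
 * cpuset mems later become {2,3} is rebound by mpol_rebind_nodemask() as:
 *
 *	MPOL_F_STATIC_NODES:	{0,1} & {2,3} is empty, falls back to {2,3}
 *	MPOL_F_RELATIVE_NODES:	positions 0,1 within {2,3}  -> {2,3}
 *	no flag:		nodes_remap() by position   -> {2,3}
 */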
/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_lock during call.
 */

void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_write_lock(mm);
	for_each_vma(vmi, vma)
		mpol_rebind_policy(vma->vm_policy, new);
	mmap_write_unlock(mm);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_nodemask,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_nodemask,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_LOCAL] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_PREFERRED_MANY] = {
		.create = mpol_new_nodemask,
		.rebind = mpol_rebind_preferred,
	},
};

static int migrate_folio_add(struct folio *folio, struct list_head *foliolist,
				unsigned long flags);

struct queue_pages {
	struct list_head *pagelist;
	unsigned long flags;
	nodemask_t *nmask;
	unsigned long start;
	unsigned long end;
	struct vm_area_struct *first;
};

/*
 * Check if the folio's nid is in qp->nmask.
 *
 * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
 * in the invert of qp->nmask.
 */
static inline bool queue_folio_required(struct folio *folio,
					struct queue_pages *qp)
{
	int nid = folio_nid(folio);
	unsigned long flags = qp->flags;

	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
}
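/*
 * The equality test above acts as a conditional negation, e.g.:
 *
 *	node_isset(nid, nmask)	MPOL_MF_INVERT	required?
 *	1			unset		yes (1 == !0)
 *	1			set		no  (1 != !1)
 *	0			set		yes (0 == !1)
 */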
/*
 * queue_folios_pmd() has three possible return values:
 * 0 - folios are placed on the right node or queued successfully, or
 *     special page is met, i.e. huge zero page.
 * 1 - there is unmovable folio, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * -EIO - a migration entry was found, or only MPOL_MF_STRICT was specified
 *        and an existing folio was already on a node that does not follow
 *        the policy.
 */
static int queue_folios_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
	__releases(ptl)
{
	int ret = 0;
	struct folio *folio;
	struct queue_pages *qp = walk->private;
	unsigned long flags;

	if (unlikely(is_pmd_migration_entry(*pmd))) {
		ret = -EIO;
		goto unlock;
	}
	folio = pfn_folio(pmd_pfn(*pmd));
	if (is_huge_zero_page(&folio->page)) {
		walk->action = ACTION_CONTINUE;
		goto unlock;
	}
	if (!queue_folio_required(folio, qp))
		goto unlock;

	flags = qp->flags;
	/* go to folio migration */
	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
		if (!vma_migratable(walk->vma) ||
		    migrate_folio_add(folio, qp->pagelist, flags)) {
			ret = 1;
			goto unlock;
		}
	} else
		ret = -EIO;
unlock:
	spin_unlock(ptl);
	return ret;
}
/*
 * Scan through pages, checking if they satisfy the required conditions,
 * and move them to the pagelist if they do.
 *
 * queue_folios_pte_range() has three possible return values:
 * 0 - folios are placed on the right node or queued successfully, or
 *     special page is met, i.e. zero page.
 * 1 - there is unmovable folio, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * -EIO - only MPOL_MF_STRICT was specified and an existing folio was already
 *        on a node that does not follow the policy.
 */
static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct folio *folio;
	struct queue_pages *qp = walk->private;
	unsigned long flags = qp->flags;
	bool has_unmovable = false;
	pte_t *pte, *mapped_pte;
	pte_t ptent;
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl)
		return queue_folios_pmd(pmd, ptl, addr, end, walk);

	mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte) {
		walk->action = ACTION_AGAIN;
		return 0;
	}
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = ptep_get(pte);
		if (!pte_present(ptent))
			continue;
		folio = vm_normal_folio(vma, addr, ptent);
		if (!folio || folio_is_zone_device(folio))
			continue;
		/*
		 * vm_normal_folio() filters out zero pages, but there might
		 * still be reserved folios to skip, perhaps in a VDSO.
		 */
		if (folio_test_reserved(folio))
			continue;
		if (!queue_folio_required(folio, qp))
			continue;
		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
			/* MPOL_MF_STRICT must be specified if we get here */
			if (!vma_migratable(vma)) {
				has_unmovable = true;
				break;
			}

			/*
			 * Do not abort immediately since there may be
			 * temporary off LRU pages in the range.  Still
			 * need migrate other LRU pages.
			 */
			if (migrate_folio_add(folio, qp->pagelist, flags))
				has_unmovable = true;
		} else
			break;
	}
	pte_unmap_unlock(mapped_pte, ptl);
	cond_resched();

	if (has_unmovable)
		return 1;

	return addr != end ? -EIO : 0;
}
static int queue_folios_hugetlb(pte_t *pte, unsigned long hmask,
			       unsigned long addr, unsigned long end,
			       struct mm_walk *walk)
{
	int ret = 0;
#ifdef CONFIG_HUGETLB_PAGE
	struct queue_pages *qp = walk->private;
	unsigned long flags = (qp->flags & MPOL_MF_VALID);
	struct folio *folio;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry))
		goto unlock;
	folio = pfn_folio(pte_pfn(entry));
	if (!queue_folio_required(folio, qp))
		goto unlock;

	if (flags == MPOL_MF_STRICT) {
		/*
		 * STRICT alone means only detecting misplaced folio and no
		 * need to further check other vma.
		 */
		ret = -EIO;
		goto unlock;
	}

	if (!vma_migratable(walk->vma)) {
		/*
		 * Must be STRICT with MOVE*, otherwise .test_walk() would
		 * have stopped walking current vma.
		 * Detecting misplaced folio but allow migrating folios which
		 * have been queued.
		 */
		ret = 1;
		goto unlock;
	}

	/*
	 * With MPOL_MF_MOVE, we try to migrate only unshared folios. If it
	 * is shared it is likely not worth migrating.
	 *
	 * To check if the folio is shared, ideally we want to make sure
	 * every page is mapped to the same process. Doing that is very
	 * expensive, so check the estimated mapcount of the folio instead.
	 */
	if (flags & (MPOL_MF_MOVE_ALL) ||
	    (flags & MPOL_MF_MOVE && folio_estimated_sharers(folio) == 1 &&
	     !hugetlb_pmd_shared(pte))) {
		if (!isolate_hugetlb(folio, qp->pagelist) &&
		    (flags & MPOL_MF_STRICT))
			/*
			 * Failed to isolate folio but allow migrating pages
			 * which have been queued.
			 */
			ret = 1;
	}
unlock:
	spin_unlock(ptl);
#else
	BUG();
#endif
	return ret;
}
#ifdef CONFIG_NUMA_BALANCING
/*
 * This is used to mark a range of virtual addresses to be inaccessible.
 * These are later cleared by a NUMA hinting fault. Depending on these
 * faults, pages may be migrated for better NUMA placement.
 *
 * This is assuming that NUMA faults are handled using PROT_NONE. If
 * an architecture makes a different choice, it will need further
 * changes to the core.
 */
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	struct mmu_gather tlb;
	long nr_updated;

	tlb_gather_mmu(&tlb, vma->vm_mm);

	nr_updated = change_protection(&tlb, vma, addr, end, MM_CP_PROT_NUMA);
	if (nr_updated > 0)
		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);

	tlb_finish_mmu(&tlb);

	return nr_updated;
}
#else
static unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */
static int queue_pages_test_walk(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct vm_area_struct *next, *vma = walk->vma;
	struct queue_pages *qp = walk->private;
	unsigned long endvma = vma->vm_end;
	unsigned long flags = qp->flags;

	/* range check first */
	VM_BUG_ON_VMA(!range_in_vma(vma, start, end), vma);

	if (!qp->first) {
		qp->first = vma;
		if (!(flags & MPOL_MF_DISCONTIG_OK) &&
			(qp->start < vma->vm_start))
			/* hole at head side of range */
			return -EFAULT;
	}
	next = find_vma(vma->vm_mm, vma->vm_end);
	if (!(flags & MPOL_MF_DISCONTIG_OK) &&
		((vma->vm_end < qp->end) &&
		(!next || vma->vm_end < next->vm_start)))
		/* hole at middle or tail of range */
		return -EFAULT;

	/*
	 * Need to check MPOL_MF_STRICT to return -EIO if possible
	 * regardless of vma_migratable
	 */
	if (!vma_migratable(vma) &&
	    !(flags & MPOL_MF_STRICT))
		return 1;

	if (endvma > end)
		endvma = end;

	if (flags & MPOL_MF_LAZY) {
		/* Similar to task_numa_work, skip inaccessible VMAs */
		if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) &&
			!(vma->vm_flags & VM_MIXEDMAP))
			change_prot_numa(vma, start, endvma);
		return 1;
	}

	/* queue pages from current vma */
	if (flags & MPOL_MF_VALID)
		return 0;
	return 1;
}

static const struct mm_walk_ops queue_pages_walk_ops = {
	.hugetlb_entry		= queue_folios_hugetlb,
	.pmd_entry		= queue_folios_pte_range,
	.test_walk		= queue_pages_test_walk,
};
/*
 * Walk through page tables and collect pages to be migrated.
 *
 * If pages found in a given range are on a set of nodes (determined by
 * @nodes and @flags), they are isolated and queued to the pagelist which
 * is passed via @private.
 *
 * queue_pages_range() has three possible return values:
 * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * 0 - queue pages successfully or no misplaced page.
 * errno - i.e. misplaced pages with MPOL_MF_STRICT specified (-EIO) or
 *         memory range specified by nodemask and maxnode points outside
 *         your accessible address space (-EFAULT)
 */
static int
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		nodemask_t *nodes, unsigned long flags,
		struct list_head *pagelist)
{
	int err;
	struct queue_pages qp = {
		.pagelist = pagelist,
		.flags = flags,
		.nmask = nodes,
		.start = start,
		.end = end,
		.first = NULL,
	};

	err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);

	if (!qp.first)
		/* whole range in hole */
		err = -EFAULT;

	return err;
}

/*
 * Apply policy to a single VMA
 * This must be called with the mmap_lock held for writing.
 */
static int vma_replace_policy(struct vm_area_struct *vma,
						struct mempolicy *pol)
{
	int err;
	struct mempolicy *old;
	struct mempolicy *new;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	new = mpol_dup(pol);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (vma->vm_ops && vma->vm_ops->set_policy) {
		err = vma->vm_ops->set_policy(vma, new);
		if (err)
			goto err_out;
	}

	old = vma->vm_policy;
	vma->vm_policy = new; /* protected by mmap_lock */
	mpol_put(old);

	return 0;
 err_out:
	mpol_put(new);
	return err;
}
/* Split or merge the VMA (if required) and apply the new policy */
static int mbind_range(struct vma_iterator *vmi, struct vm_area_struct *vma,
		struct vm_area_struct **prev, unsigned long start,
		unsigned long end, struct mempolicy *new_pol)
{
	struct vm_area_struct *merged;
	unsigned long vmstart, vmend;
	pgoff_t pgoff;
	int err;

	vmend = min(end, vma->vm_end);
	if (start > vma->vm_start) {
		*prev = vma;
		vmstart = start;
	} else {
		vmstart = vma->vm_start;
	}

	if (mpol_equal(vma_policy(vma), new_pol)) {
		*prev = vma;
		return 0;
	}

	pgoff = vma->vm_pgoff + ((vmstart - vma->vm_start) >> PAGE_SHIFT);
	merged = vma_merge(vmi, vma->vm_mm, *prev, vmstart, vmend, vma->vm_flags,
			   vma->anon_vma, vma->vm_file, pgoff, new_pol,
			   vma->vm_userfaultfd_ctx, anon_vma_name(vma));
	if (merged) {
		*prev = merged;
		return vma_replace_policy(merged, new_pol);
	}

	if (vma->vm_start != vmstart) {
		err = split_vma(vmi, vma, vmstart, 1);
		if (err)
			return err;
	}

	if (vma->vm_end != vmend) {
		err = split_vma(vmi, vma, vmend, 0);
		if (err)
			return err;
	}

	*prev = vma;
	return vma_replace_policy(vma, new_pol);
}
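/*
 * Userspace view (illustrative sketch, not part of this file): mbind(2)
 * is what ultimately drives mbind_range() over each VMA in the range,
 * e.g. binding one anonymous mapping to node 0:
 *
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	unsigned long nodemask = 1UL << 0;
 *
 *	if (mbind(p, len, MPOL_BIND, &nodemask, sizeof(nodemask) * 8, 0))
 *		perror("mbind");
 */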
/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new, *old;
	NODEMASK_SCRATCH(scratch);
	int ret;

	if (!scratch)
		return -ENOMEM;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new)) {
		ret = PTR_ERR(new);
		goto out;
	}

	task_lock(current);
	ret = mpol_set_nodemask(new, nodes, scratch);
	if (ret) {
		task_unlock(current);
		mpol_put(new);
		goto out;
	}

	old = current->mempolicy;
	current->mempolicy = new;
	if (new && new->mode == MPOL_INTERLEAVE)
		current->il_prev = MAX_NUMNODES-1;
	task_unlock(current);
	mpol_put(old);
	ret = 0;
out:
	NODEMASK_SCRATCH_FREE(scratch);
	return ret;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (p == &default_policy)
		return;

	switch (p->mode) {
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
	case MPOL_PREFERRED:
	case MPOL_PREFERRED_MANY:
		*nodes = p->nodes;
		break;
	case MPOL_LOCAL:
		/* return empty node mask for local allocation */
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p = NULL;
	int ret;

	ret = get_user_pages_fast(addr & PAGE_MASK, 1, 0, &p);
	if (ret > 0) {
		ret = page_to_nid(p);
		put_page(p);
	}
	return ret;
}
/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;

	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask  = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL.  We
		 * want to return MPOL_DEFAULT in this case.
		 */
		mmap_read_lock(mm);
		vma = vma_lookup(mm, addr);
		if (!vma) {
			mmap_read_unlock(mm);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			/*
			 * Take a refcount on the mpol, because we are about to
			 * drop the mmap_lock, after which only "pol" remains
			 * valid, "vma" is stale.
			 */
			pol_refcount = pol;
			vma = NULL;
			mpol_get(pol);
			mmap_read_unlock(mm);
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = next_node_in(current->il_prev, pol->nodes);
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {
			*nmask = pol->w.user_nodemask;
		} else {
			task_lock(current);
			get_policy_nodemask(pol, nmask);
			task_unlock(current);
		}
	}

 out:
	mpol_cond_put(pol);
	if (vma)
		mmap_read_unlock(mm);
	if (pol_refcount)
		mpol_put(pol_refcount);
	return err;
}
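/*
 * Userspace view (illustrative sketch, not part of this file):
 * get_mempolicy(2) with MPOL_F_NODE | MPOL_F_ADDR exercises the
 * lookup_node() path above to report the node backing an address:
 *
 *	int node;
 *
 *	if (get_mempolicy(&node, NULL, 0, addr,
 *			  MPOL_F_NODE | MPOL_F_ADDR) == 0)
 *		printf("page at %p is on node %d\n", addr, node);
 */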
#ifdef CONFIG_MIGRATION
static int migrate_folio_add(struct folio *folio, struct list_head *foliolist,
				unsigned long flags)
{
	/*
	 * We try to migrate only unshared folios. If it is shared it
	 * is likely not worth migrating.
	 *
	 * To check if the folio is shared, ideally we want to make sure
	 * every page is mapped to the same process. Doing that is very
	 * expensive, so check the estimated mapcount of the folio instead.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || folio_estimated_sharers(folio) == 1) {
		if (folio_isolate_lru(folio)) {
			list_add_tail(&folio->lru, foliolist);
			node_stat_mod_folio(folio,
				NR_ISOLATED_ANON + folio_is_file_lru(folio),
				folio_nr_pages(folio));
		} else if (flags & MPOL_MF_STRICT) {
			/*
			 * Non-movable folio may reach here. And, there may be
			 * temporary off LRU folios or non-LRU movable folios.
			 * Treat them as unmovable folios since they can't be
			 * isolated, so they can't be moved at the moment. It
			 * should return -EIO for this case too.
			 */
			return -EIO;
		}
	}

	return 0;
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	struct vm_area_struct *vma;
	LIST_HEAD(pagelist);
	int err = 0;
	struct migration_target_control mtc = {
		.nid = dest,
		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
	};

	nodes_clear(nmask);
	node_set(source, nmask);

	/*
	 * This does not "check" the range but isolates all pages that
	 * need migration.  Between passing in the full user address
	 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
	 */
	vma = find_vma(mm, 0);
	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
	queue_pages_range(mm, vma->vm_start, mm->task_size, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, alloc_migration_target, NULL,
			(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
		if (err)
			putback_movable_pages(&pagelist);
	}

	return err;
}
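/*
 * Userspace view (illustrative sketch, not part of this file): the
 * migrate_pages(2) syscall drives do_migrate_pages() below, e.g. moving
 * a task's pages from node 0 to node 1:
 *
 *	unsigned long old = 1UL << 0, new = 1UL << 1;
 *
 *	if (migrate_pages(pid, sizeof(old) * 8, &old, &new) < 0)
 *		perror("migrate_pages");
 */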
/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	int busy = 0;
	int err = 0;
	nodemask_t tmp;

	lru_cache_disable();

	mmap_read_lock(mm);

	/*
	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
	 * bit in 'tmp', and return that <source, dest> pair for migration.
	 * The pair of nodemasks 'to' and 'from' define the map.
	 *
	 * If no pair of bits is found that way, fallback to picking some
	 * pair of 'source' and 'dest' bits that are not the same.  If the
	 * 'source' and 'dest' bits are the same, this represents a node
	 * that will be migrating to itself, so no pages need move.
11167e2ab150SChristoph Lameter * 11177e2ab150SChristoph Lameter * If no bits are left in 'tmp', or if all remaining bits left 11187e2ab150SChristoph Lameter * in 'tmp' correspond to the same bit in 'to', return false 11197e2ab150SChristoph Lameter * (nothing left to migrate). 11207e2ab150SChristoph Lameter * 11217e2ab150SChristoph Lameter * This lets us pick a pair of nodes to migrate between, such that 11227e2ab150SChristoph Lameter * if possible the dest node is not already occupied by some other 11237e2ab150SChristoph Lameter * source node, minimizing the risk of overloading the memory on a 11247e2ab150SChristoph Lameter * node that would happen if we migrated incoming memory to a node 11257e2ab150SChristoph Lameter * before migrating outgoing memory off that same node. 11267e2ab150SChristoph Lameter * 11277e2ab150SChristoph Lameter * A single scan of tmp is sufficient. As we go, we remember the 11287e2ab150SChristoph Lameter * most recent <s, d> pair that moved (s != d). If we find a pair 11297e2ab150SChristoph Lameter * that not only moved, but what's better, moved to an empty slot 11307e2ab150SChristoph Lameter * (d is not set in tmp), then we break out then, with that pair. 1131ae0e47f0SJustin P. Mattock * Otherwise, when we finish scanning tmp, we at least have the 11327e2ab150SChristoph Lameter * most recent <s, d> pair that moved. If we get all the way through 11337e2ab150SChristoph Lameter * the scan of tmp without finding any node that moved, much less 11347e2ab150SChristoph Lameter * moved to an empty node, then there is nothing left worth migrating. 11357e2ab150SChristoph Lameter */ 11367e2ab150SChristoph Lameter 11370ce72d4fSAndrew Morton tmp = *from; 11387e2ab150SChristoph Lameter while (!nodes_empty(tmp)) { 11397e2ab150SChristoph Lameter int s, d; 1140b76ac7e7SJianguo Wu int source = NUMA_NO_NODE; 11417e2ab150SChristoph Lameter int dest = 0; 11427e2ab150SChristoph Lameter 11437e2ab150SChristoph Lameter for_each_node_mask(s, tmp) { 11444a5b18ccSLarry Woodman 11454a5b18ccSLarry Woodman /* 11464a5b18ccSLarry Woodman * do_migrate_pages() tries to maintain the relative 11474a5b18ccSLarry Woodman * node relationship of the pages established between 11484a5b18ccSLarry Woodman * threads and memory areas. 11494a5b18ccSLarry Woodman * 11504a5b18ccSLarry Woodman * However if the number of source nodes is not equal to 11514a5b18ccSLarry Woodman * the number of destination nodes we cannot preserve 11524a5b18ccSLarry Woodman * this node relative relationship. In that case, skip 11534a5b18ccSLarry Woodman * copying memory from a node that is in the destination 11544a5b18ccSLarry Woodman * mask. 11554a5b18ccSLarry Woodman * 11564a5b18ccSLarry Woodman * Example: [2,3,4] -> [3,4,5] moves everything. 11574a5b18ccSLarry Woodman * [0-7] -> [3,4,5] moves only 0,1,2,6,7. 11584a5b18ccSLarry Woodman */ 11594a5b18ccSLarry Woodman 11600ce72d4fSAndrew Morton if ((nodes_weight(*from) != nodes_weight(*to)) && 11610ce72d4fSAndrew Morton (node_isset(s, *to))) 11624a5b18ccSLarry Woodman continue; 11634a5b18ccSLarry Woodman 11640ce72d4fSAndrew Morton d = node_remap(s, *from, *to); 11657e2ab150SChristoph Lameter if (s == d) 11667e2ab150SChristoph Lameter continue; 11677e2ab150SChristoph Lameter 11687e2ab150SChristoph Lameter source = s; /* Node moved. Memorize */ 11697e2ab150SChristoph Lameter dest = d; 11707e2ab150SChristoph Lameter 11717e2ab150SChristoph Lameter /* dest not in remaining from nodes?
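 * If it is not, this <s, d> pair empties onto a node that no longer
 * needs draining, so stop scanning and migrate it right away (added
 * clarification).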
*/ 11727e2ab150SChristoph Lameter if (!node_isset(dest, tmp)) 11737e2ab150SChristoph Lameter break; 11747e2ab150SChristoph Lameter } 1175b76ac7e7SJianguo Wu if (source == NUMA_NO_NODE) 11767e2ab150SChristoph Lameter break; 11777e2ab150SChristoph Lameter 11787e2ab150SChristoph Lameter node_clear(source, tmp); 11797e2ab150SChristoph Lameter err = migrate_to_node(mm, source, dest, flags); 11807e2ab150SChristoph Lameter if (err > 0) 11817e2ab150SChristoph Lameter busy += err; 11827e2ab150SChristoph Lameter if (err < 0) 11837e2ab150SChristoph Lameter break; 118439743889SChristoph Lameter } 1185d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); 1186d479960eSMinchan Kim 1187361a2a22SMinchan Kim lru_cache_enable(); 11887e2ab150SChristoph Lameter if (err < 0) 11897e2ab150SChristoph Lameter return err; 11907e2ab150SChristoph Lameter return busy; 1191b20a3503SChristoph Lameter 119239743889SChristoph Lameter } 119339743889SChristoph Lameter 11943ad33b24SLee Schermerhorn /* 11953ad33b24SLee Schermerhorn * Allocate a new page for page migration based on vma policy. 1196d05f0cdcSHugh Dickins * Start by assuming the page is mapped by the same vma as contains @start. 11973ad33b24SLee Schermerhorn * Search forward from there, if not. N.B., this assumes that the 11983ad33b24SLee Schermerhorn * list of pages handed to migrate_pages()--which is how we get here-- 11993ad33b24SLee Schermerhorn * is in virtual address order. 12003ad33b24SLee Schermerhorn */ 12014e096ae1SMatthew Wilcox (Oracle) static struct folio *new_folio(struct folio *src, unsigned long start) 120295a402c3SChristoph Lameter { 1203d05f0cdcSHugh Dickins struct vm_area_struct *vma; 12043f649ab7SKees Cook unsigned long address; 120566850be5SLiam R. Howlett VMA_ITERATOR(vmi, current->mm, start); 1206ec4858e0SMatthew Wilcox (Oracle) gfp_t gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL; 120795a402c3SChristoph Lameter 120866850be5SLiam R. 
Howlett for_each_vma(vmi, vma) { 12094e096ae1SMatthew Wilcox (Oracle) address = page_address_in_vma(&src->page, vma); 12103ad33b24SLee Schermerhorn if (address != -EFAULT) 12113ad33b24SLee Schermerhorn break; 12123ad33b24SLee Schermerhorn } 12133ad33b24SLee Schermerhorn 1214d0ce0e47SSidhartha Kumar if (folio_test_hugetlb(src)) { 12154e096ae1SMatthew Wilcox (Oracle) return alloc_hugetlb_folio_vma(folio_hstate(src), 1216389c8178SMichal Hocko vma, address); 1217d0ce0e47SSidhartha Kumar } 1218c8633798SNaoya Horiguchi 1219ec4858e0SMatthew Wilcox (Oracle) if (folio_test_large(src)) 1220ec4858e0SMatthew Wilcox (Oracle) gfp = GFP_TRANSHUGE; 1221ec4858e0SMatthew Wilcox (Oracle) 122211c731e8SWanpeng Li /* 1223ec4858e0SMatthew Wilcox (Oracle) * if !vma, vma_alloc_folio() will use task or system default policy 122411c731e8SWanpeng Li */ 12254e096ae1SMatthew Wilcox (Oracle) return vma_alloc_folio(gfp, folio_order(src), vma, address, 1226ec4858e0SMatthew Wilcox (Oracle) folio_test_large(src)); 122795a402c3SChristoph Lameter } 1228b20a3503SChristoph Lameter #else 1229b20a3503SChristoph Lameter 12304a64981dSVishal Moola (Oracle) static int migrate_folio_add(struct folio *folio, struct list_head *foliolist, 1231b20a3503SChristoph Lameter unsigned long flags) 1232b20a3503SChristoph Lameter { 1233a53190a4SYang Shi return -EIO; 1234b20a3503SChristoph Lameter } 1235b20a3503SChristoph Lameter 12360ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, 12370ce72d4fSAndrew Morton const nodemask_t *to, int flags) 1238b20a3503SChristoph Lameter { 1239b20a3503SChristoph Lameter return -ENOSYS; 1240b20a3503SChristoph Lameter } 124195a402c3SChristoph Lameter 12424e096ae1SMatthew Wilcox (Oracle) static struct folio *new_folio(struct folio *src, unsigned long start) 124395a402c3SChristoph Lameter { 124495a402c3SChristoph Lameter return NULL; 124595a402c3SChristoph Lameter } 1246b20a3503SChristoph Lameter #endif 1247b20a3503SChristoph Lameter 1248dbcb0f19SAdrian Bunk static long do_mbind(unsigned long start, unsigned long len, 1249028fec41SDavid Rientjes unsigned short mode, unsigned short mode_flags, 1250028fec41SDavid Rientjes nodemask_t *nmask, unsigned long flags) 12516ce3c4c0SChristoph Lameter { 12526ce3c4c0SChristoph Lameter struct mm_struct *mm = current->mm; 1253f4e9e0e6SLiam R. Howlett struct vm_area_struct *vma, *prev; 1254f4e9e0e6SLiam R. 
Howlett struct vma_iterator vmi; 12556ce3c4c0SChristoph Lameter struct mempolicy *new; 12566ce3c4c0SChristoph Lameter unsigned long end; 12576ce3c4c0SChristoph Lameter int err; 1258d8835445SYang Shi int ret; 12596ce3c4c0SChristoph Lameter LIST_HEAD(pagelist); 12606ce3c4c0SChristoph Lameter 1261b24f53a0SLee Schermerhorn if (flags & ~(unsigned long)MPOL_MF_VALID) 12626ce3c4c0SChristoph Lameter return -EINVAL; 126374c00241SChristoph Lameter if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE)) 12646ce3c4c0SChristoph Lameter return -EPERM; 12656ce3c4c0SChristoph Lameter 12666ce3c4c0SChristoph Lameter if (start & ~PAGE_MASK) 12676ce3c4c0SChristoph Lameter return -EINVAL; 12686ce3c4c0SChristoph Lameter 12696ce3c4c0SChristoph Lameter if (mode == MPOL_DEFAULT) 12706ce3c4c0SChristoph Lameter flags &= ~MPOL_MF_STRICT; 12716ce3c4c0SChristoph Lameter 1272aaa31e05Sze zuo len = PAGE_ALIGN(len); 12736ce3c4c0SChristoph Lameter end = start + len; 12746ce3c4c0SChristoph Lameter 12756ce3c4c0SChristoph Lameter if (end < start) 12766ce3c4c0SChristoph Lameter return -EINVAL; 12776ce3c4c0SChristoph Lameter if (end == start) 12786ce3c4c0SChristoph Lameter return 0; 12796ce3c4c0SChristoph Lameter 1280028fec41SDavid Rientjes new = mpol_new(mode, mode_flags, nmask); 12816ce3c4c0SChristoph Lameter if (IS_ERR(new)) 12826ce3c4c0SChristoph Lameter return PTR_ERR(new); 12836ce3c4c0SChristoph Lameter 1284b24f53a0SLee Schermerhorn if (flags & MPOL_MF_LAZY) 1285b24f53a0SLee Schermerhorn new->flags |= MPOL_F_MOF; 1286b24f53a0SLee Schermerhorn 12876ce3c4c0SChristoph Lameter /* 12886ce3c4c0SChristoph Lameter * If we are using the default policy then operation 12896ce3c4c0SChristoph Lameter * on discontinuous address spaces is okay after all 12906ce3c4c0SChristoph Lameter */ 12916ce3c4c0SChristoph Lameter if (!new) 12926ce3c4c0SChristoph Lameter flags |= MPOL_MF_DISCONTIG_OK; 12936ce3c4c0SChristoph Lameter 1294028fec41SDavid Rientjes pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n", 1295028fec41SDavid Rientjes start, start + len, mode, mode_flags, 129600ef2d2fSDavid Rientjes nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE); 12976ce3c4c0SChristoph Lameter 12980aedadf9SChristoph Lameter if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 12990aedadf9SChristoph Lameter 1300361a2a22SMinchan Kim lru_cache_disable(); 13010aedadf9SChristoph Lameter } 13024bfc4495SKAMEZAWA Hiroyuki { 13034bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 13044bfc4495SKAMEZAWA Hiroyuki if (scratch) { 1305d8ed45c5SMichel Lespinasse mmap_write_lock(mm); 13064bfc4495SKAMEZAWA Hiroyuki err = mpol_set_nodemask(new, nmask, scratch); 13074bfc4495SKAMEZAWA Hiroyuki if (err) 1308d8ed45c5SMichel Lespinasse mmap_write_unlock(mm); 13094bfc4495SKAMEZAWA Hiroyuki } else 13104bfc4495SKAMEZAWA Hiroyuki err = -ENOMEM; 13114bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 13124bfc4495SKAMEZAWA Hiroyuki } 1313b05ca738SKOSAKI Motohiro if (err) 1314b05ca738SKOSAKI Motohiro goto mpol_out; 1315b05ca738SKOSAKI Motohiro 1316d8835445SYang Shi ret = queue_pages_range(mm, start, end, nmask, 13176ce3c4c0SChristoph Lameter flags | MPOL_MF_INVERT, &pagelist); 1318d8835445SYang Shi 1319d8835445SYang Shi if (ret < 0) { 1320a85dfc30SYang Shi err = ret; 1321d8835445SYang Shi goto up_out; 1322d8835445SYang Shi } 1323d8835445SYang Shi 1324f4e9e0e6SLiam R. Howlett vma_iter_init(&vmi, mm, start); 1325f4e9e0e6SLiam R. Howlett prev = vma_prev(&vmi); 1326f4e9e0e6SLiam R. Howlett for_each_vma_range(vmi, vma, end) { 1327f4e9e0e6SLiam R. 
Howlett err = mbind_range(&vmi, vma, &prev, start, end, new); 1328f4e9e0e6SLiam R. Howlett if (err) 1329f4e9e0e6SLiam R. Howlett break; 1330f4e9e0e6SLiam R. Howlett } 13317e2ab150SChristoph Lameter 1332b24f53a0SLee Schermerhorn if (!err) { 1333b24f53a0SLee Schermerhorn int nr_failed = 0; 1334b24f53a0SLee Schermerhorn 1335cf608ac1SMinchan Kim if (!list_empty(&pagelist)) { 1336b24f53a0SLee Schermerhorn WARN_ON_ONCE(flags & MPOL_MF_LAZY); 13374e096ae1SMatthew Wilcox (Oracle) nr_failed = migrate_pages(&pagelist, new_folio, NULL, 13385ac95884SYang Shi start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND, NULL); 1339cf608ac1SMinchan Kim if (nr_failed) 134074060e4dSNaoya Horiguchi putback_movable_pages(&pagelist); 1341cf608ac1SMinchan Kim } 13426ce3c4c0SChristoph Lameter 1343d8835445SYang Shi if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT))) 13446ce3c4c0SChristoph Lameter err = -EIO; 1345a85dfc30SYang Shi } else { 1346d8835445SYang Shi up_out: 1347a85dfc30SYang Shi if (!list_empty(&pagelist)) 1348a85dfc30SYang Shi putback_movable_pages(&pagelist); 1349a85dfc30SYang Shi } 1350a85dfc30SYang Shi 1351d8ed45c5SMichel Lespinasse mmap_write_unlock(mm); 1352b05ca738SKOSAKI Motohiro mpol_out: 1353f0be3d32SLee Schermerhorn mpol_put(new); 1354d479960eSMinchan Kim if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) 1355361a2a22SMinchan Kim lru_cache_enable(); 13566ce3c4c0SChristoph Lameter return err; 13576ce3c4c0SChristoph Lameter } 13586ce3c4c0SChristoph Lameter 135939743889SChristoph Lameter /* 13608bccd85fSChristoph Lameter * User space interface with variable sized bitmaps for nodelists. 13618bccd85fSChristoph Lameter */ 1362e130242dSArnd Bergmann static int get_bitmap(unsigned long *mask, const unsigned long __user *nmask, 1363e130242dSArnd Bergmann unsigned long maxnode) 1364e130242dSArnd Bergmann { 1365e130242dSArnd Bergmann unsigned long nlongs = BITS_TO_LONGS(maxnode); 1366e130242dSArnd Bergmann int ret; 1367e130242dSArnd Bergmann 1368e130242dSArnd Bergmann if (in_compat_syscall()) 1369e130242dSArnd Bergmann ret = compat_get_bitmap(mask, 1370e130242dSArnd Bergmann (const compat_ulong_t __user *)nmask, 1371e130242dSArnd Bergmann maxnode); 1372e130242dSArnd Bergmann else 1373e130242dSArnd Bergmann ret = copy_from_user(mask, nmask, 1374e130242dSArnd Bergmann nlongs * sizeof(unsigned long)); 1375e130242dSArnd Bergmann 1376e130242dSArnd Bergmann if (ret) 1377e130242dSArnd Bergmann return -EFAULT; 1378e130242dSArnd Bergmann 1379e130242dSArnd Bergmann if (maxnode % BITS_PER_LONG) 1380e130242dSArnd Bergmann mask[nlongs - 1] &= (1UL << (maxnode % BITS_PER_LONG)) - 1; 1381e130242dSArnd Bergmann 1382e130242dSArnd Bergmann return 0; 1383e130242dSArnd Bergmann } 13848bccd85fSChristoph Lameter 13858bccd85fSChristoph Lameter /* Copy a node mask from user space. 
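 *
 * Note (added): as implemented below, maxnode is decremented before
 * use, so only maxnode - 1 bits of the user-supplied mask are
 * examined, a long-standing quirk of this interface. Bits beyond
 * MAX_NUMNODES may be supplied, but they must all be clear or
 * -EINVAL is returned.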
*/ 138639743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask, 13878bccd85fSChristoph Lameter unsigned long maxnode) 13888bccd85fSChristoph Lameter { 13898bccd85fSChristoph Lameter --maxnode; 13908bccd85fSChristoph Lameter nodes_clear(*nodes); 13918bccd85fSChristoph Lameter if (maxnode == 0 || !nmask) 13928bccd85fSChristoph Lameter return 0; 1393a9c930baSAndi Kleen if (maxnode > PAGE_SIZE*BITS_PER_BYTE) 1394636f13c1SChris Wright return -EINVAL; 13958bccd85fSChristoph Lameter 139656521e7aSYisheng Xie /* 139756521e7aSYisheng Xie * When the user specifies more nodes than supported, just check 1398e130242dSArnd Bergmann * that the unsupported part is all zero, one word at a time, 1399e130242dSArnd Bergmann * starting at the end. 140056521e7aSYisheng Xie */ 1401e130242dSArnd Bergmann while (maxnode > MAX_NUMNODES) { 1402e130242dSArnd Bergmann unsigned long bits = min_t(unsigned long, maxnode, BITS_PER_LONG); 1403e130242dSArnd Bergmann unsigned long t; 14048bccd85fSChristoph Lameter 1405000eca5dSTianyu Li if (get_bitmap(&t, &nmask[(maxnode - 1) / BITS_PER_LONG], bits)) 140656521e7aSYisheng Xie return -EFAULT; 1407e130242dSArnd Bergmann 1408e130242dSArnd Bergmann if (maxnode - bits >= MAX_NUMNODES) { 1409e130242dSArnd Bergmann maxnode -= bits; 1410e130242dSArnd Bergmann } else { 1411e130242dSArnd Bergmann maxnode = MAX_NUMNODES; 1412e130242dSArnd Bergmann t &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1); 1413e130242dSArnd Bergmann } 1414e130242dSArnd Bergmann if (t) 141556521e7aSYisheng Xie return -EINVAL; 141656521e7aSYisheng Xie } 141756521e7aSYisheng Xie 1418e130242dSArnd Bergmann return get_bitmap(nodes_addr(*nodes), nmask, maxnode); 14198bccd85fSChristoph Lameter } 14208bccd85fSChristoph Lameter 14218bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */ 14228bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode, 14238bccd85fSChristoph Lameter nodemask_t *nodes) 14248bccd85fSChristoph Lameter { 14258bccd85fSChristoph Lameter unsigned long copy = ALIGN(maxnode-1, 64) / 8; 1426050c17f2SRalph Campbell unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long); 1427e130242dSArnd Bergmann bool compat = in_compat_syscall(); 1428e130242dSArnd Bergmann 1429e130242dSArnd Bergmann if (compat) 1430e130242dSArnd Bergmann nbytes = BITS_TO_COMPAT_LONGS(nr_node_ids) * sizeof(compat_long_t); 14318bccd85fSChristoph Lameter 14328bccd85fSChristoph Lameter if (copy > nbytes) { 14338bccd85fSChristoph Lameter if (copy > PAGE_SIZE) 14348bccd85fSChristoph Lameter return -EINVAL; 14358bccd85fSChristoph Lameter if (clear_user((char __user *)mask + nbytes, copy - nbytes)) 14368bccd85fSChristoph Lameter return -EFAULT; 14378bccd85fSChristoph Lameter copy = nbytes; 1438e130242dSArnd Bergmann maxnode = nr_node_ids; 14398bccd85fSChristoph Lameter } 1440e130242dSArnd Bergmann 1441e130242dSArnd Bergmann if (compat) 1442e130242dSArnd Bergmann return compat_put_bitmap((compat_ulong_t __user *)mask, 1443e130242dSArnd Bergmann nodes_addr(*nodes), maxnode); 1444e130242dSArnd Bergmann 14458bccd85fSChristoph Lameter return copy_to_user(mask, nodes_addr(*nodes), copy) ?
-EFAULT : 0; 14468bccd85fSChristoph Lameter } 14478bccd85fSChristoph Lameter 144895837924SFeng Tang /* Basic parameter sanity check used by both mbind() and set_mempolicy() */ 144995837924SFeng Tang static inline int sanitize_mpol_flags(int *mode, unsigned short *flags) 145095837924SFeng Tang { 145195837924SFeng Tang *flags = *mode & MPOL_MODE_FLAGS; 145295837924SFeng Tang *mode &= ~MPOL_MODE_FLAGS; 1453b27abaccSDave Hansen 1454a38a59fdSBen Widawsky if ((unsigned int)(*mode) >= MPOL_MAX) 145595837924SFeng Tang return -EINVAL; 145695837924SFeng Tang if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES)) 145795837924SFeng Tang return -EINVAL; 14586d2aec9eSEric Dumazet if (*flags & MPOL_F_NUMA_BALANCING) { 14596d2aec9eSEric Dumazet if (*mode != MPOL_BIND) 14606d2aec9eSEric Dumazet return -EINVAL; 14616d2aec9eSEric Dumazet *flags |= (MPOL_F_MOF | MPOL_F_MORON); 14626d2aec9eSEric Dumazet } 146395837924SFeng Tang return 0; 146495837924SFeng Tang } 146595837924SFeng Tang 1466e7dc9ad6SDominik Brodowski static long kernel_mbind(unsigned long start, unsigned long len, 1467e7dc9ad6SDominik Brodowski unsigned long mode, const unsigned long __user *nmask, 1468e7dc9ad6SDominik Brodowski unsigned long maxnode, unsigned int flags) 14698bccd85fSChristoph Lameter { 1470028fec41SDavid Rientjes unsigned short mode_flags; 147195837924SFeng Tang nodemask_t nodes; 147295837924SFeng Tang int lmode = mode; 147395837924SFeng Tang int err; 14748bccd85fSChristoph Lameter 1475057d3389SAndrey Konovalov start = untagged_addr(start); 147695837924SFeng Tang err = sanitize_mpol_flags(&lmode, &mode_flags); 147795837924SFeng Tang if (err) 147895837924SFeng Tang return err; 147995837924SFeng Tang 14808bccd85fSChristoph Lameter err = get_nodes(&nodes, nmask, maxnode); 14818bccd85fSChristoph Lameter if (err) 14828bccd85fSChristoph Lameter return err; 148395837924SFeng Tang 148495837924SFeng Tang return do_mbind(start, len, lmode, mode_flags, &nodes, flags); 14858bccd85fSChristoph Lameter } 14868bccd85fSChristoph Lameter 1487c6018b4bSAneesh Kumar K.V SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, len, 1488c6018b4bSAneesh Kumar K.V unsigned long, home_node, unsigned long, flags) 1489c6018b4bSAneesh Kumar K.V { 1490c6018b4bSAneesh Kumar K.V struct mm_struct *mm = current->mm; 1491f4e9e0e6SLiam R. Howlett struct vm_area_struct *vma, *prev; 1492e976936cSMichal Hocko struct mempolicy *new, *old; 1493c6018b4bSAneesh Kumar K.V unsigned long end; 1494c6018b4bSAneesh Kumar K.V int err = -ENOENT; 149566850be5SLiam R. Howlett VMA_ITERATOR(vmi, mm, start); 1496c6018b4bSAneesh Kumar K.V 1497c6018b4bSAneesh Kumar K.V start = untagged_addr(start); 1498c6018b4bSAneesh Kumar K.V if (start & ~PAGE_MASK) 1499c6018b4bSAneesh Kumar K.V return -EINVAL; 1500c6018b4bSAneesh Kumar K.V /* 1501c6018b4bSAneesh Kumar K.V * flags is used for future extension if any. 1502c6018b4bSAneesh Kumar K.V */ 1503c6018b4bSAneesh Kumar K.V if (flags != 0) 1504c6018b4bSAneesh Kumar K.V return -EINVAL; 1505c6018b4bSAneesh Kumar K.V 1506c6018b4bSAneesh Kumar K.V /* 1507c6018b4bSAneesh Kumar K.V * Check home_node is online to avoid accessing uninitialized 1508c6018b4bSAneesh Kumar K.V * NODE_DATA. 
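 *
 * Usage sketch (illustrative; assumes a libc that exposes the
 * syscall number as __NR_set_mempolicy_home_node and provides no
 * dedicated wrapper):
 *
 *	mbind(addr, len, MPOL_BIND, &nodes, maxnode, 0);
 *	syscall(__NR_set_mempolicy_home_node, addr, len, node, 0);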
1509c6018b4bSAneesh Kumar K.V */ 1510c6018b4bSAneesh Kumar K.V if (home_node >= MAX_NUMNODES || !node_online(home_node)) 1511c6018b4bSAneesh Kumar K.V return -EINVAL; 1512c6018b4bSAneesh Kumar K.V 1513aaa31e05Sze zuo len = PAGE_ALIGN(len); 1514c6018b4bSAneesh Kumar K.V end = start + len; 1515c6018b4bSAneesh Kumar K.V 1516c6018b4bSAneesh Kumar K.V if (end < start) 1517c6018b4bSAneesh Kumar K.V return -EINVAL; 1518c6018b4bSAneesh Kumar K.V if (end == start) 1519c6018b4bSAneesh Kumar K.V return 0; 1520c6018b4bSAneesh Kumar K.V mmap_write_lock(mm); 1521f4e9e0e6SLiam R. Howlett prev = vma_prev(&vmi); 152266850be5SLiam R. Howlett for_each_vma_range(vmi, vma, end) { 1523c6018b4bSAneesh Kumar K.V /* 1524c6018b4bSAneesh Kumar K.V * If any vma in the range got policy other than MPOL_BIND 1525c6018b4bSAneesh Kumar K.V * or MPOL_PREFERRED_MANY we return error. We don't reset 1526c6018b4bSAneesh Kumar K.V * the home node for vmas we already updated before. 1527c6018b4bSAneesh Kumar K.V */ 1528e976936cSMichal Hocko old = vma_policy(vma); 1529e976936cSMichal Hocko if (!old) 1530e976936cSMichal Hocko continue; 1531e976936cSMichal Hocko if (old->mode != MPOL_BIND && old->mode != MPOL_PREFERRED_MANY) { 1532c6018b4bSAneesh Kumar K.V err = -EOPNOTSUPP; 1533c6018b4bSAneesh Kumar K.V break; 1534c6018b4bSAneesh Kumar K.V } 1535e976936cSMichal Hocko new = mpol_dup(old); 1536e976936cSMichal Hocko if (IS_ERR(new)) { 1537e976936cSMichal Hocko err = PTR_ERR(new); 1538e976936cSMichal Hocko break; 1539e976936cSMichal Hocko } 1540c6018b4bSAneesh Kumar K.V 1541c6018b4bSAneesh Kumar K.V new->home_node = home_node; 1542f4e9e0e6SLiam R. Howlett err = mbind_range(&vmi, vma, &prev, start, end, new); 1543c6018b4bSAneesh Kumar K.V mpol_put(new); 1544c6018b4bSAneesh Kumar K.V if (err) 1545c6018b4bSAneesh Kumar K.V break; 1546c6018b4bSAneesh Kumar K.V } 1547c6018b4bSAneesh Kumar K.V mmap_write_unlock(mm); 1548c6018b4bSAneesh Kumar K.V return err; 1549c6018b4bSAneesh Kumar K.V } 1550c6018b4bSAneesh Kumar K.V 1551e7dc9ad6SDominik Brodowski SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len, 1552e7dc9ad6SDominik Brodowski unsigned long, mode, const unsigned long __user *, nmask, 1553e7dc9ad6SDominik Brodowski unsigned long, maxnode, unsigned int, flags) 1554e7dc9ad6SDominik Brodowski { 1555e7dc9ad6SDominik Brodowski return kernel_mbind(start, len, mode, nmask, maxnode, flags); 1556e7dc9ad6SDominik Brodowski } 1557e7dc9ad6SDominik Brodowski 15588bccd85fSChristoph Lameter /* Set the process memory policy */ 1559af03c4acSDominik Brodowski static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask, 1560af03c4acSDominik Brodowski unsigned long maxnode) 15618bccd85fSChristoph Lameter { 156295837924SFeng Tang unsigned short mode_flags; 15638bccd85fSChristoph Lameter nodemask_t nodes; 156495837924SFeng Tang int lmode = mode; 156595837924SFeng Tang int err; 15668bccd85fSChristoph Lameter 156795837924SFeng Tang err = sanitize_mpol_flags(&lmode, &mode_flags); 156895837924SFeng Tang if (err) 156995837924SFeng Tang return err; 157095837924SFeng Tang 15718bccd85fSChristoph Lameter err = get_nodes(&nodes, nmask, maxnode); 15728bccd85fSChristoph Lameter if (err) 15738bccd85fSChristoph Lameter return err; 157495837924SFeng Tang 157595837924SFeng Tang return do_set_mempolicy(lmode, mode_flags, &nodes); 15768bccd85fSChristoph Lameter } 15778bccd85fSChristoph Lameter 1578af03c4acSDominik Brodowski SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask, 1579af03c4acSDominik Brodowski unsigned long, 
maxnode) 1580af03c4acSDominik Brodowski { 1581af03c4acSDominik Brodowski return kernel_set_mempolicy(mode, nmask, maxnode); 1582af03c4acSDominik Brodowski } 1583af03c4acSDominik Brodowski 1584b6e9b0baSDominik Brodowski static int kernel_migrate_pages(pid_t pid, unsigned long maxnode, 1585b6e9b0baSDominik Brodowski const unsigned long __user *old_nodes, 1586b6e9b0baSDominik Brodowski const unsigned long __user *new_nodes) 158739743889SChristoph Lameter { 1588596d7cfaSKOSAKI Motohiro struct mm_struct *mm = NULL; 158939743889SChristoph Lameter struct task_struct *task; 159039743889SChristoph Lameter nodemask_t task_nodes; 159139743889SChristoph Lameter int err; 1592596d7cfaSKOSAKI Motohiro nodemask_t *old; 1593596d7cfaSKOSAKI Motohiro nodemask_t *new; 1594596d7cfaSKOSAKI Motohiro NODEMASK_SCRATCH(scratch); 159539743889SChristoph Lameter 1596596d7cfaSKOSAKI Motohiro if (!scratch) 1597596d7cfaSKOSAKI Motohiro return -ENOMEM; 159839743889SChristoph Lameter 1599596d7cfaSKOSAKI Motohiro old = &scratch->mask1; 1600596d7cfaSKOSAKI Motohiro new = &scratch->mask2; 1601596d7cfaSKOSAKI Motohiro 1602596d7cfaSKOSAKI Motohiro err = get_nodes(old, old_nodes, maxnode); 160339743889SChristoph Lameter if (err) 1604596d7cfaSKOSAKI Motohiro goto out; 1605596d7cfaSKOSAKI Motohiro 1606596d7cfaSKOSAKI Motohiro err = get_nodes(new, new_nodes, maxnode); 1607596d7cfaSKOSAKI Motohiro if (err) 1608596d7cfaSKOSAKI Motohiro goto out; 160939743889SChristoph Lameter 161039743889SChristoph Lameter /* Find the mm_struct */ 161155cfaa3cSZeng Zhaoming rcu_read_lock(); 1612228ebcbeSPavel Emelyanov task = pid ? find_task_by_vpid(pid) : current; 161339743889SChristoph Lameter if (!task) { 161455cfaa3cSZeng Zhaoming rcu_read_unlock(); 1615596d7cfaSKOSAKI Motohiro err = -ESRCH; 1616596d7cfaSKOSAKI Motohiro goto out; 161739743889SChristoph Lameter } 16183268c63eSChristoph Lameter get_task_struct(task); 161939743889SChristoph Lameter 1620596d7cfaSKOSAKI Motohiro err = -EINVAL; 162139743889SChristoph Lameter 162239743889SChristoph Lameter /* 162331367466SOtto Ebeling * Check if this process has the right to modify the specified process. 162431367466SOtto Ebeling * Use the regular "ptrace_may_access()" checks. 162539743889SChristoph Lameter */ 162631367466SOtto Ebeling if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) { 1627c69e8d9cSDavid Howells rcu_read_unlock(); 162839743889SChristoph Lameter err = -EPERM; 16293268c63eSChristoph Lameter goto out_put; 163039743889SChristoph Lameter } 1631c69e8d9cSDavid Howells rcu_read_unlock(); 163239743889SChristoph Lameter 163339743889SChristoph Lameter task_nodes = cpuset_mems_allowed(task); 163439743889SChristoph Lameter /* Is the user allowed to access the target nodes? 
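 * Moving pages to nodes outside the target task's cpuset requires
 * CAP_SYS_NICE, as enforced by the nodes_subset()/capable() check
 * below (added clarification).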
*/ 1635596d7cfaSKOSAKI Motohiro if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) { 163639743889SChristoph Lameter err = -EPERM; 16373268c63eSChristoph Lameter goto out_put; 163839743889SChristoph Lameter } 163939743889SChristoph Lameter 16400486a38bSYisheng Xie task_nodes = cpuset_mems_allowed(current); 16410486a38bSYisheng Xie nodes_and(*new, *new, task_nodes); 16420486a38bSYisheng Xie if (nodes_empty(*new)) 16433268c63eSChristoph Lameter goto out_put; 16440486a38bSYisheng Xie 164586c3a764SDavid Quigley err = security_task_movememory(task); 164686c3a764SDavid Quigley if (err) 16473268c63eSChristoph Lameter goto out_put; 164886c3a764SDavid Quigley 16493268c63eSChristoph Lameter mm = get_task_mm(task); 16503268c63eSChristoph Lameter put_task_struct(task); 1651f2a9ef88SSasha Levin 1652f2a9ef88SSasha Levin if (!mm) { 1653f2a9ef88SSasha Levin err = -EINVAL; 1654f2a9ef88SSasha Levin goto out; 1655f2a9ef88SSasha Levin } 1656f2a9ef88SSasha Levin 1657596d7cfaSKOSAKI Motohiro err = do_migrate_pages(mm, old, new, 165874c00241SChristoph Lameter capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE); 16593268c63eSChristoph Lameter 166039743889SChristoph Lameter mmput(mm); 16613268c63eSChristoph Lameter out: 1662596d7cfaSKOSAKI Motohiro NODEMASK_SCRATCH_FREE(scratch); 1663596d7cfaSKOSAKI Motohiro 166439743889SChristoph Lameter return err; 16653268c63eSChristoph Lameter 16663268c63eSChristoph Lameter out_put: 16673268c63eSChristoph Lameter put_task_struct(task); 16683268c63eSChristoph Lameter goto out; 16693268c63eSChristoph Lameter 167039743889SChristoph Lameter } 167139743889SChristoph Lameter 1672b6e9b0baSDominik Brodowski SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode, 1673b6e9b0baSDominik Brodowski const unsigned long __user *, old_nodes, 1674b6e9b0baSDominik Brodowski const unsigned long __user *, new_nodes) 1675b6e9b0baSDominik Brodowski { 1676b6e9b0baSDominik Brodowski return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes); 1677b6e9b0baSDominik Brodowski } 1678b6e9b0baSDominik Brodowski 167939743889SChristoph Lameter 16808bccd85fSChristoph Lameter /* Retrieve NUMA policy */ 1681af03c4acSDominik Brodowski static int kernel_get_mempolicy(int __user *policy, 1682af03c4acSDominik Brodowski unsigned long __user *nmask, 1683af03c4acSDominik Brodowski unsigned long maxnode, 1684af03c4acSDominik Brodowski unsigned long addr, 1685af03c4acSDominik Brodowski unsigned long flags) 16868bccd85fSChristoph Lameter { 1687dbcb0f19SAdrian Bunk int err; 16883f649ab7SKees Cook int pval; 16898bccd85fSChristoph Lameter nodemask_t nodes; 16908bccd85fSChristoph Lameter 1691050c17f2SRalph Campbell if (nmask != NULL && maxnode < nr_node_ids) 16928bccd85fSChristoph Lameter return -EINVAL; 16938bccd85fSChristoph Lameter 16944605f057SWenchao Hao addr = untagged_addr(addr); 16954605f057SWenchao Hao 16968bccd85fSChristoph Lameter err = do_get_mempolicy(&pval, &nodes, addr, flags); 16978bccd85fSChristoph Lameter 16988bccd85fSChristoph Lameter if (err) 16998bccd85fSChristoph Lameter return err; 17008bccd85fSChristoph Lameter 17018bccd85fSChristoph Lameter if (policy && put_user(pval, policy)) 17028bccd85fSChristoph Lameter return -EFAULT; 17038bccd85fSChristoph Lameter 17048bccd85fSChristoph Lameter if (nmask) 17058bccd85fSChristoph Lameter err = copy_nodes_to_user(nmask, maxnode, &nodes); 17068bccd85fSChristoph Lameter 17078bccd85fSChristoph Lameter return err; 17088bccd85fSChristoph Lameter } 17098bccd85fSChristoph Lameter 1710af03c4acSDominik Brodowski SYSCALL_DEFINE5(get_mempolicy, 
int __user *, policy, 1711af03c4acSDominik Brodowski unsigned long __user *, nmask, unsigned long, maxnode, 1712af03c4acSDominik Brodowski unsigned long, addr, unsigned long, flags) 1713af03c4acSDominik Brodowski { 1714af03c4acSDominik Brodowski return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags); 1715af03c4acSDominik Brodowski } 1716af03c4acSDominik Brodowski 171720ca87f2SLi Xinhai bool vma_migratable(struct vm_area_struct *vma) 171820ca87f2SLi Xinhai { 171920ca87f2SLi Xinhai if (vma->vm_flags & (VM_IO | VM_PFNMAP)) 172020ca87f2SLi Xinhai return false; 172120ca87f2SLi Xinhai 172220ca87f2SLi Xinhai /* 172320ca87f2SLi Xinhai * DAX device mappings require predictable access latency, so avoid 172420ca87f2SLi Xinhai * incurring periodic faults. 172520ca87f2SLi Xinhai */ 172620ca87f2SLi Xinhai if (vma_is_dax(vma)) 172720ca87f2SLi Xinhai return false; 172820ca87f2SLi Xinhai 172920ca87f2SLi Xinhai if (is_vm_hugetlb_page(vma) && 173020ca87f2SLi Xinhai !hugepage_migration_supported(hstate_vma(vma))) 173120ca87f2SLi Xinhai return false; 173220ca87f2SLi Xinhai 173320ca87f2SLi Xinhai /* 173420ca87f2SLi Xinhai * Migration allocates pages in the highest zone. If we cannot 173520ca87f2SLi Xinhai * do so then migration (at least from node to node) is not 173620ca87f2SLi Xinhai * possible. 173720ca87f2SLi Xinhai */ 173820ca87f2SLi Xinhai if (vma->vm_file && 173920ca87f2SLi Xinhai gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping)) 174020ca87f2SLi Xinhai < policy_zone) 174120ca87f2SLi Xinhai return false; 174220ca87f2SLi Xinhai return true; 174320ca87f2SLi Xinhai } 174420ca87f2SLi Xinhai 174574d2c3a0SOleg Nesterov struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, 174674d2c3a0SOleg Nesterov unsigned long addr) 17471da177e4SLinus Torvalds { 17488d90274bSOleg Nesterov struct mempolicy *pol = NULL; 17491da177e4SLinus Torvalds 17501da177e4SLinus Torvalds if (vma) { 1751480eccf9SLee Schermerhorn if (vma->vm_ops && vma->vm_ops->get_policy) { 17528d90274bSOleg Nesterov pol = vma->vm_ops->get_policy(vma, addr); 175300442ad0SMel Gorman } else if (vma->vm_policy) { 17541da177e4SLinus Torvalds pol = vma->vm_policy; 175500442ad0SMel Gorman 175600442ad0SMel Gorman /* 175700442ad0SMel Gorman * shmem_alloc_page() passes MPOL_F_SHARED policy with 175800442ad0SMel Gorman * a pseudo vma whose vma->vm_ops=NULL. Take a reference 175900442ad0SMel Gorman * count on these policies which will be dropped by 176000442ad0SMel Gorman * mpol_cond_put() later 176100442ad0SMel Gorman */ 176200442ad0SMel Gorman if (mpol_needs_cond_ref(pol)) 176300442ad0SMel Gorman mpol_get(pol); 176400442ad0SMel Gorman } 17651da177e4SLinus Torvalds } 1766f15ca78eSOleg Nesterov 176774d2c3a0SOleg Nesterov return pol; 176874d2c3a0SOleg Nesterov } 176974d2c3a0SOleg Nesterov 177074d2c3a0SOleg Nesterov /* 1771dd6eecb9SOleg Nesterov * get_vma_policy(@vma, @addr) 177274d2c3a0SOleg Nesterov * @vma: virtual memory area whose policy is sought 177374d2c3a0SOleg Nesterov * @addr: address in @vma for shared policy lookup 177474d2c3a0SOleg Nesterov * 177574d2c3a0SOleg Nesterov * Returns effective policy for a VMA at specified address. 1776dd6eecb9SOleg Nesterov * Falls back to current->mempolicy or system default policy, as necessary. 177774d2c3a0SOleg Nesterov * Shared policies [those marked as MPOL_F_SHARED] require an extra reference 177874d2c3a0SOleg Nesterov * count--added by the get_policy() vm_op, as appropriate--to protect against 177974d2c3a0SOleg Nesterov * freeing by another task. 
It is the caller's responsibility to free the 178074d2c3a0SOleg Nesterov * extra reference for shared policies. 178174d2c3a0SOleg Nesterov */ 1782ac79f78dSDavid Rientjes static struct mempolicy *get_vma_policy(struct vm_area_struct *vma, 1783dd6eecb9SOleg Nesterov unsigned long addr) 178474d2c3a0SOleg Nesterov { 178574d2c3a0SOleg Nesterov struct mempolicy *pol = __get_vma_policy(vma, addr); 178674d2c3a0SOleg Nesterov 17878d90274bSOleg Nesterov if (!pol) 1788dd6eecb9SOleg Nesterov pol = get_task_policy(current); 17898d90274bSOleg Nesterov 17901da177e4SLinus Torvalds return pol; 17911da177e4SLinus Torvalds } 17921da177e4SLinus Torvalds 17936b6482bbSOleg Nesterov bool vma_policy_mof(struct vm_area_struct *vma) 1794fc314724SMel Gorman { 17956b6482bbSOleg Nesterov struct mempolicy *pol; 1796f15ca78eSOleg Nesterov 1797fc314724SMel Gorman if (vma->vm_ops && vma->vm_ops->get_policy) { 1798fc314724SMel Gorman bool ret = false; 1799fc314724SMel Gorman 1800fc314724SMel Gorman pol = vma->vm_ops->get_policy(vma, vma->vm_start); 1801fc314724SMel Gorman if (pol && (pol->flags & MPOL_F_MOF)) 1802fc314724SMel Gorman ret = true; 1803fc314724SMel Gorman mpol_cond_put(pol); 1804fc314724SMel Gorman 1805fc314724SMel Gorman return ret; 18068d90274bSOleg Nesterov } 18078d90274bSOleg Nesterov 1808fc314724SMel Gorman pol = vma->vm_policy; 18098d90274bSOleg Nesterov if (!pol) 18106b6482bbSOleg Nesterov pol = get_task_policy(current); 1811fc314724SMel Gorman 1812fc314724SMel Gorman return pol->flags & MPOL_F_MOF; 1813fc314724SMel Gorman } 1814fc314724SMel Gorman 1815d2226ebdSFeng Tang bool apply_policy_zone(struct mempolicy *policy, enum zone_type zone) 1816d3eb1570SLai Jiangshan { 1817d3eb1570SLai Jiangshan enum zone_type dynamic_policy_zone = policy_zone; 1818d3eb1570SLai Jiangshan 1819d3eb1570SLai Jiangshan BUG_ON(dynamic_policy_zone == ZONE_MOVABLE); 1820d3eb1570SLai Jiangshan 1821d3eb1570SLai Jiangshan /* 1822269fbe72SBen Widawsky * if policy->nodes has movable memory only, 1823d3eb1570SLai Jiangshan * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only. 1824d3eb1570SLai Jiangshan * 1825269fbe72SBen Widawsky * policy->nodes intersects with node_states[N_MEMORY], 1826f0953a1bSIngo Molnar * so if the following test fails, it implies that 1827269fbe72SBen Widawsky * policy->nodes has movable memory only.
1828d3eb1570SLai Jiangshan */ 1829269fbe72SBen Widawsky if (!nodes_intersects(policy->nodes, node_states[N_HIGH_MEMORY])) 1830d3eb1570SLai Jiangshan dynamic_policy_zone = ZONE_MOVABLE; 1831d3eb1570SLai Jiangshan 1832d3eb1570SLai Jiangshan return zone >= dynamic_policy_zone; 1833d3eb1570SLai Jiangshan } 1834d3eb1570SLai Jiangshan 183552cd3b07SLee Schermerhorn /* 183652cd3b07SLee Schermerhorn * Return a nodemask representing a mempolicy for filtering nodes for 183752cd3b07SLee Schermerhorn * page allocation 183852cd3b07SLee Schermerhorn */ 18398ca39e68SMuchun Song nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy) 184019770b32SMel Gorman { 1841b27abaccSDave Hansen int mode = policy->mode; 1842b27abaccSDave Hansen 184319770b32SMel Gorman /* Lower zones don't get a nodemask applied for MPOL_BIND */ 1844b27abaccSDave Hansen if (unlikely(mode == MPOL_BIND) && 1845d3eb1570SLai Jiangshan apply_policy_zone(policy, gfp_zone(gfp)) && 1846269fbe72SBen Widawsky cpuset_nodemask_valid_mems_allowed(&policy->nodes)) 1847269fbe72SBen Widawsky return &policy->nodes; 184819770b32SMel Gorman 1849b27abaccSDave Hansen if (mode == MPOL_PREFERRED_MANY) 1850b27abaccSDave Hansen return &policy->nodes; 1851b27abaccSDave Hansen 185219770b32SMel Gorman return NULL; 185319770b32SMel Gorman } 185419770b32SMel Gorman 1855b27abaccSDave Hansen /* 1856b27abaccSDave Hansen * Return the preferred node id for 'prefer' mempolicy, and return 1857b27abaccSDave Hansen * the given id for all other policies. 1858b27abaccSDave Hansen * 1859b27abaccSDave Hansen * policy_node() is always coupled with policy_nodemask(), which 1860b27abaccSDave Hansen * secures the nodemask limit for 'bind' and 'prefer-many' policy. 1861b27abaccSDave Hansen */ 1862f8fd5253SWei Yang static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd) 18631da177e4SLinus Torvalds { 18647858d7bcSFeng Tang if (policy->mode == MPOL_PREFERRED) { 1865269fbe72SBen Widawsky nd = first_node(policy->nodes); 18667858d7bcSFeng Tang } else { 186719770b32SMel Gorman /* 18686d840958SMichal Hocko * __GFP_THISNODE shouldn't even be used with the bind policy 18696d840958SMichal Hocko * because we might easily break the expectation to stay on the 18706d840958SMichal Hocko * requested node and not break the policy. 
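 * Illustrative example (added): MPOL_BIND to nodes {2,3} combined
 * with __GFP_THISNODE on node 0 would have to either violate the
 * bind mask or fail needlessly, hence the WARN_ON_ONCE below.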
187119770b32SMel Gorman */ 18726d840958SMichal Hocko WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE)); 18731da177e4SLinus Torvalds } 18746d840958SMichal Hocko 1875c6018b4bSAneesh Kumar K.V if ((policy->mode == MPOL_BIND || 1876c6018b4bSAneesh Kumar K.V policy->mode == MPOL_PREFERRED_MANY) && 1877c6018b4bSAneesh Kumar K.V policy->home_node != NUMA_NO_NODE) 1878c6018b4bSAneesh Kumar K.V return policy->home_node; 1879c6018b4bSAneesh Kumar K.V 188004ec6264SVlastimil Babka return nd; 18811da177e4SLinus Torvalds } 18821da177e4SLinus Torvalds 18831da177e4SLinus Torvalds /* Do dynamic interleaving for a process */ 18841da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy) 18851da177e4SLinus Torvalds { 188645816682SVlastimil Babka unsigned next; 18871da177e4SLinus Torvalds struct task_struct *me = current; 18881da177e4SLinus Torvalds 1889269fbe72SBen Widawsky next = next_node_in(me->il_prev, policy->nodes); 1890f5b087b5SDavid Rientjes if (next < MAX_NUMNODES) 189145816682SVlastimil Babka me->il_prev = next; 189245816682SVlastimil Babka return next; 18931da177e4SLinus Torvalds } 18941da177e4SLinus Torvalds 1895dc85da15SChristoph Lameter /* 1896dc85da15SChristoph Lameter * Depending on the memory policy provide a node from which to allocate the 1897dc85da15SChristoph Lameter * next slab entry. 1898dc85da15SChristoph Lameter */ 18992a389610SDavid Rientjes unsigned int mempolicy_slab_node(void) 1900dc85da15SChristoph Lameter { 1901e7b691b0SAndi Kleen struct mempolicy *policy; 19022a389610SDavid Rientjes int node = numa_mem_id(); 1903e7b691b0SAndi Kleen 190438b031ddSVasily Averin if (!in_task()) 19052a389610SDavid Rientjes return node; 1906e7b691b0SAndi Kleen 1907e7b691b0SAndi Kleen policy = current->mempolicy; 19087858d7bcSFeng Tang if (!policy) 19092a389610SDavid Rientjes return node; 1910765c4507SChristoph Lameter 1911bea904d5SLee Schermerhorn switch (policy->mode) { 1912bea904d5SLee Schermerhorn case MPOL_PREFERRED: 1913269fbe72SBen Widawsky return first_node(policy->nodes); 1914bea904d5SLee Schermerhorn 1915dc85da15SChristoph Lameter case MPOL_INTERLEAVE: 1916dc85da15SChristoph Lameter return interleave_nodes(policy); 1917dc85da15SChristoph Lameter 1918b27abaccSDave Hansen case MPOL_BIND: 1919b27abaccSDave Hansen case MPOL_PREFERRED_MANY: 1920b27abaccSDave Hansen { 1921c33d6c06SMel Gorman struct zoneref *z; 1922c33d6c06SMel Gorman 1923dc85da15SChristoph Lameter /* 1924dc85da15SChristoph Lameter * Follow bind policy behavior and start allocation at the 1925dc85da15SChristoph Lameter * first node. 1926dc85da15SChristoph Lameter */ 192719770b32SMel Gorman struct zonelist *zonelist; 192819770b32SMel Gorman enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL); 1929c9634cf0SAneesh Kumar K.V zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK]; 1930c33d6c06SMel Gorman z = first_zones_zonelist(zonelist, highest_zoneidx, 1931269fbe72SBen Widawsky &policy->nodes); 1932c1093b74SPavel Tatashin return z->zone ? zone_to_nid(z->zone) : node; 1933dd1a239fSMel Gorman } 19347858d7bcSFeng Tang case MPOL_LOCAL: 19357858d7bcSFeng Tang return node; 1936dc85da15SChristoph Lameter 1937dc85da15SChristoph Lameter default: 1938bea904d5SLee Schermerhorn BUG(); 1939dc85da15SChristoph Lameter } 1940dc85da15SChristoph Lameter } 1941dc85da15SChristoph Lameter 1942fee83b3aSAndrew Morton /* 1943fee83b3aSAndrew Morton * Do static interleaving for a VMA with known offset @n. 
Returns the n'th 1944269fbe72SBen Widawsky * node in pol->nodes (starting from n=0), wrapping around if n exceeds the 1945fee83b3aSAndrew Morton * number of present nodes. 1946fee83b3aSAndrew Morton */ 194798c70baaSLaurent Dufour static unsigned offset_il_node(struct mempolicy *pol, unsigned long n) 19481da177e4SLinus Torvalds { 1949276aeee1Syanghui nodemask_t nodemask = pol->nodes; 1950276aeee1Syanghui unsigned int target, nnodes; 1951fee83b3aSAndrew Morton int i; 1952fee83b3aSAndrew Morton int nid; 1953276aeee1Syanghui /* 1954276aeee1Syanghui * The barrier will stabilize the nodemask in a register or on 1955276aeee1Syanghui * the stack so that it will stop changing under the code. 1956276aeee1Syanghui * 1957276aeee1Syanghui * Between first_node() and next_node(), pol->nodes could be changed 1958276aeee1Syanghui * by other threads. So we put pol->nodes in a local stack. 1959276aeee1Syanghui */ 1960276aeee1Syanghui barrier(); 19611da177e4SLinus Torvalds 1962276aeee1Syanghui nnodes = nodes_weight(nodemask); 1963f5b087b5SDavid Rientjes if (!nnodes) 1964f5b087b5SDavid Rientjes return numa_node_id(); 1965fee83b3aSAndrew Morton target = (unsigned int)n % nnodes; 1966276aeee1Syanghui nid = first_node(nodemask); 1967fee83b3aSAndrew Morton for (i = 0; i < target; i++) 1968276aeee1Syanghui nid = next_node(nid, nodemask); 19691da177e4SLinus Torvalds return nid; 19701da177e4SLinus Torvalds } 19711da177e4SLinus Torvalds 19725da7ca86SChristoph Lameter /* Determine a node number for interleave */ 19735da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol, 19745da7ca86SChristoph Lameter struct vm_area_struct *vma, unsigned long addr, int shift) 19755da7ca86SChristoph Lameter { 19765da7ca86SChristoph Lameter if (vma) { 19775da7ca86SChristoph Lameter unsigned long off; 19785da7ca86SChristoph Lameter 19793b98b087SNishanth Aravamudan /* 19803b98b087SNishanth Aravamudan * for small pages, there is no difference between 19813b98b087SNishanth Aravamudan * shift and PAGE_SHIFT, so the bit-shift is safe. 19823b98b087SNishanth Aravamudan * for huge pages, since vm_pgoff is in units of small 19833b98b087SNishanth Aravamudan * pages, we need to shift off the always 0 bits to get 19843b98b087SNishanth Aravamudan * a useful offset. 
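 *
 * Worked example (added): with 2MB huge pages on x86-64, shift = 21
 * and PAGE_SHIFT = 12, so off = vm_pgoff >> 9, i.e. the offset is
 * counted in huge-page-sized units before adding the in-VMA offset
 * (addr - vm_start) >> shift.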
19853b98b087SNishanth Aravamudan */ 19863b98b087SNishanth Aravamudan BUG_ON(shift < PAGE_SHIFT); 19873b98b087SNishanth Aravamudan off = vma->vm_pgoff >> (shift - PAGE_SHIFT); 19885da7ca86SChristoph Lameter off += (addr - vma->vm_start) >> shift; 198998c70baaSLaurent Dufour return offset_il_node(pol, off); 19905da7ca86SChristoph Lameter } else 19915da7ca86SChristoph Lameter return interleave_nodes(pol); 19925da7ca86SChristoph Lameter } 19935da7ca86SChristoph Lameter 199400ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS 1995480eccf9SLee Schermerhorn /* 199604ec6264SVlastimil Babka * huge_node(@vma, @addr, @gfp_flags, @mpol) 1997b46e14acSFabian Frederick * @vma: virtual memory area whose policy is sought 1998b46e14acSFabian Frederick * @addr: address in @vma for shared policy lookup and interleave policy 1999b46e14acSFabian Frederick * @gfp_flags: for requested zone 2000b46e14acSFabian Frederick * @mpol: pointer to mempolicy pointer for reference counted mempolicy 2001b27abaccSDave Hansen * @nodemask: pointer to nodemask pointer for 'bind' and 'prefer-many' policy 2002480eccf9SLee Schermerhorn * 200304ec6264SVlastimil Babka * Returns a nid suitable for a huge page allocation and a pointer 200452cd3b07SLee Schermerhorn * to the struct mempolicy for conditional unref after allocation. 2005b27abaccSDave Hansen * If the effective policy is 'bind' or 'prefer-many', returns a pointer 2006b27abaccSDave Hansen * to the mempolicy's @nodemask for filtering the zonelist. 2007c0ff7453SMiao Xie * 2008d26914d1SMel Gorman * Must be protected by read_mems_allowed_begin() 2009480eccf9SLee Schermerhorn */ 201004ec6264SVlastimil Babka int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags, 201104ec6264SVlastimil Babka struct mempolicy **mpol, nodemask_t **nodemask) 20125da7ca86SChristoph Lameter { 201304ec6264SVlastimil Babka int nid; 2014b27abaccSDave Hansen int mode; 20155da7ca86SChristoph Lameter 2016dd6eecb9SOleg Nesterov *mpol = get_vma_policy(vma, addr); 2017b27abaccSDave Hansen *nodemask = NULL; 2018b27abaccSDave Hansen mode = (*mpol)->mode; 20195da7ca86SChristoph Lameter 2020b27abaccSDave Hansen if (unlikely(mode == MPOL_INTERLEAVE)) { 202104ec6264SVlastimil Babka nid = interleave_nid(*mpol, vma, addr, 202204ec6264SVlastimil Babka huge_page_shift(hstate_vma(vma))); 202352cd3b07SLee Schermerhorn } else { 202404ec6264SVlastimil Babka nid = policy_node(gfp_flags, *mpol, numa_node_id()); 2025b27abaccSDave Hansen if (mode == MPOL_BIND || mode == MPOL_PREFERRED_MANY) 2026269fbe72SBen Widawsky *nodemask = &(*mpol)->nodes; 2027480eccf9SLee Schermerhorn } 202804ec6264SVlastimil Babka return nid; 20295da7ca86SChristoph Lameter } 203006808b08SLee Schermerhorn 203106808b08SLee Schermerhorn /* 203206808b08SLee Schermerhorn * init_nodemask_of_mempolicy 203306808b08SLee Schermerhorn * 203406808b08SLee Schermerhorn * If the current task's mempolicy is "default" [NULL], return 'false' 203506808b08SLee Schermerhorn * to indicate default policy. Otherwise, extract the policy nodemask 203606808b08SLee Schermerhorn * for 'bind' or 'interleave' policy into the argument nodemask, or 203706808b08SLee Schermerhorn * initialize the argument nodemask to contain the single node for 203806808b08SLee Schermerhorn * 'preferred' or 'local' policy and return 'true' to indicate presence 203906808b08SLee Schermerhorn * of non-default mempolicy. 
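 * Example caller (added note; see hugetlb's nr_hugepages_mempolicy
 * handling): the huge page pool can be resized under the current
 * task's mempolicy, spreading pages only over the returned nodes.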
204006808b08SLee Schermerhorn * 204106808b08SLee Schermerhorn * We don't bother with reference counting the mempolicy [mpol_get/put] 204206808b08SLee Schermerhorn * because the current task is examining its own mempolicy and a task's 204306808b08SLee Schermerhorn * mempolicy is only ever changed by the task itself. 204406808b08SLee Schermerhorn * 204506808b08SLee Schermerhorn * N.B., it is the caller's responsibility to free a returned nodemask. 204606808b08SLee Schermerhorn */ 204706808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask) 204806808b08SLee Schermerhorn { 204906808b08SLee Schermerhorn struct mempolicy *mempolicy; 205006808b08SLee Schermerhorn 205106808b08SLee Schermerhorn if (!(mask && current->mempolicy)) 205206808b08SLee Schermerhorn return false; 205306808b08SLee Schermerhorn 2054c0ff7453SMiao Xie task_lock(current); 205506808b08SLee Schermerhorn mempolicy = current->mempolicy; 205606808b08SLee Schermerhorn switch (mempolicy->mode) { 205706808b08SLee Schermerhorn case MPOL_PREFERRED: 2058b27abaccSDave Hansen case MPOL_PREFERRED_MANY: 205906808b08SLee Schermerhorn case MPOL_BIND: 206006808b08SLee Schermerhorn case MPOL_INTERLEAVE: 2061269fbe72SBen Widawsky *mask = mempolicy->nodes; 206206808b08SLee Schermerhorn break; 206306808b08SLee Schermerhorn 20647858d7bcSFeng Tang case MPOL_LOCAL: 2065269fbe72SBen Widawsky init_nodemask_of_node(mask, numa_node_id()); 20667858d7bcSFeng Tang break; 20677858d7bcSFeng Tang 206806808b08SLee Schermerhorn default: 206906808b08SLee Schermerhorn BUG(); 207006808b08SLee Schermerhorn } 2071c0ff7453SMiao Xie task_unlock(current); 207206808b08SLee Schermerhorn 207306808b08SLee Schermerhorn return true; 207406808b08SLee Schermerhorn } 207500ac59adSChen, Kenneth W #endif 20765da7ca86SChristoph Lameter 20776f48d0ebSDavid Rientjes /* 2078b26e517aSFeng Tang * mempolicy_in_oom_domain 20796f48d0ebSDavid Rientjes * 2080b26e517aSFeng Tang * If tsk's mempolicy is "bind", check for intersection between mask and 2081b26e517aSFeng Tang * the policy nodemask. Otherwise, return true for all other policies 2082b26e517aSFeng Tang * including "interleave", as a tsk with "interleave" policy may have 2083b26e517aSFeng Tang * memory allocated from all nodes in the system. 20846f48d0ebSDavid Rientjes * 20856f48d0ebSDavid Rientjes * Takes task_lock(tsk) to prevent freeing of its mempolicy. 20866f48d0ebSDavid Rientjes */ 2087b26e517aSFeng Tang bool mempolicy_in_oom_domain(struct task_struct *tsk, 20886f48d0ebSDavid Rientjes const nodemask_t *mask) 20896f48d0ebSDavid Rientjes { 20906f48d0ebSDavid Rientjes struct mempolicy *mempolicy; 20916f48d0ebSDavid Rientjes bool ret = true; 20926f48d0ebSDavid Rientjes 20936f48d0ebSDavid Rientjes if (!mask) 20946f48d0ebSDavid Rientjes return ret; 2095b26e517aSFeng Tang 20966f48d0ebSDavid Rientjes task_lock(tsk); 20976f48d0ebSDavid Rientjes mempolicy = tsk->mempolicy; 2098b26e517aSFeng Tang if (mempolicy && mempolicy->mode == MPOL_BIND) 2099269fbe72SBen Widawsky ret = nodes_intersects(mempolicy->nodes, *mask); 21006f48d0ebSDavid Rientjes task_unlock(tsk); 2101b26e517aSFeng Tang 21026f48d0ebSDavid Rientjes return ret; 21036f48d0ebSDavid Rientjes } 21046f48d0ebSDavid Rientjes 21051da177e4SLinus Torvalds /* Allocate a page in interleaved policy. 21061da177e4SLinus Torvalds Own path because it needs to do special accounting.
*/ 2107662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, 2108662f3a0bSAndi Kleen unsigned nid) 21091da177e4SLinus Torvalds { 21101da177e4SLinus Torvalds struct page *page; 21111da177e4SLinus Torvalds 211284172f4bSMatthew Wilcox (Oracle) page = __alloc_pages(gfp, order, nid, NULL); 21134518085eSKemi Wang /* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */ 21144518085eSKemi Wang if (!static_branch_likely(&vm_numa_stat_key)) 21154518085eSKemi Wang return page; 2116de55c8b2SAndrey Ryabinin if (page && page_to_nid(page) == nid) { 2117de55c8b2SAndrey Ryabinin preempt_disable(); 2118f19298b9SMel Gorman __count_numa_event(page_zone(page), NUMA_INTERLEAVE_HIT); 2119de55c8b2SAndrey Ryabinin preempt_enable(); 2120de55c8b2SAndrey Ryabinin } 21211da177e4SLinus Torvalds return page; 21221da177e4SLinus Torvalds } 21231da177e4SLinus Torvalds 21244c54d949SFeng Tang static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order, 21254c54d949SFeng Tang int nid, struct mempolicy *pol) 21264c54d949SFeng Tang { 21274c54d949SFeng Tang struct page *page; 21284c54d949SFeng Tang gfp_t preferred_gfp; 21294c54d949SFeng Tang 21304c54d949SFeng Tang /* 21314c54d949SFeng Tang * This is a two pass approach. The first pass will only try the 21324c54d949SFeng Tang * preferred nodes but skip the direct reclaim and allow the 21334c54d949SFeng Tang * allocation to fail, while the second pass will try all the 21344c54d949SFeng Tang * nodes in system. 21354c54d949SFeng Tang */ 21364c54d949SFeng Tang preferred_gfp = gfp | __GFP_NOWARN; 21374c54d949SFeng Tang preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL); 21384c54d949SFeng Tang page = __alloc_pages(preferred_gfp, order, nid, &pol->nodes); 21394c54d949SFeng Tang if (!page) 2140c0455116SAneesh Kumar K.V page = __alloc_pages(gfp, order, nid, NULL); 21414c54d949SFeng Tang 21424c54d949SFeng Tang return page; 21434c54d949SFeng Tang } 21444c54d949SFeng Tang 21451da177e4SLinus Torvalds /** 2146adf88aa8SMatthew Wilcox (Oracle) * vma_alloc_folio - Allocate a folio for a VMA. 2147eb350739SMatthew Wilcox (Oracle) * @gfp: GFP flags. 2148adf88aa8SMatthew Wilcox (Oracle) * @order: Order of the folio. 21491da177e4SLinus Torvalds * @vma: Pointer to VMA or NULL if not available. 2150eb350739SMatthew Wilcox (Oracle) * @addr: Virtual address of the allocation. Must be inside @vma. 2151eb350739SMatthew Wilcox (Oracle) * @hugepage: For hugepages try only the preferred node if possible. 21521da177e4SLinus Torvalds * 2153adf88aa8SMatthew Wilcox (Oracle) * Allocate a folio for a specific address in @vma, using the appropriate 2154eb350739SMatthew Wilcox (Oracle) * NUMA policy. When @vma is not NULL the caller must hold the mmap_lock 2155eb350739SMatthew Wilcox (Oracle) * of the mm_struct of the VMA to prevent it from going away. Should be 2156adf88aa8SMatthew Wilcox (Oracle) * used for all allocations for folios that will be mapped into user space. 2157eb350739SMatthew Wilcox (Oracle) * 2158adf88aa8SMatthew Wilcox (Oracle) * Return: The folio on success or NULL if allocation fails. 
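 *
 * Usage sketch (illustrative; modeled on, but not quoting, a fault
 * handler):
 *
 *	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
 *	if (!folio)
 *		return VM_FAULT_OOM;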
21591da177e4SLinus Torvalds */ 2160adf88aa8SMatthew Wilcox (Oracle) struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma, 2161be1a13ebSMichal Hocko unsigned long addr, bool hugepage) 21621da177e4SLinus Torvalds { 2163cc9a6c87SMel Gorman struct mempolicy *pol; 2164be1a13ebSMichal Hocko int node = numa_node_id(); 2165adf88aa8SMatthew Wilcox (Oracle) struct folio *folio; 216604ec6264SVlastimil Babka int preferred_nid; 2167be97a41bSVlastimil Babka nodemask_t *nmask; 21681da177e4SLinus Torvalds 2169dd6eecb9SOleg Nesterov pol = get_vma_policy(vma, addr); 2170cc9a6c87SMel Gorman 2171be97a41bSVlastimil Babka if (pol->mode == MPOL_INTERLEAVE) { 2172adf88aa8SMatthew Wilcox (Oracle) struct page *page; 21731da177e4SLinus Torvalds unsigned nid; 21745da7ca86SChristoph Lameter 21758eac563cSAndi Kleen nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order); 217652cd3b07SLee Schermerhorn mpol_cond_put(pol); 2177adf88aa8SMatthew Wilcox (Oracle) gfp |= __GFP_COMP; 21780bbbc0b3SAndrea Arcangeli page = alloc_page_interleave(gfp, order, nid); 2179adf88aa8SMatthew Wilcox (Oracle) if (page && order > 1) 2180adf88aa8SMatthew Wilcox (Oracle) prep_transhuge_page(page); 2181adf88aa8SMatthew Wilcox (Oracle) folio = (struct folio *)page; 2182be97a41bSVlastimil Babka goto out; 21831da177e4SLinus Torvalds } 21841da177e4SLinus Torvalds 21854c54d949SFeng Tang if (pol->mode == MPOL_PREFERRED_MANY) { 2186adf88aa8SMatthew Wilcox (Oracle) struct page *page; 2187adf88aa8SMatthew Wilcox (Oracle) 2188c0455116SAneesh Kumar K.V node = policy_node(gfp, pol, node); 2189adf88aa8SMatthew Wilcox (Oracle) gfp |= __GFP_COMP; 21904c54d949SFeng Tang page = alloc_pages_preferred_many(gfp, order, node, pol); 21914c54d949SFeng Tang mpol_cond_put(pol); 2192adf88aa8SMatthew Wilcox (Oracle) if (page && order > 1) 2193adf88aa8SMatthew Wilcox (Oracle) prep_transhuge_page(page); 2194adf88aa8SMatthew Wilcox (Oracle) folio = (struct folio *)page; 21954c54d949SFeng Tang goto out; 21964c54d949SFeng Tang } 21974c54d949SFeng Tang 219819deb769SDavid Rientjes if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) { 219919deb769SDavid Rientjes int hpage_node = node; 220019deb769SDavid Rientjes 220119deb769SDavid Rientjes /* 220219deb769SDavid Rientjes * For hugepage allocation and non-interleave policy which 220319deb769SDavid Rientjes * allows the current node (or other explicitly preferred 220419deb769SDavid Rientjes * node) we only try to allocate from the current/preferred 220519deb769SDavid Rientjes * node and don't fall back to other nodes, as the cost of 220619deb769SDavid Rientjes * remote accesses would likely offset THP benefits. 220719deb769SDavid Rientjes * 2208b27abaccSDave Hansen * If the policy is interleave or does not allow the current 220919deb769SDavid Rientjes * node in its nodemask, we allocate the standard way. 221019deb769SDavid Rientjes */ 22117858d7bcSFeng Tang if (pol->mode == MPOL_PREFERRED) 2212269fbe72SBen Widawsky hpage_node = first_node(pol->nodes); 221319deb769SDavid Rientjes 221419deb769SDavid Rientjes nmask = policy_nodemask(gfp, pol); 221519deb769SDavid Rientjes if (!nmask || node_isset(hpage_node, *nmask)) { 221619deb769SDavid Rientjes mpol_cond_put(pol); 2217cc638f32SVlastimil Babka /* 2218cc638f32SVlastimil Babka * First, try to allocate THP only on local node, but 2219cc638f32SVlastimil Babka * don't reclaim unnecessarily, just compact. 
2220cc638f32SVlastimil Babka */ 2221adf88aa8SMatthew Wilcox (Oracle) folio = __folio_alloc_node(gfp | __GFP_THISNODE | 2222adf88aa8SMatthew Wilcox (Oracle) __GFP_NORETRY, order, hpage_node); 222376e654ccSDavid Rientjes 222476e654ccSDavid Rientjes /* 222576e654ccSDavid Rientjes * If hugepage allocations are configured to always 222676e654ccSDavid Rientjes * synchronous compact or the vma has been madvised 222776e654ccSDavid Rientjes * to prefer hugepage backing, retry allowing remote 2228cc638f32SVlastimil Babka * memory with both reclaim and compact as well. 222976e654ccSDavid Rientjes */ 2230adf88aa8SMatthew Wilcox (Oracle) if (!folio && (gfp & __GFP_DIRECT_RECLAIM)) 2231adf88aa8SMatthew Wilcox (Oracle) folio = __folio_alloc(gfp, order, hpage_node, 2232adf88aa8SMatthew Wilcox (Oracle) nmask); 223376e654ccSDavid Rientjes 223419deb769SDavid Rientjes goto out; 223519deb769SDavid Rientjes } 223619deb769SDavid Rientjes } 223719deb769SDavid Rientjes 2238077fcf11SAneesh Kumar K.V nmask = policy_nodemask(gfp, pol); 223904ec6264SVlastimil Babka preferred_nid = policy_node(gfp, pol, node); 2240adf88aa8SMatthew Wilcox (Oracle) folio = __folio_alloc(gfp, order, preferred_nid, nmask); 2241d51e9894SVlastimil Babka mpol_cond_put(pol); 2242be97a41bSVlastimil Babka out: 2243f584b680SMatthew Wilcox (Oracle) return folio; 2244f584b680SMatthew Wilcox (Oracle) } 2245adf88aa8SMatthew Wilcox (Oracle) EXPORT_SYMBOL(vma_alloc_folio); 2246f584b680SMatthew Wilcox (Oracle) 22471da177e4SLinus Torvalds /** 2248d7f946d0SMatthew Wilcox (Oracle) * alloc_pages - Allocate pages. 22496421ec76SMatthew Wilcox (Oracle) * @gfp: GFP flags. 22506421ec76SMatthew Wilcox (Oracle) * @order: Power of two of number of pages to allocate. 22511da177e4SLinus Torvalds * 22526421ec76SMatthew Wilcox (Oracle) * Allocate 1 << @order contiguous pages. The physical address of the 22536421ec76SMatthew Wilcox (Oracle) * first page is naturally aligned (eg an order-3 allocation will be aligned 22546421ec76SMatthew Wilcox (Oracle) * to a multiple of 8 * PAGE_SIZE bytes). The NUMA policy of the current 22556421ec76SMatthew Wilcox (Oracle) * process is honoured when in process context. 22561da177e4SLinus Torvalds * 22576421ec76SMatthew Wilcox (Oracle) * Context: Can be called from any context, providing the appropriate GFP 22586421ec76SMatthew Wilcox (Oracle) * flags are used. 22596421ec76SMatthew Wilcox (Oracle) * Return: The page on success or NULL if allocation fails. 
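 *
 * A minimal sketch of a typical call, allocating and later freeing an
 * order-1 (two page) buffer:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 1);
 *
 *	if (page) {
 *		void *buf = page_address(page);
 *		...
 *		__free_pages(page, 1);
 *	}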
22601da177e4SLinus Torvalds */ 2261d7f946d0SMatthew Wilcox (Oracle) struct page *alloc_pages(gfp_t gfp, unsigned order) 22621da177e4SLinus Torvalds { 22638d90274bSOleg Nesterov struct mempolicy *pol = &default_policy; 2264c0ff7453SMiao Xie struct page *page; 22651da177e4SLinus Torvalds 22668d90274bSOleg Nesterov if (!in_interrupt() && !(gfp & __GFP_THISNODE)) 22678d90274bSOleg Nesterov pol = get_task_policy(current); 226852cd3b07SLee Schermerhorn 226952cd3b07SLee Schermerhorn /* 227052cd3b07SLee Schermerhorn * No reference counting needed for current->mempolicy 227152cd3b07SLee Schermerhorn * nor system default_policy 227252cd3b07SLee Schermerhorn */ 227345c4745aSLee Schermerhorn if (pol->mode == MPOL_INTERLEAVE) 2274c0ff7453SMiao Xie page = alloc_page_interleave(gfp, order, interleave_nodes(pol)); 22754c54d949SFeng Tang else if (pol->mode == MPOL_PREFERRED_MANY) 22764c54d949SFeng Tang page = alloc_pages_preferred_many(gfp, order, 2277c0455116SAneesh Kumar K.V policy_node(gfp, pol, numa_node_id()), pol); 2278c0ff7453SMiao Xie else 227984172f4bSMatthew Wilcox (Oracle) page = __alloc_pages(gfp, order, 228004ec6264SVlastimil Babka policy_node(gfp, pol, numa_node_id()), 22815c4b4be3SAndi Kleen policy_nodemask(gfp, pol)); 2282cc9a6c87SMel Gorman 2283c0ff7453SMiao Xie return page; 22841da177e4SLinus Torvalds } 2285d7f946d0SMatthew Wilcox (Oracle) EXPORT_SYMBOL(alloc_pages); 22861da177e4SLinus Torvalds 2287cc09cb13SMatthew Wilcox (Oracle) struct folio *folio_alloc(gfp_t gfp, unsigned order) 2288cc09cb13SMatthew Wilcox (Oracle) { 2289cc09cb13SMatthew Wilcox (Oracle) struct page *page = alloc_pages(gfp | __GFP_COMP, order); 2290cc09cb13SMatthew Wilcox (Oracle) 2291cc09cb13SMatthew Wilcox (Oracle) if (page && order > 1) 2292cc09cb13SMatthew Wilcox (Oracle) prep_transhuge_page(page); 2293cc09cb13SMatthew Wilcox (Oracle) return (struct folio *)page; 2294cc09cb13SMatthew Wilcox (Oracle) } 2295cc09cb13SMatthew Wilcox (Oracle) EXPORT_SYMBOL(folio_alloc); 2296cc09cb13SMatthew Wilcox (Oracle) 2297c00b6b96SChen Wandun static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp, 2298c00b6b96SChen Wandun struct mempolicy *pol, unsigned long nr_pages, 2299c00b6b96SChen Wandun struct page **page_array) 2300c00b6b96SChen Wandun { 2301c00b6b96SChen Wandun int nodes; 2302c00b6b96SChen Wandun unsigned long nr_pages_per_node; 2303c00b6b96SChen Wandun int delta; 2304c00b6b96SChen Wandun int i; 2305c00b6b96SChen Wandun unsigned long nr_allocated; 2306c00b6b96SChen Wandun unsigned long total_allocated = 0; 2307c00b6b96SChen Wandun 2308c00b6b96SChen Wandun nodes = nodes_weight(pol->nodes); 2309c00b6b96SChen Wandun nr_pages_per_node = nr_pages / nodes; 2310c00b6b96SChen Wandun delta = nr_pages - nodes * nr_pages_per_node; 2311c00b6b96SChen Wandun 2312c00b6b96SChen Wandun for (i = 0; i < nodes; i++) { 2313c00b6b96SChen Wandun if (delta) { 2314c00b6b96SChen Wandun nr_allocated = __alloc_pages_bulk(gfp, 2315c00b6b96SChen Wandun interleave_nodes(pol), NULL, 2316c00b6b96SChen Wandun nr_pages_per_node + 1, NULL, 2317c00b6b96SChen Wandun page_array); 2318c00b6b96SChen Wandun delta--; 2319c00b6b96SChen Wandun } else { 2320c00b6b96SChen Wandun nr_allocated = __alloc_pages_bulk(gfp, 2321c00b6b96SChen Wandun interleave_nodes(pol), NULL, 2322c00b6b96SChen Wandun nr_pages_per_node, NULL, page_array); 2323c00b6b96SChen Wandun } 2324c00b6b96SChen Wandun 2325c00b6b96SChen Wandun page_array += nr_allocated; 2326c00b6b96SChen Wandun total_allocated += nr_allocated; 2327c00b6b96SChen Wandun } 2328c00b6b96SChen Wandun 2329c00b6b96SChen 
Wandun 	return total_allocated;
2330c00b6b96SChen Wandun }
2331c00b6b96SChen Wandun 
2332c00b6b96SChen Wandun static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid,
2333c00b6b96SChen Wandun 		struct mempolicy *pol, unsigned long nr_pages,
2334c00b6b96SChen Wandun 		struct page **page_array)
2335c00b6b96SChen Wandun {
2336c00b6b96SChen Wandun 	gfp_t preferred_gfp;
2337c00b6b96SChen Wandun 	unsigned long nr_allocated = 0;
2338c00b6b96SChen Wandun 
2339c00b6b96SChen Wandun 	preferred_gfp = gfp | __GFP_NOWARN;
2340c00b6b96SChen Wandun 	preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2341c00b6b96SChen Wandun 
2342c00b6b96SChen Wandun 	nr_allocated = __alloc_pages_bulk(preferred_gfp, nid, &pol->nodes,
2343c00b6b96SChen Wandun 					   nr_pages, NULL, page_array);
2344c00b6b96SChen Wandun 
2345c00b6b96SChen Wandun 	if (nr_allocated < nr_pages)
2346c00b6b96SChen Wandun 		nr_allocated += __alloc_pages_bulk(gfp, numa_node_id(), NULL,
2347c00b6b96SChen Wandun 				nr_pages - nr_allocated, NULL,
2348c00b6b96SChen Wandun 				page_array + nr_allocated);
2349c00b6b96SChen Wandun 	return nr_allocated;
2350c00b6b96SChen Wandun }
2351c00b6b96SChen Wandun 
2352c00b6b96SChen Wandun /* Bulk page allocation and the mempolicy should be considered at the
2353c00b6b96SChen Wandun  * same time in some situations, such as vmalloc.
2354c00b6b96SChen Wandun  *
2355c00b6b96SChen Wandun  * It can accelerate memory allocation, especially for interleaved
2356c00b6b96SChen Wandun  * allocations.
2357c00b6b96SChen Wandun  */
2358c00b6b96SChen Wandun unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp,
2359c00b6b96SChen Wandun 		unsigned long nr_pages, struct page **page_array)
2360c00b6b96SChen Wandun {
2361c00b6b96SChen Wandun 	struct mempolicy *pol = &default_policy;
2362c00b6b96SChen Wandun 
2363c00b6b96SChen Wandun 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2364c00b6b96SChen Wandun 		pol = get_task_policy(current);
2365c00b6b96SChen Wandun 
2366c00b6b96SChen Wandun 	if (pol->mode == MPOL_INTERLEAVE)
2367c00b6b96SChen Wandun 		return alloc_pages_bulk_array_interleave(gfp, pol,
2368c00b6b96SChen Wandun 							 nr_pages, page_array);
2369c00b6b96SChen Wandun 
2370c00b6b96SChen Wandun 	if (pol->mode == MPOL_PREFERRED_MANY)
2371c00b6b96SChen Wandun 		return alloc_pages_bulk_array_preferred_many(gfp,
2372c00b6b96SChen Wandun 				numa_node_id(), pol, nr_pages, page_array);
2373c00b6b96SChen Wandun 
2374c00b6b96SChen Wandun 	return __alloc_pages_bulk(gfp, policy_node(gfp, pol, numa_node_id()),
2375c00b6b96SChen Wandun 				  policy_nodemask(gfp, pol), nr_pages, NULL,
2376c00b6b96SChen Wandun 				  page_array);
2377c00b6b96SChen Wandun }
2378c00b6b96SChen Wandun 
2379ef0855d3SOleg Nesterov int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2380ef0855d3SOleg Nesterov {
2381ef0855d3SOleg Nesterov 	struct mempolicy *pol = mpol_dup(vma_policy(src));
2382ef0855d3SOleg Nesterov 
2383ef0855d3SOleg Nesterov 	if (IS_ERR(pol))
2384ef0855d3SOleg Nesterov 		return PTR_ERR(pol);
2385ef0855d3SOleg Nesterov 	dst->vm_policy = pol;
2386ef0855d3SOleg Nesterov 	return 0;
2387ef0855d3SOleg Nesterov }
2388ef0855d3SOleg Nesterov 
23894225399aSPaul Jackson /*
2390846a16bfSLee Schermerhorn  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
23914225399aSPaul Jackson  * rebinds the mempolicy it's copying by calling mpol_rebind_policy()
23924225399aSPaul Jackson  * with the mems_allowed returned by cpuset_mems_allowed(). This
23934225399aSPaul Jackson  * keeps mempolicies cpuset relative after its cpuset moves.
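 *
 * (Worked example for alloc_pages_bulk_array_interleave() above: with
 * nodes_weight(pol->nodes) == 4 and nr_pages == 10, nr_pages_per_node
 * is 2 and delta is 2, so the first two interleave nodes get 3 pages
 * each and the remaining two get 2 each: 3 + 3 + 2 + 2 = 10.)
 *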
23944225399aSPaul Jackson  * See further kernel/cpuset.c update_nodemask().
2395708c1bbcSMiao Xie  *
2396708c1bbcSMiao Xie  * current's mempolicy may be rebound by another task (the task that changes
2397708c1bbcSMiao Xie  * the cpuset's mems), so we needn't do rebind work for the current task.
23984225399aSPaul Jackson  */
23994225399aSPaul Jackson 
2400846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */
2401846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old)
24021da177e4SLinus Torvalds {
24031da177e4SLinus Torvalds 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
24041da177e4SLinus Torvalds 
24051da177e4SLinus Torvalds 	if (!new)
24061da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
2407708c1bbcSMiao Xie 
2408708c1bbcSMiao Xie 	/* task's mempolicy is protected by alloc_lock */
2409708c1bbcSMiao Xie 	if (old == current->mempolicy) {
2410708c1bbcSMiao Xie 		task_lock(current);
2411708c1bbcSMiao Xie 		*new = *old;
2412708c1bbcSMiao Xie 		task_unlock(current);
2413708c1bbcSMiao Xie 	} else
2414708c1bbcSMiao Xie 		*new = *old;
2415708c1bbcSMiao Xie 
24164225399aSPaul Jackson 	if (current_cpuset_is_being_rebound()) {
24174225399aSPaul Jackson 		nodemask_t mems = cpuset_mems_allowed(current);
2418213980c0SVlastimil Babka 		mpol_rebind_policy(new, &mems);
24194225399aSPaul Jackson 	}
24201da177e4SLinus Torvalds 	atomic_set(&new->refcnt, 1);
24211da177e4SLinus Torvalds 	return new;
24221da177e4SLinus Torvalds }
24231da177e4SLinus Torvalds 
24241da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */
2425fcfb4dccSKOSAKI Motohiro bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
24261da177e4SLinus Torvalds {
24271da177e4SLinus Torvalds 	if (!a || !b)
2428fcfb4dccSKOSAKI Motohiro 		return false;
242945c4745aSLee Schermerhorn 	if (a->mode != b->mode)
2430fcfb4dccSKOSAKI Motohiro 		return false;
243119800502SBob Liu 	if (a->flags != b->flags)
2432fcfb4dccSKOSAKI Motohiro 		return false;
2433c6018b4bSAneesh Kumar K.V 	if (a->home_node != b->home_node)
2434c6018b4bSAneesh Kumar K.V 		return false;
243519800502SBob Liu 	if (mpol_store_user_nodemask(a))
243619800502SBob Liu 		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2437fcfb4dccSKOSAKI Motohiro 			return false;
243819800502SBob Liu 
243945c4745aSLee Schermerhorn 	switch (a->mode) {
244019770b32SMel Gorman 	case MPOL_BIND:
24411da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
24421da177e4SLinus Torvalds 	case MPOL_PREFERRED:
2443b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
2444269fbe72SBen Widawsky 		return !!nodes_equal(a->nodes, b->nodes);
24457858d7bcSFeng Tang 	case MPOL_LOCAL:
24467858d7bcSFeng Tang 		return true;
24471da177e4SLinus Torvalds 	default:
24481da177e4SLinus Torvalds 		BUG();
2449fcfb4dccSKOSAKI Motohiro 		return false;
24501da177e4SLinus Torvalds 	}
24511da177e4SLinus Torvalds }
24521da177e4SLinus Torvalds 
24531da177e4SLinus Torvalds /*
24541da177e4SLinus Torvalds  * Shared memory backing store policy support.
24551da177e4SLinus Torvalds  *
24561da177e4SLinus Torvalds  * Remember policies even when nobody has shared memory mapped.
24571da177e4SLinus Torvalds  * The policies are kept in a red-black tree linked from the inode.
24584a8c7bb5SNathan Zimmer  * They are protected by the sp->lock rwlock, which should be held
24591da177e4SLinus Torvalds  * for any accesses to the tree.
24601da177e4SLinus Torvalds  */
24611da177e4SLinus Torvalds 
24624a8c7bb5SNathan Zimmer /*
24634a8c7bb5SNathan Zimmer  * lookup first element intersecting start-end.
Caller holds sp->lock for 24644a8c7bb5SNathan Zimmer * reading or for writing 24654a8c7bb5SNathan Zimmer */ 24661da177e4SLinus Torvalds static struct sp_node * 24671da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end) 24681da177e4SLinus Torvalds { 24691da177e4SLinus Torvalds struct rb_node *n = sp->root.rb_node; 24701da177e4SLinus Torvalds 24711da177e4SLinus Torvalds while (n) { 24721da177e4SLinus Torvalds struct sp_node *p = rb_entry(n, struct sp_node, nd); 24731da177e4SLinus Torvalds 24741da177e4SLinus Torvalds if (start >= p->end) 24751da177e4SLinus Torvalds n = n->rb_right; 24761da177e4SLinus Torvalds else if (end <= p->start) 24771da177e4SLinus Torvalds n = n->rb_left; 24781da177e4SLinus Torvalds else 24791da177e4SLinus Torvalds break; 24801da177e4SLinus Torvalds } 24811da177e4SLinus Torvalds if (!n) 24821da177e4SLinus Torvalds return NULL; 24831da177e4SLinus Torvalds for (;;) { 24841da177e4SLinus Torvalds struct sp_node *w = NULL; 24851da177e4SLinus Torvalds struct rb_node *prev = rb_prev(n); 24861da177e4SLinus Torvalds if (!prev) 24871da177e4SLinus Torvalds break; 24881da177e4SLinus Torvalds w = rb_entry(prev, struct sp_node, nd); 24891da177e4SLinus Torvalds if (w->end <= start) 24901da177e4SLinus Torvalds break; 24911da177e4SLinus Torvalds n = prev; 24921da177e4SLinus Torvalds } 24931da177e4SLinus Torvalds return rb_entry(n, struct sp_node, nd); 24941da177e4SLinus Torvalds } 24951da177e4SLinus Torvalds 24964a8c7bb5SNathan Zimmer /* 24974a8c7bb5SNathan Zimmer * Insert a new shared policy into the list. Caller holds sp->lock for 24984a8c7bb5SNathan Zimmer * writing. 24994a8c7bb5SNathan Zimmer */ 25001da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new) 25011da177e4SLinus Torvalds { 25021da177e4SLinus Torvalds struct rb_node **p = &sp->root.rb_node; 25031da177e4SLinus Torvalds struct rb_node *parent = NULL; 25041da177e4SLinus Torvalds struct sp_node *nd; 25051da177e4SLinus Torvalds 25061da177e4SLinus Torvalds while (*p) { 25071da177e4SLinus Torvalds parent = *p; 25081da177e4SLinus Torvalds nd = rb_entry(parent, struct sp_node, nd); 25091da177e4SLinus Torvalds if (new->start < nd->start) 25101da177e4SLinus Torvalds p = &(*p)->rb_left; 25111da177e4SLinus Torvalds else if (new->end > nd->end) 25121da177e4SLinus Torvalds p = &(*p)->rb_right; 25131da177e4SLinus Torvalds else 25141da177e4SLinus Torvalds BUG(); 25151da177e4SLinus Torvalds } 25161da177e4SLinus Torvalds rb_link_node(&new->nd, parent, p); 25171da177e4SLinus Torvalds rb_insert_color(&new->nd, &sp->root); 2518140d5a49SPaul Mundt pr_debug("inserting %lx-%lx: %d\n", new->start, new->end, 251945c4745aSLee Schermerhorn new->policy ? 
new->policy->mode : 0); 25201da177e4SLinus Torvalds } 25211da177e4SLinus Torvalds 25221da177e4SLinus Torvalds /* Find shared policy intersecting idx */ 25231da177e4SLinus Torvalds struct mempolicy * 25241da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx) 25251da177e4SLinus Torvalds { 25261da177e4SLinus Torvalds struct mempolicy *pol = NULL; 25271da177e4SLinus Torvalds struct sp_node *sn; 25281da177e4SLinus Torvalds 25291da177e4SLinus Torvalds if (!sp->root.rb_node) 25301da177e4SLinus Torvalds return NULL; 25314a8c7bb5SNathan Zimmer read_lock(&sp->lock); 25321da177e4SLinus Torvalds sn = sp_lookup(sp, idx, idx+1); 25331da177e4SLinus Torvalds if (sn) { 25341da177e4SLinus Torvalds mpol_get(sn->policy); 25351da177e4SLinus Torvalds pol = sn->policy; 25361da177e4SLinus Torvalds } 25374a8c7bb5SNathan Zimmer read_unlock(&sp->lock); 25381da177e4SLinus Torvalds return pol; 25391da177e4SLinus Torvalds } 25401da177e4SLinus Torvalds 254163f74ca2SKOSAKI Motohiro static void sp_free(struct sp_node *n) 254263f74ca2SKOSAKI Motohiro { 254363f74ca2SKOSAKI Motohiro mpol_put(n->policy); 254463f74ca2SKOSAKI Motohiro kmem_cache_free(sn_cache, n); 254563f74ca2SKOSAKI Motohiro } 254663f74ca2SKOSAKI Motohiro 2547771fb4d8SLee Schermerhorn /** 2548771fb4d8SLee Schermerhorn * mpol_misplaced - check whether current page node is valid in policy 2549771fb4d8SLee Schermerhorn * 2550b46e14acSFabian Frederick * @page: page to be checked 2551b46e14acSFabian Frederick * @vma: vm area where page mapped 2552b46e14acSFabian Frederick * @addr: virtual address where page mapped 2553771fb4d8SLee Schermerhorn * 2554771fb4d8SLee Schermerhorn * Lookup current policy node id for vma,addr and "compare to" page's 25555f076944SMatthew Wilcox (Oracle) * node id. Policy determination "mimics" alloc_page_vma(). 2556771fb4d8SLee Schermerhorn * Called from fault path where we know the vma and faulting address. 25575f076944SMatthew Wilcox (Oracle) * 2558062db293SBaolin Wang * Return: NUMA_NO_NODE if the page is in a node that is valid for this 2559062db293SBaolin Wang * policy, or a suitable node ID to allocate a replacement page from. 
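 *
 * A sketch of the intended use from the NUMA hinting fault path
 * (simplified; the actual caller is the fault handling code in
 * mm/memory.c):
 *
 *	int target_nid = mpol_misplaced(page, vma, addr);
 *
 *	if (target_nid != NUMA_NO_NODE)
 *		migrate_misplaced_page(page, vma, target_nid);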
2560771fb4d8SLee Schermerhorn */ 2561771fb4d8SLee Schermerhorn int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr) 2562771fb4d8SLee Schermerhorn { 2563771fb4d8SLee Schermerhorn struct mempolicy *pol; 2564c33d6c06SMel Gorman struct zoneref *z; 2565771fb4d8SLee Schermerhorn int curnid = page_to_nid(page); 2566771fb4d8SLee Schermerhorn unsigned long pgoff; 256790572890SPeter Zijlstra int thiscpu = raw_smp_processor_id(); 256890572890SPeter Zijlstra int thisnid = cpu_to_node(thiscpu); 256998fa15f3SAnshuman Khandual int polnid = NUMA_NO_NODE; 2570062db293SBaolin Wang int ret = NUMA_NO_NODE; 2571771fb4d8SLee Schermerhorn 2572dd6eecb9SOleg Nesterov pol = get_vma_policy(vma, addr); 2573771fb4d8SLee Schermerhorn if (!(pol->flags & MPOL_F_MOF)) 2574771fb4d8SLee Schermerhorn goto out; 2575771fb4d8SLee Schermerhorn 2576771fb4d8SLee Schermerhorn switch (pol->mode) { 2577771fb4d8SLee Schermerhorn case MPOL_INTERLEAVE: 2578771fb4d8SLee Schermerhorn pgoff = vma->vm_pgoff; 2579771fb4d8SLee Schermerhorn pgoff += (addr - vma->vm_start) >> PAGE_SHIFT; 258098c70baaSLaurent Dufour polnid = offset_il_node(pol, pgoff); 2581771fb4d8SLee Schermerhorn break; 2582771fb4d8SLee Schermerhorn 2583771fb4d8SLee Schermerhorn case MPOL_PREFERRED: 2584b27abaccSDave Hansen if (node_isset(curnid, pol->nodes)) 2585b27abaccSDave Hansen goto out; 2586269fbe72SBen Widawsky polnid = first_node(pol->nodes); 2587771fb4d8SLee Schermerhorn break; 2588771fb4d8SLee Schermerhorn 25897858d7bcSFeng Tang case MPOL_LOCAL: 25907858d7bcSFeng Tang polnid = numa_node_id(); 25917858d7bcSFeng Tang break; 25927858d7bcSFeng Tang 2593771fb4d8SLee Schermerhorn case MPOL_BIND: 2594bda420b9SHuang Ying /* Optimize placement among multiple nodes via NUMA balancing */ 2595bda420b9SHuang Ying if (pol->flags & MPOL_F_MORON) { 2596269fbe72SBen Widawsky if (node_isset(thisnid, pol->nodes)) 2597bda420b9SHuang Ying break; 2598bda420b9SHuang Ying goto out; 2599bda420b9SHuang Ying } 2600b27abaccSDave Hansen fallthrough; 2601c33d6c06SMel Gorman 2602b27abaccSDave Hansen case MPOL_PREFERRED_MANY: 2603771fb4d8SLee Schermerhorn /* 2604771fb4d8SLee Schermerhorn * use current page if in policy nodemask, 2605771fb4d8SLee Schermerhorn * else select nearest allowed node, if any. 2606771fb4d8SLee Schermerhorn * If no allowed nodes, use current [!misplaced]. 
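 * For example, with pol->nodes = {2,3} and a page resident on node 0,
 * the zonelist walk below picks whichever of nodes 2 and 3 is nearest
 * to the faulting CPU's node.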
2607771fb4d8SLee Schermerhorn 		 */
2608269fbe72SBen Widawsky 		if (node_isset(curnid, pol->nodes))
2609771fb4d8SLee Schermerhorn 			goto out;
2610c33d6c06SMel Gorman 		z = first_zones_zonelist(
2611771fb4d8SLee Schermerhorn 				node_zonelist(numa_node_id(), GFP_HIGHUSER),
2612771fb4d8SLee Schermerhorn 				gfp_zone(GFP_HIGHUSER),
2613269fbe72SBen Widawsky 				&pol->nodes);
2614c1093b74SPavel Tatashin 		polnid = zone_to_nid(z->zone);
2615771fb4d8SLee Schermerhorn 		break;
2616771fb4d8SLee Schermerhorn 
2617771fb4d8SLee Schermerhorn 	default:
2618771fb4d8SLee Schermerhorn 		BUG();
2619771fb4d8SLee Schermerhorn 	}
26205606e387SMel Gorman 
26215606e387SMel Gorman 	/* Migrate the page towards the node whose CPU is referencing it */
2622e42c8ff2SMel Gorman 	if (pol->flags & MPOL_F_MORON) {
262390572890SPeter Zijlstra 		polnid = thisnid;
26245606e387SMel Gorman 
262510f39042SRik van Riel 		if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2626de1c9ce6SRik van Riel 			goto out;
2627de1c9ce6SRik van Riel 	}
2628e42c8ff2SMel Gorman 
2629771fb4d8SLee Schermerhorn 	if (curnid != polnid)
2630771fb4d8SLee Schermerhorn 		ret = polnid;
2631771fb4d8SLee Schermerhorn out:
2632771fb4d8SLee Schermerhorn 	mpol_cond_put(pol);
2633771fb4d8SLee Schermerhorn 
2634771fb4d8SLee Schermerhorn 	return ret;
2635771fb4d8SLee Schermerhorn }
2636771fb4d8SLee Schermerhorn 
2637c11600e4SDavid Rientjes /*
2638c11600e4SDavid Rientjes  * Drop the (possibly final) reference to task->mempolicy. It needs to be
2639c11600e4SDavid Rientjes  * dropped after task->mempolicy is set to NULL so that any allocation done as
2640c11600e4SDavid Rientjes  * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2641c11600e4SDavid Rientjes  * policy.
2642c11600e4SDavid Rientjes  */
2643c11600e4SDavid Rientjes void mpol_put_task_policy(struct task_struct *task)
2644c11600e4SDavid Rientjes {
2645c11600e4SDavid Rientjes 	struct mempolicy *pol;
2646c11600e4SDavid Rientjes 
2647c11600e4SDavid Rientjes 	task_lock(task);
2648c11600e4SDavid Rientjes 	pol = task->mempolicy;
2649c11600e4SDavid Rientjes 	task->mempolicy = NULL;
2650c11600e4SDavid Rientjes 	task_unlock(task);
2651c11600e4SDavid Rientjes 	mpol_put(pol);
2652c11600e4SDavid Rientjes }
2653c11600e4SDavid Rientjes 
26541da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n)
26551da177e4SLinus Torvalds {
2656140d5a49SPaul Mundt 	pr_debug("deleting %lx-%lx\n", n->start, n->end);
26571da177e4SLinus Torvalds 	rb_erase(&n->nd, &sp->root);
265863f74ca2SKOSAKI Motohiro 	sp_free(n);
26591da177e4SLinus Torvalds }
26601da177e4SLinus Torvalds 
266142288fe3SMel Gorman static void sp_node_init(struct sp_node *node, unsigned long start,
266242288fe3SMel Gorman 			unsigned long end, struct mempolicy *pol)
266342288fe3SMel Gorman {
266442288fe3SMel Gorman 	node->start = start;
266542288fe3SMel Gorman 	node->end = end;
266642288fe3SMel Gorman 	node->policy = pol;
266742288fe3SMel Gorman }
266842288fe3SMel Gorman 
2669dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2670dbcb0f19SAdrian Bunk 				struct mempolicy *pol)
26711da177e4SLinus Torvalds {
2672869833f2SKOSAKI Motohiro 	struct sp_node *n;
2673869833f2SKOSAKI Motohiro 	struct mempolicy *newpol;
26741da177e4SLinus Torvalds 
2675869833f2SKOSAKI Motohiro 	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
26761da177e4SLinus Torvalds 	if (!n)
26771da177e4SLinus Torvalds 		return NULL;
2678869833f2SKOSAKI Motohiro 
2679869833f2SKOSAKI Motohiro 	newpol = mpol_dup(pol);
2680869833f2SKOSAKI Motohiro 	if (IS_ERR(newpol)) {
2681869833f2SKOSAKI
Motohiro kmem_cache_free(sn_cache, n); 2682869833f2SKOSAKI Motohiro return NULL; 2683869833f2SKOSAKI Motohiro } 2684869833f2SKOSAKI Motohiro newpol->flags |= MPOL_F_SHARED; 268542288fe3SMel Gorman sp_node_init(n, start, end, newpol); 2686869833f2SKOSAKI Motohiro 26871da177e4SLinus Torvalds return n; 26881da177e4SLinus Torvalds } 26891da177e4SLinus Torvalds 26901da177e4SLinus Torvalds /* Replace a policy range. */ 26911da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start, 26921da177e4SLinus Torvalds unsigned long end, struct sp_node *new) 26931da177e4SLinus Torvalds { 2694b22d127aSMel Gorman struct sp_node *n; 269542288fe3SMel Gorman struct sp_node *n_new = NULL; 269642288fe3SMel Gorman struct mempolicy *mpol_new = NULL; 2697b22d127aSMel Gorman int ret = 0; 26981da177e4SLinus Torvalds 269942288fe3SMel Gorman restart: 27004a8c7bb5SNathan Zimmer write_lock(&sp->lock); 27011da177e4SLinus Torvalds n = sp_lookup(sp, start, end); 27021da177e4SLinus Torvalds /* Take care of old policies in the same range. */ 27031da177e4SLinus Torvalds while (n && n->start < end) { 27041da177e4SLinus Torvalds struct rb_node *next = rb_next(&n->nd); 27051da177e4SLinus Torvalds if (n->start >= start) { 27061da177e4SLinus Torvalds if (n->end <= end) 27071da177e4SLinus Torvalds sp_delete(sp, n); 27081da177e4SLinus Torvalds else 27091da177e4SLinus Torvalds n->start = end; 27101da177e4SLinus Torvalds } else { 27111da177e4SLinus Torvalds /* Old policy spanning whole new range. */ 27121da177e4SLinus Torvalds if (n->end > end) { 271342288fe3SMel Gorman if (!n_new) 271442288fe3SMel Gorman goto alloc_new; 271542288fe3SMel Gorman 271642288fe3SMel Gorman *mpol_new = *n->policy; 271742288fe3SMel Gorman atomic_set(&mpol_new->refcnt, 1); 27187880639cSKOSAKI Motohiro sp_node_init(n_new, end, n->end, mpol_new); 27191da177e4SLinus Torvalds n->end = start; 27205ca39575SHillf Danton sp_insert(sp, n_new); 272142288fe3SMel Gorman n_new = NULL; 272242288fe3SMel Gorman mpol_new = NULL; 27231da177e4SLinus Torvalds break; 27241da177e4SLinus Torvalds } else 27251da177e4SLinus Torvalds n->end = start; 27261da177e4SLinus Torvalds } 27271da177e4SLinus Torvalds if (!next) 27281da177e4SLinus Torvalds break; 27291da177e4SLinus Torvalds n = rb_entry(next, struct sp_node, nd); 27301da177e4SLinus Torvalds } 27311da177e4SLinus Torvalds if (new) 27321da177e4SLinus Torvalds sp_insert(sp, new); 27334a8c7bb5SNathan Zimmer write_unlock(&sp->lock); 273442288fe3SMel Gorman ret = 0; 273542288fe3SMel Gorman 273642288fe3SMel Gorman err_out: 273742288fe3SMel Gorman if (mpol_new) 273842288fe3SMel Gorman mpol_put(mpol_new); 273942288fe3SMel Gorman if (n_new) 274042288fe3SMel Gorman kmem_cache_free(sn_cache, n_new); 274142288fe3SMel Gorman 2742b22d127aSMel Gorman return ret; 274342288fe3SMel Gorman 274442288fe3SMel Gorman alloc_new: 27454a8c7bb5SNathan Zimmer write_unlock(&sp->lock); 274642288fe3SMel Gorman ret = -ENOMEM; 274742288fe3SMel Gorman n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL); 274842288fe3SMel Gorman if (!n_new) 274942288fe3SMel Gorman goto err_out; 275042288fe3SMel Gorman mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL); 275142288fe3SMel Gorman if (!mpol_new) 275242288fe3SMel Gorman goto err_out; 27534ad09955SMiaohe Lin atomic_set(&mpol_new->refcnt, 1); 275442288fe3SMel Gorman goto restart; 27551da177e4SLinus Torvalds } 27561da177e4SLinus Torvalds 275771fe804bSLee Schermerhorn /** 275871fe804bSLee Schermerhorn * mpol_shared_policy_init - initialize shared policy for inode 275971fe804bSLee 
Schermerhorn * @sp: pointer to inode shared policy 276071fe804bSLee Schermerhorn * @mpol: struct mempolicy to install 276171fe804bSLee Schermerhorn * 276271fe804bSLee Schermerhorn * Install non-NULL @mpol in inode's shared policy rb-tree. 276371fe804bSLee Schermerhorn * On entry, the current task has a reference on a non-NULL @mpol. 276471fe804bSLee Schermerhorn * This must be released on exit. 27654bfc4495SKAMEZAWA Hiroyuki * This is called at get_inode() calls and we can use GFP_KERNEL. 276671fe804bSLee Schermerhorn */ 276771fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) 27687339ff83SRobin Holt { 276958568d2aSMiao Xie int ret; 277058568d2aSMiao Xie 277171fe804bSLee Schermerhorn sp->root = RB_ROOT; /* empty tree == default mempolicy */ 27724a8c7bb5SNathan Zimmer rwlock_init(&sp->lock); 27737339ff83SRobin Holt 277471fe804bSLee Schermerhorn if (mpol) { 27757339ff83SRobin Holt struct vm_area_struct pvma; 277671fe804bSLee Schermerhorn struct mempolicy *new; 27774bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 27787339ff83SRobin Holt 27794bfc4495SKAMEZAWA Hiroyuki if (!scratch) 27805c0c1654SLee Schermerhorn goto put_mpol; 278171fe804bSLee Schermerhorn /* contextualize the tmpfs mount point mempolicy */ 278271fe804bSLee Schermerhorn new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask); 278315d77835SLee Schermerhorn if (IS_ERR(new)) 27840cae3457SDan Carpenter goto free_scratch; /* no valid nodemask intersection */ 278558568d2aSMiao Xie 278658568d2aSMiao Xie task_lock(current); 27874bfc4495SKAMEZAWA Hiroyuki ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch); 278858568d2aSMiao Xie task_unlock(current); 278915d77835SLee Schermerhorn if (ret) 27905c0c1654SLee Schermerhorn goto put_new; 279171fe804bSLee Schermerhorn 279271fe804bSLee Schermerhorn /* Create pseudo-vma that contains just the policy */ 27932c4541e2SKirill A. Shutemov vma_init(&pvma, NULL); 279471fe804bSLee Schermerhorn pvma.vm_end = TASK_SIZE; /* policy covers entire file */ 279571fe804bSLee Schermerhorn mpol_set_shared_policy(sp, &pvma, new); /* adds ref */ 279615d77835SLee Schermerhorn 27975c0c1654SLee Schermerhorn put_new: 279871fe804bSLee Schermerhorn mpol_put(new); /* drop initial ref */ 27990cae3457SDan Carpenter free_scratch: 28004bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 28015c0c1654SLee Schermerhorn put_mpol: 28025c0c1654SLee Schermerhorn mpol_put(mpol); /* drop our incoming ref on sb mpol */ 28037339ff83SRobin Holt } 28047339ff83SRobin Holt } 28057339ff83SRobin Holt 28061da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info, 28071da177e4SLinus Torvalds struct vm_area_struct *vma, struct mempolicy *npol) 28081da177e4SLinus Torvalds { 28091da177e4SLinus Torvalds int err; 28101da177e4SLinus Torvalds struct sp_node *new = NULL; 28111da177e4SLinus Torvalds unsigned long sz = vma_pages(vma); 28121da177e4SLinus Torvalds 2813028fec41SDavid Rientjes pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n", 28141da177e4SLinus Torvalds vma->vm_pgoff, 281545c4745aSLee Schermerhorn sz, npol ? npol->mode : -1, 2816028fec41SDavid Rientjes npol ? npol->flags : -1, 2817269fbe72SBen Widawsky npol ? 
nodes_addr(npol->nodes)[0] : NUMA_NO_NODE); 28181da177e4SLinus Torvalds 28191da177e4SLinus Torvalds if (npol) { 28201da177e4SLinus Torvalds new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol); 28211da177e4SLinus Torvalds if (!new) 28221da177e4SLinus Torvalds return -ENOMEM; 28231da177e4SLinus Torvalds } 28241da177e4SLinus Torvalds err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new); 28251da177e4SLinus Torvalds if (err && new) 282663f74ca2SKOSAKI Motohiro sp_free(new); 28271da177e4SLinus Torvalds return err; 28281da177e4SLinus Torvalds } 28291da177e4SLinus Torvalds 28301da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */ 28311da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p) 28321da177e4SLinus Torvalds { 28331da177e4SLinus Torvalds struct sp_node *n; 28341da177e4SLinus Torvalds struct rb_node *next; 28351da177e4SLinus Torvalds 28361da177e4SLinus Torvalds if (!p->root.rb_node) 28371da177e4SLinus Torvalds return; 28384a8c7bb5SNathan Zimmer write_lock(&p->lock); 28391da177e4SLinus Torvalds next = rb_first(&p->root); 28401da177e4SLinus Torvalds while (next) { 28411da177e4SLinus Torvalds n = rb_entry(next, struct sp_node, nd); 28421da177e4SLinus Torvalds next = rb_next(&n->nd); 284363f74ca2SKOSAKI Motohiro sp_delete(p, n); 28441da177e4SLinus Torvalds } 28454a8c7bb5SNathan Zimmer write_unlock(&p->lock); 28461da177e4SLinus Torvalds } 28471da177e4SLinus Torvalds 28481a687c2eSMel Gorman #ifdef CONFIG_NUMA_BALANCING 2849c297663cSMel Gorman static int __initdata numabalancing_override; 28501a687c2eSMel Gorman 28511a687c2eSMel Gorman static void __init check_numabalancing_enable(void) 28521a687c2eSMel Gorman { 28531a687c2eSMel Gorman bool numabalancing_default = false; 28541a687c2eSMel Gorman 28551a687c2eSMel Gorman if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED)) 28561a687c2eSMel Gorman numabalancing_default = true; 28571a687c2eSMel Gorman 2858c297663cSMel Gorman /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */ 2859c297663cSMel Gorman if (numabalancing_override) 2860c297663cSMel Gorman set_numabalancing_state(numabalancing_override == 1); 2861c297663cSMel Gorman 2862b0dc2b9bSMel Gorman if (num_online_nodes() > 1 && !numabalancing_override) { 2863756a025fSJoe Perches pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n", 2864c297663cSMel Gorman numabalancing_default ? 
"Enabling" : "Disabling"); 28651a687c2eSMel Gorman set_numabalancing_state(numabalancing_default); 28661a687c2eSMel Gorman } 28671a687c2eSMel Gorman } 28681a687c2eSMel Gorman 28691a687c2eSMel Gorman static int __init setup_numabalancing(char *str) 28701a687c2eSMel Gorman { 28711a687c2eSMel Gorman int ret = 0; 28721a687c2eSMel Gorman if (!str) 28731a687c2eSMel Gorman goto out; 28741a687c2eSMel Gorman 28751a687c2eSMel Gorman if (!strcmp(str, "enable")) { 2876c297663cSMel Gorman numabalancing_override = 1; 28771a687c2eSMel Gorman ret = 1; 28781a687c2eSMel Gorman } else if (!strcmp(str, "disable")) { 2879c297663cSMel Gorman numabalancing_override = -1; 28801a687c2eSMel Gorman ret = 1; 28811a687c2eSMel Gorman } 28821a687c2eSMel Gorman out: 28831a687c2eSMel Gorman if (!ret) 28844a404beaSAndrew Morton pr_warn("Unable to parse numa_balancing=\n"); 28851a687c2eSMel Gorman 28861a687c2eSMel Gorman return ret; 28871a687c2eSMel Gorman } 28881a687c2eSMel Gorman __setup("numa_balancing=", setup_numabalancing); 28891a687c2eSMel Gorman #else 28901a687c2eSMel Gorman static inline void __init check_numabalancing_enable(void) 28911a687c2eSMel Gorman { 28921a687c2eSMel Gorman } 28931a687c2eSMel Gorman #endif /* CONFIG_NUMA_BALANCING */ 28941a687c2eSMel Gorman 28951da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */ 28961da177e4SLinus Torvalds void __init numa_policy_init(void) 28971da177e4SLinus Torvalds { 2898b71636e2SPaul Mundt nodemask_t interleave_nodes; 2899b71636e2SPaul Mundt unsigned long largest = 0; 2900b71636e2SPaul Mundt int nid, prefer = 0; 2901b71636e2SPaul Mundt 29021da177e4SLinus Torvalds policy_cache = kmem_cache_create("numa_policy", 29031da177e4SLinus Torvalds sizeof(struct mempolicy), 290420c2df83SPaul Mundt 0, SLAB_PANIC, NULL); 29051da177e4SLinus Torvalds 29061da177e4SLinus Torvalds sn_cache = kmem_cache_create("shared_policy_node", 29071da177e4SLinus Torvalds sizeof(struct sp_node), 290820c2df83SPaul Mundt 0, SLAB_PANIC, NULL); 29091da177e4SLinus Torvalds 29105606e387SMel Gorman for_each_node(nid) { 29115606e387SMel Gorman preferred_node_policy[nid] = (struct mempolicy) { 29125606e387SMel Gorman .refcnt = ATOMIC_INIT(1), 29135606e387SMel Gorman .mode = MPOL_PREFERRED, 29145606e387SMel Gorman .flags = MPOL_F_MOF | MPOL_F_MORON, 2915269fbe72SBen Widawsky .nodes = nodemask_of_node(nid), 29165606e387SMel Gorman }; 29175606e387SMel Gorman } 29185606e387SMel Gorman 2919b71636e2SPaul Mundt /* 2920b71636e2SPaul Mundt * Set interleaving policy for system init. Interleaving is only 2921b71636e2SPaul Mundt * enabled across suitably sized nodes (default is >= 16MB), or 2922b71636e2SPaul Mundt * fall back to the largest node if they're all smaller. 2923b71636e2SPaul Mundt */ 2924b71636e2SPaul Mundt nodes_clear(interleave_nodes); 292501f13bd6SLai Jiangshan for_each_node_state(nid, N_MEMORY) { 2926b71636e2SPaul Mundt unsigned long total_pages = node_present_pages(nid); 29271da177e4SLinus Torvalds 2928b71636e2SPaul Mundt /* Preserve the largest node */ 2929b71636e2SPaul Mundt if (largest < total_pages) { 2930b71636e2SPaul Mundt largest = total_pages; 2931b71636e2SPaul Mundt prefer = nid; 2932b71636e2SPaul Mundt } 2933b71636e2SPaul Mundt 2934b71636e2SPaul Mundt /* Interleave this node? 
*/ 2935b71636e2SPaul Mundt if ((total_pages << PAGE_SHIFT) >= (16 << 20)) 2936b71636e2SPaul Mundt node_set(nid, interleave_nodes); 2937b71636e2SPaul Mundt } 2938b71636e2SPaul Mundt 2939b71636e2SPaul Mundt /* All too small, use the largest */ 2940b71636e2SPaul Mundt if (unlikely(nodes_empty(interleave_nodes))) 2941b71636e2SPaul Mundt node_set(prefer, interleave_nodes); 2942b71636e2SPaul Mundt 2943028fec41SDavid Rientjes if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes)) 2944b1de0d13SMitchel Humpherys pr_err("%s: interleaving failed\n", __func__); 29451a687c2eSMel Gorman 29461a687c2eSMel Gorman check_numabalancing_enable(); 29471da177e4SLinus Torvalds } 29481da177e4SLinus Torvalds 29498bccd85fSChristoph Lameter /* Reset policy of current process to default */ 29501da177e4SLinus Torvalds void numa_default_policy(void) 29511da177e4SLinus Torvalds { 2952028fec41SDavid Rientjes do_set_mempolicy(MPOL_DEFAULT, 0, NULL); 29531da177e4SLinus Torvalds } 295468860ec1SPaul Jackson 29554225399aSPaul Jackson /* 2956095f1fc4SLee Schermerhorn * Parse and format mempolicy from/to strings 2957095f1fc4SLee Schermerhorn */ 2958095f1fc4SLee Schermerhorn 2959345ace9cSLee Schermerhorn static const char * const policy_modes[] = 2960345ace9cSLee Schermerhorn { 2961345ace9cSLee Schermerhorn [MPOL_DEFAULT] = "default", 2962345ace9cSLee Schermerhorn [MPOL_PREFERRED] = "prefer", 2963345ace9cSLee Schermerhorn [MPOL_BIND] = "bind", 2964345ace9cSLee Schermerhorn [MPOL_INTERLEAVE] = "interleave", 2965d3a71033SLee Schermerhorn [MPOL_LOCAL] = "local", 2966b27abaccSDave Hansen [MPOL_PREFERRED_MANY] = "prefer (many)", 2967345ace9cSLee Schermerhorn }; 29681a75a6c8SChristoph Lameter 2969095f1fc4SLee Schermerhorn 2970095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS 2971095f1fc4SLee Schermerhorn /** 2972f2a07f40SHugh Dickins * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option. 2973095f1fc4SLee Schermerhorn * @str: string containing mempolicy to parse 297471fe804bSLee Schermerhorn * @mpol: pointer to struct mempolicy pointer, returned on success. 
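 *
 * For the format described below, typical tmpfs mount options are
 * mpol=interleave:0-3, mpol=bind=static:0,2 or mpol=local.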
2975095f1fc4SLee Schermerhorn * 2976095f1fc4SLee Schermerhorn * Format of input: 2977095f1fc4SLee Schermerhorn * <mode>[=<flags>][:<nodelist>] 2978095f1fc4SLee Schermerhorn * 2979dad5b023SRandy Dunlap * Return: %0 on success, else %1 2980095f1fc4SLee Schermerhorn */ 2981a7a88b23SHugh Dickins int mpol_parse_str(char *str, struct mempolicy **mpol) 2982095f1fc4SLee Schermerhorn { 298371fe804bSLee Schermerhorn struct mempolicy *new = NULL; 2984f2a07f40SHugh Dickins unsigned short mode_flags; 298571fe804bSLee Schermerhorn nodemask_t nodes; 2986095f1fc4SLee Schermerhorn char *nodelist = strchr(str, ':'); 2987095f1fc4SLee Schermerhorn char *flags = strchr(str, '='); 2988dedf2c73Szhong jiang int err = 1, mode; 2989095f1fc4SLee Schermerhorn 2990c7a91bc7SDan Carpenter if (flags) 2991c7a91bc7SDan Carpenter *flags++ = '\0'; /* terminate mode string */ 2992c7a91bc7SDan Carpenter 2993095f1fc4SLee Schermerhorn if (nodelist) { 2994095f1fc4SLee Schermerhorn /* NUL-terminate mode or flags string */ 2995095f1fc4SLee Schermerhorn *nodelist++ = '\0'; 299671fe804bSLee Schermerhorn if (nodelist_parse(nodelist, nodes)) 2997095f1fc4SLee Schermerhorn goto out; 299801f13bd6SLai Jiangshan if (!nodes_subset(nodes, node_states[N_MEMORY])) 2999095f1fc4SLee Schermerhorn goto out; 300071fe804bSLee Schermerhorn } else 300171fe804bSLee Schermerhorn nodes_clear(nodes); 300271fe804bSLee Schermerhorn 3003dedf2c73Szhong jiang mode = match_string(policy_modes, MPOL_MAX, str); 3004dedf2c73Szhong jiang if (mode < 0) 3005095f1fc4SLee Schermerhorn goto out; 3006095f1fc4SLee Schermerhorn 300771fe804bSLee Schermerhorn switch (mode) { 3008095f1fc4SLee Schermerhorn case MPOL_PREFERRED: 300971fe804bSLee Schermerhorn /* 3010aa9f7d51SRandy Dunlap * Insist on a nodelist of one node only, although later 3011aa9f7d51SRandy Dunlap * we use first_node(nodes) to grab a single node, so here 3012aa9f7d51SRandy Dunlap * nodelist (or nodes) cannot be empty. 
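 * For example, prefer:2 is accepted here, while prefer:0-3 (more than
 * one node) and prefer: (an empty nodelist) are rejected.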
301371fe804bSLee Schermerhorn 		 */
3014095f1fc4SLee Schermerhorn 		if (nodelist) {
3015095f1fc4SLee Schermerhorn 			char *rest = nodelist;
3016095f1fc4SLee Schermerhorn 			while (isdigit(*rest))
3017095f1fc4SLee Schermerhorn 				rest++;
3018926f2ae0SKOSAKI Motohiro 			if (*rest)
3019926f2ae0SKOSAKI Motohiro 				goto out;
3020aa9f7d51SRandy Dunlap 			if (nodes_empty(nodes))
3021aa9f7d51SRandy Dunlap 				goto out;
3022095f1fc4SLee Schermerhorn 		}
3023095f1fc4SLee Schermerhorn 		break;
3024095f1fc4SLee Schermerhorn 	case MPOL_INTERLEAVE:
3025095f1fc4SLee Schermerhorn 		/*
3026095f1fc4SLee Schermerhorn 		 * Default to online nodes with memory if no nodelist
3027095f1fc4SLee Schermerhorn 		 */
3028095f1fc4SLee Schermerhorn 		if (!nodelist)
302901f13bd6SLai Jiangshan 			nodes = node_states[N_MEMORY];
30303f226aa1SLee Schermerhorn 		break;
303171fe804bSLee Schermerhorn 	case MPOL_LOCAL:
30323f226aa1SLee Schermerhorn 		/*
303371fe804bSLee Schermerhorn 		 * Don't allow a nodelist; mpol_new() checks flags
30343f226aa1SLee Schermerhorn 		 */
303571fe804bSLee Schermerhorn 		if (nodelist)
30363f226aa1SLee Schermerhorn 			goto out;
30373f226aa1SLee Schermerhorn 		break;
3038413b43deSRavikiran G Thirumalai 	case MPOL_DEFAULT:
3039413b43deSRavikiran G Thirumalai 		/*
3040413b43deSRavikiran G Thirumalai 		 * Insist on an empty nodelist
3041413b43deSRavikiran G Thirumalai 		 */
3042413b43deSRavikiran G Thirumalai 		if (!nodelist)
3043413b43deSRavikiran G Thirumalai 			err = 0;
3044413b43deSRavikiran G Thirumalai 		goto out;
3045b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
3046d69b2e63SKOSAKI Motohiro 	case MPOL_BIND:
304771fe804bSLee Schermerhorn 		/*
3048d69b2e63SKOSAKI Motohiro 		 * Insist on a nodelist
304971fe804bSLee Schermerhorn 		 */
3050d69b2e63SKOSAKI Motohiro 		if (!nodelist)
3051d69b2e63SKOSAKI Motohiro 			goto out;
3052095f1fc4SLee Schermerhorn 	}
3053095f1fc4SLee Schermerhorn 
305471fe804bSLee Schermerhorn 	mode_flags = 0;
3055095f1fc4SLee Schermerhorn 	if (flags) {
3056095f1fc4SLee Schermerhorn 		/*
3057095f1fc4SLee Schermerhorn 		 * Currently, we only support two mutually exclusive
3058095f1fc4SLee Schermerhorn 		 * mode flags.
3059095f1fc4SLee Schermerhorn 		 */
3060095f1fc4SLee Schermerhorn 		if (!strcmp(flags, "static"))
306171fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_STATIC_NODES;
3062095f1fc4SLee Schermerhorn 		else if (!strcmp(flags, "relative"))
306371fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_RELATIVE_NODES;
3064095f1fc4SLee Schermerhorn 		else
3065926f2ae0SKOSAKI Motohiro 			goto out;
3066095f1fc4SLee Schermerhorn 	}
306771fe804bSLee Schermerhorn 
306871fe804bSLee Schermerhorn 	new = mpol_new(mode, mode_flags, &nodes);
306971fe804bSLee Schermerhorn 	if (IS_ERR(new))
3070926f2ae0SKOSAKI Motohiro 		goto out;
3071926f2ae0SKOSAKI Motohiro 
3072f2a07f40SHugh Dickins 	/*
3073f2a07f40SHugh Dickins 	 * Save nodes for mpol_to_str() to show the tmpfs mount options
3074f2a07f40SHugh Dickins 	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
3075f2a07f40SHugh Dickins 	 */
3076269fbe72SBen Widawsky 	if (mode != MPOL_PREFERRED) {
3077269fbe72SBen Widawsky 		new->nodes = nodes;
3078269fbe72SBen Widawsky 	} else if (nodelist) {
3079269fbe72SBen Widawsky 		nodes_clear(new->nodes);
3080269fbe72SBen Widawsky 		node_set(first_node(nodes), new->nodes);
3081269fbe72SBen Widawsky 	} else {
30827858d7bcSFeng Tang 		new->mode = MPOL_LOCAL;
3083269fbe72SBen Widawsky 	}
3084f2a07f40SHugh Dickins 
3085f2a07f40SHugh Dickins 	/*
3086f2a07f40SHugh Dickins 	 * Save nodes for contextualization: this will be used to "clone"
3087f2a07f40SHugh Dickins 	 * the mempolicy in a specific context [cpuset] at a later time.
3088f2a07f40SHugh Dickins */ 3089e17f74afSLee Schermerhorn new->w.user_nodemask = nodes; 3090f2a07f40SHugh Dickins 3091926f2ae0SKOSAKI Motohiro err = 0; 309271fe804bSLee Schermerhorn 3093095f1fc4SLee Schermerhorn out: 3094095f1fc4SLee Schermerhorn /* Restore string for error message */ 3095095f1fc4SLee Schermerhorn if (nodelist) 3096095f1fc4SLee Schermerhorn *--nodelist = ':'; 3097095f1fc4SLee Schermerhorn if (flags) 3098095f1fc4SLee Schermerhorn *--flags = '='; 309971fe804bSLee Schermerhorn if (!err) 310071fe804bSLee Schermerhorn *mpol = new; 3101095f1fc4SLee Schermerhorn return err; 3102095f1fc4SLee Schermerhorn } 3103095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */ 3104095f1fc4SLee Schermerhorn 310571fe804bSLee Schermerhorn /** 310671fe804bSLee Schermerhorn * mpol_to_str - format a mempolicy structure for printing 310771fe804bSLee Schermerhorn * @buffer: to contain formatted mempolicy string 310871fe804bSLee Schermerhorn * @maxlen: length of @buffer 310971fe804bSLee Schermerhorn * @pol: pointer to mempolicy to be formatted 311071fe804bSLee Schermerhorn * 3111948927eeSDavid Rientjes * Convert @pol into a string. If @buffer is too short, truncate the string. 3112948927eeSDavid Rientjes * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the 3113948927eeSDavid Rientjes * longest flag, "relative", and to display at least a few node ids. 31141a75a6c8SChristoph Lameter */ 3115948927eeSDavid Rientjes void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol) 31161a75a6c8SChristoph Lameter { 31171a75a6c8SChristoph Lameter char *p = buffer; 3118948927eeSDavid Rientjes nodemask_t nodes = NODE_MASK_NONE; 3119948927eeSDavid Rientjes unsigned short mode = MPOL_DEFAULT; 3120948927eeSDavid Rientjes unsigned short flags = 0; 31211a75a6c8SChristoph Lameter 31228790c71aSDavid Rientjes if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) { 3123bea904d5SLee Schermerhorn mode = pol->mode; 3124948927eeSDavid Rientjes flags = pol->flags; 3125948927eeSDavid Rientjes } 3126bea904d5SLee Schermerhorn 31271a75a6c8SChristoph Lameter switch (mode) { 31281a75a6c8SChristoph Lameter case MPOL_DEFAULT: 31297858d7bcSFeng Tang case MPOL_LOCAL: 31301a75a6c8SChristoph Lameter break; 31311a75a6c8SChristoph Lameter case MPOL_PREFERRED: 3132b27abaccSDave Hansen case MPOL_PREFERRED_MANY: 31331a75a6c8SChristoph Lameter case MPOL_BIND: 31341a75a6c8SChristoph Lameter case MPOL_INTERLEAVE: 3135269fbe72SBen Widawsky nodes = pol->nodes; 31361a75a6c8SChristoph Lameter break; 31371a75a6c8SChristoph Lameter default: 3138948927eeSDavid Rientjes WARN_ON_ONCE(1); 3139948927eeSDavid Rientjes snprintf(p, maxlen, "unknown"); 3140948927eeSDavid Rientjes return; 31411a75a6c8SChristoph Lameter } 31421a75a6c8SChristoph Lameter 3143b7a9f420SDavid Rientjes p += snprintf(p, maxlen, "%s", policy_modes[mode]); 31441a75a6c8SChristoph Lameter 3145fc36b8d3SLee Schermerhorn if (flags & MPOL_MODE_FLAGS) { 3146948927eeSDavid Rientjes p += snprintf(p, buffer + maxlen - p, "="); 3147f5b087b5SDavid Rientjes 31482291990aSLee Schermerhorn /* 31492291990aSLee Schermerhorn * Currently, the only defined flags are mutually exclusive 31502291990aSLee Schermerhorn */ 3151f5b087b5SDavid Rientjes if (flags & MPOL_F_STATIC_NODES) 31522291990aSLee Schermerhorn p += snprintf(p, buffer + maxlen - p, "static"); 31532291990aSLee Schermerhorn else if (flags & MPOL_F_RELATIVE_NODES) 31542291990aSLee Schermerhorn p += snprintf(p, buffer + maxlen - p, "relative"); 3155f5b087b5SDavid Rientjes } 3156f5b087b5SDavid Rientjes 
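	/* Finally, append the nodelist, e.g. ":0-3", in bitmap list format. */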
31579e763e0fSTejun Heo if (!nodes_empty(nodes)) 31589e763e0fSTejun Heo p += scnprintf(p, buffer + maxlen - p, ":%*pbl", 31599e763e0fSTejun Heo nodemask_pr_args(&nodes)); 31601a75a6c8SChristoph Lameter } 3161
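/*
 * A minimal sketch of the parse/format pair above (mpol_parse_str() is
 * only built for CONFIG_TMPFS and needs a writable string):
 *
 *	char str[] = "interleave:0-3";
 *	char buf[64];
 *	struct mempolicy *mpol;
 *
 *	if (!mpol_parse_str(str, &mpol)) {
 *		mpol_to_str(buf, sizeof(buf), mpol);
 *		pr_debug("parsed mempolicy: %s\n", buf);
 *		mpol_put(mpol);
 *	}
 */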