// SPDX-License-Identifier: GPL-2.0-only
/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support several policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a process counter
 *                is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead.
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case NUMA_NO_NODE here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non default
 *                process policy.
 *
 * preferred many Try a set of nodes first before normal fallback. This is
 *                similar to preferred without the special case.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non-interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
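
/*
 * Illustrative userspace sketch (not part of this file): the policies
 * described above are normally installed through the set_mempolicy(2)
 * and mbind(2) system calls, e.g. via the libnuma <numaif.h> wrappers.
 * Assuming a two-node machine and an existing mapping at addr/len:
 *
 *	unsigned long nodes = (1UL << 0) | (1UL << 1);
 *
 *	// process policy: interleave new allocations over nodes 0 and 1
 *	set_mempolicy(MPOL_INTERLEAVE, &nodes, 8 * sizeof(nodes));
 *
 *	// VMA policy: restrict the mapping at addr to node 0 only
 *	unsigned long node0 = 1UL << 0;
 *	mbind(addr, len, MPOL_BIND, &node0, 8 * sizeof(node0), MPOL_MF_STRICT);
 */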

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/pagewalk.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/ptrace.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>
#include <linux/swapops.h>

#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <linux/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT       (MPOL_MF_INTERNAL << 1)	/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

/**
 * numa_map_to_online_node - Find closest online node
 * @node: Node id to start the search
 *
 * Lookup the next closest node by distance if @node is not online.
 *
 * Return: this @node if it is online, otherwise the closest node by distance
 */
int numa_map_to_online_node(int node)
{
	int min_dist = INT_MAX, dist, n, min_node;

	if (node == NUMA_NO_NODE || node_online(node))
		return node;

	min_node = node;
	for_each_online_node(n) {
		dist = node_distance(node, n);
		if (dist < min_dist) {
			min_dist = dist;
			min_node = n;
		}
	}

	return min_node;
}
EXPORT_SYMBOL_GPL(numa_map_to_online_node);

struct mempolicy *get_task_policy(struct task_struct *p)
{
	struct mempolicy *pol = p->mempolicy;
	int node;

	if (pol)
		return pol;

	node = numa_node_id();
	if (node != NUMA_NO_NODE) {
		pol = &preferred_node_policy[node];
		/* preferred_node_policy is not initialised early in boot */
		if (pol->mode)
			return pol;
	}

	return &default_policy;
}

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
} mpol_ops[MPOL_MAX];

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}
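
/*
 * Illustrative sketch (not compiled): with MPOL_F_RELATIVE_NODES the user's
 * mask is interpreted relative to the allowed set.  E.g. for orig = {1,5}
 * and rel = {4,6}, nodes_fold() wraps both bits modulo nodes_weight(rel),
 * 1 % 2 = 1 and 5 % 2 = 1, giving tmp = {1}; nodes_onto() then maps bit 1
 * to the second set bit of rel, so ret = {6}.
 */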

static int mpol_new_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;

	nodes_clear(pol->nodes);
	node_set(first_node(*nodes), pol->nodes);
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy.  mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy.  May also be called holding the mmap_lock for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/*
	 * Default (pol==NULL) and local memory policies are not subject
	 * to any remapping. They also do not need any special
	 * constructor.
	 */
	if (!pol || pol->mode == MPOL_LOCAL)
		return 0;

	/* Check N_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_MEMORY]);

	VM_BUG_ON(!nodes);

	if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
	else
		nodes_and(nsc->mask2, *nodes, nsc->mask1);

	if (mpol_store_user_nodemask(pol))
		pol->w.user_nodemask = *nodes;
	else
		pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed;

	ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	return ret;
}

/*
 * This function just creates a new policy, does some checks and simple
 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);

			mode = MPOL_LOCAL;
		}
	} else if (mode == MPOL_LOCAL) {
		if (!nodes_empty(*nodes) ||
		    (flags & MPOL_F_STATIC_NODES) ||
		    (flags & MPOL_F_RELATIVE_NODES))
			return ERR_PTR(-EINVAL);
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;
	policy->home_node = NUMA_NO_NODE;

	return policy;
}

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
{
}

static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed,
			    *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}

	if (nodes_empty(tmp))
		tmp = *nodes;

	pol->nodes = tmp;
}

static void mpol_rebind_preferred(struct mempolicy *pol,
				  const nodemask_t *nodes)
{
	pol->w.cpuset_mems_allowed = *nodes;
}

/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * Per-vma policies are protected by mmap_lock. Allocations using per-task
 * policies are protected by task->mems_allowed_seq to prevent a premature
 * OOM/allocation failure due to parallel nodemask modification.
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
{
	if (!pol || pol->mode == MPOL_LOCAL)
		return;
	if (!mpol_store_user_nodemask(pol) &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;

	mpol_ops[pol->mode].rebind(pol, newmask);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */

void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
{
	mpol_rebind_policy(tsk->mempolicy, new);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_lock during call.
 */

void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_write_lock(mm);
	for_each_vma(vmi, vma)
		mpol_rebind_policy(vma->vm_policy, new);
	mmap_write_unlock(mm);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_nodemask,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_nodemask,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_LOCAL] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_PREFERRED_MANY] = {
		.create = mpol_new_nodemask,
		.rebind = mpol_rebind_preferred,
	},
};

static int migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

struct queue_pages {
	struct list_head *pagelist;
	unsigned long flags;
	nodemask_t *nmask;
	unsigned long start;
	unsigned long end;
	struct vm_area_struct *first;
};

/*
 * Check if the page's nid is in qp->nmask.
 *
 * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
 * in the invert of qp->nmask.
 */
static inline bool queue_pages_required(struct page *page,
					struct queue_pages *qp)
{
	int nid = page_to_nid(page);
	unsigned long flags = qp->flags;

	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
}

/*
 * queue_pages_pmd() has three possible return values:
 * 0 - pages are placed on the right node or queued successfully, or
 *     special page is met, i.e. huge zero page.
 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * -EIO - is migration entry or only MPOL_MF_STRICT was specified and an
 *        existing page was already on a node that does not follow the
 *        policy.
 */
static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
	__releases(ptl)
{
	int ret = 0;
	struct page *page;
	struct queue_pages *qp = walk->private;
	unsigned long flags;

	if (unlikely(is_pmd_migration_entry(*pmd))) {
		ret = -EIO;
		goto unlock;
	}
	page = pmd_page(*pmd);
	if (is_huge_zero_page(page)) {
		walk->action = ACTION_CONTINUE;
		goto unlock;
	}
	if (!queue_pages_required(page, qp))
		goto unlock;

	flags = qp->flags;
	/* go to thp migration */
	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
		if (!vma_migratable(walk->vma) ||
		    migrate_page_add(page, qp->pagelist, flags)) {
			ret = 1;
			goto unlock;
		}
	} else
		ret = -EIO;
unlock:
	spin_unlock(ptl);
	return ret;
}

/*
 * Scan through pages checking if pages follow certain conditions,
 * and move them to the pagelist if they do.
 *
 * queue_pages_pte_range() has three possible return values:
 * 0 - pages are placed on the right node or queued successfully, or
 *     special page is met, i.e. zero page.
 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
 *        on a node that does not follow the policy.
 */
static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct page *page;
	struct queue_pages *qp = walk->private;
	unsigned long flags = qp->flags;
	bool has_unmovable = false;
	pte_t *pte, *mapped_pte;
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl)
		return queue_pages_pmd(pmd, ptl, addr, end, walk);

	if (pmd_trans_unstable(pmd))
		return 0;

	mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page || is_zone_device_page(page))
			continue;
		/*
		 * vm_normal_page() filters out zero pages, but there might
		 * still be PageReserved pages to skip, perhaps in a VDSO.
		 */
		if (PageReserved(page))
			continue;
		if (!queue_pages_required(page, qp))
			continue;
		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
			/* MPOL_MF_STRICT must be specified if we get here */
			if (!vma_migratable(vma)) {
				has_unmovable = true;
				break;
			}

			/*
			 * Do not abort immediately since there may be
			 * temporary off LRU pages in the range.  Still
			 * need to migrate other LRU pages.
			 */
			if (migrate_page_add(page, qp->pagelist, flags))
				has_unmovable = true;
		} else
			break;
	}
	pte_unmap_unlock(mapped_pte, ptl);
	cond_resched();

	if (has_unmovable)
		return 1;

	return addr != end ? -EIO : 0;
}

static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
			       unsigned long addr, unsigned long end,
			       struct mm_walk *walk)
{
	int ret = 0;
#ifdef CONFIG_HUGETLB_PAGE
	struct queue_pages *qp = walk->private;
	unsigned long flags = (qp->flags & MPOL_MF_VALID);
	struct page *page;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry))
		goto unlock;
	page = pte_page(entry);
	if (!queue_pages_required(page, qp))
		goto unlock;

	if (flags == MPOL_MF_STRICT) {
		/*
		 * STRICT alone means only detecting misplaced page and no
		 * need to further check other vma.
		 */
		ret = -EIO;
		goto unlock;
	}

	if (!vma_migratable(walk->vma)) {
		/*
		 * Must be STRICT with MOVE*, otherwise .test_walk() would
		 * have stopped walking the current vma.
		 * Detecting misplaced page but allow migrating pages which
		 * have been queued.
		 */
		ret = 1;
		goto unlock;
	}

	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
	if (flags & (MPOL_MF_MOVE_ALL) ||
	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1 &&
	     !hugetlb_pmd_shared(pte))) {
		if (isolate_hugetlb(page_folio(page), qp->pagelist) &&
		    (flags & MPOL_MF_STRICT))
			/*
			 * Failed to isolate page but allow migrating pages
			 * which have been queued.
			 */
			ret = 1;
	}
unlock:
	spin_unlock(ptl);
#else
	BUG();
#endif
	return ret;
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * This is used to mark a range of virtual addresses to be inaccessible.
 * These are later cleared by a NUMA hinting fault. Depending on these
 * faults, pages may be migrated for better NUMA placement.
 *
 * This is assuming that NUMA faults are handled using PROT_NONE. If
 * an architecture makes a different choice, it will need further
 * changes to the core.
 */
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	struct mmu_gather tlb;
	long nr_updated;

	tlb_gather_mmu(&tlb, vma->vm_mm);

	nr_updated = change_protection(&tlb, vma, addr, end, MM_CP_PROT_NUMA);
	if (nr_updated > 0)
		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);

	tlb_finish_mmu(&tlb);

	return nr_updated;
}
#else
static unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

static int queue_pages_test_walk(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct vm_area_struct *next, *vma = walk->vma;
	struct queue_pages *qp = walk->private;
	unsigned long endvma = vma->vm_end;
	unsigned long flags = qp->flags;

	/* range check first */
	VM_BUG_ON_VMA(!range_in_vma(vma, start, end), vma);

	if (!qp->first) {
		qp->first = vma;
		if (!(flags & MPOL_MF_DISCONTIG_OK) &&
			(qp->start < vma->vm_start))
			/* hole at head side of range */
			return -EFAULT;
	}
	next = find_vma(vma->vm_mm, vma->vm_end);
	if (!(flags & MPOL_MF_DISCONTIG_OK) &&
		((vma->vm_end < qp->end) &&
		(!next || vma->vm_end < next->vm_start)))
		/* hole at middle or tail of range */
		return -EFAULT;

	/*
	 * Need to check MPOL_MF_STRICT to return -EIO if possible
	 * regardless of vma_migratable
	 */
	if (!vma_migratable(vma) &&
	    !(flags & MPOL_MF_STRICT))
		return 1;

	if (endvma > end)
		endvma = end;

	if (flags & MPOL_MF_LAZY) {
		/* Similar to task_numa_work, skip inaccessible VMAs */
		if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) &&
			!(vma->vm_flags & VM_MIXEDMAP))
			change_prot_numa(vma, start, endvma);
		return 1;
	}

	/* queue pages from current vma */
	if (flags & MPOL_MF_VALID)
		return 0;
	return 1;
}

static const struct mm_walk_ops queue_pages_walk_ops = {
	.hugetlb_entry		= queue_pages_hugetlb,
	.pmd_entry		= queue_pages_pte_range,
	.test_walk		= queue_pages_test_walk,
};

/*
 * Walk through page tables and collect pages to be migrated.
 *
 * If pages found in a given range are on a set of nodes (determined by
 * @nodes and @flags), they are isolated and queued to the pagelist which is
 * passed via @private.
 *
 * queue_pages_range() has three possible return values:
 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * 0 - queue pages successfully or no misplaced page.
 * errno - i.e. misplaced pages with MPOL_MF_STRICT specified (-EIO) or
 *         memory range specified by nodemask and maxnode points outside
 *         your accessible address space (-EFAULT)
 */
static int
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		nodemask_t *nodes, unsigned long flags,
		struct list_head *pagelist)
{
	int err;
	struct queue_pages qp = {
		.pagelist = pagelist,
		.flags = flags,
		.nmask = nodes,
		.start = start,
		.end = end,
		.first = NULL,
	};

	err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);

	if (!qp.first)
		/* whole range in hole */
		err = -EFAULT;

	return err;
}

/*
 * Apply policy to a single VMA
 * This must be called with the mmap_lock held for writing.
 */
static int vma_replace_policy(struct vm_area_struct *vma,
				struct mempolicy *pol)
{
	int err;
	struct mempolicy *old;
	struct mempolicy *new;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	new = mpol_dup(pol);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (vma->vm_ops && vma->vm_ops->set_policy) {
		err = vma->vm_ops->set_policy(vma, new);
		if (err)
			goto err_out;
	}

	old = vma->vm_policy;
	vma->vm_policy = new; /* protected by mmap_lock */
	mpol_put(old);

	return 0;
err_out:
	mpol_put(new);
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end, struct mempolicy *new_pol)
{
	VMA_ITERATOR(vmi, mm, start);
	struct vm_area_struct *prev;
	struct vm_area_struct *vma;
	int err = 0;
	pgoff_t pgoff;

	prev = vma_prev(&vmi);
	vma = vma_find(&vmi, end);
	if (WARN_ON(!vma))
		return 0;

	if (start > vma->vm_start)
		prev = vma;

	do {
		unsigned long vmstart = max(start, vma->vm_start);
		unsigned long vmend = min(end, vma->vm_end);

		if (mpol_equal(vma_policy(vma), new_pol))
			goto next;

		pgoff = vma->vm_pgoff +
			((vmstart - vma->vm_start) >> PAGE_SHIFT);
		prev = vma_merge(&vmi, mm, prev, vmstart, vmend, vma->vm_flags,
				 vma->anon_vma, vma->vm_file, pgoff,
				 new_pol, vma->vm_userfaultfd_ctx,
				 anon_vma_name(vma));
		if (prev) {
			vma = prev;
			goto replace;
		}
		if (vma->vm_start != vmstart) {
			err = split_vma(&vmi, vma, vmstart, 1);
			if (err)
				goto out;
		}
		if (vma->vm_end != vmend) {
			err = split_vma(&vmi, vma, vmend, 0);
			if (err)
				goto out;
		}
replace:
		err = vma_replace_policy(vma, new_pol);
		if (err)
			goto out;
next:
		prev = vma;
	} for_each_vma_range(vmi, vma, end);

out:
	return err;
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new, *old;
	NODEMASK_SCRATCH(scratch);
	int ret;

	if (!scratch)
		return -ENOMEM;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new)) {
		ret = PTR_ERR(new);
		goto out;
	}

	task_lock(current);
	ret = mpol_set_nodemask(new, nodes, scratch);
	if (ret) {
		task_unlock(current);
		mpol_put(new);
		goto out;
	}

	old = current->mempolicy;
	current->mempolicy = new;
	if (new && new->mode == MPOL_INTERLEAVE)
		current->il_prev = MAX_NUMNODES-1;
	task_unlock(current);
	mpol_put(old);
	ret = 0;
out:
	NODEMASK_SCRATCH_FREE(scratch);
	return ret;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (p == &default_policy)
		return;

	switch (p->mode) {
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
	case MPOL_PREFERRED:
	case MPOL_PREFERRED_MANY:
		*nodes = p->nodes;
		break;
	case MPOL_LOCAL:
		/* return empty node mask for local allocation */
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p = NULL;
	int ret;

	ret = get_user_pages_fast(addr & PAGE_MASK, 1, 0, &p);
	if (ret > 0) {
		ret = page_to_nid(p);
		put_page(p);
	}
	return ret;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;

	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL.  We
		 * want to return MPOL_DEFAULT in this case.
		 */
		mmap_read_lock(mm);
		vma = vma_lookup(mm, addr);
		if (!vma) {
			mmap_read_unlock(mm);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			/*
			 * Take a refcount on the mpol, because we are about to
			 * drop the mmap_lock, after which only "pol" remains
			 * valid, "vma" is stale.
			 */
			pol_refcount = pol;
			vma = NULL;
			mpol_get(pol);
			mmap_read_unlock(mm);
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = next_node_in(current->il_prev, pol->nodes);
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {
			*nmask = pol->w.user_nodemask;
		} else {
			task_lock(current);
			get_policy_nodemask(pol, nmask);
			task_unlock(current);
		}
	}

out:
	mpol_cond_put(pol);
	if (vma)
		mmap_read_unlock(mm);
	if (pol_refcount)
		mpol_put(pol_refcount);
	return err;
}
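
/*
 * Illustrative userspace sketch (not part of this file): the query path
 * above backs get_mempolicy(2).  With MPOL_F_NODE|MPOL_F_ADDR the returned
 * "mode" is actually the node currently backing the (assumed mapped and
 * touched) address addr:
 *
 *	int node;
 *	if (get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR) == 0)
 *		printf("page at %p resides on node %d\n", addr, node);
 */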

#ifdef CONFIG_MIGRATION
/*
 * page migration, thp tail pages can be passed.
 */
static int migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	struct page *head = compound_head(page);
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
		if (!isolate_lru_page(head)) {
			list_add_tail(&head->lru, pagelist);
			mod_node_page_state(page_pgdat(head),
				NR_ISOLATED_ANON + page_is_file_lru(head),
				thp_nr_pages(head));
		} else if (flags & MPOL_MF_STRICT) {
			/*
			 * Non-movable page may reach here.  And, there may be
			 * temporary off LRU pages or non-LRU movable pages.
			 * Treat them as unmovable pages since they can't be
			 * isolated, so they can't be moved at the moment.  It
			 * should return -EIO for this case too.
			 */
			return -EIO;
		}
	}

	return 0;
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	struct vm_area_struct *vma;
	LIST_HEAD(pagelist);
	int err = 0;
	struct migration_target_control mtc = {
		.nid = dest,
		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
	};

	nodes_clear(nmask);
	node_set(source, nmask);

	/*
	 * This does not "check" the range but isolates all pages that
	 * need migration.  Between passing in the full user address
	 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
	 */
	vma = find_vma(mm, 0);
	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
	queue_pages_range(mm, vma->vm_start, mm->task_size, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, alloc_migration_target, NULL,
			(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
		if (err)
			putback_movable_pages(&pagelist);
	}

	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	int busy = 0;
	int err = 0;
	nodemask_t tmp;

	lru_cache_disable();

	mmap_read_lock(mm);

	/*
	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
	 * bit in 'tmp', and return that <source, dest> pair for migration.
	 * The pair of nodemasks 'to' and 'from' define the map.
	 *
	 * If no pair of bits is found that way, fallback to picking some
	 * pair of 'source' and 'dest' bits that are not the same.  If the
	 * 'source' and 'dest' bits are the same, this represents a node
	 * that will be migrating to itself, so no pages need move.
	 *
	 * If no bits are left in 'tmp', or if all remaining bits left
	 * in 'tmp' correspond to the same bit in 'to', return false
	 * (nothing left to migrate).
	 *
	 * This lets us pick a pair of nodes to migrate between, such that
	 * if possible the dest node is not already occupied by some other
	 * source node, minimizing the risk of overloading the memory on a
	 * node that would happen if we migrated incoming memory to a node
	 * before migrating outgoing memory source that same node.
	 *
	 * A single scan of tmp is sufficient.  As we go, we remember the
	 * most recent <s, d> pair that moved (s != d).  If we find a pair
	 * that not only moved, but what's better, moved to an empty slot
	 * (d is not set in tmp), then we break out then, with that pair.
	 * Otherwise when we finish scanning from_tmp, we at least have the
	 * most recent <s, d> pair that moved.  If we get all the way through
	 * the scan of tmp without finding any node that moved, much less
	 * moved to an empty node, then there is nothing left worth migrating.
	 */

	tmp = *from;
	while (!nodes_empty(tmp)) {
		int s, d;
		int source = NUMA_NO_NODE;
		int dest = 0;

		for_each_node_mask(s, tmp) {

			/*
			 * do_migrate_pages() tries to maintain the relative
			 * node relationship of the pages established between
			 * threads and memory areas.
			 *
			 * However if the number of source nodes is not equal to
			 * the number of destination nodes we can not preserve
			 * this node relative relationship.  In that case, skip
			 * copying memory from a node that is in the destination
			 * mask.
			 *
			 * Example: [2,3,4] -> [3,4,5] moves everything.
			 *          [0-7] - > [3,4,5] moves only 0,1,2,6,7.
			 */

			if ((nodes_weight(*from) != nodes_weight(*to)) &&
						(node_isset(s, *to)))
				continue;

			d = node_remap(s, *from, *to);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
*/ 11707e2ab150SChristoph Lameter if (!node_isset(dest, tmp)) 11717e2ab150SChristoph Lameter break; 11727e2ab150SChristoph Lameter } 1173b76ac7e7SJianguo Wu if (source == NUMA_NO_NODE) 11747e2ab150SChristoph Lameter break; 11757e2ab150SChristoph Lameter 11767e2ab150SChristoph Lameter node_clear(source, tmp); 11777e2ab150SChristoph Lameter err = migrate_to_node(mm, source, dest, flags); 11787e2ab150SChristoph Lameter if (err > 0) 11797e2ab150SChristoph Lameter busy += err; 11807e2ab150SChristoph Lameter if (err < 0) 11817e2ab150SChristoph Lameter break; 118239743889SChristoph Lameter } 1183d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); 1184d479960eSMinchan Kim 1185361a2a22SMinchan Kim lru_cache_enable(); 11867e2ab150SChristoph Lameter if (err < 0) 11877e2ab150SChristoph Lameter return err; 11887e2ab150SChristoph Lameter return busy; 1189b20a3503SChristoph Lameter 119039743889SChristoph Lameter } 119139743889SChristoph Lameter 11923ad33b24SLee Schermerhorn /* 11933ad33b24SLee Schermerhorn * Allocate a new page for page migration based on vma policy. 1194d05f0cdcSHugh Dickins * Start by assuming the page is mapped by the same vma as contains @start. 11953ad33b24SLee Schermerhorn * Search forward from there, if not. N.B., this assumes that the 11963ad33b24SLee Schermerhorn * list of pages handed to migrate_pages()--which is how we get here-- 11973ad33b24SLee Schermerhorn * is in virtual address order. 11983ad33b24SLee Schermerhorn */ 1199666feb21SMichal Hocko static struct page *new_page(struct page *page, unsigned long start) 120095a402c3SChristoph Lameter { 1201ec4858e0SMatthew Wilcox (Oracle) struct folio *dst, *src = page_folio(page); 1202d05f0cdcSHugh Dickins struct vm_area_struct *vma; 12033f649ab7SKees Cook unsigned long address; 120466850be5SLiam R. Howlett VMA_ITERATOR(vmi, current->mm, start); 1205ec4858e0SMatthew Wilcox (Oracle) gfp_t gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL; 120695a402c3SChristoph Lameter 120766850be5SLiam R. 
Howlett for_each_vma(vmi, vma) { 12083ad33b24SLee Schermerhorn address = page_address_in_vma(page, vma); 12093ad33b24SLee Schermerhorn if (address != -EFAULT) 12103ad33b24SLee Schermerhorn break; 12113ad33b24SLee Schermerhorn } 12123ad33b24SLee Schermerhorn 1213*d0ce0e47SSidhartha Kumar if (folio_test_hugetlb(src)) { 1214*d0ce0e47SSidhartha Kumar dst = alloc_hugetlb_folio_vma(folio_hstate(src), 1215389c8178SMichal Hocko vma, address); 1216*d0ce0e47SSidhartha Kumar return &dst->page; 1217*d0ce0e47SSidhartha Kumar } 1218c8633798SNaoya Horiguchi 1219ec4858e0SMatthew Wilcox (Oracle) if (folio_test_large(src)) 1220ec4858e0SMatthew Wilcox (Oracle) gfp = GFP_TRANSHUGE; 1221ec4858e0SMatthew Wilcox (Oracle) 122211c731e8SWanpeng Li /* 1223ec4858e0SMatthew Wilcox (Oracle) * if !vma, vma_alloc_folio() will use task or system default policy 122411c731e8SWanpeng Li */ 1225ec4858e0SMatthew Wilcox (Oracle) dst = vma_alloc_folio(gfp, folio_order(src), vma, address, 1226ec4858e0SMatthew Wilcox (Oracle) folio_test_large(src)); 1227ec4858e0SMatthew Wilcox (Oracle) return &dst->page; 122895a402c3SChristoph Lameter } 1229b20a3503SChristoph Lameter #else 1230b20a3503SChristoph Lameter 1231a53190a4SYang Shi static int migrate_page_add(struct page *page, struct list_head *pagelist, 1232b20a3503SChristoph Lameter unsigned long flags) 1233b20a3503SChristoph Lameter { 1234a53190a4SYang Shi return -EIO; 1235b20a3503SChristoph Lameter } 1236b20a3503SChristoph Lameter 12370ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, 12380ce72d4fSAndrew Morton const nodemask_t *to, int flags) 1239b20a3503SChristoph Lameter { 1240b20a3503SChristoph Lameter return -ENOSYS; 1241b20a3503SChristoph Lameter } 124295a402c3SChristoph Lameter 1243666feb21SMichal Hocko static struct page *new_page(struct page *page, unsigned long start) 124495a402c3SChristoph Lameter { 124595a402c3SChristoph Lameter return NULL; 124695a402c3SChristoph Lameter } 1247b20a3503SChristoph Lameter #endif 1248b20a3503SChristoph Lameter 1249dbcb0f19SAdrian Bunk static long do_mbind(unsigned long start, unsigned long len, 1250028fec41SDavid Rientjes unsigned short mode, unsigned short mode_flags, 1251028fec41SDavid Rientjes nodemask_t *nmask, unsigned long flags) 12526ce3c4c0SChristoph Lameter { 12536ce3c4c0SChristoph Lameter struct mm_struct *mm = current->mm; 12546ce3c4c0SChristoph Lameter struct mempolicy *new; 12556ce3c4c0SChristoph Lameter unsigned long end; 12566ce3c4c0SChristoph Lameter int err; 1257d8835445SYang Shi int ret; 12586ce3c4c0SChristoph Lameter LIST_HEAD(pagelist); 12596ce3c4c0SChristoph Lameter 1260b24f53a0SLee Schermerhorn if (flags & ~(unsigned long)MPOL_MF_VALID) 12616ce3c4c0SChristoph Lameter return -EINVAL; 126274c00241SChristoph Lameter if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE)) 12636ce3c4c0SChristoph Lameter return -EPERM; 12646ce3c4c0SChristoph Lameter 12656ce3c4c0SChristoph Lameter if (start & ~PAGE_MASK) 12666ce3c4c0SChristoph Lameter return -EINVAL; 12676ce3c4c0SChristoph Lameter 12686ce3c4c0SChristoph Lameter if (mode == MPOL_DEFAULT) 12696ce3c4c0SChristoph Lameter flags &= ~MPOL_MF_STRICT; 12706ce3c4c0SChristoph Lameter 1271aaa31e05Sze zuo len = PAGE_ALIGN(len); 12726ce3c4c0SChristoph Lameter end = start + len; 12736ce3c4c0SChristoph Lameter 12746ce3c4c0SChristoph Lameter if (end < start) 12756ce3c4c0SChristoph Lameter return -EINVAL; 12766ce3c4c0SChristoph Lameter if (end == start) 12776ce3c4c0SChristoph Lameter return 0; 12786ce3c4c0SChristoph Lameter 1279028fec41SDavid Rientjes 
new = mpol_new(mode, mode_flags, nmask); 12806ce3c4c0SChristoph Lameter if (IS_ERR(new)) 12816ce3c4c0SChristoph Lameter return PTR_ERR(new); 12826ce3c4c0SChristoph Lameter 1283b24f53a0SLee Schermerhorn if (flags & MPOL_MF_LAZY) 1284b24f53a0SLee Schermerhorn new->flags |= MPOL_F_MOF; 1285b24f53a0SLee Schermerhorn 12866ce3c4c0SChristoph Lameter /* 12876ce3c4c0SChristoph Lameter * If we are using the default policy then operation 12886ce3c4c0SChristoph Lameter * on discontinuous address spaces is okay after all 12896ce3c4c0SChristoph Lameter */ 12906ce3c4c0SChristoph Lameter if (!new) 12916ce3c4c0SChristoph Lameter flags |= MPOL_MF_DISCONTIG_OK; 12926ce3c4c0SChristoph Lameter 1293028fec41SDavid Rientjes pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n", 1294028fec41SDavid Rientjes start, start + len, mode, mode_flags, 129500ef2d2fSDavid Rientjes nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE); 12966ce3c4c0SChristoph Lameter 12970aedadf9SChristoph Lameter if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 12980aedadf9SChristoph Lameter 1299361a2a22SMinchan Kim lru_cache_disable(); 13000aedadf9SChristoph Lameter } 13014bfc4495SKAMEZAWA Hiroyuki { 13024bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 13034bfc4495SKAMEZAWA Hiroyuki if (scratch) { 1304d8ed45c5SMichel Lespinasse mmap_write_lock(mm); 13054bfc4495SKAMEZAWA Hiroyuki err = mpol_set_nodemask(new, nmask, scratch); 13064bfc4495SKAMEZAWA Hiroyuki if (err) 1307d8ed45c5SMichel Lespinasse mmap_write_unlock(mm); 13084bfc4495SKAMEZAWA Hiroyuki } else 13094bfc4495SKAMEZAWA Hiroyuki err = -ENOMEM; 13104bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 13114bfc4495SKAMEZAWA Hiroyuki } 1312b05ca738SKOSAKI Motohiro if (err) 1313b05ca738SKOSAKI Motohiro goto mpol_out; 1314b05ca738SKOSAKI Motohiro 1315d8835445SYang Shi ret = queue_pages_range(mm, start, end, nmask, 13166ce3c4c0SChristoph Lameter flags | MPOL_MF_INVERT, &pagelist); 1317d8835445SYang Shi 1318d8835445SYang Shi if (ret < 0) { 1319a85dfc30SYang Shi err = ret; 1320d8835445SYang Shi goto up_out; 1321d8835445SYang Shi } 1322d8835445SYang Shi 13239d8cebd4SKOSAKI Motohiro err = mbind_range(mm, start, end, new); 13247e2ab150SChristoph Lameter 1325b24f53a0SLee Schermerhorn if (!err) { 1326b24f53a0SLee Schermerhorn int nr_failed = 0; 1327b24f53a0SLee Schermerhorn 1328cf608ac1SMinchan Kim if (!list_empty(&pagelist)) { 1329b24f53a0SLee Schermerhorn WARN_ON_ONCE(flags & MPOL_MF_LAZY); 1330d05f0cdcSHugh Dickins nr_failed = migrate_pages(&pagelist, new_page, NULL, 13315ac95884SYang Shi start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND, NULL); 1332cf608ac1SMinchan Kim if (nr_failed) 133374060e4dSNaoya Horiguchi putback_movable_pages(&pagelist); 1334cf608ac1SMinchan Kim } 13356ce3c4c0SChristoph Lameter 1336d8835445SYang Shi if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT))) 13376ce3c4c0SChristoph Lameter err = -EIO; 1338a85dfc30SYang Shi } else { 1339d8835445SYang Shi up_out: 1340a85dfc30SYang Shi if (!list_empty(&pagelist)) 1341a85dfc30SYang Shi putback_movable_pages(&pagelist); 1342a85dfc30SYang Shi } 1343a85dfc30SYang Shi 1344d8ed45c5SMichel Lespinasse mmap_write_unlock(mm); 1345b05ca738SKOSAKI Motohiro mpol_out: 1346f0be3d32SLee Schermerhorn mpol_put(new); 1347d479960eSMinchan Kim if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) 1348361a2a22SMinchan Kim lru_cache_enable(); 13496ce3c4c0SChristoph Lameter return err; 13506ce3c4c0SChristoph Lameter } 13516ce3c4c0SChristoph Lameter 135239743889SChristoph Lameter /* 13538bccd85fSChristoph Lameter * User space interface with variable sized bitmaps 
for nodelists. 13548bccd85fSChristoph Lameter */ 1355e130242dSArnd Bergmann static int get_bitmap(unsigned long *mask, const unsigned long __user *nmask, 1356e130242dSArnd Bergmann unsigned long maxnode) 1357e130242dSArnd Bergmann { 1358e130242dSArnd Bergmann unsigned long nlongs = BITS_TO_LONGS(maxnode); 1359e130242dSArnd Bergmann int ret; 1360e130242dSArnd Bergmann 1361e130242dSArnd Bergmann if (in_compat_syscall()) 1362e130242dSArnd Bergmann ret = compat_get_bitmap(mask, 1363e130242dSArnd Bergmann (const compat_ulong_t __user *)nmask, 1364e130242dSArnd Bergmann maxnode); 1365e130242dSArnd Bergmann else 1366e130242dSArnd Bergmann ret = copy_from_user(mask, nmask, 1367e130242dSArnd Bergmann nlongs * sizeof(unsigned long)); 1368e130242dSArnd Bergmann 1369e130242dSArnd Bergmann if (ret) 1370e130242dSArnd Bergmann return -EFAULT; 1371e130242dSArnd Bergmann 1372e130242dSArnd Bergmann if (maxnode % BITS_PER_LONG) 1373e130242dSArnd Bergmann mask[nlongs - 1] &= (1UL << (maxnode % BITS_PER_LONG)) - 1; 1374e130242dSArnd Bergmann 1375e130242dSArnd Bergmann return 0; 1376e130242dSArnd Bergmann } 13778bccd85fSChristoph Lameter 13788bccd85fSChristoph Lameter /* Copy a node mask from user space. */ 137939743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask, 13808bccd85fSChristoph Lameter unsigned long maxnode) 13818bccd85fSChristoph Lameter { 13828bccd85fSChristoph Lameter --maxnode; 13838bccd85fSChristoph Lameter nodes_clear(*nodes); 13848bccd85fSChristoph Lameter if (maxnode == 0 || !nmask) 13858bccd85fSChristoph Lameter return 0; 1386a9c930baSAndi Kleen if (maxnode > PAGE_SIZE*BITS_PER_BYTE) 1387636f13c1SChris Wright return -EINVAL; 13888bccd85fSChristoph Lameter 138956521e7aSYisheng Xie /* 139056521e7aSYisheng Xie * When the user specified more nodes than supported just check 1391e130242dSArnd Bergmann * if the non supported part is all zero, one word at a time, 1392e130242dSArnd Bergmann * starting at the end. 
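 *
 * Illustrative example (editor's addition, not part of the original
 * comment): a caller who wants nodes 0 and 2 would typically pass a
 * single word containing 0x5 and a maxnode of at least 4, since only
 * maxnode - 1 bits are copied in; a set bit at or above MAX_NUMNODES
 * makes the call fail with -EINVAL.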
139356521e7aSYisheng Xie */ 1394e130242dSArnd Bergmann while (maxnode > MAX_NUMNODES) { 1395e130242dSArnd Bergmann unsigned long bits = min_t(unsigned long, maxnode, BITS_PER_LONG); 1396e130242dSArnd Bergmann unsigned long t; 13978bccd85fSChristoph Lameter 1398000eca5dSTianyu Li if (get_bitmap(&t, &nmask[(maxnode - 1) / BITS_PER_LONG], bits)) 139956521e7aSYisheng Xie return -EFAULT; 1400e130242dSArnd Bergmann 1401e130242dSArnd Bergmann if (maxnode - bits >= MAX_NUMNODES) { 1402e130242dSArnd Bergmann maxnode -= bits; 1403e130242dSArnd Bergmann } else { 1404e130242dSArnd Bergmann maxnode = MAX_NUMNODES; 1405e130242dSArnd Bergmann t &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1); 1406e130242dSArnd Bergmann } 1407e130242dSArnd Bergmann if (t) 140856521e7aSYisheng Xie return -EINVAL; 140956521e7aSYisheng Xie } 141056521e7aSYisheng Xie 1411e130242dSArnd Bergmann return get_bitmap(nodes_addr(*nodes), nmask, maxnode); 14128bccd85fSChristoph Lameter } 14138bccd85fSChristoph Lameter 14148bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */ 14158bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode, 14168bccd85fSChristoph Lameter nodemask_t *nodes) 14178bccd85fSChristoph Lameter { 14188bccd85fSChristoph Lameter unsigned long copy = ALIGN(maxnode-1, 64) / 8; 1419050c17f2SRalph Campbell unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long); 1420e130242dSArnd Bergmann bool compat = in_compat_syscall(); 1421e130242dSArnd Bergmann 1422e130242dSArnd Bergmann if (compat) 1423e130242dSArnd Bergmann nbytes = BITS_TO_COMPAT_LONGS(nr_node_ids) * sizeof(compat_long_t); 14248bccd85fSChristoph Lameter 14258bccd85fSChristoph Lameter if (copy > nbytes) { 14268bccd85fSChristoph Lameter if (copy > PAGE_SIZE) 14278bccd85fSChristoph Lameter return -EINVAL; 14288bccd85fSChristoph Lameter if (clear_user((char __user *)mask + nbytes, copy - nbytes)) 14298bccd85fSChristoph Lameter return -EFAULT; 14308bccd85fSChristoph Lameter copy = nbytes; 1431e130242dSArnd Bergmann maxnode = nr_node_ids; 14328bccd85fSChristoph Lameter } 1433e130242dSArnd Bergmann 1434e130242dSArnd Bergmann if (compat) 1435e130242dSArnd Bergmann return compat_put_bitmap((compat_ulong_t __user *)mask, 1436e130242dSArnd Bergmann nodes_addr(*nodes), maxnode); 1437e130242dSArnd Bergmann 14388bccd85fSChristoph Lameter return copy_to_user(mask, nodes_addr(*nodes), copy) ? 
-EFAULT : 0; 14398bccd85fSChristoph Lameter } 14408bccd85fSChristoph Lameter 144195837924SFeng Tang /* Basic parameter sanity check used by both mbind() and set_mempolicy() */ 144295837924SFeng Tang static inline int sanitize_mpol_flags(int *mode, unsigned short *flags) 144395837924SFeng Tang { 144495837924SFeng Tang *flags = *mode & MPOL_MODE_FLAGS; 144595837924SFeng Tang *mode &= ~MPOL_MODE_FLAGS; 1446b27abaccSDave Hansen 1447a38a59fdSBen Widawsky if ((unsigned int)(*mode) >= MPOL_MAX) 144895837924SFeng Tang return -EINVAL; 144995837924SFeng Tang if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES)) 145095837924SFeng Tang return -EINVAL; 14516d2aec9eSEric Dumazet if (*flags & MPOL_F_NUMA_BALANCING) { 14526d2aec9eSEric Dumazet if (*mode != MPOL_BIND) 14536d2aec9eSEric Dumazet return -EINVAL; 14546d2aec9eSEric Dumazet *flags |= (MPOL_F_MOF | MPOL_F_MORON); 14556d2aec9eSEric Dumazet } 145695837924SFeng Tang return 0; 145795837924SFeng Tang } 145895837924SFeng Tang 1459e7dc9ad6SDominik Brodowski static long kernel_mbind(unsigned long start, unsigned long len, 1460e7dc9ad6SDominik Brodowski unsigned long mode, const unsigned long __user *nmask, 1461e7dc9ad6SDominik Brodowski unsigned long maxnode, unsigned int flags) 14628bccd85fSChristoph Lameter { 1463028fec41SDavid Rientjes unsigned short mode_flags; 146495837924SFeng Tang nodemask_t nodes; 146595837924SFeng Tang int lmode = mode; 146695837924SFeng Tang int err; 14678bccd85fSChristoph Lameter 1468057d3389SAndrey Konovalov start = untagged_addr(start); 146995837924SFeng Tang err = sanitize_mpol_flags(&lmode, &mode_flags); 147095837924SFeng Tang if (err) 147195837924SFeng Tang return err; 147295837924SFeng Tang 14738bccd85fSChristoph Lameter err = get_nodes(&nodes, nmask, maxnode); 14748bccd85fSChristoph Lameter if (err) 14758bccd85fSChristoph Lameter return err; 147695837924SFeng Tang 147795837924SFeng Tang return do_mbind(start, len, lmode, mode_flags, &nodes, flags); 14788bccd85fSChristoph Lameter } 14798bccd85fSChristoph Lameter 1480c6018b4bSAneesh Kumar K.V SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, len, 1481c6018b4bSAneesh Kumar K.V unsigned long, home_node, unsigned long, flags) 1482c6018b4bSAneesh Kumar K.V { 1483c6018b4bSAneesh Kumar K.V struct mm_struct *mm = current->mm; 1484c6018b4bSAneesh Kumar K.V struct vm_area_struct *vma; 1485e976936cSMichal Hocko struct mempolicy *new, *old; 1486c6018b4bSAneesh Kumar K.V unsigned long vmstart; 1487c6018b4bSAneesh Kumar K.V unsigned long vmend; 1488c6018b4bSAneesh Kumar K.V unsigned long end; 1489c6018b4bSAneesh Kumar K.V int err = -ENOENT; 149066850be5SLiam R. Howlett VMA_ITERATOR(vmi, mm, start); 1491c6018b4bSAneesh Kumar K.V 1492c6018b4bSAneesh Kumar K.V start = untagged_addr(start); 1493c6018b4bSAneesh Kumar K.V if (start & ~PAGE_MASK) 1494c6018b4bSAneesh Kumar K.V return -EINVAL; 1495c6018b4bSAneesh Kumar K.V /* 1496c6018b4bSAneesh Kumar K.V * flags is used for future extension if any. 1497c6018b4bSAneesh Kumar K.V */ 1498c6018b4bSAneesh Kumar K.V if (flags != 0) 1499c6018b4bSAneesh Kumar K.V return -EINVAL; 1500c6018b4bSAneesh Kumar K.V 1501c6018b4bSAneesh Kumar K.V /* 1502c6018b4bSAneesh Kumar K.V * Check home_node is online to avoid accessing uninitialized 1503c6018b4bSAneesh Kumar K.V * NODE_DATA. 
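 *
 * Illustrative userspace sketch (editor's addition; it assumes the
 * uapi headers define __NR_set_mempolicy_home_node and that no libc
 * wrapper is available):
 *
 *	syscall(__NR_set_mempolicy_home_node, addr, len, home_node, 0);
 *
 * where [addr, addr + len) was previously given an MPOL_BIND or
 * MPOL_PREFERRED_MANY policy via mbind().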
1504c6018b4bSAneesh Kumar K.V */ 1505c6018b4bSAneesh Kumar K.V if (home_node >= MAX_NUMNODES || !node_online(home_node)) 1506c6018b4bSAneesh Kumar K.V return -EINVAL; 1507c6018b4bSAneesh Kumar K.V 1508aaa31e05Sze zuo len = PAGE_ALIGN(len); 1509c6018b4bSAneesh Kumar K.V end = start + len; 1510c6018b4bSAneesh Kumar K.V 1511c6018b4bSAneesh Kumar K.V if (end < start) 1512c6018b4bSAneesh Kumar K.V return -EINVAL; 1513c6018b4bSAneesh Kumar K.V if (end == start) 1514c6018b4bSAneesh Kumar K.V return 0; 1515c6018b4bSAneesh Kumar K.V mmap_write_lock(mm); 151666850be5SLiam R. Howlett for_each_vma_range(vmi, vma, end) { 1517c6018b4bSAneesh Kumar K.V /* 1518c6018b4bSAneesh Kumar K.V * If any vma in the range got policy other than MPOL_BIND 1519c6018b4bSAneesh Kumar K.V * or MPOL_PREFERRED_MANY we return error. We don't reset 1520c6018b4bSAneesh Kumar K.V * the home node for vmas we already updated before. 1521c6018b4bSAneesh Kumar K.V */ 1522e976936cSMichal Hocko old = vma_policy(vma); 1523e976936cSMichal Hocko if (!old) 1524e976936cSMichal Hocko continue; 1525e976936cSMichal Hocko if (old->mode != MPOL_BIND && old->mode != MPOL_PREFERRED_MANY) { 1526c6018b4bSAneesh Kumar K.V err = -EOPNOTSUPP; 1527c6018b4bSAneesh Kumar K.V break; 1528c6018b4bSAneesh Kumar K.V } 1529e976936cSMichal Hocko new = mpol_dup(old); 1530e976936cSMichal Hocko if (IS_ERR(new)) { 1531e976936cSMichal Hocko err = PTR_ERR(new); 1532e976936cSMichal Hocko break; 1533e976936cSMichal Hocko } 1534c6018b4bSAneesh Kumar K.V 1535c6018b4bSAneesh Kumar K.V new->home_node = home_node; 1536e976936cSMichal Hocko vmstart = max(start, vma->vm_start); 1537e976936cSMichal Hocko vmend = min(end, vma->vm_end); 1538c6018b4bSAneesh Kumar K.V err = mbind_range(mm, vmstart, vmend, new); 1539c6018b4bSAneesh Kumar K.V mpol_put(new); 1540c6018b4bSAneesh Kumar K.V if (err) 1541c6018b4bSAneesh Kumar K.V break; 1542c6018b4bSAneesh Kumar K.V } 1543c6018b4bSAneesh Kumar K.V mmap_write_unlock(mm); 1544c6018b4bSAneesh Kumar K.V return err; 1545c6018b4bSAneesh Kumar K.V } 1546c6018b4bSAneesh Kumar K.V 1547e7dc9ad6SDominik Brodowski SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len, 1548e7dc9ad6SDominik Brodowski unsigned long, mode, const unsigned long __user *, nmask, 1549e7dc9ad6SDominik Brodowski unsigned long, maxnode, unsigned int, flags) 1550e7dc9ad6SDominik Brodowski { 1551e7dc9ad6SDominik Brodowski return kernel_mbind(start, len, mode, nmask, maxnode, flags); 1552e7dc9ad6SDominik Brodowski } 1553e7dc9ad6SDominik Brodowski 15548bccd85fSChristoph Lameter /* Set the process memory policy */ 1555af03c4acSDominik Brodowski static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask, 1556af03c4acSDominik Brodowski unsigned long maxnode) 15578bccd85fSChristoph Lameter { 155895837924SFeng Tang unsigned short mode_flags; 15598bccd85fSChristoph Lameter nodemask_t nodes; 156095837924SFeng Tang int lmode = mode; 156195837924SFeng Tang int err; 15628bccd85fSChristoph Lameter 156395837924SFeng Tang err = sanitize_mpol_flags(&lmode, &mode_flags); 156495837924SFeng Tang if (err) 156595837924SFeng Tang return err; 156695837924SFeng Tang 15678bccd85fSChristoph Lameter err = get_nodes(&nodes, nmask, maxnode); 15688bccd85fSChristoph Lameter if (err) 15698bccd85fSChristoph Lameter return err; 157095837924SFeng Tang 157195837924SFeng Tang return do_set_mempolicy(lmode, mode_flags, &nodes); 15728bccd85fSChristoph Lameter } 15738bccd85fSChristoph Lameter 1574af03c4acSDominik Brodowski SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long 
__user *, nmask, 1575af03c4acSDominik Brodowski unsigned long, maxnode) 1576af03c4acSDominik Brodowski { 1577af03c4acSDominik Brodowski return kernel_set_mempolicy(mode, nmask, maxnode); 1578af03c4acSDominik Brodowski } 1579af03c4acSDominik Brodowski 1580b6e9b0baSDominik Brodowski static int kernel_migrate_pages(pid_t pid, unsigned long maxnode, 1581b6e9b0baSDominik Brodowski const unsigned long __user *old_nodes, 1582b6e9b0baSDominik Brodowski const unsigned long __user *new_nodes) 158339743889SChristoph Lameter { 1584596d7cfaSKOSAKI Motohiro struct mm_struct *mm = NULL; 158539743889SChristoph Lameter struct task_struct *task; 158639743889SChristoph Lameter nodemask_t task_nodes; 158739743889SChristoph Lameter int err; 1588596d7cfaSKOSAKI Motohiro nodemask_t *old; 1589596d7cfaSKOSAKI Motohiro nodemask_t *new; 1590596d7cfaSKOSAKI Motohiro NODEMASK_SCRATCH(scratch); 159139743889SChristoph Lameter 1592596d7cfaSKOSAKI Motohiro if (!scratch) 1593596d7cfaSKOSAKI Motohiro return -ENOMEM; 159439743889SChristoph Lameter 1595596d7cfaSKOSAKI Motohiro old = &scratch->mask1; 1596596d7cfaSKOSAKI Motohiro new = &scratch->mask2; 1597596d7cfaSKOSAKI Motohiro 1598596d7cfaSKOSAKI Motohiro err = get_nodes(old, old_nodes, maxnode); 159939743889SChristoph Lameter if (err) 1600596d7cfaSKOSAKI Motohiro goto out; 1601596d7cfaSKOSAKI Motohiro 1602596d7cfaSKOSAKI Motohiro err = get_nodes(new, new_nodes, maxnode); 1603596d7cfaSKOSAKI Motohiro if (err) 1604596d7cfaSKOSAKI Motohiro goto out; 160539743889SChristoph Lameter 160639743889SChristoph Lameter /* Find the mm_struct */ 160755cfaa3cSZeng Zhaoming rcu_read_lock(); 1608228ebcbeSPavel Emelyanov task = pid ? find_task_by_vpid(pid) : current; 160939743889SChristoph Lameter if (!task) { 161055cfaa3cSZeng Zhaoming rcu_read_unlock(); 1611596d7cfaSKOSAKI Motohiro err = -ESRCH; 1612596d7cfaSKOSAKI Motohiro goto out; 161339743889SChristoph Lameter } 16143268c63eSChristoph Lameter get_task_struct(task); 161539743889SChristoph Lameter 1616596d7cfaSKOSAKI Motohiro err = -EINVAL; 161739743889SChristoph Lameter 161839743889SChristoph Lameter /* 161931367466SOtto Ebeling * Check if this process has the right to modify the specified process. 162031367466SOtto Ebeling * Use the regular "ptrace_may_access()" checks. 162139743889SChristoph Lameter */ 162231367466SOtto Ebeling if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) { 1623c69e8d9cSDavid Howells rcu_read_unlock(); 162439743889SChristoph Lameter err = -EPERM; 16253268c63eSChristoph Lameter goto out_put; 162639743889SChristoph Lameter } 1627c69e8d9cSDavid Howells rcu_read_unlock(); 162839743889SChristoph Lameter 162939743889SChristoph Lameter task_nodes = cpuset_mems_allowed(task); 163039743889SChristoph Lameter /* Is the user allowed to access the target nodes? 
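 * (Illustrative aside, editor's addition: this function backs the
 *  migrate_pages(2) syscall defined just below, typically reached from
 *  userspace through libnuma's migrate_pages() wrapper.)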
*/ 1631596d7cfaSKOSAKI Motohiro if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) { 163239743889SChristoph Lameter err = -EPERM; 16333268c63eSChristoph Lameter goto out_put; 163439743889SChristoph Lameter } 163539743889SChristoph Lameter 16360486a38bSYisheng Xie task_nodes = cpuset_mems_allowed(current); 16370486a38bSYisheng Xie nodes_and(*new, *new, task_nodes); 16380486a38bSYisheng Xie if (nodes_empty(*new)) 16393268c63eSChristoph Lameter goto out_put; 16400486a38bSYisheng Xie 164186c3a764SDavid Quigley err = security_task_movememory(task); 164286c3a764SDavid Quigley if (err) 16433268c63eSChristoph Lameter goto out_put; 164486c3a764SDavid Quigley 16453268c63eSChristoph Lameter mm = get_task_mm(task); 16463268c63eSChristoph Lameter put_task_struct(task); 1647f2a9ef88SSasha Levin 1648f2a9ef88SSasha Levin if (!mm) { 1649f2a9ef88SSasha Levin err = -EINVAL; 1650f2a9ef88SSasha Levin goto out; 1651f2a9ef88SSasha Levin } 1652f2a9ef88SSasha Levin 1653596d7cfaSKOSAKI Motohiro err = do_migrate_pages(mm, old, new, 165474c00241SChristoph Lameter capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE); 16553268c63eSChristoph Lameter 165639743889SChristoph Lameter mmput(mm); 16573268c63eSChristoph Lameter out: 1658596d7cfaSKOSAKI Motohiro NODEMASK_SCRATCH_FREE(scratch); 1659596d7cfaSKOSAKI Motohiro 166039743889SChristoph Lameter return err; 16613268c63eSChristoph Lameter 16623268c63eSChristoph Lameter out_put: 16633268c63eSChristoph Lameter put_task_struct(task); 16643268c63eSChristoph Lameter goto out; 16653268c63eSChristoph Lameter 166639743889SChristoph Lameter } 166739743889SChristoph Lameter 1668b6e9b0baSDominik Brodowski SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode, 1669b6e9b0baSDominik Brodowski const unsigned long __user *, old_nodes, 1670b6e9b0baSDominik Brodowski const unsigned long __user *, new_nodes) 1671b6e9b0baSDominik Brodowski { 1672b6e9b0baSDominik Brodowski return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes); 1673b6e9b0baSDominik Brodowski } 1674b6e9b0baSDominik Brodowski 167539743889SChristoph Lameter 16768bccd85fSChristoph Lameter /* Retrieve NUMA policy */ 1677af03c4acSDominik Brodowski static int kernel_get_mempolicy(int __user *policy, 1678af03c4acSDominik Brodowski unsigned long __user *nmask, 1679af03c4acSDominik Brodowski unsigned long maxnode, 1680af03c4acSDominik Brodowski unsigned long addr, 1681af03c4acSDominik Brodowski unsigned long flags) 16828bccd85fSChristoph Lameter { 1683dbcb0f19SAdrian Bunk int err; 16843f649ab7SKees Cook int pval; 16858bccd85fSChristoph Lameter nodemask_t nodes; 16868bccd85fSChristoph Lameter 1687050c17f2SRalph Campbell if (nmask != NULL && maxnode < nr_node_ids) 16888bccd85fSChristoph Lameter return -EINVAL; 16898bccd85fSChristoph Lameter 16904605f057SWenchao Hao addr = untagged_addr(addr); 16914605f057SWenchao Hao 16928bccd85fSChristoph Lameter err = do_get_mempolicy(&pval, &nodes, addr, flags); 16938bccd85fSChristoph Lameter 16948bccd85fSChristoph Lameter if (err) 16958bccd85fSChristoph Lameter return err; 16968bccd85fSChristoph Lameter 16978bccd85fSChristoph Lameter if (policy && put_user(pval, policy)) 16988bccd85fSChristoph Lameter return -EFAULT; 16998bccd85fSChristoph Lameter 17008bccd85fSChristoph Lameter if (nmask) 17018bccd85fSChristoph Lameter err = copy_nodes_to_user(nmask, maxnode, &nodes); 17028bccd85fSChristoph Lameter 17038bccd85fSChristoph Lameter return err; 17048bccd85fSChristoph Lameter } 17058bccd85fSChristoph Lameter 1706af03c4acSDominik Brodowski SYSCALL_DEFINE5(get_mempolicy, 
int __user *, policy, 1707af03c4acSDominik Brodowski unsigned long __user *, nmask, unsigned long, maxnode, 1708af03c4acSDominik Brodowski unsigned long, addr, unsigned long, flags) 1709af03c4acSDominik Brodowski { 1710af03c4acSDominik Brodowski return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags); 1711af03c4acSDominik Brodowski } 1712af03c4acSDominik Brodowski 171320ca87f2SLi Xinhai bool vma_migratable(struct vm_area_struct *vma) 171420ca87f2SLi Xinhai { 171520ca87f2SLi Xinhai if (vma->vm_flags & (VM_IO | VM_PFNMAP)) 171620ca87f2SLi Xinhai return false; 171720ca87f2SLi Xinhai 171820ca87f2SLi Xinhai /* 171920ca87f2SLi Xinhai * DAX device mappings require predictable access latency, so avoid 172020ca87f2SLi Xinhai * incurring periodic faults. 172120ca87f2SLi Xinhai */ 172220ca87f2SLi Xinhai if (vma_is_dax(vma)) 172320ca87f2SLi Xinhai return false; 172420ca87f2SLi Xinhai 172520ca87f2SLi Xinhai if (is_vm_hugetlb_page(vma) && 172620ca87f2SLi Xinhai !hugepage_migration_supported(hstate_vma(vma))) 172720ca87f2SLi Xinhai return false; 172820ca87f2SLi Xinhai 172920ca87f2SLi Xinhai /* 173020ca87f2SLi Xinhai * Migration allocates pages in the highest zone. If we cannot 173120ca87f2SLi Xinhai * do so then migration (at least from node to node) is not 173220ca87f2SLi Xinhai * possible. 173320ca87f2SLi Xinhai */ 173420ca87f2SLi Xinhai if (vma->vm_file && 173520ca87f2SLi Xinhai gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping)) 173620ca87f2SLi Xinhai < policy_zone) 173720ca87f2SLi Xinhai return false; 173820ca87f2SLi Xinhai return true; 173920ca87f2SLi Xinhai } 174020ca87f2SLi Xinhai 174174d2c3a0SOleg Nesterov struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, 174274d2c3a0SOleg Nesterov unsigned long addr) 17431da177e4SLinus Torvalds { 17448d90274bSOleg Nesterov struct mempolicy *pol = NULL; 17451da177e4SLinus Torvalds 17461da177e4SLinus Torvalds if (vma) { 1747480eccf9SLee Schermerhorn if (vma->vm_ops && vma->vm_ops->get_policy) { 17488d90274bSOleg Nesterov pol = vma->vm_ops->get_policy(vma, addr); 174900442ad0SMel Gorman } else if (vma->vm_policy) { 17501da177e4SLinus Torvalds pol = vma->vm_policy; 175100442ad0SMel Gorman 175200442ad0SMel Gorman /* 175300442ad0SMel Gorman * shmem_alloc_page() passes MPOL_F_SHARED policy with 175400442ad0SMel Gorman * a pseudo vma whose vma->vm_ops=NULL. Take a reference 175500442ad0SMel Gorman * count on these policies which will be dropped by 175600442ad0SMel Gorman * mpol_cond_put() later 175700442ad0SMel Gorman */ 175800442ad0SMel Gorman if (mpol_needs_cond_ref(pol)) 175900442ad0SMel Gorman mpol_get(pol); 176000442ad0SMel Gorman } 17611da177e4SLinus Torvalds } 1762f15ca78eSOleg Nesterov 176374d2c3a0SOleg Nesterov return pol; 176474d2c3a0SOleg Nesterov } 176574d2c3a0SOleg Nesterov 176674d2c3a0SOleg Nesterov /* 1767dd6eecb9SOleg Nesterov * get_vma_policy(@vma, @addr) 176874d2c3a0SOleg Nesterov * @vma: virtual memory area whose policy is sought 176974d2c3a0SOleg Nesterov * @addr: address in @vma for shared policy lookup 177074d2c3a0SOleg Nesterov * 177174d2c3a0SOleg Nesterov * Returns effective policy for a VMA at specified address. 1772dd6eecb9SOleg Nesterov * Falls back to current->mempolicy or system default policy, as necessary. 177374d2c3a0SOleg Nesterov * Shared policies [those marked as MPOL_F_SHARED] require an extra reference 177474d2c3a0SOleg Nesterov * count--added by the get_policy() vm_op, as appropriate--to protect against 177574d2c3a0SOleg Nesterov * freeing by another task. 
It is the caller's responsibility to free the 177674d2c3a0SOleg Nesterov * extra reference for shared policies. 177774d2c3a0SOleg Nesterov */ 1778ac79f78dSDavid Rientjes static struct mempolicy *get_vma_policy(struct vm_area_struct *vma, 1779dd6eecb9SOleg Nesterov unsigned long addr) 178074d2c3a0SOleg Nesterov { 178174d2c3a0SOleg Nesterov struct mempolicy *pol = __get_vma_policy(vma, addr); 178274d2c3a0SOleg Nesterov 17838d90274bSOleg Nesterov if (!pol) 1784dd6eecb9SOleg Nesterov pol = get_task_policy(current); 17858d90274bSOleg Nesterov 17861da177e4SLinus Torvalds return pol; 17871da177e4SLinus Torvalds } 17881da177e4SLinus Torvalds 17896b6482bbSOleg Nesterov bool vma_policy_mof(struct vm_area_struct *vma) 1790fc314724SMel Gorman { 17916b6482bbSOleg Nesterov struct mempolicy *pol; 1792f15ca78eSOleg Nesterov 1793fc314724SMel Gorman if (vma->vm_ops && vma->vm_ops->get_policy) { 1794fc314724SMel Gorman bool ret = false; 1795fc314724SMel Gorman 1796fc314724SMel Gorman pol = vma->vm_ops->get_policy(vma, vma->vm_start); 1797fc314724SMel Gorman if (pol && (pol->flags & MPOL_F_MOF)) 1798fc314724SMel Gorman ret = true; 1799fc314724SMel Gorman mpol_cond_put(pol); 1800fc314724SMel Gorman 1801fc314724SMel Gorman return ret; 18028d90274bSOleg Nesterov } 18038d90274bSOleg Nesterov 1804fc314724SMel Gorman pol = vma->vm_policy; 18058d90274bSOleg Nesterov if (!pol) 18066b6482bbSOleg Nesterov pol = get_task_policy(current); 1807fc314724SMel Gorman 1808fc314724SMel Gorman return pol->flags & MPOL_F_MOF; 1809fc314724SMel Gorman } 1810fc314724SMel Gorman 1811d2226ebdSFeng Tang bool apply_policy_zone(struct mempolicy *policy, enum zone_type zone) 1812d3eb1570SLai Jiangshan { 1813d3eb1570SLai Jiangshan enum zone_type dynamic_policy_zone = policy_zone; 1814d3eb1570SLai Jiangshan 1815d3eb1570SLai Jiangshan BUG_ON(dynamic_policy_zone == ZONE_MOVABLE); 1816d3eb1570SLai Jiangshan 1817d3eb1570SLai Jiangshan /* 1818269fbe72SBen Widawsky * if policy->nodes has movable memory only, 1819d3eb1570SLai Jiangshan * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only. 1820d3eb1570SLai Jiangshan * 1821269fbe72SBen Widawsky * policy->nodes is intersect with node_states[N_MEMORY]. 1822f0953a1bSIngo Molnar * so if the following test fails, it implies 1823269fbe72SBen Widawsky * policy->nodes has movable memory only. 
1824d3eb1570SLai Jiangshan */ 1825269fbe72SBen Widawsky if (!nodes_intersects(policy->nodes, node_states[N_HIGH_MEMORY])) 1826d3eb1570SLai Jiangshan dynamic_policy_zone = ZONE_MOVABLE; 1827d3eb1570SLai Jiangshan 1828d3eb1570SLai Jiangshan return zone >= dynamic_policy_zone; 1829d3eb1570SLai Jiangshan } 1830d3eb1570SLai Jiangshan 183152cd3b07SLee Schermerhorn /* 183252cd3b07SLee Schermerhorn * Return a nodemask representing a mempolicy for filtering nodes for 183352cd3b07SLee Schermerhorn * page allocation 183452cd3b07SLee Schermerhorn */ 18358ca39e68SMuchun Song nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy) 183619770b32SMel Gorman { 1837b27abaccSDave Hansen int mode = policy->mode; 1838b27abaccSDave Hansen 183919770b32SMel Gorman /* Lower zones don't get a nodemask applied for MPOL_BIND */ 1840b27abaccSDave Hansen if (unlikely(mode == MPOL_BIND) && 1841d3eb1570SLai Jiangshan apply_policy_zone(policy, gfp_zone(gfp)) && 1842269fbe72SBen Widawsky cpuset_nodemask_valid_mems_allowed(&policy->nodes)) 1843269fbe72SBen Widawsky return &policy->nodes; 184419770b32SMel Gorman 1845b27abaccSDave Hansen if (mode == MPOL_PREFERRED_MANY) 1846b27abaccSDave Hansen return &policy->nodes; 1847b27abaccSDave Hansen 184819770b32SMel Gorman return NULL; 184919770b32SMel Gorman } 185019770b32SMel Gorman 1851b27abaccSDave Hansen /* 1852b27abaccSDave Hansen * Return the preferred node id for 'prefer' mempolicy, and return 1853b27abaccSDave Hansen * the given id for all other policies. 1854b27abaccSDave Hansen * 1855b27abaccSDave Hansen * policy_node() is always coupled with policy_nodemask(), which 1856b27abaccSDave Hansen * secures the nodemask limit for 'bind' and 'prefer-many' policy. 1857b27abaccSDave Hansen */ 1858f8fd5253SWei Yang static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd) 18591da177e4SLinus Torvalds { 18607858d7bcSFeng Tang if (policy->mode == MPOL_PREFERRED) { 1861269fbe72SBen Widawsky nd = first_node(policy->nodes); 18627858d7bcSFeng Tang } else { 186319770b32SMel Gorman /* 18646d840958SMichal Hocko * __GFP_THISNODE shouldn't even be used with the bind policy 18656d840958SMichal Hocko * because we might easily break the expectation to stay on the 18666d840958SMichal Hocko * requested node and not break the policy. 
186719770b32SMel Gorman */ 18686d840958SMichal Hocko WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE)); 18691da177e4SLinus Torvalds } 18706d840958SMichal Hocko 1871c6018b4bSAneesh Kumar K.V if ((policy->mode == MPOL_BIND || 1872c6018b4bSAneesh Kumar K.V policy->mode == MPOL_PREFERRED_MANY) && 1873c6018b4bSAneesh Kumar K.V policy->home_node != NUMA_NO_NODE) 1874c6018b4bSAneesh Kumar K.V return policy->home_node; 1875c6018b4bSAneesh Kumar K.V 187604ec6264SVlastimil Babka return nd; 18771da177e4SLinus Torvalds } 18781da177e4SLinus Torvalds 18791da177e4SLinus Torvalds /* Do dynamic interleaving for a process */ 18801da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy) 18811da177e4SLinus Torvalds { 188245816682SVlastimil Babka unsigned next; 18831da177e4SLinus Torvalds struct task_struct *me = current; 18841da177e4SLinus Torvalds 1885269fbe72SBen Widawsky next = next_node_in(me->il_prev, policy->nodes); 1886f5b087b5SDavid Rientjes if (next < MAX_NUMNODES) 188745816682SVlastimil Babka me->il_prev = next; 188845816682SVlastimil Babka return next; 18891da177e4SLinus Torvalds } 18901da177e4SLinus Torvalds 1891dc85da15SChristoph Lameter /* 1892dc85da15SChristoph Lameter * Depending on the memory policy provide a node from which to allocate the 1893dc85da15SChristoph Lameter * next slab entry. 1894dc85da15SChristoph Lameter */ 18952a389610SDavid Rientjes unsigned int mempolicy_slab_node(void) 1896dc85da15SChristoph Lameter { 1897e7b691b0SAndi Kleen struct mempolicy *policy; 18982a389610SDavid Rientjes int node = numa_mem_id(); 1899e7b691b0SAndi Kleen 190038b031ddSVasily Averin if (!in_task()) 19012a389610SDavid Rientjes return node; 1902e7b691b0SAndi Kleen 1903e7b691b0SAndi Kleen policy = current->mempolicy; 19047858d7bcSFeng Tang if (!policy) 19052a389610SDavid Rientjes return node; 1906765c4507SChristoph Lameter 1907bea904d5SLee Schermerhorn switch (policy->mode) { 1908bea904d5SLee Schermerhorn case MPOL_PREFERRED: 1909269fbe72SBen Widawsky return first_node(policy->nodes); 1910bea904d5SLee Schermerhorn 1911dc85da15SChristoph Lameter case MPOL_INTERLEAVE: 1912dc85da15SChristoph Lameter return interleave_nodes(policy); 1913dc85da15SChristoph Lameter 1914b27abaccSDave Hansen case MPOL_BIND: 1915b27abaccSDave Hansen case MPOL_PREFERRED_MANY: 1916b27abaccSDave Hansen { 1917c33d6c06SMel Gorman struct zoneref *z; 1918c33d6c06SMel Gorman 1919dc85da15SChristoph Lameter /* 1920dc85da15SChristoph Lameter * Follow bind policy behavior and start allocation at the 1921dc85da15SChristoph Lameter * first node. 1922dc85da15SChristoph Lameter */ 192319770b32SMel Gorman struct zonelist *zonelist; 192419770b32SMel Gorman enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL); 1925c9634cf0SAneesh Kumar K.V zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK]; 1926c33d6c06SMel Gorman z = first_zones_zonelist(zonelist, highest_zoneidx, 1927269fbe72SBen Widawsky &policy->nodes); 1928c1093b74SPavel Tatashin return z->zone ? zone_to_nid(z->zone) : node; 1929dd1a239fSMel Gorman } 19307858d7bcSFeng Tang case MPOL_LOCAL: 19317858d7bcSFeng Tang return node; 1932dc85da15SChristoph Lameter 1933dc85da15SChristoph Lameter default: 1934bea904d5SLee Schermerhorn BUG(); 1935dc85da15SChristoph Lameter } 1936dc85da15SChristoph Lameter } 1937dc85da15SChristoph Lameter 1938fee83b3aSAndrew Morton /* 1939fee83b3aSAndrew Morton * Do static interleaving for a VMA with known offset @n. 
Returns the n'th 1940269fbe72SBen Widawsky * node in pol->nodes (starting from n=0), wrapping around if n exceeds the 1941fee83b3aSAndrew Morton * number of present nodes. 1942fee83b3aSAndrew Morton */ 194398c70baaSLaurent Dufour static unsigned offset_il_node(struct mempolicy *pol, unsigned long n) 19441da177e4SLinus Torvalds { 1945276aeee1Syanghui nodemask_t nodemask = pol->nodes; 1946276aeee1Syanghui unsigned int target, nnodes; 1947fee83b3aSAndrew Morton int i; 1948fee83b3aSAndrew Morton int nid; 1949276aeee1Syanghui /* 1950276aeee1Syanghui * The barrier will stabilize the nodemask in a register or on 1951276aeee1Syanghui * the stack so that it will stop changing under the code. 1952276aeee1Syanghui * 1953276aeee1Syanghui * Between first_node() and next_node(), pol->nodes could be changed 1954276aeee1Syanghui * by other threads. So we put pol->nodes in a local stack. 1955276aeee1Syanghui */ 1956276aeee1Syanghui barrier(); 19571da177e4SLinus Torvalds 1958276aeee1Syanghui nnodes = nodes_weight(nodemask); 1959f5b087b5SDavid Rientjes if (!nnodes) 1960f5b087b5SDavid Rientjes return numa_node_id(); 1961fee83b3aSAndrew Morton target = (unsigned int)n % nnodes; 1962276aeee1Syanghui nid = first_node(nodemask); 1963fee83b3aSAndrew Morton for (i = 0; i < target; i++) 1964276aeee1Syanghui nid = next_node(nid, nodemask); 19651da177e4SLinus Torvalds return nid; 19661da177e4SLinus Torvalds } 19671da177e4SLinus Torvalds 19685da7ca86SChristoph Lameter /* Determine a node number for interleave */ 19695da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol, 19705da7ca86SChristoph Lameter struct vm_area_struct *vma, unsigned long addr, int shift) 19715da7ca86SChristoph Lameter { 19725da7ca86SChristoph Lameter if (vma) { 19735da7ca86SChristoph Lameter unsigned long off; 19745da7ca86SChristoph Lameter 19753b98b087SNishanth Aravamudan /* 19763b98b087SNishanth Aravamudan * for small pages, there is no difference between 19773b98b087SNishanth Aravamudan * shift and PAGE_SHIFT, so the bit-shift is safe. 19783b98b087SNishanth Aravamudan * for huge pages, since vm_pgoff is in units of small 19793b98b087SNishanth Aravamudan * pages, we need to shift off the always 0 bits to get 19803b98b087SNishanth Aravamudan * a useful offset. 
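 * For example (illustrative, editor's addition): with 4KiB base pages
 * (PAGE_SHIFT == 12) and a 2MiB hugetlb mapping (shift == 21), the
 * offset becomes (vm_pgoff >> 9) + ((addr - vm_start) >> 21), i.e. the
 * interleave index advances once per huge page rather than once per
 * base page.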
19813b98b087SNishanth Aravamudan */ 19823b98b087SNishanth Aravamudan BUG_ON(shift < PAGE_SHIFT); 19833b98b087SNishanth Aravamudan off = vma->vm_pgoff >> (shift - PAGE_SHIFT); 19845da7ca86SChristoph Lameter off += (addr - vma->vm_start) >> shift; 198598c70baaSLaurent Dufour return offset_il_node(pol, off); 19865da7ca86SChristoph Lameter } else 19875da7ca86SChristoph Lameter return interleave_nodes(pol); 19885da7ca86SChristoph Lameter } 19895da7ca86SChristoph Lameter 199000ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS 1991480eccf9SLee Schermerhorn /* 199204ec6264SVlastimil Babka * huge_node(@vma, @addr, @gfp_flags, @mpol) 1993b46e14acSFabian Frederick * @vma: virtual memory area whose policy is sought 1994b46e14acSFabian Frederick * @addr: address in @vma for shared policy lookup and interleave policy 1995b46e14acSFabian Frederick * @gfp_flags: for requested zone 1996b46e14acSFabian Frederick * @mpol: pointer to mempolicy pointer for reference counted mempolicy 1997b27abaccSDave Hansen * @nodemask: pointer to nodemask pointer for 'bind' and 'prefer-many' policy 1998480eccf9SLee Schermerhorn * 199904ec6264SVlastimil Babka * Returns a nid suitable for a huge page allocation and a pointer 200052cd3b07SLee Schermerhorn * to the struct mempolicy for conditional unref after allocation. 2001b27abaccSDave Hansen * If the effective policy is 'bind' or 'prefer-many', returns a pointer 2002b27abaccSDave Hansen * to the mempolicy's @nodemask for filtering the zonelist. 2003c0ff7453SMiao Xie * 2004d26914d1SMel Gorman * Must be protected by read_mems_allowed_begin() 2005480eccf9SLee Schermerhorn */ 200604ec6264SVlastimil Babka int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags, 200704ec6264SVlastimil Babka struct mempolicy **mpol, nodemask_t **nodemask) 20085da7ca86SChristoph Lameter { 200904ec6264SVlastimil Babka int nid; 2010b27abaccSDave Hansen int mode; 20115da7ca86SChristoph Lameter 2012dd6eecb9SOleg Nesterov *mpol = get_vma_policy(vma, addr); 2013b27abaccSDave Hansen *nodemask = NULL; 2014b27abaccSDave Hansen mode = (*mpol)->mode; 20155da7ca86SChristoph Lameter 2016b27abaccSDave Hansen if (unlikely(mode == MPOL_INTERLEAVE)) { 201704ec6264SVlastimil Babka nid = interleave_nid(*mpol, vma, addr, 201804ec6264SVlastimil Babka huge_page_shift(hstate_vma(vma))); 201952cd3b07SLee Schermerhorn } else { 202004ec6264SVlastimil Babka nid = policy_node(gfp_flags, *mpol, numa_node_id()); 2021b27abaccSDave Hansen if (mode == MPOL_BIND || mode == MPOL_PREFERRED_MANY) 2022269fbe72SBen Widawsky *nodemask = &(*mpol)->nodes; 2023480eccf9SLee Schermerhorn } 202404ec6264SVlastimil Babka return nid; 20255da7ca86SChristoph Lameter } 202606808b08SLee Schermerhorn 202706808b08SLee Schermerhorn /* 202806808b08SLee Schermerhorn * init_nodemask_of_mempolicy 202906808b08SLee Schermerhorn * 203006808b08SLee Schermerhorn * If the current task's mempolicy is "default" [NULL], return 'false' 203106808b08SLee Schermerhorn * to indicate default policy. Otherwise, extract the policy nodemask 203206808b08SLee Schermerhorn * for 'bind' or 'interleave' policy into the argument nodemask, or 203306808b08SLee Schermerhorn * initialize the argument nodemask to contain the single node for 203406808b08SLee Schermerhorn * 'preferred' or 'local' policy and return 'true' to indicate presence 203506808b08SLee Schermerhorn * of non-default mempolicy. 
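 *
 * Minimal usage sketch (illustrative, editor's addition):
 *
 *	nodemask_t mask;
 *
 *	if (init_nodemask_of_mempolicy(&mask))
 *		... restrict the operation to the nodes in mask ...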
203606808b08SLee Schermerhorn * 203706808b08SLee Schermerhorn * We don't bother with reference counting the mempolicy [mpol_get/put] 203806808b08SLee Schermerhorn * because the current task is examining it's own mempolicy and a task's 203906808b08SLee Schermerhorn * mempolicy is only ever changed by the task itself. 204006808b08SLee Schermerhorn * 204106808b08SLee Schermerhorn * N.B., it is the caller's responsibility to free a returned nodemask. 204206808b08SLee Schermerhorn */ 204306808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask) 204406808b08SLee Schermerhorn { 204506808b08SLee Schermerhorn struct mempolicy *mempolicy; 204606808b08SLee Schermerhorn 204706808b08SLee Schermerhorn if (!(mask && current->mempolicy)) 204806808b08SLee Schermerhorn return false; 204906808b08SLee Schermerhorn 2050c0ff7453SMiao Xie task_lock(current); 205106808b08SLee Schermerhorn mempolicy = current->mempolicy; 205206808b08SLee Schermerhorn switch (mempolicy->mode) { 205306808b08SLee Schermerhorn case MPOL_PREFERRED: 2054b27abaccSDave Hansen case MPOL_PREFERRED_MANY: 205506808b08SLee Schermerhorn case MPOL_BIND: 205606808b08SLee Schermerhorn case MPOL_INTERLEAVE: 2057269fbe72SBen Widawsky *mask = mempolicy->nodes; 205806808b08SLee Schermerhorn break; 205906808b08SLee Schermerhorn 20607858d7bcSFeng Tang case MPOL_LOCAL: 2061269fbe72SBen Widawsky init_nodemask_of_node(mask, numa_node_id()); 20627858d7bcSFeng Tang break; 20637858d7bcSFeng Tang 206406808b08SLee Schermerhorn default: 206506808b08SLee Schermerhorn BUG(); 206606808b08SLee Schermerhorn } 2067c0ff7453SMiao Xie task_unlock(current); 206806808b08SLee Schermerhorn 206906808b08SLee Schermerhorn return true; 207006808b08SLee Schermerhorn } 207100ac59adSChen, Kenneth W #endif 20725da7ca86SChristoph Lameter 20736f48d0ebSDavid Rientjes /* 2074b26e517aSFeng Tang * mempolicy_in_oom_domain 20756f48d0ebSDavid Rientjes * 2076b26e517aSFeng Tang * If tsk's mempolicy is "bind", check for intersection between mask and 2077b26e517aSFeng Tang * the policy nodemask. Otherwise, return true for all other policies 2078b26e517aSFeng Tang * including "interleave", as a tsk with "interleave" policy may have 2079b26e517aSFeng Tang * memory allocated from all nodes in system. 20806f48d0ebSDavid Rientjes * 20816f48d0ebSDavid Rientjes * Takes task_lock(tsk) to prevent freeing of its mempolicy. 20826f48d0ebSDavid Rientjes */ 2083b26e517aSFeng Tang bool mempolicy_in_oom_domain(struct task_struct *tsk, 20846f48d0ebSDavid Rientjes const nodemask_t *mask) 20856f48d0ebSDavid Rientjes { 20866f48d0ebSDavid Rientjes struct mempolicy *mempolicy; 20876f48d0ebSDavid Rientjes bool ret = true; 20886f48d0ebSDavid Rientjes 20896f48d0ebSDavid Rientjes if (!mask) 20906f48d0ebSDavid Rientjes return ret; 2091b26e517aSFeng Tang 20926f48d0ebSDavid Rientjes task_lock(tsk); 20936f48d0ebSDavid Rientjes mempolicy = tsk->mempolicy; 2094b26e517aSFeng Tang if (mempolicy && mempolicy->mode == MPOL_BIND) 2095269fbe72SBen Widawsky ret = nodes_intersects(mempolicy->nodes, *mask); 20966f48d0ebSDavid Rientjes task_unlock(tsk); 2097b26e517aSFeng Tang 20986f48d0ebSDavid Rientjes return ret; 20996f48d0ebSDavid Rientjes } 21006f48d0ebSDavid Rientjes 21011da177e4SLinus Torvalds /* Allocate a page in interleaved policy. 21021da177e4SLinus Torvalds Own path because it needs to do special accounting. 
*/ 2103662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, 2104662f3a0bSAndi Kleen unsigned nid) 21051da177e4SLinus Torvalds { 21061da177e4SLinus Torvalds struct page *page; 21071da177e4SLinus Torvalds 210884172f4bSMatthew Wilcox (Oracle) page = __alloc_pages(gfp, order, nid, NULL); 21094518085eSKemi Wang /* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */ 21104518085eSKemi Wang if (!static_branch_likely(&vm_numa_stat_key)) 21114518085eSKemi Wang return page; 2112de55c8b2SAndrey Ryabinin if (page && page_to_nid(page) == nid) { 2113de55c8b2SAndrey Ryabinin preempt_disable(); 2114f19298b9SMel Gorman __count_numa_event(page_zone(page), NUMA_INTERLEAVE_HIT); 2115de55c8b2SAndrey Ryabinin preempt_enable(); 2116de55c8b2SAndrey Ryabinin } 21171da177e4SLinus Torvalds return page; 21181da177e4SLinus Torvalds } 21191da177e4SLinus Torvalds 21204c54d949SFeng Tang static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order, 21214c54d949SFeng Tang int nid, struct mempolicy *pol) 21224c54d949SFeng Tang { 21234c54d949SFeng Tang struct page *page; 21244c54d949SFeng Tang gfp_t preferred_gfp; 21254c54d949SFeng Tang 21264c54d949SFeng Tang /* 21274c54d949SFeng Tang * This is a two pass approach. The first pass will only try the 21284c54d949SFeng Tang * preferred nodes but skip the direct reclaim and allow the 21294c54d949SFeng Tang * allocation to fail, while the second pass will try all the 21304c54d949SFeng Tang * nodes in system. 21314c54d949SFeng Tang */ 21324c54d949SFeng Tang preferred_gfp = gfp | __GFP_NOWARN; 21334c54d949SFeng Tang preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL); 21344c54d949SFeng Tang page = __alloc_pages(preferred_gfp, order, nid, &pol->nodes); 21354c54d949SFeng Tang if (!page) 2136c0455116SAneesh Kumar K.V page = __alloc_pages(gfp, order, nid, NULL); 21374c54d949SFeng Tang 21384c54d949SFeng Tang return page; 21394c54d949SFeng Tang } 21404c54d949SFeng Tang 21411da177e4SLinus Torvalds /** 2142adf88aa8SMatthew Wilcox (Oracle) * vma_alloc_folio - Allocate a folio for a VMA. 2143eb350739SMatthew Wilcox (Oracle) * @gfp: GFP flags. 2144adf88aa8SMatthew Wilcox (Oracle) * @order: Order of the folio. 21451da177e4SLinus Torvalds * @vma: Pointer to VMA or NULL if not available. 2146eb350739SMatthew Wilcox (Oracle) * @addr: Virtual address of the allocation. Must be inside @vma. 2147eb350739SMatthew Wilcox (Oracle) * @hugepage: For hugepages try only the preferred node if possible. 21481da177e4SLinus Torvalds * 2149adf88aa8SMatthew Wilcox (Oracle) * Allocate a folio for a specific address in @vma, using the appropriate 2150eb350739SMatthew Wilcox (Oracle) * NUMA policy. When @vma is not NULL the caller must hold the mmap_lock 2151eb350739SMatthew Wilcox (Oracle) * of the mm_struct of the VMA to prevent it from going away. Should be 2152adf88aa8SMatthew Wilcox (Oracle) * used for all allocations for folios that will be mapped into user space. 2153eb350739SMatthew Wilcox (Oracle) * 2154adf88aa8SMatthew Wilcox (Oracle) * Return: The folio on success or NULL if allocation fails. 
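 *
 * Illustrative use (editor's addition), e.g. a fault path that needs
 * one movable page for userspace:
 *
 *	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
 *	if (!folio)
 *		return VM_FAULT_OOM;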
21551da177e4SLinus Torvalds */ 2156adf88aa8SMatthew Wilcox (Oracle) struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma, 2157be1a13ebSMichal Hocko unsigned long addr, bool hugepage) 21581da177e4SLinus Torvalds { 2159cc9a6c87SMel Gorman struct mempolicy *pol; 2160be1a13ebSMichal Hocko int node = numa_node_id(); 2161adf88aa8SMatthew Wilcox (Oracle) struct folio *folio; 216204ec6264SVlastimil Babka int preferred_nid; 2163be97a41bSVlastimil Babka nodemask_t *nmask; 21641da177e4SLinus Torvalds 2165dd6eecb9SOleg Nesterov pol = get_vma_policy(vma, addr); 2166cc9a6c87SMel Gorman 2167be97a41bSVlastimil Babka if (pol->mode == MPOL_INTERLEAVE) { 2168adf88aa8SMatthew Wilcox (Oracle) struct page *page; 21691da177e4SLinus Torvalds unsigned nid; 21705da7ca86SChristoph Lameter 21718eac563cSAndi Kleen nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order); 217252cd3b07SLee Schermerhorn mpol_cond_put(pol); 2173adf88aa8SMatthew Wilcox (Oracle) gfp |= __GFP_COMP; 21740bbbc0b3SAndrea Arcangeli page = alloc_page_interleave(gfp, order, nid); 2175adf88aa8SMatthew Wilcox (Oracle) if (page && order > 1) 2176adf88aa8SMatthew Wilcox (Oracle) prep_transhuge_page(page); 2177adf88aa8SMatthew Wilcox (Oracle) folio = (struct folio *)page; 2178be97a41bSVlastimil Babka goto out; 21791da177e4SLinus Torvalds } 21801da177e4SLinus Torvalds 21814c54d949SFeng Tang if (pol->mode == MPOL_PREFERRED_MANY) { 2182adf88aa8SMatthew Wilcox (Oracle) struct page *page; 2183adf88aa8SMatthew Wilcox (Oracle) 2184c0455116SAneesh Kumar K.V node = policy_node(gfp, pol, node); 2185adf88aa8SMatthew Wilcox (Oracle) gfp |= __GFP_COMP; 21864c54d949SFeng Tang page = alloc_pages_preferred_many(gfp, order, node, pol); 21874c54d949SFeng Tang mpol_cond_put(pol); 2188adf88aa8SMatthew Wilcox (Oracle) if (page && order > 1) 2189adf88aa8SMatthew Wilcox (Oracle) prep_transhuge_page(page); 2190adf88aa8SMatthew Wilcox (Oracle) folio = (struct folio *)page; 21914c54d949SFeng Tang goto out; 21924c54d949SFeng Tang } 21934c54d949SFeng Tang 219419deb769SDavid Rientjes if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) { 219519deb769SDavid Rientjes int hpage_node = node; 219619deb769SDavid Rientjes 219719deb769SDavid Rientjes /* 219819deb769SDavid Rientjes * For hugepage allocation and non-interleave policy which 219919deb769SDavid Rientjes * allows the current node (or other explicitly preferred 220019deb769SDavid Rientjes * node) we only try to allocate from the current/preferred 220119deb769SDavid Rientjes * node and don't fall back to other nodes, as the cost of 220219deb769SDavid Rientjes * remote accesses would likely offset THP benefits. 220319deb769SDavid Rientjes * 2204b27abaccSDave Hansen * If the policy is interleave or does not allow the current 220519deb769SDavid Rientjes * node in its nodemask, we allocate the standard way. 220619deb769SDavid Rientjes */ 22077858d7bcSFeng Tang if (pol->mode == MPOL_PREFERRED) 2208269fbe72SBen Widawsky hpage_node = first_node(pol->nodes); 220919deb769SDavid Rientjes 221019deb769SDavid Rientjes nmask = policy_nodemask(gfp, pol); 221119deb769SDavid Rientjes if (!nmask || node_isset(hpage_node, *nmask)) { 221219deb769SDavid Rientjes mpol_cond_put(pol); 2213cc638f32SVlastimil Babka /* 2214cc638f32SVlastimil Babka * First, try to allocate THP only on local node, but 2215cc638f32SVlastimil Babka * don't reclaim unnecessarily, just compact. 
2216cc638f32SVlastimil Babka */ 2217adf88aa8SMatthew Wilcox (Oracle) folio = __folio_alloc_node(gfp | __GFP_THISNODE | 2218adf88aa8SMatthew Wilcox (Oracle) __GFP_NORETRY, order, hpage_node); 221976e654ccSDavid Rientjes 222076e654ccSDavid Rientjes /* 222176e654ccSDavid Rientjes * If hugepage allocations are configured to always 222276e654ccSDavid Rientjes * synchronous compact or the vma has been madvised 222376e654ccSDavid Rientjes * to prefer hugepage backing, retry allowing remote 2224cc638f32SVlastimil Babka * memory with both reclaim and compact as well. 222576e654ccSDavid Rientjes */ 2226adf88aa8SMatthew Wilcox (Oracle) if (!folio && (gfp & __GFP_DIRECT_RECLAIM)) 2227adf88aa8SMatthew Wilcox (Oracle) folio = __folio_alloc(gfp, order, hpage_node, 2228adf88aa8SMatthew Wilcox (Oracle) nmask); 222976e654ccSDavid Rientjes 223019deb769SDavid Rientjes goto out; 223119deb769SDavid Rientjes } 223219deb769SDavid Rientjes } 223319deb769SDavid Rientjes 2234077fcf11SAneesh Kumar K.V nmask = policy_nodemask(gfp, pol); 223504ec6264SVlastimil Babka preferred_nid = policy_node(gfp, pol, node); 2236adf88aa8SMatthew Wilcox (Oracle) folio = __folio_alloc(gfp, order, preferred_nid, nmask); 2237d51e9894SVlastimil Babka mpol_cond_put(pol); 2238be97a41bSVlastimil Babka out: 2239f584b680SMatthew Wilcox (Oracle) return folio; 2240f584b680SMatthew Wilcox (Oracle) } 2241adf88aa8SMatthew Wilcox (Oracle) EXPORT_SYMBOL(vma_alloc_folio); 2242f584b680SMatthew Wilcox (Oracle) 22431da177e4SLinus Torvalds /** 2244d7f946d0SMatthew Wilcox (Oracle) * alloc_pages - Allocate pages. 22456421ec76SMatthew Wilcox (Oracle) * @gfp: GFP flags. 22466421ec76SMatthew Wilcox (Oracle) * @order: Power of two of number of pages to allocate. 22471da177e4SLinus Torvalds * 22486421ec76SMatthew Wilcox (Oracle) * Allocate 1 << @order contiguous pages. The physical address of the 22496421ec76SMatthew Wilcox (Oracle) * first page is naturally aligned (eg an order-3 allocation will be aligned 22506421ec76SMatthew Wilcox (Oracle) * to a multiple of 8 * PAGE_SIZE bytes). The NUMA policy of the current 22516421ec76SMatthew Wilcox (Oracle) * process is honoured when in process context. 22521da177e4SLinus Torvalds * 22536421ec76SMatthew Wilcox (Oracle) * Context: Can be called from any context, providing the appropriate GFP 22546421ec76SMatthew Wilcox (Oracle) * flags are used. 22556421ec76SMatthew Wilcox (Oracle) * Return: The page on success or NULL if allocation fails. 
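 *
 * Illustrative use (editor's addition): allocate four contiguous pages
 * for a kernel buffer and release them again:
 *
 *	struct page *p = alloc_pages(GFP_KERNEL, 2);
 *
 *	if (p)
 *		__free_pages(p, 2);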
22561da177e4SLinus Torvalds */ 2257d7f946d0SMatthew Wilcox (Oracle) struct page *alloc_pages(gfp_t gfp, unsigned order) 22581da177e4SLinus Torvalds { 22598d90274bSOleg Nesterov struct mempolicy *pol = &default_policy; 2260c0ff7453SMiao Xie struct page *page; 22611da177e4SLinus Torvalds 22628d90274bSOleg Nesterov if (!in_interrupt() && !(gfp & __GFP_THISNODE)) 22638d90274bSOleg Nesterov pol = get_task_policy(current); 226452cd3b07SLee Schermerhorn 226552cd3b07SLee Schermerhorn /* 226652cd3b07SLee Schermerhorn * No reference counting needed for current->mempolicy 226752cd3b07SLee Schermerhorn * nor system default_policy 226852cd3b07SLee Schermerhorn */ 226945c4745aSLee Schermerhorn if (pol->mode == MPOL_INTERLEAVE) 2270c0ff7453SMiao Xie page = alloc_page_interleave(gfp, order, interleave_nodes(pol)); 22714c54d949SFeng Tang else if (pol->mode == MPOL_PREFERRED_MANY) 22724c54d949SFeng Tang page = alloc_pages_preferred_many(gfp, order, 2273c0455116SAneesh Kumar K.V policy_node(gfp, pol, numa_node_id()), pol); 2274c0ff7453SMiao Xie else 227584172f4bSMatthew Wilcox (Oracle) page = __alloc_pages(gfp, order, 227604ec6264SVlastimil Babka policy_node(gfp, pol, numa_node_id()), 22775c4b4be3SAndi Kleen policy_nodemask(gfp, pol)); 2278cc9a6c87SMel Gorman 2279c0ff7453SMiao Xie return page; 22801da177e4SLinus Torvalds } 2281d7f946d0SMatthew Wilcox (Oracle) EXPORT_SYMBOL(alloc_pages); 22821da177e4SLinus Torvalds 2283cc09cb13SMatthew Wilcox (Oracle) struct folio *folio_alloc(gfp_t gfp, unsigned order) 2284cc09cb13SMatthew Wilcox (Oracle) { 2285cc09cb13SMatthew Wilcox (Oracle) struct page *page = alloc_pages(gfp | __GFP_COMP, order); 2286cc09cb13SMatthew Wilcox (Oracle) 2287cc09cb13SMatthew Wilcox (Oracle) if (page && order > 1) 2288cc09cb13SMatthew Wilcox (Oracle) prep_transhuge_page(page); 2289cc09cb13SMatthew Wilcox (Oracle) return (struct folio *)page; 2290cc09cb13SMatthew Wilcox (Oracle) } 2291cc09cb13SMatthew Wilcox (Oracle) EXPORT_SYMBOL(folio_alloc); 2292cc09cb13SMatthew Wilcox (Oracle) 2293c00b6b96SChen Wandun static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp, 2294c00b6b96SChen Wandun struct mempolicy *pol, unsigned long nr_pages, 2295c00b6b96SChen Wandun struct page **page_array) 2296c00b6b96SChen Wandun { 2297c00b6b96SChen Wandun int nodes; 2298c00b6b96SChen Wandun unsigned long nr_pages_per_node; 2299c00b6b96SChen Wandun int delta; 2300c00b6b96SChen Wandun int i; 2301c00b6b96SChen Wandun unsigned long nr_allocated; 2302c00b6b96SChen Wandun unsigned long total_allocated = 0; 2303c00b6b96SChen Wandun 2304c00b6b96SChen Wandun nodes = nodes_weight(pol->nodes); 2305c00b6b96SChen Wandun nr_pages_per_node = nr_pages / nodes; 2306c00b6b96SChen Wandun delta = nr_pages - nodes * nr_pages_per_node; 2307c00b6b96SChen Wandun 2308c00b6b96SChen Wandun for (i = 0; i < nodes; i++) { 2309c00b6b96SChen Wandun if (delta) { 2310c00b6b96SChen Wandun nr_allocated = __alloc_pages_bulk(gfp, 2311c00b6b96SChen Wandun interleave_nodes(pol), NULL, 2312c00b6b96SChen Wandun nr_pages_per_node + 1, NULL, 2313c00b6b96SChen Wandun page_array); 2314c00b6b96SChen Wandun delta--; 2315c00b6b96SChen Wandun } else { 2316c00b6b96SChen Wandun nr_allocated = __alloc_pages_bulk(gfp, 2317c00b6b96SChen Wandun interleave_nodes(pol), NULL, 2318c00b6b96SChen Wandun nr_pages_per_node, NULL, page_array); 2319c00b6b96SChen Wandun } 2320c00b6b96SChen Wandun 2321c00b6b96SChen Wandun page_array += nr_allocated; 2322c00b6b96SChen Wandun total_allocated += nr_allocated; 2323c00b6b96SChen Wandun } 2324c00b6b96SChen Wandun 2325c00b6b96SChen 
Wandun return total_allocated; 2326c00b6b96SChen Wandun } 2327c00b6b96SChen Wandun 2328c00b6b96SChen Wandun static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid, 2329c00b6b96SChen Wandun struct mempolicy *pol, unsigned long nr_pages, 2330c00b6b96SChen Wandun struct page **page_array) 2331c00b6b96SChen Wandun { 2332c00b6b96SChen Wandun gfp_t preferred_gfp; 2333c00b6b96SChen Wandun unsigned long nr_allocated = 0; 2334c00b6b96SChen Wandun 2335c00b6b96SChen Wandun preferred_gfp = gfp | __GFP_NOWARN; 2336c00b6b96SChen Wandun preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL); 2337c00b6b96SChen Wandun 2338c00b6b96SChen Wandun nr_allocated = __alloc_pages_bulk(preferred_gfp, nid, &pol->nodes, 2339c00b6b96SChen Wandun nr_pages, NULL, page_array); 2340c00b6b96SChen Wandun 2341c00b6b96SChen Wandun if (nr_allocated < nr_pages) 2342c00b6b96SChen Wandun nr_allocated += __alloc_pages_bulk(gfp, numa_node_id(), NULL, 2343c00b6b96SChen Wandun nr_pages - nr_allocated, NULL, 2344c00b6b96SChen Wandun page_array + nr_allocated); 2345c00b6b96SChen Wandun return nr_allocated; 2346c00b6b96SChen Wandun } 2347c00b6b96SChen Wandun 2348c00b6b96SChen Wandun /* alloc pages bulk and mempolicy should be considered at the 2349c00b6b96SChen Wandun * same time in some situation such as vmalloc. 2350c00b6b96SChen Wandun * 2351c00b6b96SChen Wandun * It can accelerate memory allocation especially interleaving 2352c00b6b96SChen Wandun * allocate memory. 2353c00b6b96SChen Wandun */ 2354c00b6b96SChen Wandun unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp, 2355c00b6b96SChen Wandun unsigned long nr_pages, struct page **page_array) 2356c00b6b96SChen Wandun { 2357c00b6b96SChen Wandun struct mempolicy *pol = &default_policy; 2358c00b6b96SChen Wandun 2359c00b6b96SChen Wandun if (!in_interrupt() && !(gfp & __GFP_THISNODE)) 2360c00b6b96SChen Wandun pol = get_task_policy(current); 2361c00b6b96SChen Wandun 2362c00b6b96SChen Wandun if (pol->mode == MPOL_INTERLEAVE) 2363c00b6b96SChen Wandun return alloc_pages_bulk_array_interleave(gfp, pol, 2364c00b6b96SChen Wandun nr_pages, page_array); 2365c00b6b96SChen Wandun 2366c00b6b96SChen Wandun if (pol->mode == MPOL_PREFERRED_MANY) 2367c00b6b96SChen Wandun return alloc_pages_bulk_array_preferred_many(gfp, 2368c00b6b96SChen Wandun numa_node_id(), pol, nr_pages, page_array); 2369c00b6b96SChen Wandun 2370c00b6b96SChen Wandun return __alloc_pages_bulk(gfp, policy_node(gfp, pol, numa_node_id()), 2371c00b6b96SChen Wandun policy_nodemask(gfp, pol), nr_pages, NULL, 2372c00b6b96SChen Wandun page_array); 2373c00b6b96SChen Wandun } 2374c00b6b96SChen Wandun 2375ef0855d3SOleg Nesterov int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst) 2376ef0855d3SOleg Nesterov { 2377ef0855d3SOleg Nesterov struct mempolicy *pol = mpol_dup(vma_policy(src)); 2378ef0855d3SOleg Nesterov 2379ef0855d3SOleg Nesterov if (IS_ERR(pol)) 2380ef0855d3SOleg Nesterov return PTR_ERR(pol); 2381ef0855d3SOleg Nesterov dst->vm_policy = pol; 2382ef0855d3SOleg Nesterov return 0; 2383ef0855d3SOleg Nesterov } 2384ef0855d3SOleg Nesterov 23854225399aSPaul Jackson /* 2386846a16bfSLee Schermerhorn * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it 23874225399aSPaul Jackson * rebinds the mempolicy its copying by calling mpol_rebind_policy() 23884225399aSPaul Jackson * with the mems_allowed returned by cpuset_mems_allowed(). This 23894225399aSPaul Jackson * keeps mempolicies cpuset relative after its cpuset moves. 
See 23904225399aSPaul Jackson * further kernel/cpuset.c update_nodemask(). 2391708c1bbcSMiao Xie * 2392708c1bbcSMiao Xie * current's mempolicy may be rebinded by the other task(the task that changes 2393708c1bbcSMiao Xie * cpuset's mems), so we needn't do rebind work for current task. 23944225399aSPaul Jackson */ 23954225399aSPaul Jackson 2396846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */ 2397846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old) 23981da177e4SLinus Torvalds { 23991da177e4SLinus Torvalds struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL); 24001da177e4SLinus Torvalds 24011da177e4SLinus Torvalds if (!new) 24021da177e4SLinus Torvalds return ERR_PTR(-ENOMEM); 2403708c1bbcSMiao Xie 2404708c1bbcSMiao Xie /* task's mempolicy is protected by alloc_lock */ 2405708c1bbcSMiao Xie if (old == current->mempolicy) { 2406708c1bbcSMiao Xie task_lock(current); 2407708c1bbcSMiao Xie *new = *old; 2408708c1bbcSMiao Xie task_unlock(current); 2409708c1bbcSMiao Xie } else 2410708c1bbcSMiao Xie *new = *old; 2411708c1bbcSMiao Xie 24124225399aSPaul Jackson if (current_cpuset_is_being_rebound()) { 24134225399aSPaul Jackson nodemask_t mems = cpuset_mems_allowed(current); 2414213980c0SVlastimil Babka mpol_rebind_policy(new, &mems); 24154225399aSPaul Jackson } 24161da177e4SLinus Torvalds atomic_set(&new->refcnt, 1); 24171da177e4SLinus Torvalds return new; 24181da177e4SLinus Torvalds } 24191da177e4SLinus Torvalds 24201da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */ 2421fcfb4dccSKOSAKI Motohiro bool __mpol_equal(struct mempolicy *a, struct mempolicy *b) 24221da177e4SLinus Torvalds { 24231da177e4SLinus Torvalds if (!a || !b) 2424fcfb4dccSKOSAKI Motohiro return false; 242545c4745aSLee Schermerhorn if (a->mode != b->mode) 2426fcfb4dccSKOSAKI Motohiro return false; 242719800502SBob Liu if (a->flags != b->flags) 2428fcfb4dccSKOSAKI Motohiro return false; 2429c6018b4bSAneesh Kumar K.V if (a->home_node != b->home_node) 2430c6018b4bSAneesh Kumar K.V return false; 243119800502SBob Liu if (mpol_store_user_nodemask(a)) 243219800502SBob Liu if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask)) 2433fcfb4dccSKOSAKI Motohiro return false; 243419800502SBob Liu 243545c4745aSLee Schermerhorn switch (a->mode) { 243619770b32SMel Gorman case MPOL_BIND: 24371da177e4SLinus Torvalds case MPOL_INTERLEAVE: 24381da177e4SLinus Torvalds case MPOL_PREFERRED: 2439b27abaccSDave Hansen case MPOL_PREFERRED_MANY: 2440269fbe72SBen Widawsky return !!nodes_equal(a->nodes, b->nodes); 24417858d7bcSFeng Tang case MPOL_LOCAL: 24427858d7bcSFeng Tang return true; 24431da177e4SLinus Torvalds default: 24441da177e4SLinus Torvalds BUG(); 2445fcfb4dccSKOSAKI Motohiro return false; 24461da177e4SLinus Torvalds } 24471da177e4SLinus Torvalds } 24481da177e4SLinus Torvalds 24491da177e4SLinus Torvalds /* 24501da177e4SLinus Torvalds * Shared memory backing store policy support. 24511da177e4SLinus Torvalds * 24521da177e4SLinus Torvalds * Remember policies even when nobody has shared memory mapped. 24531da177e4SLinus Torvalds * The policies are kept in Red-Black tree linked from the inode. 24544a8c7bb5SNathan Zimmer * They are protected by the sp->lock rwlock, which should be held 24551da177e4SLinus Torvalds * for any accesses to the tree. 24561da177e4SLinus Torvalds */ 24571da177e4SLinus Torvalds 24584a8c7bb5SNathan Zimmer /* 24594a8c7bb5SNathan Zimmer * lookup first element intersecting start-end. 
Caller holds sp->lock for 24604a8c7bb5SNathan Zimmer * reading or for writing 24614a8c7bb5SNathan Zimmer */ 24621da177e4SLinus Torvalds static struct sp_node * 24631da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end) 24641da177e4SLinus Torvalds { 24651da177e4SLinus Torvalds struct rb_node *n = sp->root.rb_node; 24661da177e4SLinus Torvalds 24671da177e4SLinus Torvalds while (n) { 24681da177e4SLinus Torvalds struct sp_node *p = rb_entry(n, struct sp_node, nd); 24691da177e4SLinus Torvalds 24701da177e4SLinus Torvalds if (start >= p->end) 24711da177e4SLinus Torvalds n = n->rb_right; 24721da177e4SLinus Torvalds else if (end <= p->start) 24731da177e4SLinus Torvalds n = n->rb_left; 24741da177e4SLinus Torvalds else 24751da177e4SLinus Torvalds break; 24761da177e4SLinus Torvalds } 24771da177e4SLinus Torvalds if (!n) 24781da177e4SLinus Torvalds return NULL; 24791da177e4SLinus Torvalds for (;;) { 24801da177e4SLinus Torvalds struct sp_node *w = NULL; 24811da177e4SLinus Torvalds struct rb_node *prev = rb_prev(n); 24821da177e4SLinus Torvalds if (!prev) 24831da177e4SLinus Torvalds break; 24841da177e4SLinus Torvalds w = rb_entry(prev, struct sp_node, nd); 24851da177e4SLinus Torvalds if (w->end <= start) 24861da177e4SLinus Torvalds break; 24871da177e4SLinus Torvalds n = prev; 24881da177e4SLinus Torvalds } 24891da177e4SLinus Torvalds return rb_entry(n, struct sp_node, nd); 24901da177e4SLinus Torvalds } 24911da177e4SLinus Torvalds 24924a8c7bb5SNathan Zimmer /* 24934a8c7bb5SNathan Zimmer * Insert a new shared policy into the list. Caller holds sp->lock for 24944a8c7bb5SNathan Zimmer * writing. 24954a8c7bb5SNathan Zimmer */ 24961da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new) 24971da177e4SLinus Torvalds { 24981da177e4SLinus Torvalds struct rb_node **p = &sp->root.rb_node; 24991da177e4SLinus Torvalds struct rb_node *parent = NULL; 25001da177e4SLinus Torvalds struct sp_node *nd; 25011da177e4SLinus Torvalds 25021da177e4SLinus Torvalds while (*p) { 25031da177e4SLinus Torvalds parent = *p; 25041da177e4SLinus Torvalds nd = rb_entry(parent, struct sp_node, nd); 25051da177e4SLinus Torvalds if (new->start < nd->start) 25061da177e4SLinus Torvalds p = &(*p)->rb_left; 25071da177e4SLinus Torvalds else if (new->end > nd->end) 25081da177e4SLinus Torvalds p = &(*p)->rb_right; 25091da177e4SLinus Torvalds else 25101da177e4SLinus Torvalds BUG(); 25111da177e4SLinus Torvalds } 25121da177e4SLinus Torvalds rb_link_node(&new->nd, parent, p); 25131da177e4SLinus Torvalds rb_insert_color(&new->nd, &sp->root); 2514140d5a49SPaul Mundt pr_debug("inserting %lx-%lx: %d\n", new->start, new->end, 251545c4745aSLee Schermerhorn new->policy ? 
new->policy->mode : 0); 25161da177e4SLinus Torvalds } 25171da177e4SLinus Torvalds 25181da177e4SLinus Torvalds /* Find shared policy intersecting idx */ 25191da177e4SLinus Torvalds struct mempolicy * 25201da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx) 25211da177e4SLinus Torvalds { 25221da177e4SLinus Torvalds struct mempolicy *pol = NULL; 25231da177e4SLinus Torvalds struct sp_node *sn; 25241da177e4SLinus Torvalds 25251da177e4SLinus Torvalds if (!sp->root.rb_node) 25261da177e4SLinus Torvalds return NULL; 25274a8c7bb5SNathan Zimmer read_lock(&sp->lock); 25281da177e4SLinus Torvalds sn = sp_lookup(sp, idx, idx+1); 25291da177e4SLinus Torvalds if (sn) { 25301da177e4SLinus Torvalds mpol_get(sn->policy); 25311da177e4SLinus Torvalds pol = sn->policy; 25321da177e4SLinus Torvalds } 25334a8c7bb5SNathan Zimmer read_unlock(&sp->lock); 25341da177e4SLinus Torvalds return pol; 25351da177e4SLinus Torvalds } 25361da177e4SLinus Torvalds 253763f74ca2SKOSAKI Motohiro static void sp_free(struct sp_node *n) 253863f74ca2SKOSAKI Motohiro { 253963f74ca2SKOSAKI Motohiro mpol_put(n->policy); 254063f74ca2SKOSAKI Motohiro kmem_cache_free(sn_cache, n); 254163f74ca2SKOSAKI Motohiro } 254263f74ca2SKOSAKI Motohiro 2543771fb4d8SLee Schermerhorn /** 2544771fb4d8SLee Schermerhorn * mpol_misplaced - check whether current page node is valid in policy 2545771fb4d8SLee Schermerhorn * 2546b46e14acSFabian Frederick * @page: page to be checked 2547b46e14acSFabian Frederick * @vma: vm area where page mapped 2548b46e14acSFabian Frederick * @addr: virtual address where page mapped 2549771fb4d8SLee Schermerhorn * 2550771fb4d8SLee Schermerhorn * Lookup current policy node id for vma,addr and "compare to" page's 25515f076944SMatthew Wilcox (Oracle) * node id. Policy determination "mimics" alloc_page_vma(). 2552771fb4d8SLee Schermerhorn * Called from fault path where we know the vma and faulting address. 25535f076944SMatthew Wilcox (Oracle) * 2554062db293SBaolin Wang * Return: NUMA_NO_NODE if the page is in a node that is valid for this 2555062db293SBaolin Wang * policy, or a suitable node ID to allocate a replacement page from. 
2556771fb4d8SLee Schermerhorn */ 2557771fb4d8SLee Schermerhorn int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr) 2558771fb4d8SLee Schermerhorn { 2559771fb4d8SLee Schermerhorn struct mempolicy *pol; 2560c33d6c06SMel Gorman struct zoneref *z; 2561771fb4d8SLee Schermerhorn int curnid = page_to_nid(page); 2562771fb4d8SLee Schermerhorn unsigned long pgoff; 256390572890SPeter Zijlstra int thiscpu = raw_smp_processor_id(); 256490572890SPeter Zijlstra int thisnid = cpu_to_node(thiscpu); 256598fa15f3SAnshuman Khandual int polnid = NUMA_NO_NODE; 2566062db293SBaolin Wang int ret = NUMA_NO_NODE; 2567771fb4d8SLee Schermerhorn 2568dd6eecb9SOleg Nesterov pol = get_vma_policy(vma, addr); 2569771fb4d8SLee Schermerhorn if (!(pol->flags & MPOL_F_MOF)) 2570771fb4d8SLee Schermerhorn goto out; 2571771fb4d8SLee Schermerhorn 2572771fb4d8SLee Schermerhorn switch (pol->mode) { 2573771fb4d8SLee Schermerhorn case MPOL_INTERLEAVE: 2574771fb4d8SLee Schermerhorn pgoff = vma->vm_pgoff; 2575771fb4d8SLee Schermerhorn pgoff += (addr - vma->vm_start) >> PAGE_SHIFT; 257698c70baaSLaurent Dufour polnid = offset_il_node(pol, pgoff); 2577771fb4d8SLee Schermerhorn break; 2578771fb4d8SLee Schermerhorn 2579771fb4d8SLee Schermerhorn case MPOL_PREFERRED: 2580b27abaccSDave Hansen if (node_isset(curnid, pol->nodes)) 2581b27abaccSDave Hansen goto out; 2582269fbe72SBen Widawsky polnid = first_node(pol->nodes); 2583771fb4d8SLee Schermerhorn break; 2584771fb4d8SLee Schermerhorn 25857858d7bcSFeng Tang case MPOL_LOCAL: 25867858d7bcSFeng Tang polnid = numa_node_id(); 25877858d7bcSFeng Tang break; 25887858d7bcSFeng Tang 2589771fb4d8SLee Schermerhorn case MPOL_BIND: 2590bda420b9SHuang Ying /* Optimize placement among multiple nodes via NUMA balancing */ 2591bda420b9SHuang Ying if (pol->flags & MPOL_F_MORON) { 2592269fbe72SBen Widawsky if (node_isset(thisnid, pol->nodes)) 2593bda420b9SHuang Ying break; 2594bda420b9SHuang Ying goto out; 2595bda420b9SHuang Ying } 2596b27abaccSDave Hansen fallthrough; 2597c33d6c06SMel Gorman 2598b27abaccSDave Hansen case MPOL_PREFERRED_MANY: 2599771fb4d8SLee Schermerhorn /* 2600771fb4d8SLee Schermerhorn * use current page if in policy nodemask, 2601771fb4d8SLee Schermerhorn * else select nearest allowed node, if any. 2602771fb4d8SLee Schermerhorn * If no allowed nodes, use current [!misplaced]. 
2603771fb4d8SLee Schermerhorn */ 2604269fbe72SBen Widawsky if (node_isset(curnid, pol->nodes)) 2605771fb4d8SLee Schermerhorn goto out; 2606c33d6c06SMel Gorman z = first_zones_zonelist( 2607771fb4d8SLee Schermerhorn node_zonelist(numa_node_id(), GFP_HIGHUSER), 2608771fb4d8SLee Schermerhorn gfp_zone(GFP_HIGHUSER), 2609269fbe72SBen Widawsky &pol->nodes); 2610c1093b74SPavel Tatashin polnid = zone_to_nid(z->zone); 2611771fb4d8SLee Schermerhorn break; 2612771fb4d8SLee Schermerhorn 2613771fb4d8SLee Schermerhorn default: 2614771fb4d8SLee Schermerhorn BUG(); 2615771fb4d8SLee Schermerhorn } 26165606e387SMel Gorman 26175606e387SMel Gorman /* Migrate the page towards the node whose CPU is referencing it */ 2618e42c8ff2SMel Gorman if (pol->flags & MPOL_F_MORON) { 261990572890SPeter Zijlstra polnid = thisnid; 26205606e387SMel Gorman 262110f39042SRik van Riel if (!should_numa_migrate_memory(current, page, curnid, thiscpu)) 2622de1c9ce6SRik van Riel goto out; 2623de1c9ce6SRik van Riel } 2624e42c8ff2SMel Gorman 2625771fb4d8SLee Schermerhorn if (curnid != polnid) 2626771fb4d8SLee Schermerhorn ret = polnid; 2627771fb4d8SLee Schermerhorn out: 2628771fb4d8SLee Schermerhorn mpol_cond_put(pol); 2629771fb4d8SLee Schermerhorn 2630771fb4d8SLee Schermerhorn return ret; 2631771fb4d8SLee Schermerhorn } 2632771fb4d8SLee Schermerhorn 2633c11600e4SDavid Rientjes /* 2634c11600e4SDavid Rientjes * Drop the (possibly final) reference to task->mempolicy. It needs to be 2635c11600e4SDavid Rientjes * dropped after task->mempolicy is set to NULL so that any allocation done as 2636c11600e4SDavid Rientjes * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed 2637c11600e4SDavid Rientjes * policy. 2638c11600e4SDavid Rientjes */ 2639c11600e4SDavid Rientjes void mpol_put_task_policy(struct task_struct *task) 2640c11600e4SDavid Rientjes { 2641c11600e4SDavid Rientjes struct mempolicy *pol; 2642c11600e4SDavid Rientjes 2643c11600e4SDavid Rientjes task_lock(task); 2644c11600e4SDavid Rientjes pol = task->mempolicy; 2645c11600e4SDavid Rientjes task->mempolicy = NULL; 2646c11600e4SDavid Rientjes task_unlock(task); 2647c11600e4SDavid Rientjes mpol_put(pol); 2648c11600e4SDavid Rientjes } 2649c11600e4SDavid Rientjes 26501da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n) 26511da177e4SLinus Torvalds { 2652140d5a49SPaul Mundt pr_debug("deleting %lx-l%lx\n", n->start, n->end); 26531da177e4SLinus Torvalds rb_erase(&n->nd, &sp->root); 265463f74ca2SKOSAKI Motohiro sp_free(n); 26551da177e4SLinus Torvalds } 26561da177e4SLinus Torvalds 265742288fe3SMel Gorman static void sp_node_init(struct sp_node *node, unsigned long start, 265842288fe3SMel Gorman unsigned long end, struct mempolicy *pol) 265942288fe3SMel Gorman { 266042288fe3SMel Gorman node->start = start; 266142288fe3SMel Gorman node->end = end; 266242288fe3SMel Gorman node->policy = pol; 266342288fe3SMel Gorman } 266442288fe3SMel Gorman 2665dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end, 2666dbcb0f19SAdrian Bunk struct mempolicy *pol) 26671da177e4SLinus Torvalds { 2668869833f2SKOSAKI Motohiro struct sp_node *n; 2669869833f2SKOSAKI Motohiro struct mempolicy *newpol; 26701da177e4SLinus Torvalds 2671869833f2SKOSAKI Motohiro n = kmem_cache_alloc(sn_cache, GFP_KERNEL); 26721da177e4SLinus Torvalds if (!n) 26731da177e4SLinus Torvalds return NULL; 2674869833f2SKOSAKI Motohiro 2675869833f2SKOSAKI Motohiro newpol = mpol_dup(pol); 2676869833f2SKOSAKI Motohiro if (IS_ERR(newpol)) { 2677869833f2SKOSAKI 
Motohiro kmem_cache_free(sn_cache, n); 2678869833f2SKOSAKI Motohiro return NULL; 2679869833f2SKOSAKI Motohiro } 2680869833f2SKOSAKI Motohiro newpol->flags |= MPOL_F_SHARED; 268142288fe3SMel Gorman sp_node_init(n, start, end, newpol); 2682869833f2SKOSAKI Motohiro 26831da177e4SLinus Torvalds return n; 26841da177e4SLinus Torvalds } 26851da177e4SLinus Torvalds 26861da177e4SLinus Torvalds /* Replace a policy range. */ 26871da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start, 26881da177e4SLinus Torvalds unsigned long end, struct sp_node *new) 26891da177e4SLinus Torvalds { 2690b22d127aSMel Gorman struct sp_node *n; 269142288fe3SMel Gorman struct sp_node *n_new = NULL; 269242288fe3SMel Gorman struct mempolicy *mpol_new = NULL; 2693b22d127aSMel Gorman int ret = 0; 26941da177e4SLinus Torvalds 269542288fe3SMel Gorman restart: 26964a8c7bb5SNathan Zimmer write_lock(&sp->lock); 26971da177e4SLinus Torvalds n = sp_lookup(sp, start, end); 26981da177e4SLinus Torvalds /* Take care of old policies in the same range. */ 26991da177e4SLinus Torvalds while (n && n->start < end) { 27001da177e4SLinus Torvalds struct rb_node *next = rb_next(&n->nd); 27011da177e4SLinus Torvalds if (n->start >= start) { 27021da177e4SLinus Torvalds if (n->end <= end) 27031da177e4SLinus Torvalds sp_delete(sp, n); 27041da177e4SLinus Torvalds else 27051da177e4SLinus Torvalds n->start = end; 27061da177e4SLinus Torvalds } else { 27071da177e4SLinus Torvalds /* Old policy spanning whole new range. */ 27081da177e4SLinus Torvalds if (n->end > end) { 270942288fe3SMel Gorman if (!n_new) 271042288fe3SMel Gorman goto alloc_new; 271142288fe3SMel Gorman 271242288fe3SMel Gorman *mpol_new = *n->policy; 271342288fe3SMel Gorman atomic_set(&mpol_new->refcnt, 1); 27147880639cSKOSAKI Motohiro sp_node_init(n_new, end, n->end, mpol_new); 27151da177e4SLinus Torvalds n->end = start; 27165ca39575SHillf Danton sp_insert(sp, n_new); 271742288fe3SMel Gorman n_new = NULL; 271842288fe3SMel Gorman mpol_new = NULL; 27191da177e4SLinus Torvalds break; 27201da177e4SLinus Torvalds } else 27211da177e4SLinus Torvalds n->end = start; 27221da177e4SLinus Torvalds } 27231da177e4SLinus Torvalds if (!next) 27241da177e4SLinus Torvalds break; 27251da177e4SLinus Torvalds n = rb_entry(next, struct sp_node, nd); 27261da177e4SLinus Torvalds } 27271da177e4SLinus Torvalds if (new) 27281da177e4SLinus Torvalds sp_insert(sp, new); 27294a8c7bb5SNathan Zimmer write_unlock(&sp->lock); 273042288fe3SMel Gorman ret = 0; 273142288fe3SMel Gorman 273242288fe3SMel Gorman err_out: 273342288fe3SMel Gorman if (mpol_new) 273442288fe3SMel Gorman mpol_put(mpol_new); 273542288fe3SMel Gorman if (n_new) 273642288fe3SMel Gorman kmem_cache_free(sn_cache, n_new); 273742288fe3SMel Gorman 2738b22d127aSMel Gorman return ret; 273942288fe3SMel Gorman 274042288fe3SMel Gorman alloc_new: 27414a8c7bb5SNathan Zimmer write_unlock(&sp->lock); 274242288fe3SMel Gorman ret = -ENOMEM; 274342288fe3SMel Gorman n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL); 274442288fe3SMel Gorman if (!n_new) 274542288fe3SMel Gorman goto err_out; 274642288fe3SMel Gorman mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL); 274742288fe3SMel Gorman if (!mpol_new) 274842288fe3SMel Gorman goto err_out; 27494ad09955SMiaohe Lin atomic_set(&mpol_new->refcnt, 1); 275042288fe3SMel Gorman goto restart; 27511da177e4SLinus Torvalds } 27521da177e4SLinus Torvalds 275371fe804bSLee Schermerhorn /** 275471fe804bSLee Schermerhorn * mpol_shared_policy_init - initialize shared policy for inode 275571fe804bSLee 
Schermerhorn * @sp: pointer to inode shared policy 275671fe804bSLee Schermerhorn * @mpol: struct mempolicy to install 275771fe804bSLee Schermerhorn * 275871fe804bSLee Schermerhorn * Install non-NULL @mpol in inode's shared policy rb-tree. 275971fe804bSLee Schermerhorn * On entry, the current task has a reference on a non-NULL @mpol. 276071fe804bSLee Schermerhorn * This must be released on exit. 27614bfc4495SKAMEZAWA Hiroyuki * This is called at get_inode() calls and we can use GFP_KERNEL. 276271fe804bSLee Schermerhorn */ 276371fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) 27647339ff83SRobin Holt { 276558568d2aSMiao Xie int ret; 276658568d2aSMiao Xie 276771fe804bSLee Schermerhorn sp->root = RB_ROOT; /* empty tree == default mempolicy */ 27684a8c7bb5SNathan Zimmer rwlock_init(&sp->lock); 27697339ff83SRobin Holt 277071fe804bSLee Schermerhorn if (mpol) { 27717339ff83SRobin Holt struct vm_area_struct pvma; 277271fe804bSLee Schermerhorn struct mempolicy *new; 27734bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 27747339ff83SRobin Holt 27754bfc4495SKAMEZAWA Hiroyuki if (!scratch) 27765c0c1654SLee Schermerhorn goto put_mpol; 277771fe804bSLee Schermerhorn /* contextualize the tmpfs mount point mempolicy */ 277871fe804bSLee Schermerhorn new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask); 277915d77835SLee Schermerhorn if (IS_ERR(new)) 27800cae3457SDan Carpenter goto free_scratch; /* no valid nodemask intersection */ 278158568d2aSMiao Xie 278258568d2aSMiao Xie task_lock(current); 27834bfc4495SKAMEZAWA Hiroyuki ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch); 278458568d2aSMiao Xie task_unlock(current); 278515d77835SLee Schermerhorn if (ret) 27865c0c1654SLee Schermerhorn goto put_new; 278771fe804bSLee Schermerhorn 278871fe804bSLee Schermerhorn /* Create pseudo-vma that contains just the policy */ 27892c4541e2SKirill A. Shutemov vma_init(&pvma, NULL); 279071fe804bSLee Schermerhorn pvma.vm_end = TASK_SIZE; /* policy covers entire file */ 279171fe804bSLee Schermerhorn mpol_set_shared_policy(sp, &pvma, new); /* adds ref */ 279215d77835SLee Schermerhorn 27935c0c1654SLee Schermerhorn put_new: 279471fe804bSLee Schermerhorn mpol_put(new); /* drop initial ref */ 27950cae3457SDan Carpenter free_scratch: 27964bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 27975c0c1654SLee Schermerhorn put_mpol: 27985c0c1654SLee Schermerhorn mpol_put(mpol); /* drop our incoming ref on sb mpol */ 27997339ff83SRobin Holt } 28007339ff83SRobin Holt } 28017339ff83SRobin Holt 28021da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info, 28031da177e4SLinus Torvalds struct vm_area_struct *vma, struct mempolicy *npol) 28041da177e4SLinus Torvalds { 28051da177e4SLinus Torvalds int err; 28061da177e4SLinus Torvalds struct sp_node *new = NULL; 28071da177e4SLinus Torvalds unsigned long sz = vma_pages(vma); 28081da177e4SLinus Torvalds 2809028fec41SDavid Rientjes pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n", 28101da177e4SLinus Torvalds vma->vm_pgoff, 281145c4745aSLee Schermerhorn sz, npol ? npol->mode : -1, 2812028fec41SDavid Rientjes npol ? npol->flags : -1, 2813269fbe72SBen Widawsky npol ? 
nodes_addr(npol->nodes)[0] : NUMA_NO_NODE); 28141da177e4SLinus Torvalds 28151da177e4SLinus Torvalds if (npol) { 28161da177e4SLinus Torvalds new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol); 28171da177e4SLinus Torvalds if (!new) 28181da177e4SLinus Torvalds return -ENOMEM; 28191da177e4SLinus Torvalds } 28201da177e4SLinus Torvalds err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new); 28211da177e4SLinus Torvalds if (err && new) 282263f74ca2SKOSAKI Motohiro sp_free(new); 28231da177e4SLinus Torvalds return err; 28241da177e4SLinus Torvalds } 28251da177e4SLinus Torvalds 28261da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */ 28271da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p) 28281da177e4SLinus Torvalds { 28291da177e4SLinus Torvalds struct sp_node *n; 28301da177e4SLinus Torvalds struct rb_node *next; 28311da177e4SLinus Torvalds 28321da177e4SLinus Torvalds if (!p->root.rb_node) 28331da177e4SLinus Torvalds return; 28344a8c7bb5SNathan Zimmer write_lock(&p->lock); 28351da177e4SLinus Torvalds next = rb_first(&p->root); 28361da177e4SLinus Torvalds while (next) { 28371da177e4SLinus Torvalds n = rb_entry(next, struct sp_node, nd); 28381da177e4SLinus Torvalds next = rb_next(&n->nd); 283963f74ca2SKOSAKI Motohiro sp_delete(p, n); 28401da177e4SLinus Torvalds } 28414a8c7bb5SNathan Zimmer write_unlock(&p->lock); 28421da177e4SLinus Torvalds } 28431da177e4SLinus Torvalds 28441a687c2eSMel Gorman #ifdef CONFIG_NUMA_BALANCING 2845c297663cSMel Gorman static int __initdata numabalancing_override; 28461a687c2eSMel Gorman 28471a687c2eSMel Gorman static void __init check_numabalancing_enable(void) 28481a687c2eSMel Gorman { 28491a687c2eSMel Gorman bool numabalancing_default = false; 28501a687c2eSMel Gorman 28511a687c2eSMel Gorman if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED)) 28521a687c2eSMel Gorman numabalancing_default = true; 28531a687c2eSMel Gorman 2854c297663cSMel Gorman /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */ 2855c297663cSMel Gorman if (numabalancing_override) 2856c297663cSMel Gorman set_numabalancing_state(numabalancing_override == 1); 2857c297663cSMel Gorman 2858b0dc2b9bSMel Gorman if (num_online_nodes() > 1 && !numabalancing_override) { 2859756a025fSJoe Perches pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n", 2860c297663cSMel Gorman numabalancing_default ? 
"Enabling" : "Disabling"); 28611a687c2eSMel Gorman set_numabalancing_state(numabalancing_default); 28621a687c2eSMel Gorman } 28631a687c2eSMel Gorman } 28641a687c2eSMel Gorman 28651a687c2eSMel Gorman static int __init setup_numabalancing(char *str) 28661a687c2eSMel Gorman { 28671a687c2eSMel Gorman int ret = 0; 28681a687c2eSMel Gorman if (!str) 28691a687c2eSMel Gorman goto out; 28701a687c2eSMel Gorman 28711a687c2eSMel Gorman if (!strcmp(str, "enable")) { 2872c297663cSMel Gorman numabalancing_override = 1; 28731a687c2eSMel Gorman ret = 1; 28741a687c2eSMel Gorman } else if (!strcmp(str, "disable")) { 2875c297663cSMel Gorman numabalancing_override = -1; 28761a687c2eSMel Gorman ret = 1; 28771a687c2eSMel Gorman } 28781a687c2eSMel Gorman out: 28791a687c2eSMel Gorman if (!ret) 28804a404beaSAndrew Morton pr_warn("Unable to parse numa_balancing=\n"); 28811a687c2eSMel Gorman 28821a687c2eSMel Gorman return ret; 28831a687c2eSMel Gorman } 28841a687c2eSMel Gorman __setup("numa_balancing=", setup_numabalancing); 28851a687c2eSMel Gorman #else 28861a687c2eSMel Gorman static inline void __init check_numabalancing_enable(void) 28871a687c2eSMel Gorman { 28881a687c2eSMel Gorman } 28891a687c2eSMel Gorman #endif /* CONFIG_NUMA_BALANCING */ 28901a687c2eSMel Gorman 28911da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */ 28921da177e4SLinus Torvalds void __init numa_policy_init(void) 28931da177e4SLinus Torvalds { 2894b71636e2SPaul Mundt nodemask_t interleave_nodes; 2895b71636e2SPaul Mundt unsigned long largest = 0; 2896b71636e2SPaul Mundt int nid, prefer = 0; 2897b71636e2SPaul Mundt 28981da177e4SLinus Torvalds policy_cache = kmem_cache_create("numa_policy", 28991da177e4SLinus Torvalds sizeof(struct mempolicy), 290020c2df83SPaul Mundt 0, SLAB_PANIC, NULL); 29011da177e4SLinus Torvalds 29021da177e4SLinus Torvalds sn_cache = kmem_cache_create("shared_policy_node", 29031da177e4SLinus Torvalds sizeof(struct sp_node), 290420c2df83SPaul Mundt 0, SLAB_PANIC, NULL); 29051da177e4SLinus Torvalds 29065606e387SMel Gorman for_each_node(nid) { 29075606e387SMel Gorman preferred_node_policy[nid] = (struct mempolicy) { 29085606e387SMel Gorman .refcnt = ATOMIC_INIT(1), 29095606e387SMel Gorman .mode = MPOL_PREFERRED, 29105606e387SMel Gorman .flags = MPOL_F_MOF | MPOL_F_MORON, 2911269fbe72SBen Widawsky .nodes = nodemask_of_node(nid), 29125606e387SMel Gorman }; 29135606e387SMel Gorman } 29145606e387SMel Gorman 2915b71636e2SPaul Mundt /* 2916b71636e2SPaul Mundt * Set interleaving policy for system init. Interleaving is only 2917b71636e2SPaul Mundt * enabled across suitably sized nodes (default is >= 16MB), or 2918b71636e2SPaul Mundt * fall back to the largest node if they're all smaller. 2919b71636e2SPaul Mundt */ 2920b71636e2SPaul Mundt nodes_clear(interleave_nodes); 292101f13bd6SLai Jiangshan for_each_node_state(nid, N_MEMORY) { 2922b71636e2SPaul Mundt unsigned long total_pages = node_present_pages(nid); 29231da177e4SLinus Torvalds 2924b71636e2SPaul Mundt /* Preserve the largest node */ 2925b71636e2SPaul Mundt if (largest < total_pages) { 2926b71636e2SPaul Mundt largest = total_pages; 2927b71636e2SPaul Mundt prefer = nid; 2928b71636e2SPaul Mundt } 2929b71636e2SPaul Mundt 2930b71636e2SPaul Mundt /* Interleave this node? 
*/ 2931b71636e2SPaul Mundt if ((total_pages << PAGE_SHIFT) >= (16 << 20)) 2932b71636e2SPaul Mundt node_set(nid, interleave_nodes); 2933b71636e2SPaul Mundt } 2934b71636e2SPaul Mundt 2935b71636e2SPaul Mundt /* All too small, use the largest */ 2936b71636e2SPaul Mundt if (unlikely(nodes_empty(interleave_nodes))) 2937b71636e2SPaul Mundt node_set(prefer, interleave_nodes); 2938b71636e2SPaul Mundt 2939028fec41SDavid Rientjes if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes)) 2940b1de0d13SMitchel Humpherys pr_err("%s: interleaving failed\n", __func__); 29411a687c2eSMel Gorman 29421a687c2eSMel Gorman check_numabalancing_enable(); 29431da177e4SLinus Torvalds } 29441da177e4SLinus Torvalds 29458bccd85fSChristoph Lameter /* Reset policy of current process to default */ 29461da177e4SLinus Torvalds void numa_default_policy(void) 29471da177e4SLinus Torvalds { 2948028fec41SDavid Rientjes do_set_mempolicy(MPOL_DEFAULT, 0, NULL); 29491da177e4SLinus Torvalds } 295068860ec1SPaul Jackson 29514225399aSPaul Jackson /* 2952095f1fc4SLee Schermerhorn * Parse and format mempolicy from/to strings 2953095f1fc4SLee Schermerhorn */ 2954095f1fc4SLee Schermerhorn 2955345ace9cSLee Schermerhorn static const char * const policy_modes[] = 2956345ace9cSLee Schermerhorn { 2957345ace9cSLee Schermerhorn [MPOL_DEFAULT] = "default", 2958345ace9cSLee Schermerhorn [MPOL_PREFERRED] = "prefer", 2959345ace9cSLee Schermerhorn [MPOL_BIND] = "bind", 2960345ace9cSLee Schermerhorn [MPOL_INTERLEAVE] = "interleave", 2961d3a71033SLee Schermerhorn [MPOL_LOCAL] = "local", 2962b27abaccSDave Hansen [MPOL_PREFERRED_MANY] = "prefer (many)", 2963345ace9cSLee Schermerhorn }; 29641a75a6c8SChristoph Lameter 2965095f1fc4SLee Schermerhorn 2966095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS 2967095f1fc4SLee Schermerhorn /** 2968f2a07f40SHugh Dickins * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option. 2969095f1fc4SLee Schermerhorn * @str: string containing mempolicy to parse 297071fe804bSLee Schermerhorn * @mpol: pointer to struct mempolicy pointer, returned on success. 
2971095f1fc4SLee Schermerhorn * 2972095f1fc4SLee Schermerhorn * Format of input: 2973095f1fc4SLee Schermerhorn * <mode>[=<flags>][:<nodelist>] 2974095f1fc4SLee Schermerhorn * 2975dad5b023SRandy Dunlap * Return: %0 on success, else %1 2976095f1fc4SLee Schermerhorn */ 2977a7a88b23SHugh Dickins int mpol_parse_str(char *str, struct mempolicy **mpol) 2978095f1fc4SLee Schermerhorn { 297971fe804bSLee Schermerhorn struct mempolicy *new = NULL; 2980f2a07f40SHugh Dickins unsigned short mode_flags; 298171fe804bSLee Schermerhorn nodemask_t nodes; 2982095f1fc4SLee Schermerhorn char *nodelist = strchr(str, ':'); 2983095f1fc4SLee Schermerhorn char *flags = strchr(str, '='); 2984dedf2c73Szhong jiang int err = 1, mode; 2985095f1fc4SLee Schermerhorn 2986c7a91bc7SDan Carpenter if (flags) 2987c7a91bc7SDan Carpenter *flags++ = '\0'; /* terminate mode string */ 2988c7a91bc7SDan Carpenter 2989095f1fc4SLee Schermerhorn if (nodelist) { 2990095f1fc4SLee Schermerhorn /* NUL-terminate mode or flags string */ 2991095f1fc4SLee Schermerhorn *nodelist++ = '\0'; 299271fe804bSLee Schermerhorn if (nodelist_parse(nodelist, nodes)) 2993095f1fc4SLee Schermerhorn goto out; 299401f13bd6SLai Jiangshan if (!nodes_subset(nodes, node_states[N_MEMORY])) 2995095f1fc4SLee Schermerhorn goto out; 299671fe804bSLee Schermerhorn } else 299771fe804bSLee Schermerhorn nodes_clear(nodes); 299871fe804bSLee Schermerhorn 2999dedf2c73Szhong jiang mode = match_string(policy_modes, MPOL_MAX, str); 3000dedf2c73Szhong jiang if (mode < 0) 3001095f1fc4SLee Schermerhorn goto out; 3002095f1fc4SLee Schermerhorn 300371fe804bSLee Schermerhorn switch (mode) { 3004095f1fc4SLee Schermerhorn case MPOL_PREFERRED: 300571fe804bSLee Schermerhorn /* 3006aa9f7d51SRandy Dunlap * Insist on a nodelist of one node only, although later 3007aa9f7d51SRandy Dunlap * we use first_node(nodes) to grab a single node, so here 3008aa9f7d51SRandy Dunlap * nodelist (or nodes) cannot be empty. 
300971fe804bSLee Schermerhorn */ 3010095f1fc4SLee Schermerhorn if (nodelist) { 3011095f1fc4SLee Schermerhorn char *rest = nodelist; 3012095f1fc4SLee Schermerhorn while (isdigit(*rest)) 3013095f1fc4SLee Schermerhorn rest++; 3014926f2ae0SKOSAKI Motohiro if (*rest) 3015926f2ae0SKOSAKI Motohiro goto out; 3016aa9f7d51SRandy Dunlap if (nodes_empty(nodes)) 3017aa9f7d51SRandy Dunlap goto out; 3018095f1fc4SLee Schermerhorn } 3019095f1fc4SLee Schermerhorn break; 3020095f1fc4SLee Schermerhorn case MPOL_INTERLEAVE: 3021095f1fc4SLee Schermerhorn /* 3022095f1fc4SLee Schermerhorn * Default to online nodes with memory if no nodelist 3023095f1fc4SLee Schermerhorn */ 3024095f1fc4SLee Schermerhorn if (!nodelist) 302501f13bd6SLai Jiangshan nodes = node_states[N_MEMORY]; 30263f226aa1SLee Schermerhorn break; 302771fe804bSLee Schermerhorn case MPOL_LOCAL: 30283f226aa1SLee Schermerhorn /* 302971fe804bSLee Schermerhorn * Don't allow a nodelist; mpol_new() checks flags 30303f226aa1SLee Schermerhorn */ 303171fe804bSLee Schermerhorn if (nodelist) 30323f226aa1SLee Schermerhorn goto out; 30333f226aa1SLee Schermerhorn break; 3034413b43deSRavikiran G Thirumalai case MPOL_DEFAULT: 3035413b43deSRavikiran G Thirumalai /* 3036413b43deSRavikiran G Thirumalai * Insist on a empty nodelist 3037413b43deSRavikiran G Thirumalai */ 3038413b43deSRavikiran G Thirumalai if (!nodelist) 3039413b43deSRavikiran G Thirumalai err = 0; 3040413b43deSRavikiran G Thirumalai goto out; 3041b27abaccSDave Hansen case MPOL_PREFERRED_MANY: 3042d69b2e63SKOSAKI Motohiro case MPOL_BIND: 304371fe804bSLee Schermerhorn /* 3044d69b2e63SKOSAKI Motohiro * Insist on a nodelist 304571fe804bSLee Schermerhorn */ 3046d69b2e63SKOSAKI Motohiro if (!nodelist) 3047d69b2e63SKOSAKI Motohiro goto out; 3048095f1fc4SLee Schermerhorn } 3049095f1fc4SLee Schermerhorn 305071fe804bSLee Schermerhorn mode_flags = 0; 3051095f1fc4SLee Schermerhorn if (flags) { 3052095f1fc4SLee Schermerhorn /* 3053095f1fc4SLee Schermerhorn * Currently, we only support two mutually exclusive 3054095f1fc4SLee Schermerhorn * mode flags. 3055095f1fc4SLee Schermerhorn */ 3056095f1fc4SLee Schermerhorn if (!strcmp(flags, "static")) 305771fe804bSLee Schermerhorn mode_flags |= MPOL_F_STATIC_NODES; 3058095f1fc4SLee Schermerhorn else if (!strcmp(flags, "relative")) 305971fe804bSLee Schermerhorn mode_flags |= MPOL_F_RELATIVE_NODES; 3060095f1fc4SLee Schermerhorn else 3061926f2ae0SKOSAKI Motohiro goto out; 3062095f1fc4SLee Schermerhorn } 306371fe804bSLee Schermerhorn 306471fe804bSLee Schermerhorn new = mpol_new(mode, mode_flags, &nodes); 306571fe804bSLee Schermerhorn if (IS_ERR(new)) 3066926f2ae0SKOSAKI Motohiro goto out; 3067926f2ae0SKOSAKI Motohiro 3068f2a07f40SHugh Dickins /* 3069f2a07f40SHugh Dickins * Save nodes for mpol_to_str() to show the tmpfs mount options 3070f2a07f40SHugh Dickins * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo. 3071f2a07f40SHugh Dickins */ 3072269fbe72SBen Widawsky if (mode != MPOL_PREFERRED) { 3073269fbe72SBen Widawsky new->nodes = nodes; 3074269fbe72SBen Widawsky } else if (nodelist) { 3075269fbe72SBen Widawsky nodes_clear(new->nodes); 3076269fbe72SBen Widawsky node_set(first_node(nodes), new->nodes); 3077269fbe72SBen Widawsky } else { 30787858d7bcSFeng Tang new->mode = MPOL_LOCAL; 3079269fbe72SBen Widawsky } 3080f2a07f40SHugh Dickins 3081f2a07f40SHugh Dickins /* 3082f2a07f40SHugh Dickins * Save nodes for contextualization: this will be used to "clone" 3083f2a07f40SHugh Dickins * the mempolicy in a specific context [cpuset] at a later time. 
3084f2a07f40SHugh Dickins */ 3085e17f74afSLee Schermerhorn new->w.user_nodemask = nodes; 3086f2a07f40SHugh Dickins 3087926f2ae0SKOSAKI Motohiro err = 0; 308871fe804bSLee Schermerhorn 3089095f1fc4SLee Schermerhorn out: 3090095f1fc4SLee Schermerhorn /* Restore string for error message */ 3091095f1fc4SLee Schermerhorn if (nodelist) 3092095f1fc4SLee Schermerhorn *--nodelist = ':'; 3093095f1fc4SLee Schermerhorn if (flags) 3094095f1fc4SLee Schermerhorn *--flags = '='; 309571fe804bSLee Schermerhorn if (!err) 309671fe804bSLee Schermerhorn *mpol = new; 3097095f1fc4SLee Schermerhorn return err; 3098095f1fc4SLee Schermerhorn } 3099095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */ 3100095f1fc4SLee Schermerhorn 310171fe804bSLee Schermerhorn /** 310271fe804bSLee Schermerhorn * mpol_to_str - format a mempolicy structure for printing 310371fe804bSLee Schermerhorn * @buffer: to contain formatted mempolicy string 310471fe804bSLee Schermerhorn * @maxlen: length of @buffer 310571fe804bSLee Schermerhorn * @pol: pointer to mempolicy to be formatted 310671fe804bSLee Schermerhorn * 3107948927eeSDavid Rientjes * Convert @pol into a string. If @buffer is too short, truncate the string. 3108948927eeSDavid Rientjes * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the 3109948927eeSDavid Rientjes * longest flag, "relative", and to display at least a few node ids. 31101a75a6c8SChristoph Lameter */ 3111948927eeSDavid Rientjes void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol) 31121a75a6c8SChristoph Lameter { 31131a75a6c8SChristoph Lameter char *p = buffer; 3114948927eeSDavid Rientjes nodemask_t nodes = NODE_MASK_NONE; 3115948927eeSDavid Rientjes unsigned short mode = MPOL_DEFAULT; 3116948927eeSDavid Rientjes unsigned short flags = 0; 31171a75a6c8SChristoph Lameter 31188790c71aSDavid Rientjes if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) { 3119bea904d5SLee Schermerhorn mode = pol->mode; 3120948927eeSDavid Rientjes flags = pol->flags; 3121948927eeSDavid Rientjes } 3122bea904d5SLee Schermerhorn 31231a75a6c8SChristoph Lameter switch (mode) { 31241a75a6c8SChristoph Lameter case MPOL_DEFAULT: 31257858d7bcSFeng Tang case MPOL_LOCAL: 31261a75a6c8SChristoph Lameter break; 31271a75a6c8SChristoph Lameter case MPOL_PREFERRED: 3128b27abaccSDave Hansen case MPOL_PREFERRED_MANY: 31291a75a6c8SChristoph Lameter case MPOL_BIND: 31301a75a6c8SChristoph Lameter case MPOL_INTERLEAVE: 3131269fbe72SBen Widawsky nodes = pol->nodes; 31321a75a6c8SChristoph Lameter break; 31331a75a6c8SChristoph Lameter default: 3134948927eeSDavid Rientjes WARN_ON_ONCE(1); 3135948927eeSDavid Rientjes snprintf(p, maxlen, "unknown"); 3136948927eeSDavid Rientjes return; 31371a75a6c8SChristoph Lameter } 31381a75a6c8SChristoph Lameter 3139b7a9f420SDavid Rientjes p += snprintf(p, maxlen, "%s", policy_modes[mode]); 31401a75a6c8SChristoph Lameter 3141fc36b8d3SLee Schermerhorn if (flags & MPOL_MODE_FLAGS) { 3142948927eeSDavid Rientjes p += snprintf(p, buffer + maxlen - p, "="); 3143f5b087b5SDavid Rientjes 31442291990aSLee Schermerhorn /* 31452291990aSLee Schermerhorn * Currently, the only defined flags are mutually exclusive 31462291990aSLee Schermerhorn */ 3147f5b087b5SDavid Rientjes if (flags & MPOL_F_STATIC_NODES) 31482291990aSLee Schermerhorn p += snprintf(p, buffer + maxlen - p, "static"); 31492291990aSLee Schermerhorn else if (flags & MPOL_F_RELATIVE_NODES) 31502291990aSLee Schermerhorn p += snprintf(p, buffer + maxlen - p, "relative"); 3151f5b087b5SDavid Rientjes } 3152f5b087b5SDavid Rientjes 
31539e763e0fSTejun Heo 	if (!nodes_empty(nodes))
31549e763e0fSTejun Heo 		p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
31559e763e0fSTejun Heo 			       nodemask_pr_args(&nodes));
31561a75a6c8SChristoph Lameter }
3157
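/*
 * Illustrative userspace sketch, not part of mempolicy.c: it shows how the
 * policies implemented above are requested from user space.  set_mempolicy()
 * and mbind() are the syscall wrappers declared in <numaif.h> (libnuma); the
 * node numbers and the two-node mask below are assumptions for the example,
 * not requirements of the API.  The string form parsed by mpol_parse_str()
 * above is the tmpfs mount option, e.g.
 * "mount -t tmpfs -o mpol=interleave:0-1 tmpfs /mnt".  Build with -lnuma.
 */
#include <numaif.h>		/* set_mempolicy(), mbind(), MPOL_* */
#include <sys/mman.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned long nodemask = 0x3;	/* nodes 0 and 1, assumed present */
	size_t len = 4 * 1024 * 1024;
	void *buf;

	/* Process policy: interleave new allocations across nodes 0-1. */
	if (set_mempolicy(MPOL_INTERLEAVE, &nodemask, sizeof(nodemask) * 8))
		perror("set_mempolicy");

	/* VMA policy: bind one mapping to node 0 only (cf. MPOL_BIND above). */
	buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	nodemask = 0x1;
	if (mbind(buf, len, MPOL_BIND, &nodemask, sizeof(nodemask) * 8, 0))
		perror("mbind");

	memset(buf, 0, len);	/* fault pages in under the VMA policy */
	return 0;
}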