146aeb7e6SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only 21da177e4SLinus Torvalds /* 31da177e4SLinus Torvalds * Simple NUMA memory policy for the Linux kernel. 41da177e4SLinus Torvalds * 51da177e4SLinus Torvalds * Copyright 2003,2004 Andi Kleen, SuSE Labs. 68bccd85fSChristoph Lameter * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc. 71da177e4SLinus Torvalds * 81da177e4SLinus Torvalds * NUMA policy allows the user to give hints in which node(s) memory should 91da177e4SLinus Torvalds * be allocated. 101da177e4SLinus Torvalds * 111da177e4SLinus Torvalds * Support four policies per VMA and per process: 121da177e4SLinus Torvalds * 131da177e4SLinus Torvalds * The VMA policy has priority over the process policy for a page fault. 141da177e4SLinus Torvalds * 151da177e4SLinus Torvalds * interleave Allocate memory interleaved over a set of nodes, 161da177e4SLinus Torvalds * with normal fallback if it fails. 171da177e4SLinus Torvalds * For VMA based allocations this interleaves based on the 181da177e4SLinus Torvalds * offset into the backing object or offset into the mapping 191da177e4SLinus Torvalds * for anonymous memory. For process policy an process counter 201da177e4SLinus Torvalds * is used. 218bccd85fSChristoph Lameter * 221da177e4SLinus Torvalds * bind Only allocate memory on a specific set of nodes, 231da177e4SLinus Torvalds * no fallback. 248bccd85fSChristoph Lameter * FIXME: memory is allocated starting with the first node 258bccd85fSChristoph Lameter * to the last. It would be better if bind would truly restrict 268bccd85fSChristoph Lameter * the allocation to memory nodes instead 278bccd85fSChristoph Lameter * 281da177e4SLinus Torvalds * preferred Try a specific node first before normal fallback. 2900ef2d2fSDavid Rientjes * As a special case NUMA_NO_NODE here means do the allocation 301da177e4SLinus Torvalds * on the local CPU. This is normally identical to default, 311da177e4SLinus Torvalds * but useful to set in a VMA when you have a non default 321da177e4SLinus Torvalds * process policy. 338bccd85fSChristoph Lameter * 34b27abaccSDave Hansen * preferred many Try a set of nodes first before normal fallback. This is 35b27abaccSDave Hansen * similar to preferred without the special case. 36b27abaccSDave Hansen * 371da177e4SLinus Torvalds * default Allocate on the local node first, or when on a VMA 381da177e4SLinus Torvalds * use the process policy. This is what Linux always did 391da177e4SLinus Torvalds * in a NUMA aware kernel and still does by, ahem, default. 401da177e4SLinus Torvalds * 411da177e4SLinus Torvalds * The process policy is applied for most non interrupt memory allocations 421da177e4SLinus Torvalds * in that process' context. Interrupts ignore the policies and always 431da177e4SLinus Torvalds * try to allocate on the local CPU. The VMA policy is only applied for memory 441da177e4SLinus Torvalds * allocations for a VMA in the VM. 451da177e4SLinus Torvalds * 461da177e4SLinus Torvalds * Currently there are a few corner cases in swapping where the policy 471da177e4SLinus Torvalds * is not applied, but the majority should be handled. When process policy 481da177e4SLinus Torvalds * is used it is not remembered over swap outs/swap ins. 491da177e4SLinus Torvalds * 501da177e4SLinus Torvalds * Only the highest zone in the zone hierarchy gets policied. Allocations 511da177e4SLinus Torvalds * requesting a lower zone just use default policy. This implies that 521da177e4SLinus Torvalds * on systems with highmem kernel lowmem allocation don't get policied. 
531da177e4SLinus Torvalds * Same with GFP_DMA allocations. 541da177e4SLinus Torvalds * 551da177e4SLinus Torvalds * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between 561da177e4SLinus Torvalds * all users and remembered even when nobody has memory mapped. 571da177e4SLinus Torvalds */ 581da177e4SLinus Torvalds 591da177e4SLinus Torvalds /* Notebook: 601da177e4SLinus Torvalds fix mmap readahead to honour policy and enable policy for any page cache 611da177e4SLinus Torvalds object 621da177e4SLinus Torvalds statistics for bigpages 631da177e4SLinus Torvalds global policy for page cache? currently it uses process policy. Requires 641da177e4SLinus Torvalds first item above. 651da177e4SLinus Torvalds handle mremap for shared memory (currently ignored for the policy) 661da177e4SLinus Torvalds grows down? 671da177e4SLinus Torvalds make bind policy root only? It can trigger oom much faster and the 681da177e4SLinus Torvalds kernel is not always grateful with that. 691da177e4SLinus Torvalds */ 701da177e4SLinus Torvalds 71b1de0d13SMitchel Humpherys #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 72b1de0d13SMitchel Humpherys 731da177e4SLinus Torvalds #include <linux/mempolicy.h> 74a520110eSChristoph Hellwig #include <linux/pagewalk.h> 751da177e4SLinus Torvalds #include <linux/highmem.h> 761da177e4SLinus Torvalds #include <linux/hugetlb.h> 771da177e4SLinus Torvalds #include <linux/kernel.h> 781da177e4SLinus Torvalds #include <linux/sched.h> 796e84f315SIngo Molnar #include <linux/sched/mm.h> 806a3827d7SIngo Molnar #include <linux/sched/numa_balancing.h> 81f719ff9bSIngo Molnar #include <linux/sched/task.h> 821da177e4SLinus Torvalds #include <linux/nodemask.h> 831da177e4SLinus Torvalds #include <linux/cpuset.h> 841da177e4SLinus Torvalds #include <linux/slab.h> 851da177e4SLinus Torvalds #include <linux/string.h> 86b95f1b31SPaul Gortmaker #include <linux/export.h> 87b488893aSPavel Emelyanov #include <linux/nsproxy.h> 881da177e4SLinus Torvalds #include <linux/interrupt.h> 891da177e4SLinus Torvalds #include <linux/init.h> 901da177e4SLinus Torvalds #include <linux/compat.h> 9131367466SOtto Ebeling #include <linux/ptrace.h> 92dc9aa5b9SChristoph Lameter #include <linux/swap.h> 931a75a6c8SChristoph Lameter #include <linux/seq_file.h> 941a75a6c8SChristoph Lameter #include <linux/proc_fs.h> 95b20a3503SChristoph Lameter #include <linux/migrate.h> 9662b61f61SHugh Dickins #include <linux/ksm.h> 9795a402c3SChristoph Lameter #include <linux/rmap.h> 9886c3a764SDavid Quigley #include <linux/security.h> 99dbcb0f19SAdrian Bunk #include <linux/syscalls.h> 100095f1fc4SLee Schermerhorn #include <linux/ctype.h> 1016d9c285aSKOSAKI Motohiro #include <linux/mm_inline.h> 102b24f53a0SLee Schermerhorn #include <linux/mmu_notifier.h> 103b1de0d13SMitchel Humpherys #include <linux/printk.h> 104c8633798SNaoya Horiguchi #include <linux/swapops.h> 105dc9aa5b9SChristoph Lameter 1061da177e4SLinus Torvalds #include <asm/tlbflush.h> 1074a18419fSNadav Amit #include <asm/tlb.h> 1087c0f6ba6SLinus Torvalds #include <linux/uaccess.h> 1091da177e4SLinus Torvalds 11062695a84SNick Piggin #include "internal.h" 11162695a84SNick Piggin 11238e35860SChristoph Lameter /* Internal flags */ 113dc9aa5b9SChristoph Lameter #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for continuous vmas */ 11438e35860SChristoph Lameter #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */ 115dc9aa5b9SChristoph Lameter 116fcc234f8SPekka Enberg static struct kmem_cache *policy_cache; 117fcc234f8SPekka Enberg static struct 
kmem_cache *sn_cache; 1181da177e4SLinus Torvalds 1191da177e4SLinus Torvalds /* Highest zone. An specific allocation for a zone below that is not 1201da177e4SLinus Torvalds policied. */ 1216267276fSChristoph Lameter enum zone_type policy_zone = 0; 1221da177e4SLinus Torvalds 123bea904d5SLee Schermerhorn /* 124bea904d5SLee Schermerhorn * run-time system-wide default policy => local allocation 125bea904d5SLee Schermerhorn */ 126e754d79dSH Hartley Sweeten static struct mempolicy default_policy = { 1271da177e4SLinus Torvalds .refcnt = ATOMIC_INIT(1), /* never free it */ 1287858d7bcSFeng Tang .mode = MPOL_LOCAL, 1291da177e4SLinus Torvalds }; 1301da177e4SLinus Torvalds 1315606e387SMel Gorman static struct mempolicy preferred_node_policy[MAX_NUMNODES]; 1325606e387SMel Gorman 133b2ca916cSDan Williams /** 134b2ca916cSDan Williams * numa_map_to_online_node - Find closest online node 135f6e92f40SKrzysztof Kozlowski * @node: Node id to start the search 136b2ca916cSDan Williams * 137b2ca916cSDan Williams * Lookup the next closest node by distance if @nid is not online. 138dad5b023SRandy Dunlap * 139dad5b023SRandy Dunlap * Return: this @node if it is online, otherwise the closest node by distance 140b2ca916cSDan Williams */ 141b2ca916cSDan Williams int numa_map_to_online_node(int node) 142b2ca916cSDan Williams { 1434fcbe96eSDan Williams int min_dist = INT_MAX, dist, n, min_node; 144b2ca916cSDan Williams 1454fcbe96eSDan Williams if (node == NUMA_NO_NODE || node_online(node)) 1464fcbe96eSDan Williams return node; 147b2ca916cSDan Williams 148b2ca916cSDan Williams min_node = node; 149b2ca916cSDan Williams for_each_online_node(n) { 150b2ca916cSDan Williams dist = node_distance(node, n); 151b2ca916cSDan Williams if (dist < min_dist) { 152b2ca916cSDan Williams min_dist = dist; 153b2ca916cSDan Williams min_node = n; 154b2ca916cSDan Williams } 155b2ca916cSDan Williams } 156b2ca916cSDan Williams 157b2ca916cSDan Williams return min_node; 158b2ca916cSDan Williams } 159b2ca916cSDan Williams EXPORT_SYMBOL_GPL(numa_map_to_online_node); 160b2ca916cSDan Williams 16174d2c3a0SOleg Nesterov struct mempolicy *get_task_policy(struct task_struct *p) 1625606e387SMel Gorman { 1635606e387SMel Gorman struct mempolicy *pol = p->mempolicy; 164f15ca78eSOleg Nesterov int node; 1655606e387SMel Gorman 166f15ca78eSOleg Nesterov if (pol) 167f15ca78eSOleg Nesterov return pol; 1685606e387SMel Gorman 169f15ca78eSOleg Nesterov node = numa_node_id(); 1701da6f0e1SJianguo Wu if (node != NUMA_NO_NODE) { 1711da6f0e1SJianguo Wu pol = &preferred_node_policy[node]; 172f15ca78eSOleg Nesterov /* preferred_node_policy is not initialised early in boot */ 173f15ca78eSOleg Nesterov if (pol->mode) 174f15ca78eSOleg Nesterov return pol; 1751da6f0e1SJianguo Wu } 1765606e387SMel Gorman 177f15ca78eSOleg Nesterov return &default_policy; 1785606e387SMel Gorman } 1795606e387SMel Gorman 18037012946SDavid Rientjes static const struct mempolicy_operations { 18137012946SDavid Rientjes int (*create)(struct mempolicy *pol, const nodemask_t *nodes); 182213980c0SVlastimil Babka void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes); 18337012946SDavid Rientjes } mpol_ops[MPOL_MAX]; 18437012946SDavid Rientjes 185f5b087b5SDavid Rientjes static inline int mpol_store_user_nodemask(const struct mempolicy *pol) 186f5b087b5SDavid Rientjes { 1876d556294SBob Liu return pol->flags & MPOL_MODE_FLAGS; 1884c50bc01SDavid Rientjes } 1894c50bc01SDavid Rientjes 1904c50bc01SDavid Rientjes static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig, 1914c50bc01SDavid 
Rientjes const nodemask_t *rel) 1924c50bc01SDavid Rientjes { 1934c50bc01SDavid Rientjes nodemask_t tmp; 1944c50bc01SDavid Rientjes nodes_fold(tmp, *orig, nodes_weight(*rel)); 1954c50bc01SDavid Rientjes nodes_onto(*ret, tmp, *rel); 196f5b087b5SDavid Rientjes } 197f5b087b5SDavid Rientjes 198be897d48SFeng Tang static int mpol_new_nodemask(struct mempolicy *pol, const nodemask_t *nodes) 19937012946SDavid Rientjes { 20037012946SDavid Rientjes if (nodes_empty(*nodes)) 20137012946SDavid Rientjes return -EINVAL; 202269fbe72SBen Widawsky pol->nodes = *nodes; 20337012946SDavid Rientjes return 0; 20437012946SDavid Rientjes } 20537012946SDavid Rientjes 20637012946SDavid Rientjes static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes) 20737012946SDavid Rientjes { 2087858d7bcSFeng Tang if (nodes_empty(*nodes)) 2097858d7bcSFeng Tang return -EINVAL; 210269fbe72SBen Widawsky 211269fbe72SBen Widawsky nodes_clear(pol->nodes); 212269fbe72SBen Widawsky node_set(first_node(*nodes), pol->nodes); 21337012946SDavid Rientjes return 0; 21437012946SDavid Rientjes } 21537012946SDavid Rientjes 21658568d2aSMiao Xie /* 21758568d2aSMiao Xie * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if 21858568d2aSMiao Xie * any, for the new policy. mpol_new() has already validated the nodes 2197858d7bcSFeng Tang * parameter with respect to the policy mode and flags. 22058568d2aSMiao Xie * 22158568d2aSMiao Xie * Must be called holding task's alloc_lock to protect task's mems_allowed 222c1e8d7c6SMichel Lespinasse * and mempolicy. May also be called holding the mmap_lock for write. 22358568d2aSMiao Xie */ 2244bfc4495SKAMEZAWA Hiroyuki static int mpol_set_nodemask(struct mempolicy *pol, 2254bfc4495SKAMEZAWA Hiroyuki const nodemask_t *nodes, struct nodemask_scratch *nsc) 22658568d2aSMiao Xie { 22758568d2aSMiao Xie int ret; 22858568d2aSMiao Xie 2297858d7bcSFeng Tang /* 2307858d7bcSFeng Tang * Default (pol==NULL) resp. local memory policies are not a 2317858d7bcSFeng Tang * subject of any remapping. They also do not need any special 2327858d7bcSFeng Tang * constructor. 2337858d7bcSFeng Tang */ 2347858d7bcSFeng Tang if (!pol || pol->mode == MPOL_LOCAL) 23558568d2aSMiao Xie return 0; 2367858d7bcSFeng Tang 23701f13bd6SLai Jiangshan /* Check N_MEMORY */ 2384bfc4495SKAMEZAWA Hiroyuki nodes_and(nsc->mask1, 23901f13bd6SLai Jiangshan cpuset_current_mems_allowed, node_states[N_MEMORY]); 24058568d2aSMiao Xie 24158568d2aSMiao Xie VM_BUG_ON(!nodes); 2427858d7bcSFeng Tang 24358568d2aSMiao Xie if (pol->flags & MPOL_F_RELATIVE_NODES) 2444bfc4495SKAMEZAWA Hiroyuki mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1); 24558568d2aSMiao Xie else 2464bfc4495SKAMEZAWA Hiroyuki nodes_and(nsc->mask2, *nodes, nsc->mask1); 2474bfc4495SKAMEZAWA Hiroyuki 24858568d2aSMiao Xie if (mpol_store_user_nodemask(pol)) 24958568d2aSMiao Xie pol->w.user_nodemask = *nodes; 25058568d2aSMiao Xie else 2517858d7bcSFeng Tang pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed; 25258568d2aSMiao Xie 2534bfc4495SKAMEZAWA Hiroyuki ret = mpol_ops[pol->mode].create(pol, &nsc->mask2); 25458568d2aSMiao Xie return ret; 25558568d2aSMiao Xie } 25658568d2aSMiao Xie 25758568d2aSMiao Xie /* 25858568d2aSMiao Xie * This function just creates a new policy, does some check and simple 25958568d2aSMiao Xie * initialization. You must invoke mpol_set_nodemask() to set nodes. 
26058568d2aSMiao Xie */ 261028fec41SDavid Rientjes static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags, 262028fec41SDavid Rientjes nodemask_t *nodes) 2631da177e4SLinus Torvalds { 2641da177e4SLinus Torvalds struct mempolicy *policy; 2651da177e4SLinus Torvalds 266028fec41SDavid Rientjes pr_debug("setting mode %d flags %d nodes[0] %lx\n", 26700ef2d2fSDavid Rientjes mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE); 268140d5a49SPaul Mundt 2693e1f0645SDavid Rientjes if (mode == MPOL_DEFAULT) { 2703e1f0645SDavid Rientjes if (nodes && !nodes_empty(*nodes)) 27137012946SDavid Rientjes return ERR_PTR(-EINVAL); 272d3a71033SLee Schermerhorn return NULL; 27337012946SDavid Rientjes } 2743e1f0645SDavid Rientjes VM_BUG_ON(!nodes); 2753e1f0645SDavid Rientjes 2763e1f0645SDavid Rientjes /* 2773e1f0645SDavid Rientjes * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or 2783e1f0645SDavid Rientjes * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation). 2793e1f0645SDavid Rientjes * All other modes require a valid pointer to a non-empty nodemask. 2803e1f0645SDavid Rientjes */ 2813e1f0645SDavid Rientjes if (mode == MPOL_PREFERRED) { 2823e1f0645SDavid Rientjes if (nodes_empty(*nodes)) { 2833e1f0645SDavid Rientjes if (((flags & MPOL_F_STATIC_NODES) || 2843e1f0645SDavid Rientjes (flags & MPOL_F_RELATIVE_NODES))) 2853e1f0645SDavid Rientjes return ERR_PTR(-EINVAL); 2867858d7bcSFeng Tang 2877858d7bcSFeng Tang mode = MPOL_LOCAL; 2883e1f0645SDavid Rientjes } 289479e2802SPeter Zijlstra } else if (mode == MPOL_LOCAL) { 2908d303e44SPiotr Kwapulinski if (!nodes_empty(*nodes) || 2918d303e44SPiotr Kwapulinski (flags & MPOL_F_STATIC_NODES) || 2928d303e44SPiotr Kwapulinski (flags & MPOL_F_RELATIVE_NODES)) 293479e2802SPeter Zijlstra return ERR_PTR(-EINVAL); 2943e1f0645SDavid Rientjes } else if (nodes_empty(*nodes)) 2953e1f0645SDavid Rientjes return ERR_PTR(-EINVAL); 2961da177e4SLinus Torvalds policy = kmem_cache_alloc(policy_cache, GFP_KERNEL); 2971da177e4SLinus Torvalds if (!policy) 2981da177e4SLinus Torvalds return ERR_PTR(-ENOMEM); 2991da177e4SLinus Torvalds atomic_set(&policy->refcnt, 1); 30045c4745aSLee Schermerhorn policy->mode = mode; 30137012946SDavid Rientjes policy->flags = flags; 302c6018b4bSAneesh Kumar K.V policy->home_node = NUMA_NO_NODE; 3033e1f0645SDavid Rientjes 30437012946SDavid Rientjes return policy; 30537012946SDavid Rientjes } 30637012946SDavid Rientjes 30752cd3b07SLee Schermerhorn /* Slow path of a mpol destructor. 
*/ 30852cd3b07SLee Schermerhorn void __mpol_put(struct mempolicy *p) 30952cd3b07SLee Schermerhorn { 31052cd3b07SLee Schermerhorn if (!atomic_dec_and_test(&p->refcnt)) 31152cd3b07SLee Schermerhorn return; 31252cd3b07SLee Schermerhorn kmem_cache_free(policy_cache, p); 31352cd3b07SLee Schermerhorn } 31452cd3b07SLee Schermerhorn 315213980c0SVlastimil Babka static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes) 31637012946SDavid Rientjes { 31737012946SDavid Rientjes } 31837012946SDavid Rientjes 319213980c0SVlastimil Babka static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes) 3201d0d2680SDavid Rientjes { 3211d0d2680SDavid Rientjes nodemask_t tmp; 3221d0d2680SDavid Rientjes 32337012946SDavid Rientjes if (pol->flags & MPOL_F_STATIC_NODES) 32437012946SDavid Rientjes nodes_and(tmp, pol->w.user_nodemask, *nodes); 32537012946SDavid Rientjes else if (pol->flags & MPOL_F_RELATIVE_NODES) 32637012946SDavid Rientjes mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes); 3271d0d2680SDavid Rientjes else { 328269fbe72SBen Widawsky nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed, 329213980c0SVlastimil Babka *nodes); 33029b190faSzhong jiang pol->w.cpuset_mems_allowed = *nodes; 3311d0d2680SDavid Rientjes } 33237012946SDavid Rientjes 333708c1bbcSMiao Xie if (nodes_empty(tmp)) 334708c1bbcSMiao Xie tmp = *nodes; 335708c1bbcSMiao Xie 336269fbe72SBen Widawsky pol->nodes = tmp; 33737012946SDavid Rientjes } 33837012946SDavid Rientjes 33937012946SDavid Rientjes static void mpol_rebind_preferred(struct mempolicy *pol, 340213980c0SVlastimil Babka const nodemask_t *nodes) 34137012946SDavid Rientjes { 34237012946SDavid Rientjes pol->w.cpuset_mems_allowed = *nodes; 3431d0d2680SDavid Rientjes } 34437012946SDavid Rientjes 345708c1bbcSMiao Xie /* 346708c1bbcSMiao Xie * mpol_rebind_policy - Migrate a policy to a different set of nodes 347708c1bbcSMiao Xie * 348c1e8d7c6SMichel Lespinasse * Per-vma policies are protected by mmap_lock. Allocations using per-task 349213980c0SVlastimil Babka * policies are protected by task->mems_allowed_seq to prevent a premature 350213980c0SVlastimil Babka * OOM/allocation failure due to parallel nodemask modification. 351708c1bbcSMiao Xie */ 352213980c0SVlastimil Babka static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask) 35337012946SDavid Rientjes { 354018160adSWang Cheng if (!pol || pol->mode == MPOL_LOCAL) 35537012946SDavid Rientjes return; 3567858d7bcSFeng Tang if (!mpol_store_user_nodemask(pol) && 35737012946SDavid Rientjes nodes_equal(pol->w.cpuset_mems_allowed, *newmask)) 35837012946SDavid Rientjes return; 359708c1bbcSMiao Xie 360213980c0SVlastimil Babka mpol_ops[pol->mode].rebind(pol, newmask); 3611d0d2680SDavid Rientjes } 3621d0d2680SDavid Rientjes 3631d0d2680SDavid Rientjes /* 3641d0d2680SDavid Rientjes * Wrapper for mpol_rebind_policy() that just requires task 3651d0d2680SDavid Rientjes * pointer, and updates task mempolicy. 36658568d2aSMiao Xie * 36758568d2aSMiao Xie * Called with task's alloc_lock held. 3681d0d2680SDavid Rientjes */ 3691d0d2680SDavid Rientjes 370213980c0SVlastimil Babka void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new) 3711d0d2680SDavid Rientjes { 372213980c0SVlastimil Babka mpol_rebind_policy(tsk->mempolicy, new); 3731d0d2680SDavid Rientjes } 3741d0d2680SDavid Rientjes 3751d0d2680SDavid Rientjes /* 3761d0d2680SDavid Rientjes * Rebind each vma in mm to new nodemask. 3771d0d2680SDavid Rientjes * 378c1e8d7c6SMichel Lespinasse * Call holding a reference to mm. 
Takes mm->mmap_lock during call. 3791d0d2680SDavid Rientjes */ 3801d0d2680SDavid Rientjes 3811d0d2680SDavid Rientjes void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new) 3821d0d2680SDavid Rientjes { 3831d0d2680SDavid Rientjes struct vm_area_struct *vma; 3841d0d2680SDavid Rientjes 385d8ed45c5SMichel Lespinasse mmap_write_lock(mm); 3861d0d2680SDavid Rientjes for (vma = mm->mmap; vma; vma = vma->vm_next) 387213980c0SVlastimil Babka mpol_rebind_policy(vma->vm_policy, new); 388d8ed45c5SMichel Lespinasse mmap_write_unlock(mm); 3891d0d2680SDavid Rientjes } 3901d0d2680SDavid Rientjes 39137012946SDavid Rientjes static const struct mempolicy_operations mpol_ops[MPOL_MAX] = { 39237012946SDavid Rientjes [MPOL_DEFAULT] = { 39337012946SDavid Rientjes .rebind = mpol_rebind_default, 39437012946SDavid Rientjes }, 39537012946SDavid Rientjes [MPOL_INTERLEAVE] = { 396be897d48SFeng Tang .create = mpol_new_nodemask, 39737012946SDavid Rientjes .rebind = mpol_rebind_nodemask, 39837012946SDavid Rientjes }, 39937012946SDavid Rientjes [MPOL_PREFERRED] = { 40037012946SDavid Rientjes .create = mpol_new_preferred, 40137012946SDavid Rientjes .rebind = mpol_rebind_preferred, 40237012946SDavid Rientjes }, 40337012946SDavid Rientjes [MPOL_BIND] = { 404be897d48SFeng Tang .create = mpol_new_nodemask, 40537012946SDavid Rientjes .rebind = mpol_rebind_nodemask, 40637012946SDavid Rientjes }, 4077858d7bcSFeng Tang [MPOL_LOCAL] = { 4087858d7bcSFeng Tang .rebind = mpol_rebind_default, 4097858d7bcSFeng Tang }, 410b27abaccSDave Hansen [MPOL_PREFERRED_MANY] = { 411be897d48SFeng Tang .create = mpol_new_nodemask, 412b27abaccSDave Hansen .rebind = mpol_rebind_preferred, 413b27abaccSDave Hansen }, 41437012946SDavid Rientjes }; 41537012946SDavid Rientjes 416a53190a4SYang Shi static int migrate_page_add(struct page *page, struct list_head *pagelist, 417fc301289SChristoph Lameter unsigned long flags); 4181a75a6c8SChristoph Lameter 4196f4576e3SNaoya Horiguchi struct queue_pages { 4206f4576e3SNaoya Horiguchi struct list_head *pagelist; 4216f4576e3SNaoya Horiguchi unsigned long flags; 4226f4576e3SNaoya Horiguchi nodemask_t *nmask; 423f18da660SLi Xinhai unsigned long start; 424f18da660SLi Xinhai unsigned long end; 425f18da660SLi Xinhai struct vm_area_struct *first; 4266f4576e3SNaoya Horiguchi }; 4276f4576e3SNaoya Horiguchi 42898094945SNaoya Horiguchi /* 42988aaa2a1SNaoya Horiguchi * Check if the page's nid is in qp->nmask. 43088aaa2a1SNaoya Horiguchi * 43188aaa2a1SNaoya Horiguchi * If MPOL_MF_INVERT is set in qp->flags, check if the nid is 43288aaa2a1SNaoya Horiguchi * in the invert of qp->nmask. 43388aaa2a1SNaoya Horiguchi */ 43488aaa2a1SNaoya Horiguchi static inline bool queue_pages_required(struct page *page, 43588aaa2a1SNaoya Horiguchi struct queue_pages *qp) 43688aaa2a1SNaoya Horiguchi { 43788aaa2a1SNaoya Horiguchi int nid = page_to_nid(page); 43888aaa2a1SNaoya Horiguchi unsigned long flags = qp->flags; 43988aaa2a1SNaoya Horiguchi 44088aaa2a1SNaoya Horiguchi return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT); 44188aaa2a1SNaoya Horiguchi } 44288aaa2a1SNaoya Horiguchi 443a7f40cfeSYang Shi /* 444bc78b5edSMiaohe Lin * queue_pages_pmd() has three possible return values: 445e5947d23SYang Shi * 0 - pages are placed on the right node or queued successfully, or 446e5947d23SYang Shi * special page is met, i.e. huge zero page. 447d8835445SYang Shi * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were 448d8835445SYang Shi * specified. 
449d8835445SYang Shi * -EIO - is migration entry or only MPOL_MF_STRICT was specified and an 450d8835445SYang Shi * existing page was already on a node that does not follow the 451d8835445SYang Shi * policy. 452a7f40cfeSYang Shi */ 453c8633798SNaoya Horiguchi static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr, 454c8633798SNaoya Horiguchi unsigned long end, struct mm_walk *walk) 455959a7e13SJules Irenge __releases(ptl) 456c8633798SNaoya Horiguchi { 457c8633798SNaoya Horiguchi int ret = 0; 458c8633798SNaoya Horiguchi struct page *page; 459c8633798SNaoya Horiguchi struct queue_pages *qp = walk->private; 460c8633798SNaoya Horiguchi unsigned long flags; 461c8633798SNaoya Horiguchi 462c8633798SNaoya Horiguchi if (unlikely(is_pmd_migration_entry(*pmd))) { 463a7f40cfeSYang Shi ret = -EIO; 464c8633798SNaoya Horiguchi goto unlock; 465c8633798SNaoya Horiguchi } 466c8633798SNaoya Horiguchi page = pmd_page(*pmd); 467c8633798SNaoya Horiguchi if (is_huge_zero_page(page)) { 468e5947d23SYang Shi walk->action = ACTION_CONTINUE; 4696d97cf88SMiaohe Lin goto unlock; 470c8633798SNaoya Horiguchi } 471d8835445SYang Shi if (!queue_pages_required(page, qp)) 472c8633798SNaoya Horiguchi goto unlock; 473c8633798SNaoya Horiguchi 474c8633798SNaoya Horiguchi flags = qp->flags; 475c8633798SNaoya Horiguchi /* go to thp migration */ 476a7f40cfeSYang Shi if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 477a53190a4SYang Shi if (!vma_migratable(walk->vma) || 478a53190a4SYang Shi migrate_page_add(page, qp->pagelist, flags)) { 479d8835445SYang Shi ret = 1; 480a7f40cfeSYang Shi goto unlock; 481a7f40cfeSYang Shi } 482a7f40cfeSYang Shi } else 483a7f40cfeSYang Shi ret = -EIO; 484c8633798SNaoya Horiguchi unlock: 485c8633798SNaoya Horiguchi spin_unlock(ptl); 486c8633798SNaoya Horiguchi return ret; 487c8633798SNaoya Horiguchi } 488c8633798SNaoya Horiguchi 48988aaa2a1SNaoya Horiguchi /* 49098094945SNaoya Horiguchi * Scan through pages checking if pages follow certain conditions, 49198094945SNaoya Horiguchi * and move them to the pagelist if they do. 492d8835445SYang Shi * 493d8835445SYang Shi * queue_pages_pte_range() has three possible return values: 494e5947d23SYang Shi * 0 - pages are placed on the right node or queued successfully, or 495e5947d23SYang Shi * special page is met, i.e. zero page. 496d8835445SYang Shi * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were 497d8835445SYang Shi * specified. 498d8835445SYang Shi * -EIO - only MPOL_MF_STRICT was specified and an existing page was already 499d8835445SYang Shi * on a node that does not follow the policy. 
50098094945SNaoya Horiguchi */ 5016f4576e3SNaoya Horiguchi static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr, 5026f4576e3SNaoya Horiguchi unsigned long end, struct mm_walk *walk) 5031da177e4SLinus Torvalds { 5046f4576e3SNaoya Horiguchi struct vm_area_struct *vma = walk->vma; 5056f4576e3SNaoya Horiguchi struct page *page; 5066f4576e3SNaoya Horiguchi struct queue_pages *qp = walk->private; 5076f4576e3SNaoya Horiguchi unsigned long flags = qp->flags; 508d8835445SYang Shi bool has_unmovable = false; 5093f088420SShijie Luo pte_t *pte, *mapped_pte; 510705e87c0SHugh Dickins spinlock_t *ptl; 511941150a3SHugh Dickins 512c8633798SNaoya Horiguchi ptl = pmd_trans_huge_lock(pmd, vma); 513bc78b5edSMiaohe Lin if (ptl) 514bc78b5edSMiaohe Lin return queue_pages_pmd(pmd, ptl, addr, end, walk); 51591612e0dSHugh Dickins 516337d9abfSNaoya Horiguchi if (pmd_trans_unstable(pmd)) 517337d9abfSNaoya Horiguchi return 0; 51894723aafSMichal Hocko 5193f088420SShijie Luo mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); 5206f4576e3SNaoya Horiguchi for (; addr != end; pte++, addr += PAGE_SIZE) { 52191612e0dSHugh Dickins if (!pte_present(*pte)) 52291612e0dSHugh Dickins continue; 5236aab341eSLinus Torvalds page = vm_normal_page(vma, addr, *pte); 5243218f871SAlex Sierra if (!page || is_zone_device_page(page)) 52591612e0dSHugh Dickins continue; 526053837fcSNick Piggin /* 52762b61f61SHugh Dickins * vm_normal_page() filters out zero pages, but there might 52862b61f61SHugh Dickins * still be PageReserved pages to skip, perhaps in a VDSO. 529053837fcSNick Piggin */ 530b79bc0a0SHugh Dickins if (PageReserved(page)) 531f4598c8bSChristoph Lameter continue; 53288aaa2a1SNaoya Horiguchi if (!queue_pages_required(page, qp)) 53338e35860SChristoph Lameter continue; 534a7f40cfeSYang Shi if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 535d8835445SYang Shi /* MPOL_MF_STRICT must be specified if we get here */ 536d8835445SYang Shi if (!vma_migratable(vma)) { 537d8835445SYang Shi has_unmovable = true; 538a7f40cfeSYang Shi break; 539d8835445SYang Shi } 540a53190a4SYang Shi 541a53190a4SYang Shi /* 542a53190a4SYang Shi * Do not abort immediately since there may be 543a53190a4SYang Shi * temporary off LRU pages in the range. Still 544a53190a4SYang Shi * need migrate other LRU pages. 545a53190a4SYang Shi */ 546a53190a4SYang Shi if (migrate_page_add(page, qp->pagelist, flags)) 547a53190a4SYang Shi has_unmovable = true; 548a7f40cfeSYang Shi } else 549a7f40cfeSYang Shi break; 5506f4576e3SNaoya Horiguchi } 5513f088420SShijie Luo pte_unmap_unlock(mapped_pte, ptl); 5526f4576e3SNaoya Horiguchi cond_resched(); 553d8835445SYang Shi 554d8835445SYang Shi if (has_unmovable) 555d8835445SYang Shi return 1; 556d8835445SYang Shi 557a7f40cfeSYang Shi return addr != end ? -EIO : 0; 55891612e0dSHugh Dickins } 55991612e0dSHugh Dickins 5606f4576e3SNaoya Horiguchi static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask, 5616f4576e3SNaoya Horiguchi unsigned long addr, unsigned long end, 5626f4576e3SNaoya Horiguchi struct mm_walk *walk) 563e2d8cf40SNaoya Horiguchi { 564dcf17635SLi Xinhai int ret = 0; 565e2d8cf40SNaoya Horiguchi #ifdef CONFIG_HUGETLB_PAGE 5666f4576e3SNaoya Horiguchi struct queue_pages *qp = walk->private; 567dcf17635SLi Xinhai unsigned long flags = (qp->flags & MPOL_MF_VALID); 568e2d8cf40SNaoya Horiguchi struct page *page; 569cb900f41SKirill A. 
Shutemov spinlock_t *ptl; 570d4c54919SNaoya Horiguchi pte_t entry; 571e2d8cf40SNaoya Horiguchi 5726f4576e3SNaoya Horiguchi ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte); 5736f4576e3SNaoya Horiguchi entry = huge_ptep_get(pte); 574d4c54919SNaoya Horiguchi if (!pte_present(entry)) 575d4c54919SNaoya Horiguchi goto unlock; 576d4c54919SNaoya Horiguchi page = pte_page(entry); 57788aaa2a1SNaoya Horiguchi if (!queue_pages_required(page, qp)) 578e2d8cf40SNaoya Horiguchi goto unlock; 579dcf17635SLi Xinhai 580dcf17635SLi Xinhai if (flags == MPOL_MF_STRICT) { 581dcf17635SLi Xinhai /* 582dcf17635SLi Xinhai * STRICT alone means only detecting misplaced page and no 583dcf17635SLi Xinhai * need to further check other vma. 584dcf17635SLi Xinhai */ 585dcf17635SLi Xinhai ret = -EIO; 586dcf17635SLi Xinhai goto unlock; 587dcf17635SLi Xinhai } 588dcf17635SLi Xinhai 589dcf17635SLi Xinhai if (!vma_migratable(walk->vma)) { 590dcf17635SLi Xinhai /* 591dcf17635SLi Xinhai * Must be STRICT with MOVE*, otherwise .test_walk() have 592dcf17635SLi Xinhai * stopped walking current vma. 593dcf17635SLi Xinhai * Detecting misplaced page but allow migrating pages which 594dcf17635SLi Xinhai * have been queued. 595dcf17635SLi Xinhai */ 596dcf17635SLi Xinhai ret = 1; 597dcf17635SLi Xinhai goto unlock; 598dcf17635SLi Xinhai } 599dcf17635SLi Xinhai 600e2d8cf40SNaoya Horiguchi /* With MPOL_MF_MOVE, we migrate only unshared hugepage. */ 601e2d8cf40SNaoya Horiguchi if (flags & (MPOL_MF_MOVE_ALL) || 602dcf17635SLi Xinhai (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) { 6037ce82f4cSMiaohe Lin if (isolate_hugetlb(page, qp->pagelist) && 604dcf17635SLi Xinhai (flags & MPOL_MF_STRICT)) 605dcf17635SLi Xinhai /* 606dcf17635SLi Xinhai * Failed to isolate page but allow migrating pages 607dcf17635SLi Xinhai * which have been queued. 608dcf17635SLi Xinhai */ 609dcf17635SLi Xinhai ret = 1; 610dcf17635SLi Xinhai } 611e2d8cf40SNaoya Horiguchi unlock: 612cb900f41SKirill A. Shutemov spin_unlock(ptl); 613e2d8cf40SNaoya Horiguchi #else 614e2d8cf40SNaoya Horiguchi BUG(); 615e2d8cf40SNaoya Horiguchi #endif 616dcf17635SLi Xinhai return ret; 6171da177e4SLinus Torvalds } 6181da177e4SLinus Torvalds 6195877231fSAneesh Kumar K.V #ifdef CONFIG_NUMA_BALANCING 620b24f53a0SLee Schermerhorn /* 6214b10e7d5SMel Gorman * This is used to mark a range of virtual addresses to be inaccessible. 6224b10e7d5SMel Gorman * These are later cleared by a NUMA hinting fault. Depending on these 6234b10e7d5SMel Gorman * faults, pages may be migrated for better NUMA placement. 6244b10e7d5SMel Gorman * 6254b10e7d5SMel Gorman * This is assuming that NUMA faults are handled using PROT_NONE. If 6264b10e7d5SMel Gorman * an architecture makes a different choice, it will need further 6274b10e7d5SMel Gorman * changes to the core. 
628b24f53a0SLee Schermerhorn */ 6294b10e7d5SMel Gorman unsigned long change_prot_numa(struct vm_area_struct *vma, 6304b10e7d5SMel Gorman unsigned long addr, unsigned long end) 631b24f53a0SLee Schermerhorn { 6324a18419fSNadav Amit struct mmu_gather tlb; 6334b10e7d5SMel Gorman int nr_updated; 634b24f53a0SLee Schermerhorn 6354a18419fSNadav Amit tlb_gather_mmu(&tlb, vma->vm_mm); 6364a18419fSNadav Amit 6374a18419fSNadav Amit nr_updated = change_protection(&tlb, vma, addr, end, PAGE_NONE, 6384a18419fSNadav Amit MM_CP_PROT_NUMA); 63903c5a6e1SMel Gorman if (nr_updated) 64003c5a6e1SMel Gorman count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated); 641b24f53a0SLee Schermerhorn 6424a18419fSNadav Amit tlb_finish_mmu(&tlb); 6434a18419fSNadav Amit 6444b10e7d5SMel Gorman return nr_updated; 645b24f53a0SLee Schermerhorn } 646b24f53a0SLee Schermerhorn #else 647b24f53a0SLee Schermerhorn static unsigned long change_prot_numa(struct vm_area_struct *vma, 648b24f53a0SLee Schermerhorn unsigned long addr, unsigned long end) 649b24f53a0SLee Schermerhorn { 650b24f53a0SLee Schermerhorn return 0; 651b24f53a0SLee Schermerhorn } 6525877231fSAneesh Kumar K.V #endif /* CONFIG_NUMA_BALANCING */ 653b24f53a0SLee Schermerhorn 6546f4576e3SNaoya Horiguchi static int queue_pages_test_walk(unsigned long start, unsigned long end, 6556f4576e3SNaoya Horiguchi struct mm_walk *walk) 6561da177e4SLinus Torvalds { 6576f4576e3SNaoya Horiguchi struct vm_area_struct *vma = walk->vma; 6586f4576e3SNaoya Horiguchi struct queue_pages *qp = walk->private; 6595b952b3cSAndi Kleen unsigned long endvma = vma->vm_end; 6606f4576e3SNaoya Horiguchi unsigned long flags = qp->flags; 661dc9aa5b9SChristoph Lameter 662a18b3ac2SLi Xinhai /* range check first */ 663ce33135cSMiaohe Lin VM_BUG_ON_VMA(!range_in_vma(vma, start, end), vma); 664f18da660SLi Xinhai 665f18da660SLi Xinhai if (!qp->first) { 666f18da660SLi Xinhai qp->first = vma; 667f18da660SLi Xinhai if (!(flags & MPOL_MF_DISCONTIG_OK) && 668f18da660SLi Xinhai (qp->start < vma->vm_start)) 669f18da660SLi Xinhai /* hole at head side of range */ 670a18b3ac2SLi Xinhai return -EFAULT; 671a18b3ac2SLi Xinhai } 672f18da660SLi Xinhai if (!(flags & MPOL_MF_DISCONTIG_OK) && 673f18da660SLi Xinhai ((vma->vm_end < qp->end) && 674f18da660SLi Xinhai (!vma->vm_next || vma->vm_end < vma->vm_next->vm_start))) 675f18da660SLi Xinhai /* hole at middle or tail of range */ 676f18da660SLi Xinhai return -EFAULT; 677a18b3ac2SLi Xinhai 678a7f40cfeSYang Shi /* 679a7f40cfeSYang Shi * Need check MPOL_MF_STRICT to return -EIO if possible 680a7f40cfeSYang Shi * regardless of vma_migratable 681a7f40cfeSYang Shi */ 682a7f40cfeSYang Shi if (!vma_migratable(vma) && 683a7f40cfeSYang Shi !(flags & MPOL_MF_STRICT)) 68448684a65SNaoya Horiguchi return 1; 68548684a65SNaoya Horiguchi 6865b952b3cSAndi Kleen if (endvma > end) 6875b952b3cSAndi Kleen endvma = end; 688b24f53a0SLee Schermerhorn 689b24f53a0SLee Schermerhorn if (flags & MPOL_MF_LAZY) { 6902c0346a3SMel Gorman /* Similar to task_numa_work, skip inaccessible VMAs */ 6913122e80eSAnshuman Khandual if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) && 6924355c018SLiang Chen !(vma->vm_flags & VM_MIXEDMAP)) 693b24f53a0SLee Schermerhorn change_prot_numa(vma, start, endvma); 6946f4576e3SNaoya Horiguchi return 1; 695b24f53a0SLee Schermerhorn } 696b24f53a0SLee Schermerhorn 6976f4576e3SNaoya Horiguchi /* queue pages from current vma */ 698a7f40cfeSYang Shi if (flags & MPOL_MF_VALID) 6996f4576e3SNaoya Horiguchi return 0; 7006f4576e3SNaoya Horiguchi return 1; 7016f4576e3SNaoya Horiguchi } 
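/*
 * Illustrative userspace sketch (not part of the original file) of what
 * drives the MPOL_MF_MOVE / MPOL_MF_STRICT handling above: mbind(2),
 * here via libnuma's <numaif.h> wrapper.  Assumes 'addr'/'len' describe
 * an existing mapping and 'nodemask'/'maxnode' a valid node set; error
 * handling beyond perror() is elided.
 *
 *	#include <numaif.h>
 *
 *	if (mbind(addr, len, MPOL_BIND, nodemask, maxnode,
 *		  MPOL_MF_MOVE | MPOL_MF_STRICT) == -1)
 *		perror("mbind");
 *
 * With MPOL_MF_STRICT set, errno is EIO when some existing pages could
 * not be moved to the requested nodes, which is what the 1 / -EIO
 * return plumbing in the queue_pages_*() helpers ultimately reports.
 */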
702b24f53a0SLee Schermerhorn 7037b86ac33SChristoph Hellwig static const struct mm_walk_ops queue_pages_walk_ops = { 7047b86ac33SChristoph Hellwig .hugetlb_entry = queue_pages_hugetlb, 7057b86ac33SChristoph Hellwig .pmd_entry = queue_pages_pte_range, 7067b86ac33SChristoph Hellwig .test_walk = queue_pages_test_walk, 7077b86ac33SChristoph Hellwig }; 7087b86ac33SChristoph Hellwig 7096f4576e3SNaoya Horiguchi /* 7106f4576e3SNaoya Horiguchi * Walk through page tables and collect pages to be migrated. 7116f4576e3SNaoya Horiguchi * 7126f4576e3SNaoya Horiguchi * If pages found in a given range are on a set of nodes (determined by 7136f4576e3SNaoya Horiguchi * @nodes and @flags,) it's isolated and queued to the pagelist which is 714d8835445SYang Shi * passed via @private. 715d8835445SYang Shi * 716d8835445SYang Shi * queue_pages_range() has three possible return values: 717d8835445SYang Shi * 1 - there is unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were 718d8835445SYang Shi * specified. 719d8835445SYang Shi * 0 - queue pages successfully or no misplaced page. 720a85dfc30SYang Shi * errno - i.e. misplaced pages with MPOL_MF_STRICT specified (-EIO) or 721a85dfc30SYang Shi * memory range specified by nodemask and maxnode points outside 722a85dfc30SYang Shi * your accessible address space (-EFAULT) 7236f4576e3SNaoya Horiguchi */ 7246f4576e3SNaoya Horiguchi static int 7256f4576e3SNaoya Horiguchi queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end, 7266f4576e3SNaoya Horiguchi nodemask_t *nodes, unsigned long flags, 7276f4576e3SNaoya Horiguchi struct list_head *pagelist) 7286f4576e3SNaoya Horiguchi { 729f18da660SLi Xinhai int err; 7306f4576e3SNaoya Horiguchi struct queue_pages qp = { 7316f4576e3SNaoya Horiguchi .pagelist = pagelist, 7326f4576e3SNaoya Horiguchi .flags = flags, 7336f4576e3SNaoya Horiguchi .nmask = nodes, 734f18da660SLi Xinhai .start = start, 735f18da660SLi Xinhai .end = end, 736f18da660SLi Xinhai .first = NULL, 7376f4576e3SNaoya Horiguchi }; 7386f4576e3SNaoya Horiguchi 739f18da660SLi Xinhai err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp); 740f18da660SLi Xinhai 741f18da660SLi Xinhai if (!qp.first) 742f18da660SLi Xinhai /* whole range in hole */ 743f18da660SLi Xinhai err = -EFAULT; 744f18da660SLi Xinhai 745f18da660SLi Xinhai return err; 7461da177e4SLinus Torvalds } 7471da177e4SLinus Torvalds 748869833f2SKOSAKI Motohiro /* 749869833f2SKOSAKI Motohiro * Apply policy to a single VMA 750c1e8d7c6SMichel Lespinasse * This must be called with the mmap_lock held for writing. 751869833f2SKOSAKI Motohiro */ 752869833f2SKOSAKI Motohiro static int vma_replace_policy(struct vm_area_struct *vma, 753869833f2SKOSAKI Motohiro struct mempolicy *pol) 7548d34694cSKOSAKI Motohiro { 755869833f2SKOSAKI Motohiro int err; 756869833f2SKOSAKI Motohiro struct mempolicy *old; 757869833f2SKOSAKI Motohiro struct mempolicy *new; 7588d34694cSKOSAKI Motohiro 7598d34694cSKOSAKI Motohiro pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n", 7608d34694cSKOSAKI Motohiro vma->vm_start, vma->vm_end, vma->vm_pgoff, 7618d34694cSKOSAKI Motohiro vma->vm_ops, vma->vm_file, 7628d34694cSKOSAKI Motohiro vma->vm_ops ? 
vma->vm_ops->set_policy : NULL); 7638d34694cSKOSAKI Motohiro 764869833f2SKOSAKI Motohiro new = mpol_dup(pol); 765869833f2SKOSAKI Motohiro if (IS_ERR(new)) 766869833f2SKOSAKI Motohiro return PTR_ERR(new); 767869833f2SKOSAKI Motohiro 768869833f2SKOSAKI Motohiro if (vma->vm_ops && vma->vm_ops->set_policy) { 7698d34694cSKOSAKI Motohiro err = vma->vm_ops->set_policy(vma, new); 770869833f2SKOSAKI Motohiro if (err) 771869833f2SKOSAKI Motohiro goto err_out; 7728d34694cSKOSAKI Motohiro } 773869833f2SKOSAKI Motohiro 774869833f2SKOSAKI Motohiro old = vma->vm_policy; 775c1e8d7c6SMichel Lespinasse vma->vm_policy = new; /* protected by mmap_lock */ 776869833f2SKOSAKI Motohiro mpol_put(old); 777869833f2SKOSAKI Motohiro 778869833f2SKOSAKI Motohiro return 0; 779869833f2SKOSAKI Motohiro err_out: 780869833f2SKOSAKI Motohiro mpol_put(new); 7818d34694cSKOSAKI Motohiro return err; 7828d34694cSKOSAKI Motohiro } 7838d34694cSKOSAKI Motohiro 7841da177e4SLinus Torvalds /* Step 2: apply policy to a range and do splits. */ 7859d8cebd4SKOSAKI Motohiro static int mbind_range(struct mm_struct *mm, unsigned long start, 7869d8cebd4SKOSAKI Motohiro unsigned long end, struct mempolicy *new_pol) 7871da177e4SLinus Torvalds { 7889d8cebd4SKOSAKI Motohiro struct vm_area_struct *prev; 7899d8cebd4SKOSAKI Motohiro struct vm_area_struct *vma; 7909d8cebd4SKOSAKI Motohiro int err = 0; 791e26a5114SKOSAKI Motohiro pgoff_t pgoff; 7929d8cebd4SKOSAKI Motohiro unsigned long vmstart; 7939d8cebd4SKOSAKI Motohiro unsigned long vmend; 7941da177e4SLinus Torvalds 795097d5910SLinus Torvalds vma = find_vma(mm, start); 796f18da660SLi Xinhai VM_BUG_ON(!vma); 7979d8cebd4SKOSAKI Motohiro 798097d5910SLinus Torvalds prev = vma->vm_prev; 799e26a5114SKOSAKI Motohiro if (start > vma->vm_start) 800e26a5114SKOSAKI Motohiro prev = vma; 801e26a5114SKOSAKI Motohiro 8024e090600SHugh Dickins for (; vma && vma->vm_start < end; prev = vma, vma = vma->vm_next) { 8039d8cebd4SKOSAKI Motohiro vmstart = max(start, vma->vm_start); 8049d8cebd4SKOSAKI Motohiro vmend = min(end, vma->vm_end); 8059d8cebd4SKOSAKI Motohiro 806e26a5114SKOSAKI Motohiro if (mpol_equal(vma_policy(vma), new_pol)) 807e26a5114SKOSAKI Motohiro continue; 808e26a5114SKOSAKI Motohiro 809e26a5114SKOSAKI Motohiro pgoff = vma->vm_pgoff + 810e26a5114SKOSAKI Motohiro ((vmstart - vma->vm_start) >> PAGE_SHIFT); 8119d8cebd4SKOSAKI Motohiro prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags, 812e26a5114SKOSAKI Motohiro vma->anon_vma, vma->vm_file, pgoff, 8139a10064fSColin Cross new_pol, vma->vm_userfaultfd_ctx, 8145c26f6acSSuren Baghdasaryan anon_vma_name(vma)); 8159d8cebd4SKOSAKI Motohiro if (prev) { 8169d8cebd4SKOSAKI Motohiro vma = prev; 8173964acd0SOleg Nesterov goto replace; 8181da177e4SLinus Torvalds } 8199d8cebd4SKOSAKI Motohiro if (vma->vm_start != vmstart) { 8209d8cebd4SKOSAKI Motohiro err = split_vma(vma->vm_mm, vma, vmstart, 1); 8219d8cebd4SKOSAKI Motohiro if (err) 8229d8cebd4SKOSAKI Motohiro goto out; 8239d8cebd4SKOSAKI Motohiro } 8249d8cebd4SKOSAKI Motohiro if (vma->vm_end != vmend) { 8259d8cebd4SKOSAKI Motohiro err = split_vma(vma->vm_mm, vma, vmend, 0); 8269d8cebd4SKOSAKI Motohiro if (err) 8279d8cebd4SKOSAKI Motohiro goto out; 8289d8cebd4SKOSAKI Motohiro } 8293964acd0SOleg Nesterov replace: 830869833f2SKOSAKI Motohiro err = vma_replace_policy(vma, new_pol); 8319d8cebd4SKOSAKI Motohiro if (err) 8329d8cebd4SKOSAKI Motohiro goto out; 8339d8cebd4SKOSAKI Motohiro } 8349d8cebd4SKOSAKI Motohiro 8359d8cebd4SKOSAKI Motohiro out: 8361da177e4SLinus Torvalds return err; 8371da177e4SLinus Torvalds } 
8381da177e4SLinus Torvalds 8391da177e4SLinus Torvalds /* Set the process memory policy */ 840028fec41SDavid Rientjes static long do_set_mempolicy(unsigned short mode, unsigned short flags, 841028fec41SDavid Rientjes nodemask_t *nodes) 8421da177e4SLinus Torvalds { 84358568d2aSMiao Xie struct mempolicy *new, *old; 8444bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 84558568d2aSMiao Xie int ret; 8461da177e4SLinus Torvalds 8474bfc4495SKAMEZAWA Hiroyuki if (!scratch) 8484bfc4495SKAMEZAWA Hiroyuki return -ENOMEM; 849f4e53d91SLee Schermerhorn 8504bfc4495SKAMEZAWA Hiroyuki new = mpol_new(mode, flags, nodes); 8514bfc4495SKAMEZAWA Hiroyuki if (IS_ERR(new)) { 8524bfc4495SKAMEZAWA Hiroyuki ret = PTR_ERR(new); 8534bfc4495SKAMEZAWA Hiroyuki goto out; 8544bfc4495SKAMEZAWA Hiroyuki } 8552c7c3a7dSOleg Nesterov 856*12c1dc8eSAbel Wu task_lock(current); 8574bfc4495SKAMEZAWA Hiroyuki ret = mpol_set_nodemask(new, nodes, scratch); 85858568d2aSMiao Xie if (ret) { 859*12c1dc8eSAbel Wu task_unlock(current); 86058568d2aSMiao Xie mpol_put(new); 8614bfc4495SKAMEZAWA Hiroyuki goto out; 86258568d2aSMiao Xie } 863*12c1dc8eSAbel Wu 86458568d2aSMiao Xie old = current->mempolicy; 8651da177e4SLinus Torvalds current->mempolicy = new; 86645816682SVlastimil Babka if (new && new->mode == MPOL_INTERLEAVE) 86745816682SVlastimil Babka current->il_prev = MAX_NUMNODES-1; 86858568d2aSMiao Xie task_unlock(current); 86958568d2aSMiao Xie mpol_put(old); 8704bfc4495SKAMEZAWA Hiroyuki ret = 0; 8714bfc4495SKAMEZAWA Hiroyuki out: 8724bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 8734bfc4495SKAMEZAWA Hiroyuki return ret; 8741da177e4SLinus Torvalds } 8751da177e4SLinus Torvalds 876bea904d5SLee Schermerhorn /* 877bea904d5SLee Schermerhorn * Return nodemask for policy for get_mempolicy() query 87858568d2aSMiao Xie * 87958568d2aSMiao Xie * Called with task's alloc_lock held 880bea904d5SLee Schermerhorn */ 881bea904d5SLee Schermerhorn static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes) 8821da177e4SLinus Torvalds { 883dfcd3c0dSAndi Kleen nodes_clear(*nodes); 884bea904d5SLee Schermerhorn if (p == &default_policy) 885bea904d5SLee Schermerhorn return; 886bea904d5SLee Schermerhorn 88745c4745aSLee Schermerhorn switch (p->mode) { 88819770b32SMel Gorman case MPOL_BIND: 8891da177e4SLinus Torvalds case MPOL_INTERLEAVE: 890269fbe72SBen Widawsky case MPOL_PREFERRED: 891b27abaccSDave Hansen case MPOL_PREFERRED_MANY: 892269fbe72SBen Widawsky *nodes = p->nodes; 8931da177e4SLinus Torvalds break; 8947858d7bcSFeng Tang case MPOL_LOCAL: 8957858d7bcSFeng Tang /* return empty node mask for local allocation */ 8967858d7bcSFeng Tang break; 8971da177e4SLinus Torvalds default: 8981da177e4SLinus Torvalds BUG(); 8991da177e4SLinus Torvalds } 9001da177e4SLinus Torvalds } 9011da177e4SLinus Torvalds 9023b9aadf7SAndrea Arcangeli static int lookup_node(struct mm_struct *mm, unsigned long addr) 9031da177e4SLinus Torvalds { 904ba841078SPeter Xu struct page *p = NULL; 905f728b9c4SJohn Hubbard int ret; 9061da177e4SLinus Torvalds 907f728b9c4SJohn Hubbard ret = get_user_pages_fast(addr & PAGE_MASK, 1, 0, &p); 908f728b9c4SJohn Hubbard if (ret > 0) { 909f728b9c4SJohn Hubbard ret = page_to_nid(p); 9101da177e4SLinus Torvalds put_page(p); 9111da177e4SLinus Torvalds } 912f728b9c4SJohn Hubbard return ret; 9131da177e4SLinus Torvalds } 9141da177e4SLinus Torvalds 9151da177e4SLinus Torvalds /* Retrieve NUMA policy */ 916dbcb0f19SAdrian Bunk static long do_get_mempolicy(int *policy, nodemask_t *nmask, 9171da177e4SLinus Torvalds unsigned long addr, unsigned long 
flags) 9181da177e4SLinus Torvalds { 9198bccd85fSChristoph Lameter int err; 9201da177e4SLinus Torvalds struct mm_struct *mm = current->mm; 9211da177e4SLinus Torvalds struct vm_area_struct *vma = NULL; 9223b9aadf7SAndrea Arcangeli struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL; 9231da177e4SLinus Torvalds 924754af6f5SLee Schermerhorn if (flags & 925754af6f5SLee Schermerhorn ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED)) 9261da177e4SLinus Torvalds return -EINVAL; 927754af6f5SLee Schermerhorn 928754af6f5SLee Schermerhorn if (flags & MPOL_F_MEMS_ALLOWED) { 929754af6f5SLee Schermerhorn if (flags & (MPOL_F_NODE|MPOL_F_ADDR)) 930754af6f5SLee Schermerhorn return -EINVAL; 931754af6f5SLee Schermerhorn *policy = 0; /* just so it's initialized */ 93258568d2aSMiao Xie task_lock(current); 933754af6f5SLee Schermerhorn *nmask = cpuset_current_mems_allowed; 93458568d2aSMiao Xie task_unlock(current); 935754af6f5SLee Schermerhorn return 0; 936754af6f5SLee Schermerhorn } 937754af6f5SLee Schermerhorn 9381da177e4SLinus Torvalds if (flags & MPOL_F_ADDR) { 939bea904d5SLee Schermerhorn /* 940bea904d5SLee Schermerhorn * Do NOT fall back to task policy if the 941bea904d5SLee Schermerhorn * vma/shared policy at addr is NULL. We 942bea904d5SLee Schermerhorn * want to return MPOL_DEFAULT in this case. 943bea904d5SLee Schermerhorn */ 944d8ed45c5SMichel Lespinasse mmap_read_lock(mm); 94533e3575cSLiam Howlett vma = vma_lookup(mm, addr); 9461da177e4SLinus Torvalds if (!vma) { 947d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); 9481da177e4SLinus Torvalds return -EFAULT; 9491da177e4SLinus Torvalds } 9501da177e4SLinus Torvalds if (vma->vm_ops && vma->vm_ops->get_policy) 9511da177e4SLinus Torvalds pol = vma->vm_ops->get_policy(vma, addr); 9521da177e4SLinus Torvalds else 9531da177e4SLinus Torvalds pol = vma->vm_policy; 9541da177e4SLinus Torvalds } else if (addr) 9551da177e4SLinus Torvalds return -EINVAL; 9561da177e4SLinus Torvalds 9571da177e4SLinus Torvalds if (!pol) 958bea904d5SLee Schermerhorn pol = &default_policy; /* indicates default behavior */ 9591da177e4SLinus Torvalds 9601da177e4SLinus Torvalds if (flags & MPOL_F_NODE) { 9611da177e4SLinus Torvalds if (flags & MPOL_F_ADDR) { 9623b9aadf7SAndrea Arcangeli /* 963f728b9c4SJohn Hubbard * Take a refcount on the mpol, because we are about to 964f728b9c4SJohn Hubbard * drop the mmap_lock, after which only "pol" remains 965f728b9c4SJohn Hubbard * valid, "vma" is stale. 9663b9aadf7SAndrea Arcangeli */ 9673b9aadf7SAndrea Arcangeli pol_refcount = pol; 9683b9aadf7SAndrea Arcangeli vma = NULL; 9693b9aadf7SAndrea Arcangeli mpol_get(pol); 970f728b9c4SJohn Hubbard mmap_read_unlock(mm); 9713b9aadf7SAndrea Arcangeli err = lookup_node(mm, addr); 9721da177e4SLinus Torvalds if (err < 0) 9731da177e4SLinus Torvalds goto out; 9748bccd85fSChristoph Lameter *policy = err; 9751da177e4SLinus Torvalds } else if (pol == current->mempolicy && 97645c4745aSLee Schermerhorn pol->mode == MPOL_INTERLEAVE) { 977269fbe72SBen Widawsky *policy = next_node_in(current->il_prev, pol->nodes); 9781da177e4SLinus Torvalds } else { 9791da177e4SLinus Torvalds err = -EINVAL; 9801da177e4SLinus Torvalds goto out; 9811da177e4SLinus Torvalds } 982bea904d5SLee Schermerhorn } else { 983bea904d5SLee Schermerhorn *policy = pol == &default_policy ? MPOL_DEFAULT : 984bea904d5SLee Schermerhorn pol->mode; 985d79df630SDavid Rientjes /* 986d79df630SDavid Rientjes * Internal mempolicy flags must be masked off before exposing 987d79df630SDavid Rientjes * the policy to userspace. 
988d79df630SDavid Rientjes */ 989d79df630SDavid Rientjes *policy |= (pol->flags & MPOL_MODE_FLAGS); 990bea904d5SLee Schermerhorn } 9911da177e4SLinus Torvalds 9921da177e4SLinus Torvalds err = 0; 99358568d2aSMiao Xie if (nmask) { 994c6b6ef8bSLee Schermerhorn if (mpol_store_user_nodemask(pol)) { 995c6b6ef8bSLee Schermerhorn *nmask = pol->w.user_nodemask; 996c6b6ef8bSLee Schermerhorn } else { 99758568d2aSMiao Xie task_lock(current); 998bea904d5SLee Schermerhorn get_policy_nodemask(pol, nmask); 99958568d2aSMiao Xie task_unlock(current); 100058568d2aSMiao Xie } 1001c6b6ef8bSLee Schermerhorn } 10021da177e4SLinus Torvalds 10031da177e4SLinus Torvalds out: 100452cd3b07SLee Schermerhorn mpol_cond_put(pol); 10051da177e4SLinus Torvalds if (vma) 1006d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); 10073b9aadf7SAndrea Arcangeli if (pol_refcount) 10083b9aadf7SAndrea Arcangeli mpol_put(pol_refcount); 10091da177e4SLinus Torvalds return err; 10101da177e4SLinus Torvalds } 10111da177e4SLinus Torvalds 1012b20a3503SChristoph Lameter #ifdef CONFIG_MIGRATION 10138bccd85fSChristoph Lameter /* 1014c8633798SNaoya Horiguchi * page migration, thp tail pages can be passed. 10156ce3c4c0SChristoph Lameter */ 1016a53190a4SYang Shi static int migrate_page_add(struct page *page, struct list_head *pagelist, 1017fc301289SChristoph Lameter unsigned long flags) 10186ce3c4c0SChristoph Lameter { 1019c8633798SNaoya Horiguchi struct page *head = compound_head(page); 10206ce3c4c0SChristoph Lameter /* 1021fc301289SChristoph Lameter * Avoid migrating a page that is shared with others. 10226ce3c4c0SChristoph Lameter */ 1023c8633798SNaoya Horiguchi if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) { 1024c8633798SNaoya Horiguchi if (!isolate_lru_page(head)) { 1025c8633798SNaoya Horiguchi list_add_tail(&head->lru, pagelist); 1026c8633798SNaoya Horiguchi mod_node_page_state(page_pgdat(head), 10279de4f22aSHuang Ying NR_ISOLATED_ANON + page_is_file_lru(head), 10286c357848SMatthew Wilcox (Oracle) thp_nr_pages(head)); 1029a53190a4SYang Shi } else if (flags & MPOL_MF_STRICT) { 1030a53190a4SYang Shi /* 1031a53190a4SYang Shi * Non-movable page may reach here. And, there may be 1032a53190a4SYang Shi * temporary off LRU pages or non-LRU movable pages. 1033a53190a4SYang Shi * Treat them as unmovable pages since they can't be 1034a53190a4SYang Shi * isolated, so they can't be moved at the moment. It 1035a53190a4SYang Shi * should return -EIO for this case too. 1036a53190a4SYang Shi */ 1037a53190a4SYang Shi return -EIO; 103862695a84SNick Piggin } 103962695a84SNick Piggin } 1040a53190a4SYang Shi 1041a53190a4SYang Shi return 0; 10426ce3c4c0SChristoph Lameter } 10436ce3c4c0SChristoph Lameter 10446ce3c4c0SChristoph Lameter /* 10457e2ab150SChristoph Lameter * Migrate pages from one node to a target node. 10467e2ab150SChristoph Lameter * Returns error or the number of pages not migrated. 
10477e2ab150SChristoph Lameter */ 1048dbcb0f19SAdrian Bunk static int migrate_to_node(struct mm_struct *mm, int source, int dest, 1049dbcb0f19SAdrian Bunk int flags) 10507e2ab150SChristoph Lameter { 10517e2ab150SChristoph Lameter nodemask_t nmask; 10527e2ab150SChristoph Lameter LIST_HEAD(pagelist); 10537e2ab150SChristoph Lameter int err = 0; 1054a0976311SJoonsoo Kim struct migration_target_control mtc = { 1055a0976311SJoonsoo Kim .nid = dest, 1056a0976311SJoonsoo Kim .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 1057a0976311SJoonsoo Kim }; 10587e2ab150SChristoph Lameter 10597e2ab150SChristoph Lameter nodes_clear(nmask); 10607e2ab150SChristoph Lameter node_set(source, nmask); 10617e2ab150SChristoph Lameter 106208270807SMinchan Kim /* 106308270807SMinchan Kim * This does not "check" the range but isolates all pages that 106408270807SMinchan Kim * need migration. Between passing in the full user address 106508270807SMinchan Kim * space range and MPOL_MF_DISCONTIG_OK, this call can not fail. 106608270807SMinchan Kim */ 106708270807SMinchan Kim VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))); 106898094945SNaoya Horiguchi queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask, 10697e2ab150SChristoph Lameter flags | MPOL_MF_DISCONTIG_OK, &pagelist); 10707e2ab150SChristoph Lameter 1071cf608ac1SMinchan Kim if (!list_empty(&pagelist)) { 1072a0976311SJoonsoo Kim err = migrate_pages(&pagelist, alloc_migration_target, NULL, 10735ac95884SYang Shi (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL); 1074cf608ac1SMinchan Kim if (err) 1075e2d8cf40SNaoya Horiguchi putback_movable_pages(&pagelist); 1076cf608ac1SMinchan Kim } 107795a402c3SChristoph Lameter 10787e2ab150SChristoph Lameter return err; 10797e2ab150SChristoph Lameter } 10807e2ab150SChristoph Lameter 10817e2ab150SChristoph Lameter /* 10827e2ab150SChristoph Lameter * Move pages between the two nodesets so as to preserve the physical 10837e2ab150SChristoph Lameter * layout as much as possible. 108439743889SChristoph Lameter * 108539743889SChristoph Lameter * Returns the number of page that could not be moved. 108639743889SChristoph Lameter */ 10870ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, 10880ce72d4fSAndrew Morton const nodemask_t *to, int flags) 108939743889SChristoph Lameter { 10907e2ab150SChristoph Lameter int busy = 0; 1091f555befdSJan Stancek int err = 0; 10927e2ab150SChristoph Lameter nodemask_t tmp; 109339743889SChristoph Lameter 1094361a2a22SMinchan Kim lru_cache_disable(); 10950aedadf9SChristoph Lameter 1096d8ed45c5SMichel Lespinasse mmap_read_lock(mm); 1097d4984711SChristoph Lameter 10987e2ab150SChristoph Lameter /* 10997e2ab150SChristoph Lameter * Find a 'source' bit set in 'tmp' whose corresponding 'dest' 11007e2ab150SChristoph Lameter * bit in 'to' is not also set in 'tmp'. Clear the found 'source' 11017e2ab150SChristoph Lameter * bit in 'tmp', and return that <source, dest> pair for migration. 11027e2ab150SChristoph Lameter * The pair of nodemasks 'to' and 'from' define the map. 11037e2ab150SChristoph Lameter * 11047e2ab150SChristoph Lameter * If no pair of bits is found that way, fallback to picking some 11057e2ab150SChristoph Lameter * pair of 'source' and 'dest' bits that are not the same. If the 11067e2ab150SChristoph Lameter * 'source' and 'dest' bits are the same, this represents a node 11077e2ab150SChristoph Lameter * that will be migrating to itself, so no pages need move. 
11087e2ab150SChristoph Lameter * 11097e2ab150SChristoph Lameter * If no bits are left in 'tmp', or if all remaining bits left 11107e2ab150SChristoph Lameter * in 'tmp' correspond to the same bit in 'to', return false 11117e2ab150SChristoph Lameter * (nothing left to migrate). 11127e2ab150SChristoph Lameter * 11137e2ab150SChristoph Lameter * This lets us pick a pair of nodes to migrate between, such that 11147e2ab150SChristoph Lameter * if possible the dest node is not already occupied by some other 11157e2ab150SChristoph Lameter * source node, minimizing the risk of overloading the memory on a 11167e2ab150SChristoph Lameter * node that would happen if we migrated incoming memory to a node 11177e2ab150SChristoph Lameter * before migrating outgoing memory source that same node. 11187e2ab150SChristoph Lameter * 11197e2ab150SChristoph Lameter * A single scan of tmp is sufficient. As we go, we remember the 11207e2ab150SChristoph Lameter * most recent <s, d> pair that moved (s != d). If we find a pair 11217e2ab150SChristoph Lameter * that not only moved, but what's better, moved to an empty slot 11227e2ab150SChristoph Lameter * (d is not set in tmp), then we break out then, with that pair. 1123ae0e47f0SJustin P. Mattock * Otherwise when we finish scanning from_tmp, we at least have the 11247e2ab150SChristoph Lameter * most recent <s, d> pair that moved. If we get all the way through 11257e2ab150SChristoph Lameter * the scan of tmp without finding any node that moved, much less 11267e2ab150SChristoph Lameter * moved to an empty node, then there is nothing left worth migrating. 11277e2ab150SChristoph Lameter */ 11287e2ab150SChristoph Lameter 11290ce72d4fSAndrew Morton tmp = *from; 11307e2ab150SChristoph Lameter while (!nodes_empty(tmp)) { 11317e2ab150SChristoph Lameter int s, d; 1132b76ac7e7SJianguo Wu int source = NUMA_NO_NODE; 11337e2ab150SChristoph Lameter int dest = 0; 11347e2ab150SChristoph Lameter 11357e2ab150SChristoph Lameter for_each_node_mask(s, tmp) { 11364a5b18ccSLarry Woodman 11374a5b18ccSLarry Woodman /* 11384a5b18ccSLarry Woodman * do_migrate_pages() tries to maintain the relative 11394a5b18ccSLarry Woodman * node relationship of the pages established between 11404a5b18ccSLarry Woodman * threads and memory areas. 11414a5b18ccSLarry Woodman * 11424a5b18ccSLarry Woodman * However if the number of source nodes is not equal to 11434a5b18ccSLarry Woodman * the number of destination nodes we can not preserve 11444a5b18ccSLarry Woodman * this node relative relationship. In that case, skip 11454a5b18ccSLarry Woodman * copying memory from a node that is in the destination 11464a5b18ccSLarry Woodman * mask. 11474a5b18ccSLarry Woodman * 11484a5b18ccSLarry Woodman * Example: [2,3,4] -> [3,4,5] moves everything. 11494a5b18ccSLarry Woodman * [0-7] - > [3,4,5] moves only 0,1,2,6,7. 11504a5b18ccSLarry Woodman */ 11514a5b18ccSLarry Woodman 11520ce72d4fSAndrew Morton if ((nodes_weight(*from) != nodes_weight(*to)) && 11530ce72d4fSAndrew Morton (node_isset(s, *to))) 11544a5b18ccSLarry Woodman continue; 11554a5b18ccSLarry Woodman 11560ce72d4fSAndrew Morton d = node_remap(s, *from, *to); 11577e2ab150SChristoph Lameter if (s == d) 11587e2ab150SChristoph Lameter continue; 11597e2ab150SChristoph Lameter 11607e2ab150SChristoph Lameter source = s; /* Node moved. Memorize */ 11617e2ab150SChristoph Lameter dest = d; 11627e2ab150SChristoph Lameter 11637e2ab150SChristoph Lameter /* dest not in remaining from nodes? 
*/ 11647e2ab150SChristoph Lameter if (!node_isset(dest, tmp)) 11657e2ab150SChristoph Lameter break; 11667e2ab150SChristoph Lameter } 1167b76ac7e7SJianguo Wu if (source == NUMA_NO_NODE) 11687e2ab150SChristoph Lameter break; 11697e2ab150SChristoph Lameter 11707e2ab150SChristoph Lameter node_clear(source, tmp); 11717e2ab150SChristoph Lameter err = migrate_to_node(mm, source, dest, flags); 11727e2ab150SChristoph Lameter if (err > 0) 11737e2ab150SChristoph Lameter busy += err; 11747e2ab150SChristoph Lameter if (err < 0) 11757e2ab150SChristoph Lameter break; 117639743889SChristoph Lameter } 1177d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); 1178d479960eSMinchan Kim 1179361a2a22SMinchan Kim lru_cache_enable(); 11807e2ab150SChristoph Lameter if (err < 0) 11817e2ab150SChristoph Lameter return err; 11827e2ab150SChristoph Lameter return busy; 1183b20a3503SChristoph Lameter 118439743889SChristoph Lameter } 118539743889SChristoph Lameter 11863ad33b24SLee Schermerhorn /* 11873ad33b24SLee Schermerhorn * Allocate a new page for page migration based on vma policy. 1188d05f0cdcSHugh Dickins * Start by assuming the page is mapped by the same vma as contains @start. 11893ad33b24SLee Schermerhorn * Search forward from there, if not. N.B., this assumes that the 11903ad33b24SLee Schermerhorn * list of pages handed to migrate_pages()--which is how we get here-- 11913ad33b24SLee Schermerhorn * is in virtual address order. 11923ad33b24SLee Schermerhorn */ 1193666feb21SMichal Hocko static struct page *new_page(struct page *page, unsigned long start) 119495a402c3SChristoph Lameter { 1195ec4858e0SMatthew Wilcox (Oracle) struct folio *dst, *src = page_folio(page); 1196d05f0cdcSHugh Dickins struct vm_area_struct *vma; 11973f649ab7SKees Cook unsigned long address; 1198ec4858e0SMatthew Wilcox (Oracle) gfp_t gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL; 119995a402c3SChristoph Lameter 1200d05f0cdcSHugh Dickins vma = find_vma(current->mm, start); 12013ad33b24SLee Schermerhorn while (vma) { 12023ad33b24SLee Schermerhorn address = page_address_in_vma(page, vma); 12033ad33b24SLee Schermerhorn if (address != -EFAULT) 12043ad33b24SLee Schermerhorn break; 12053ad33b24SLee Schermerhorn vma = vma->vm_next; 12063ad33b24SLee Schermerhorn } 12073ad33b24SLee Schermerhorn 1208ec4858e0SMatthew Wilcox (Oracle) if (folio_test_hugetlb(src)) 1209ec4858e0SMatthew Wilcox (Oracle) return alloc_huge_page_vma(page_hstate(&src->page), 1210389c8178SMichal Hocko vma, address); 1211c8633798SNaoya Horiguchi 1212ec4858e0SMatthew Wilcox (Oracle) if (folio_test_large(src)) 1213ec4858e0SMatthew Wilcox (Oracle) gfp = GFP_TRANSHUGE; 1214ec4858e0SMatthew Wilcox (Oracle) 121511c731e8SWanpeng Li /* 1216ec4858e0SMatthew Wilcox (Oracle) * if !vma, vma_alloc_folio() will use task or system default policy 121711c731e8SWanpeng Li */ 1218ec4858e0SMatthew Wilcox (Oracle) dst = vma_alloc_folio(gfp, folio_order(src), vma, address, 1219ec4858e0SMatthew Wilcox (Oracle) folio_test_large(src)); 1220ec4858e0SMatthew Wilcox (Oracle) return &dst->page; 122195a402c3SChristoph Lameter } 1222b20a3503SChristoph Lameter #else 1223b20a3503SChristoph Lameter 1224a53190a4SYang Shi static int migrate_page_add(struct page *page, struct list_head *pagelist, 1225b20a3503SChristoph Lameter unsigned long flags) 1226b20a3503SChristoph Lameter { 1227a53190a4SYang Shi return -EIO; 1228b20a3503SChristoph Lameter } 1229b20a3503SChristoph Lameter 12300ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, 12310ce72d4fSAndrew Morton const nodemask_t *to, int 
flags) 1232b20a3503SChristoph Lameter { 1233b20a3503SChristoph Lameter return -ENOSYS; 1234b20a3503SChristoph Lameter } 123595a402c3SChristoph Lameter 1236666feb21SMichal Hocko static struct page *new_page(struct page *page, unsigned long start) 123795a402c3SChristoph Lameter { 123895a402c3SChristoph Lameter return NULL; 123995a402c3SChristoph Lameter } 1240b20a3503SChristoph Lameter #endif 1241b20a3503SChristoph Lameter 1242dbcb0f19SAdrian Bunk static long do_mbind(unsigned long start, unsigned long len, 1243028fec41SDavid Rientjes unsigned short mode, unsigned short mode_flags, 1244028fec41SDavid Rientjes nodemask_t *nmask, unsigned long flags) 12456ce3c4c0SChristoph Lameter { 12466ce3c4c0SChristoph Lameter struct mm_struct *mm = current->mm; 12476ce3c4c0SChristoph Lameter struct mempolicy *new; 12486ce3c4c0SChristoph Lameter unsigned long end; 12496ce3c4c0SChristoph Lameter int err; 1250d8835445SYang Shi int ret; 12516ce3c4c0SChristoph Lameter LIST_HEAD(pagelist); 12526ce3c4c0SChristoph Lameter 1253b24f53a0SLee Schermerhorn if (flags & ~(unsigned long)MPOL_MF_VALID) 12546ce3c4c0SChristoph Lameter return -EINVAL; 125574c00241SChristoph Lameter if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE)) 12566ce3c4c0SChristoph Lameter return -EPERM; 12576ce3c4c0SChristoph Lameter 12586ce3c4c0SChristoph Lameter if (start & ~PAGE_MASK) 12596ce3c4c0SChristoph Lameter return -EINVAL; 12606ce3c4c0SChristoph Lameter 12616ce3c4c0SChristoph Lameter if (mode == MPOL_DEFAULT) 12626ce3c4c0SChristoph Lameter flags &= ~MPOL_MF_STRICT; 12636ce3c4c0SChristoph Lameter 12646ce3c4c0SChristoph Lameter len = (len + PAGE_SIZE - 1) & PAGE_MASK; 12656ce3c4c0SChristoph Lameter end = start + len; 12666ce3c4c0SChristoph Lameter 12676ce3c4c0SChristoph Lameter if (end < start) 12686ce3c4c0SChristoph Lameter return -EINVAL; 12696ce3c4c0SChristoph Lameter if (end == start) 12706ce3c4c0SChristoph Lameter return 0; 12716ce3c4c0SChristoph Lameter 1272028fec41SDavid Rientjes new = mpol_new(mode, mode_flags, nmask); 12736ce3c4c0SChristoph Lameter if (IS_ERR(new)) 12746ce3c4c0SChristoph Lameter return PTR_ERR(new); 12756ce3c4c0SChristoph Lameter 1276b24f53a0SLee Schermerhorn if (flags & MPOL_MF_LAZY) 1277b24f53a0SLee Schermerhorn new->flags |= MPOL_F_MOF; 1278b24f53a0SLee Schermerhorn 12796ce3c4c0SChristoph Lameter /* 12806ce3c4c0SChristoph Lameter * If we are using the default policy then operation 12816ce3c4c0SChristoph Lameter * on discontinuous address spaces is okay after all 12826ce3c4c0SChristoph Lameter */ 12836ce3c4c0SChristoph Lameter if (!new) 12846ce3c4c0SChristoph Lameter flags |= MPOL_MF_DISCONTIG_OK; 12856ce3c4c0SChristoph Lameter 1286028fec41SDavid Rientjes pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n", 1287028fec41SDavid Rientjes start, start + len, mode, mode_flags, 128800ef2d2fSDavid Rientjes nmask ? 
nodes_addr(*nmask)[0] : NUMA_NO_NODE); 12896ce3c4c0SChristoph Lameter 12900aedadf9SChristoph Lameter if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 12910aedadf9SChristoph Lameter 1292361a2a22SMinchan Kim lru_cache_disable(); 12930aedadf9SChristoph Lameter } 12944bfc4495SKAMEZAWA Hiroyuki { 12954bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 12964bfc4495SKAMEZAWA Hiroyuki if (scratch) { 1297d8ed45c5SMichel Lespinasse mmap_write_lock(mm); 12984bfc4495SKAMEZAWA Hiroyuki err = mpol_set_nodemask(new, nmask, scratch); 12994bfc4495SKAMEZAWA Hiroyuki if (err) 1300d8ed45c5SMichel Lespinasse mmap_write_unlock(mm); 13014bfc4495SKAMEZAWA Hiroyuki } else 13024bfc4495SKAMEZAWA Hiroyuki err = -ENOMEM; 13034bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 13044bfc4495SKAMEZAWA Hiroyuki } 1305b05ca738SKOSAKI Motohiro if (err) 1306b05ca738SKOSAKI Motohiro goto mpol_out; 1307b05ca738SKOSAKI Motohiro 1308d8835445SYang Shi ret = queue_pages_range(mm, start, end, nmask, 13096ce3c4c0SChristoph Lameter flags | MPOL_MF_INVERT, &pagelist); 1310d8835445SYang Shi 1311d8835445SYang Shi if (ret < 0) { 1312a85dfc30SYang Shi err = ret; 1313d8835445SYang Shi goto up_out; 1314d8835445SYang Shi } 1315d8835445SYang Shi 13169d8cebd4SKOSAKI Motohiro err = mbind_range(mm, start, end, new); 13177e2ab150SChristoph Lameter 1318b24f53a0SLee Schermerhorn if (!err) { 1319b24f53a0SLee Schermerhorn int nr_failed = 0; 1320b24f53a0SLee Schermerhorn 1321cf608ac1SMinchan Kim if (!list_empty(&pagelist)) { 1322b24f53a0SLee Schermerhorn WARN_ON_ONCE(flags & MPOL_MF_LAZY); 1323d05f0cdcSHugh Dickins nr_failed = migrate_pages(&pagelist, new_page, NULL, 13245ac95884SYang Shi start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND, NULL); 1325cf608ac1SMinchan Kim if (nr_failed) 132674060e4dSNaoya Horiguchi putback_movable_pages(&pagelist); 1327cf608ac1SMinchan Kim } 13286ce3c4c0SChristoph Lameter 1329d8835445SYang Shi if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT))) 13306ce3c4c0SChristoph Lameter err = -EIO; 1331a85dfc30SYang Shi } else { 1332d8835445SYang Shi up_out: 1333a85dfc30SYang Shi if (!list_empty(&pagelist)) 1334a85dfc30SYang Shi putback_movable_pages(&pagelist); 1335a85dfc30SYang Shi } 1336a85dfc30SYang Shi 1337d8ed45c5SMichel Lespinasse mmap_write_unlock(mm); 1338b05ca738SKOSAKI Motohiro mpol_out: 1339f0be3d32SLee Schermerhorn mpol_put(new); 1340d479960eSMinchan Kim if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) 1341361a2a22SMinchan Kim lru_cache_enable(); 13426ce3c4c0SChristoph Lameter return err; 13436ce3c4c0SChristoph Lameter } 13446ce3c4c0SChristoph Lameter 134539743889SChristoph Lameter /* 13468bccd85fSChristoph Lameter * User space interface with variable sized bitmaps for nodelists. 
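 *
 * Illustrative user-space sketch (editorial addition, not part of this
 * file): a program that wants to interleave new allocations across nodes
 * 0 and 2, and bind an existing mapping (addr/len being placeholders) to
 * them, might do roughly the following, e.g. via the numaif.h wrappers
 * shipped with libnuma:
 *
 *	unsigned long nodemask = (1UL << 0) | (1UL << 2);
 *	set_mempolicy(MPOL_INTERLEAVE, &nodemask, 8 * sizeof(nodemask) + 1);
 *	mbind(addr, len, MPOL_BIND, &nodemask, 8 * sizeof(nodemask) + 1,
 *	      MPOL_MF_MOVE);
 *
 * The "+ 1" mirrors libnuma's convention: get_nodes() below decrements
 * maxnode before using it as a bit count.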
13478bccd85fSChristoph Lameter */ 1348e130242dSArnd Bergmann static int get_bitmap(unsigned long *mask, const unsigned long __user *nmask, 1349e130242dSArnd Bergmann unsigned long maxnode) 1350e130242dSArnd Bergmann { 1351e130242dSArnd Bergmann unsigned long nlongs = BITS_TO_LONGS(maxnode); 1352e130242dSArnd Bergmann int ret; 1353e130242dSArnd Bergmann 1354e130242dSArnd Bergmann if (in_compat_syscall()) 1355e130242dSArnd Bergmann ret = compat_get_bitmap(mask, 1356e130242dSArnd Bergmann (const compat_ulong_t __user *)nmask, 1357e130242dSArnd Bergmann maxnode); 1358e130242dSArnd Bergmann else 1359e130242dSArnd Bergmann ret = copy_from_user(mask, nmask, 1360e130242dSArnd Bergmann nlongs * sizeof(unsigned long)); 1361e130242dSArnd Bergmann 1362e130242dSArnd Bergmann if (ret) 1363e130242dSArnd Bergmann return -EFAULT; 1364e130242dSArnd Bergmann 1365e130242dSArnd Bergmann if (maxnode % BITS_PER_LONG) 1366e130242dSArnd Bergmann mask[nlongs - 1] &= (1UL << (maxnode % BITS_PER_LONG)) - 1; 1367e130242dSArnd Bergmann 1368e130242dSArnd Bergmann return 0; 1369e130242dSArnd Bergmann } 13708bccd85fSChristoph Lameter 13718bccd85fSChristoph Lameter /* Copy a node mask from user space. */ 137239743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask, 13738bccd85fSChristoph Lameter unsigned long maxnode) 13748bccd85fSChristoph Lameter { 13758bccd85fSChristoph Lameter --maxnode; 13768bccd85fSChristoph Lameter nodes_clear(*nodes); 13778bccd85fSChristoph Lameter if (maxnode == 0 || !nmask) 13788bccd85fSChristoph Lameter return 0; 1379a9c930baSAndi Kleen if (maxnode > PAGE_SIZE*BITS_PER_BYTE) 1380636f13c1SChris Wright return -EINVAL; 13818bccd85fSChristoph Lameter 138256521e7aSYisheng Xie /* 138356521e7aSYisheng Xie * When the user specified more nodes than supported just check 1384e130242dSArnd Bergmann * if the non supported part is all zero, one word at a time, 1385e130242dSArnd Bergmann * starting at the end. 
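 *
 * Editorial example: with MAX_NUMNODES = 64 and a caller passing
 * maxnode = 256, the loop below walks the user words covering bit 64
 * and above, last word first, and returns -EINVAL as soon as it sees a
 * set bit there; only the first word is then copied into *nodes.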
138656521e7aSYisheng Xie */ 1387e130242dSArnd Bergmann while (maxnode > MAX_NUMNODES) { 1388e130242dSArnd Bergmann unsigned long bits = min_t(unsigned long, maxnode, BITS_PER_LONG); 1389e130242dSArnd Bergmann unsigned long t; 13908bccd85fSChristoph Lameter 1391000eca5dSTianyu Li if (get_bitmap(&t, &nmask[(maxnode - 1) / BITS_PER_LONG], bits)) 139256521e7aSYisheng Xie return -EFAULT; 1393e130242dSArnd Bergmann 1394e130242dSArnd Bergmann if (maxnode - bits >= MAX_NUMNODES) { 1395e130242dSArnd Bergmann maxnode -= bits; 1396e130242dSArnd Bergmann } else { 1397e130242dSArnd Bergmann maxnode = MAX_NUMNODES; 1398e130242dSArnd Bergmann t &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1); 1399e130242dSArnd Bergmann } 1400e130242dSArnd Bergmann if (t) 140156521e7aSYisheng Xie return -EINVAL; 140256521e7aSYisheng Xie } 140356521e7aSYisheng Xie 1404e130242dSArnd Bergmann return get_bitmap(nodes_addr(*nodes), nmask, maxnode); 14058bccd85fSChristoph Lameter } 14068bccd85fSChristoph Lameter 14078bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */ 14088bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode, 14098bccd85fSChristoph Lameter nodemask_t *nodes) 14108bccd85fSChristoph Lameter { 14118bccd85fSChristoph Lameter unsigned long copy = ALIGN(maxnode-1, 64) / 8; 1412050c17f2SRalph Campbell unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long); 1413e130242dSArnd Bergmann bool compat = in_compat_syscall(); 1414e130242dSArnd Bergmann 1415e130242dSArnd Bergmann if (compat) 1416e130242dSArnd Bergmann nbytes = BITS_TO_COMPAT_LONGS(nr_node_ids) * sizeof(compat_long_t); 14178bccd85fSChristoph Lameter 14188bccd85fSChristoph Lameter if (copy > nbytes) { 14198bccd85fSChristoph Lameter if (copy > PAGE_SIZE) 14208bccd85fSChristoph Lameter return -EINVAL; 14218bccd85fSChristoph Lameter if (clear_user((char __user *)mask + nbytes, copy - nbytes)) 14228bccd85fSChristoph Lameter return -EFAULT; 14238bccd85fSChristoph Lameter copy = nbytes; 1424e130242dSArnd Bergmann maxnode = nr_node_ids; 14258bccd85fSChristoph Lameter } 1426e130242dSArnd Bergmann 1427e130242dSArnd Bergmann if (compat) 1428e130242dSArnd Bergmann return compat_put_bitmap((compat_ulong_t __user *)mask, 1429e130242dSArnd Bergmann nodes_addr(*nodes), maxnode); 1430e130242dSArnd Bergmann 14318bccd85fSChristoph Lameter return copy_to_user(mask, nodes_addr(*nodes), copy) ? 
-EFAULT : 0; 14328bccd85fSChristoph Lameter } 14338bccd85fSChristoph Lameter 143495837924SFeng Tang /* Basic parameter sanity check used by both mbind() and set_mempolicy() */ 143595837924SFeng Tang static inline int sanitize_mpol_flags(int *mode, unsigned short *flags) 143695837924SFeng Tang { 143795837924SFeng Tang *flags = *mode & MPOL_MODE_FLAGS; 143895837924SFeng Tang *mode &= ~MPOL_MODE_FLAGS; 1439b27abaccSDave Hansen 1440a38a59fdSBen Widawsky if ((unsigned int)(*mode) >= MPOL_MAX) 144195837924SFeng Tang return -EINVAL; 144295837924SFeng Tang if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES)) 144395837924SFeng Tang return -EINVAL; 14446d2aec9eSEric Dumazet if (*flags & MPOL_F_NUMA_BALANCING) { 14456d2aec9eSEric Dumazet if (*mode != MPOL_BIND) 14466d2aec9eSEric Dumazet return -EINVAL; 14476d2aec9eSEric Dumazet *flags |= (MPOL_F_MOF | MPOL_F_MORON); 14486d2aec9eSEric Dumazet } 144995837924SFeng Tang return 0; 145095837924SFeng Tang } 145195837924SFeng Tang 1452e7dc9ad6SDominik Brodowski static long kernel_mbind(unsigned long start, unsigned long len, 1453e7dc9ad6SDominik Brodowski unsigned long mode, const unsigned long __user *nmask, 1454e7dc9ad6SDominik Brodowski unsigned long maxnode, unsigned int flags) 14558bccd85fSChristoph Lameter { 1456028fec41SDavid Rientjes unsigned short mode_flags; 145795837924SFeng Tang nodemask_t nodes; 145895837924SFeng Tang int lmode = mode; 145995837924SFeng Tang int err; 14608bccd85fSChristoph Lameter 1461057d3389SAndrey Konovalov start = untagged_addr(start); 146295837924SFeng Tang err = sanitize_mpol_flags(&lmode, &mode_flags); 146395837924SFeng Tang if (err) 146495837924SFeng Tang return err; 146595837924SFeng Tang 14668bccd85fSChristoph Lameter err = get_nodes(&nodes, nmask, maxnode); 14678bccd85fSChristoph Lameter if (err) 14688bccd85fSChristoph Lameter return err; 146995837924SFeng Tang 147095837924SFeng Tang return do_mbind(start, len, lmode, mode_flags, &nodes, flags); 14718bccd85fSChristoph Lameter } 14728bccd85fSChristoph Lameter 1473c6018b4bSAneesh Kumar K.V SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, len, 1474c6018b4bSAneesh Kumar K.V unsigned long, home_node, unsigned long, flags) 1475c6018b4bSAneesh Kumar K.V { 1476c6018b4bSAneesh Kumar K.V struct mm_struct *mm = current->mm; 1477c6018b4bSAneesh Kumar K.V struct vm_area_struct *vma; 1478c6018b4bSAneesh Kumar K.V struct mempolicy *new; 1479c6018b4bSAneesh Kumar K.V unsigned long vmstart; 1480c6018b4bSAneesh Kumar K.V unsigned long vmend; 1481c6018b4bSAneesh Kumar K.V unsigned long end; 1482c6018b4bSAneesh Kumar K.V int err = -ENOENT; 1483c6018b4bSAneesh Kumar K.V 1484c6018b4bSAneesh Kumar K.V start = untagged_addr(start); 1485c6018b4bSAneesh Kumar K.V if (start & ~PAGE_MASK) 1486c6018b4bSAneesh Kumar K.V return -EINVAL; 1487c6018b4bSAneesh Kumar K.V /* 1488c6018b4bSAneesh Kumar K.V * flags is used for future extension if any. 1489c6018b4bSAneesh Kumar K.V */ 1490c6018b4bSAneesh Kumar K.V if (flags != 0) 1491c6018b4bSAneesh Kumar K.V return -EINVAL; 1492c6018b4bSAneesh Kumar K.V 1493c6018b4bSAneesh Kumar K.V /* 1494c6018b4bSAneesh Kumar K.V * Check home_node is online to avoid accessing uninitialized 1495c6018b4bSAneesh Kumar K.V * NODE_DATA. 
1496c6018b4bSAneesh Kumar K.V */ 1497c6018b4bSAneesh Kumar K.V if (home_node >= MAX_NUMNODES || !node_online(home_node)) 1498c6018b4bSAneesh Kumar K.V return -EINVAL; 1499c6018b4bSAneesh Kumar K.V 1500c6018b4bSAneesh Kumar K.V len = (len + PAGE_SIZE - 1) & PAGE_MASK; 1501c6018b4bSAneesh Kumar K.V end = start + len; 1502c6018b4bSAneesh Kumar K.V 1503c6018b4bSAneesh Kumar K.V if (end < start) 1504c6018b4bSAneesh Kumar K.V return -EINVAL; 1505c6018b4bSAneesh Kumar K.V if (end == start) 1506c6018b4bSAneesh Kumar K.V return 0; 1507c6018b4bSAneesh Kumar K.V mmap_write_lock(mm); 1508c6018b4bSAneesh Kumar K.V vma = find_vma(mm, start); 1509c6018b4bSAneesh Kumar K.V for (; vma && vma->vm_start < end; vma = vma->vm_next) { 1510c6018b4bSAneesh Kumar K.V 1511c6018b4bSAneesh Kumar K.V vmstart = max(start, vma->vm_start); 1512c6018b4bSAneesh Kumar K.V vmend = min(end, vma->vm_end); 1513c6018b4bSAneesh Kumar K.V new = mpol_dup(vma_policy(vma)); 1514c6018b4bSAneesh Kumar K.V if (IS_ERR(new)) { 1515c6018b4bSAneesh Kumar K.V err = PTR_ERR(new); 1516c6018b4bSAneesh Kumar K.V break; 1517c6018b4bSAneesh Kumar K.V } 1518c6018b4bSAneesh Kumar K.V /* 1519c6018b4bSAneesh Kumar K.V * Only update home node if there is an existing vma policy 1520c6018b4bSAneesh Kumar K.V */ 1521c6018b4bSAneesh Kumar K.V if (!new) 1522c6018b4bSAneesh Kumar K.V continue; 1523c6018b4bSAneesh Kumar K.V 1524c6018b4bSAneesh Kumar K.V /* 1525c6018b4bSAneesh Kumar K.V * If any vma in the range got policy other than MPOL_BIND 1526c6018b4bSAneesh Kumar K.V * or MPOL_PREFERRED_MANY we return error. We don't reset 1527c6018b4bSAneesh Kumar K.V * the home node for vmas we already updated before. 1528c6018b4bSAneesh Kumar K.V */ 1529c6018b4bSAneesh Kumar K.V if (new->mode != MPOL_BIND && new->mode != MPOL_PREFERRED_MANY) { 1530c6018b4bSAneesh Kumar K.V err = -EOPNOTSUPP; 1531c6018b4bSAneesh Kumar K.V break; 1532c6018b4bSAneesh Kumar K.V } 1533c6018b4bSAneesh Kumar K.V 1534c6018b4bSAneesh Kumar K.V new->home_node = home_node; 1535c6018b4bSAneesh Kumar K.V err = mbind_range(mm, vmstart, vmend, new); 1536c6018b4bSAneesh Kumar K.V mpol_put(new); 1537c6018b4bSAneesh Kumar K.V if (err) 1538c6018b4bSAneesh Kumar K.V break; 1539c6018b4bSAneesh Kumar K.V } 1540c6018b4bSAneesh Kumar K.V mmap_write_unlock(mm); 1541c6018b4bSAneesh Kumar K.V return err; 1542c6018b4bSAneesh Kumar K.V } 1543c6018b4bSAneesh Kumar K.V 1544e7dc9ad6SDominik Brodowski SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len, 1545e7dc9ad6SDominik Brodowski unsigned long, mode, const unsigned long __user *, nmask, 1546e7dc9ad6SDominik Brodowski unsigned long, maxnode, unsigned int, flags) 1547e7dc9ad6SDominik Brodowski { 1548e7dc9ad6SDominik Brodowski return kernel_mbind(start, len, mode, nmask, maxnode, flags); 1549e7dc9ad6SDominik Brodowski } 1550e7dc9ad6SDominik Brodowski 15518bccd85fSChristoph Lameter /* Set the process memory policy */ 1552af03c4acSDominik Brodowski static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask, 1553af03c4acSDominik Brodowski unsigned long maxnode) 15548bccd85fSChristoph Lameter { 155595837924SFeng Tang unsigned short mode_flags; 15568bccd85fSChristoph Lameter nodemask_t nodes; 155795837924SFeng Tang int lmode = mode; 155895837924SFeng Tang int err; 15598bccd85fSChristoph Lameter 156095837924SFeng Tang err = sanitize_mpol_flags(&lmode, &mode_flags); 156195837924SFeng Tang if (err) 156295837924SFeng Tang return err; 156395837924SFeng Tang 15648bccd85fSChristoph Lameter err = get_nodes(&nodes, nmask, maxnode); 
15658bccd85fSChristoph Lameter if (err) 15668bccd85fSChristoph Lameter return err; 156795837924SFeng Tang 156895837924SFeng Tang return do_set_mempolicy(lmode, mode_flags, &nodes); 15698bccd85fSChristoph Lameter } 15708bccd85fSChristoph Lameter 1571af03c4acSDominik Brodowski SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask, 1572af03c4acSDominik Brodowski unsigned long, maxnode) 1573af03c4acSDominik Brodowski { 1574af03c4acSDominik Brodowski return kernel_set_mempolicy(mode, nmask, maxnode); 1575af03c4acSDominik Brodowski } 1576af03c4acSDominik Brodowski 1577b6e9b0baSDominik Brodowski static int kernel_migrate_pages(pid_t pid, unsigned long maxnode, 1578b6e9b0baSDominik Brodowski const unsigned long __user *old_nodes, 1579b6e9b0baSDominik Brodowski const unsigned long __user *new_nodes) 158039743889SChristoph Lameter { 1581596d7cfaSKOSAKI Motohiro struct mm_struct *mm = NULL; 158239743889SChristoph Lameter struct task_struct *task; 158339743889SChristoph Lameter nodemask_t task_nodes; 158439743889SChristoph Lameter int err; 1585596d7cfaSKOSAKI Motohiro nodemask_t *old; 1586596d7cfaSKOSAKI Motohiro nodemask_t *new; 1587596d7cfaSKOSAKI Motohiro NODEMASK_SCRATCH(scratch); 158839743889SChristoph Lameter 1589596d7cfaSKOSAKI Motohiro if (!scratch) 1590596d7cfaSKOSAKI Motohiro return -ENOMEM; 159139743889SChristoph Lameter 1592596d7cfaSKOSAKI Motohiro old = &scratch->mask1; 1593596d7cfaSKOSAKI Motohiro new = &scratch->mask2; 1594596d7cfaSKOSAKI Motohiro 1595596d7cfaSKOSAKI Motohiro err = get_nodes(old, old_nodes, maxnode); 159639743889SChristoph Lameter if (err) 1597596d7cfaSKOSAKI Motohiro goto out; 1598596d7cfaSKOSAKI Motohiro 1599596d7cfaSKOSAKI Motohiro err = get_nodes(new, new_nodes, maxnode); 1600596d7cfaSKOSAKI Motohiro if (err) 1601596d7cfaSKOSAKI Motohiro goto out; 160239743889SChristoph Lameter 160339743889SChristoph Lameter /* Find the mm_struct */ 160455cfaa3cSZeng Zhaoming rcu_read_lock(); 1605228ebcbeSPavel Emelyanov task = pid ? find_task_by_vpid(pid) : current; 160639743889SChristoph Lameter if (!task) { 160755cfaa3cSZeng Zhaoming rcu_read_unlock(); 1608596d7cfaSKOSAKI Motohiro err = -ESRCH; 1609596d7cfaSKOSAKI Motohiro goto out; 161039743889SChristoph Lameter } 16113268c63eSChristoph Lameter get_task_struct(task); 161239743889SChristoph Lameter 1613596d7cfaSKOSAKI Motohiro err = -EINVAL; 161439743889SChristoph Lameter 161539743889SChristoph Lameter /* 161631367466SOtto Ebeling * Check if this process has the right to modify the specified process. 161731367466SOtto Ebeling * Use the regular "ptrace_may_access()" checks. 161839743889SChristoph Lameter */ 161931367466SOtto Ebeling if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) { 1620c69e8d9cSDavid Howells rcu_read_unlock(); 162139743889SChristoph Lameter err = -EPERM; 16223268c63eSChristoph Lameter goto out_put; 162339743889SChristoph Lameter } 1624c69e8d9cSDavid Howells rcu_read_unlock(); 162539743889SChristoph Lameter 162639743889SChristoph Lameter task_nodes = cpuset_mems_allowed(task); 162739743889SChristoph Lameter /* Is the user allowed to access the target nodes? 
*/ 1628596d7cfaSKOSAKI Motohiro if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) { 162939743889SChristoph Lameter err = -EPERM; 16303268c63eSChristoph Lameter goto out_put; 163139743889SChristoph Lameter } 163239743889SChristoph Lameter 16330486a38bSYisheng Xie task_nodes = cpuset_mems_allowed(current); 16340486a38bSYisheng Xie nodes_and(*new, *new, task_nodes); 16350486a38bSYisheng Xie if (nodes_empty(*new)) 16363268c63eSChristoph Lameter goto out_put; 16370486a38bSYisheng Xie 163886c3a764SDavid Quigley err = security_task_movememory(task); 163986c3a764SDavid Quigley if (err) 16403268c63eSChristoph Lameter goto out_put; 164186c3a764SDavid Quigley 16423268c63eSChristoph Lameter mm = get_task_mm(task); 16433268c63eSChristoph Lameter put_task_struct(task); 1644f2a9ef88SSasha Levin 1645f2a9ef88SSasha Levin if (!mm) { 1646f2a9ef88SSasha Levin err = -EINVAL; 1647f2a9ef88SSasha Levin goto out; 1648f2a9ef88SSasha Levin } 1649f2a9ef88SSasha Levin 1650596d7cfaSKOSAKI Motohiro err = do_migrate_pages(mm, old, new, 165174c00241SChristoph Lameter capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE); 16523268c63eSChristoph Lameter 165339743889SChristoph Lameter mmput(mm); 16543268c63eSChristoph Lameter out: 1655596d7cfaSKOSAKI Motohiro NODEMASK_SCRATCH_FREE(scratch); 1656596d7cfaSKOSAKI Motohiro 165739743889SChristoph Lameter return err; 16583268c63eSChristoph Lameter 16593268c63eSChristoph Lameter out_put: 16603268c63eSChristoph Lameter put_task_struct(task); 16613268c63eSChristoph Lameter goto out; 16623268c63eSChristoph Lameter 166339743889SChristoph Lameter } 166439743889SChristoph Lameter 1665b6e9b0baSDominik Brodowski SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode, 1666b6e9b0baSDominik Brodowski const unsigned long __user *, old_nodes, 1667b6e9b0baSDominik Brodowski const unsigned long __user *, new_nodes) 1668b6e9b0baSDominik Brodowski { 1669b6e9b0baSDominik Brodowski return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes); 1670b6e9b0baSDominik Brodowski } 1671b6e9b0baSDominik Brodowski 167239743889SChristoph Lameter 16738bccd85fSChristoph Lameter /* Retrieve NUMA policy */ 1674af03c4acSDominik Brodowski static int kernel_get_mempolicy(int __user *policy, 1675af03c4acSDominik Brodowski unsigned long __user *nmask, 1676af03c4acSDominik Brodowski unsigned long maxnode, 1677af03c4acSDominik Brodowski unsigned long addr, 1678af03c4acSDominik Brodowski unsigned long flags) 16798bccd85fSChristoph Lameter { 1680dbcb0f19SAdrian Bunk int err; 16813f649ab7SKees Cook int pval; 16828bccd85fSChristoph Lameter nodemask_t nodes; 16838bccd85fSChristoph Lameter 1684050c17f2SRalph Campbell if (nmask != NULL && maxnode < nr_node_ids) 16858bccd85fSChristoph Lameter return -EINVAL; 16868bccd85fSChristoph Lameter 16874605f057SWenchao Hao addr = untagged_addr(addr); 16884605f057SWenchao Hao 16898bccd85fSChristoph Lameter err = do_get_mempolicy(&pval, &nodes, addr, flags); 16908bccd85fSChristoph Lameter 16918bccd85fSChristoph Lameter if (err) 16928bccd85fSChristoph Lameter return err; 16938bccd85fSChristoph Lameter 16948bccd85fSChristoph Lameter if (policy && put_user(pval, policy)) 16958bccd85fSChristoph Lameter return -EFAULT; 16968bccd85fSChristoph Lameter 16978bccd85fSChristoph Lameter if (nmask) 16988bccd85fSChristoph Lameter err = copy_nodes_to_user(nmask, maxnode, &nodes); 16998bccd85fSChristoph Lameter 17008bccd85fSChristoph Lameter return err; 17018bccd85fSChristoph Lameter } 17028bccd85fSChristoph Lameter 1703af03c4acSDominik Brodowski SYSCALL_DEFINE5(get_mempolicy, 
int __user *, policy, 1704af03c4acSDominik Brodowski unsigned long __user *, nmask, unsigned long, maxnode, 1705af03c4acSDominik Brodowski unsigned long, addr, unsigned long, flags) 1706af03c4acSDominik Brodowski { 1707af03c4acSDominik Brodowski return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags); 1708af03c4acSDominik Brodowski } 1709af03c4acSDominik Brodowski 171020ca87f2SLi Xinhai bool vma_migratable(struct vm_area_struct *vma) 171120ca87f2SLi Xinhai { 171220ca87f2SLi Xinhai if (vma->vm_flags & (VM_IO | VM_PFNMAP)) 171320ca87f2SLi Xinhai return false; 171420ca87f2SLi Xinhai 171520ca87f2SLi Xinhai /* 171620ca87f2SLi Xinhai * DAX device mappings require predictable access latency, so avoid 171720ca87f2SLi Xinhai * incurring periodic faults. 171820ca87f2SLi Xinhai */ 171920ca87f2SLi Xinhai if (vma_is_dax(vma)) 172020ca87f2SLi Xinhai return false; 172120ca87f2SLi Xinhai 172220ca87f2SLi Xinhai if (is_vm_hugetlb_page(vma) && 172320ca87f2SLi Xinhai !hugepage_migration_supported(hstate_vma(vma))) 172420ca87f2SLi Xinhai return false; 172520ca87f2SLi Xinhai 172620ca87f2SLi Xinhai /* 172720ca87f2SLi Xinhai * Migration allocates pages in the highest zone. If we cannot 172820ca87f2SLi Xinhai * do so then migration (at least from node to node) is not 172920ca87f2SLi Xinhai * possible. 173020ca87f2SLi Xinhai */ 173120ca87f2SLi Xinhai if (vma->vm_file && 173220ca87f2SLi Xinhai gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping)) 173320ca87f2SLi Xinhai < policy_zone) 173420ca87f2SLi Xinhai return false; 173520ca87f2SLi Xinhai return true; 173620ca87f2SLi Xinhai } 173720ca87f2SLi Xinhai 173874d2c3a0SOleg Nesterov struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, 173974d2c3a0SOleg Nesterov unsigned long addr) 17401da177e4SLinus Torvalds { 17418d90274bSOleg Nesterov struct mempolicy *pol = NULL; 17421da177e4SLinus Torvalds 17431da177e4SLinus Torvalds if (vma) { 1744480eccf9SLee Schermerhorn if (vma->vm_ops && vma->vm_ops->get_policy) { 17458d90274bSOleg Nesterov pol = vma->vm_ops->get_policy(vma, addr); 174600442ad0SMel Gorman } else if (vma->vm_policy) { 17471da177e4SLinus Torvalds pol = vma->vm_policy; 174800442ad0SMel Gorman 174900442ad0SMel Gorman /* 175000442ad0SMel Gorman * shmem_alloc_page() passes MPOL_F_SHARED policy with 175100442ad0SMel Gorman * a pseudo vma whose vma->vm_ops=NULL. Take a reference 175200442ad0SMel Gorman * count on these policies which will be dropped by 175300442ad0SMel Gorman * mpol_cond_put() later 175400442ad0SMel Gorman */ 175500442ad0SMel Gorman if (mpol_needs_cond_ref(pol)) 175600442ad0SMel Gorman mpol_get(pol); 175700442ad0SMel Gorman } 17581da177e4SLinus Torvalds } 1759f15ca78eSOleg Nesterov 176074d2c3a0SOleg Nesterov return pol; 176174d2c3a0SOleg Nesterov } 176274d2c3a0SOleg Nesterov 176374d2c3a0SOleg Nesterov /* 1764dd6eecb9SOleg Nesterov * get_vma_policy(@vma, @addr) 176574d2c3a0SOleg Nesterov * @vma: virtual memory area whose policy is sought 176674d2c3a0SOleg Nesterov * @addr: address in @vma for shared policy lookup 176774d2c3a0SOleg Nesterov * 176874d2c3a0SOleg Nesterov * Returns effective policy for a VMA at specified address. 1769dd6eecb9SOleg Nesterov * Falls back to current->mempolicy or system default policy, as necessary. 177074d2c3a0SOleg Nesterov * Shared policies [those marked as MPOL_F_SHARED] require an extra reference 177174d2c3a0SOleg Nesterov * count--added by the get_policy() vm_op, as appropriate--to protect against 177274d2c3a0SOleg Nesterov * freeing by another task. 
It is the caller's responsibility to free the 177374d2c3a0SOleg Nesterov * extra reference for shared policies. 177474d2c3a0SOleg Nesterov */ 1775ac79f78dSDavid Rientjes static struct mempolicy *get_vma_policy(struct vm_area_struct *vma, 1776dd6eecb9SOleg Nesterov unsigned long addr) 177774d2c3a0SOleg Nesterov { 177874d2c3a0SOleg Nesterov struct mempolicy *pol = __get_vma_policy(vma, addr); 177974d2c3a0SOleg Nesterov 17808d90274bSOleg Nesterov if (!pol) 1781dd6eecb9SOleg Nesterov pol = get_task_policy(current); 17828d90274bSOleg Nesterov 17831da177e4SLinus Torvalds return pol; 17841da177e4SLinus Torvalds } 17851da177e4SLinus Torvalds 17866b6482bbSOleg Nesterov bool vma_policy_mof(struct vm_area_struct *vma) 1787fc314724SMel Gorman { 17886b6482bbSOleg Nesterov struct mempolicy *pol; 1789f15ca78eSOleg Nesterov 1790fc314724SMel Gorman if (vma->vm_ops && vma->vm_ops->get_policy) { 1791fc314724SMel Gorman bool ret = false; 1792fc314724SMel Gorman 1793fc314724SMel Gorman pol = vma->vm_ops->get_policy(vma, vma->vm_start); 1794fc314724SMel Gorman if (pol && (pol->flags & MPOL_F_MOF)) 1795fc314724SMel Gorman ret = true; 1796fc314724SMel Gorman mpol_cond_put(pol); 1797fc314724SMel Gorman 1798fc314724SMel Gorman return ret; 17998d90274bSOleg Nesterov } 18008d90274bSOleg Nesterov 1801fc314724SMel Gorman pol = vma->vm_policy; 18028d90274bSOleg Nesterov if (!pol) 18036b6482bbSOleg Nesterov pol = get_task_policy(current); 1804fc314724SMel Gorman 1805fc314724SMel Gorman return pol->flags & MPOL_F_MOF; 1806fc314724SMel Gorman } 1807fc314724SMel Gorman 1808d3eb1570SLai Jiangshan static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone) 1809d3eb1570SLai Jiangshan { 1810d3eb1570SLai Jiangshan enum zone_type dynamic_policy_zone = policy_zone; 1811d3eb1570SLai Jiangshan 1812d3eb1570SLai Jiangshan BUG_ON(dynamic_policy_zone == ZONE_MOVABLE); 1813d3eb1570SLai Jiangshan 1814d3eb1570SLai Jiangshan /* 1815269fbe72SBen Widawsky * if policy->nodes has movable memory only, 1816d3eb1570SLai Jiangshan * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only. 1817d3eb1570SLai Jiangshan * 1818269fbe72SBen Widawsky * policy->nodes is intersect with node_states[N_MEMORY]. 1819f0953a1bSIngo Molnar * so if the following test fails, it implies 1820269fbe72SBen Widawsky * policy->nodes has movable memory only. 
1821d3eb1570SLai Jiangshan */ 1822269fbe72SBen Widawsky if (!nodes_intersects(policy->nodes, node_states[N_HIGH_MEMORY])) 1823d3eb1570SLai Jiangshan dynamic_policy_zone = ZONE_MOVABLE; 1824d3eb1570SLai Jiangshan 1825d3eb1570SLai Jiangshan return zone >= dynamic_policy_zone; 1826d3eb1570SLai Jiangshan } 1827d3eb1570SLai Jiangshan 182852cd3b07SLee Schermerhorn /* 182952cd3b07SLee Schermerhorn * Return a nodemask representing a mempolicy for filtering nodes for 183052cd3b07SLee Schermerhorn * page allocation 183152cd3b07SLee Schermerhorn */ 18328ca39e68SMuchun Song nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy) 183319770b32SMel Gorman { 1834b27abaccSDave Hansen int mode = policy->mode; 1835b27abaccSDave Hansen 183619770b32SMel Gorman /* Lower zones don't get a nodemask applied for MPOL_BIND */ 1837b27abaccSDave Hansen if (unlikely(mode == MPOL_BIND) && 1838d3eb1570SLai Jiangshan apply_policy_zone(policy, gfp_zone(gfp)) && 1839269fbe72SBen Widawsky cpuset_nodemask_valid_mems_allowed(&policy->nodes)) 1840269fbe72SBen Widawsky return &policy->nodes; 184119770b32SMel Gorman 1842b27abaccSDave Hansen if (mode == MPOL_PREFERRED_MANY) 1843b27abaccSDave Hansen return &policy->nodes; 1844b27abaccSDave Hansen 184519770b32SMel Gorman return NULL; 184619770b32SMel Gorman } 184719770b32SMel Gorman 1848b27abaccSDave Hansen /* 1849b27abaccSDave Hansen * Return the preferred node id for 'prefer' mempolicy, and return 1850b27abaccSDave Hansen * the given id for all other policies. 1851b27abaccSDave Hansen * 1852b27abaccSDave Hansen * policy_node() is always coupled with policy_nodemask(), which 1853b27abaccSDave Hansen * secures the nodemask limit for 'bind' and 'prefer-many' policy. 1854b27abaccSDave Hansen */ 1855f8fd5253SWei Yang static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd) 18561da177e4SLinus Torvalds { 18577858d7bcSFeng Tang if (policy->mode == MPOL_PREFERRED) { 1858269fbe72SBen Widawsky nd = first_node(policy->nodes); 18597858d7bcSFeng Tang } else { 186019770b32SMel Gorman /* 18616d840958SMichal Hocko * __GFP_THISNODE shouldn't even be used with the bind policy 18626d840958SMichal Hocko * because we might easily break the expectation to stay on the 18636d840958SMichal Hocko * requested node and not break the policy. 
186419770b32SMel Gorman */ 18656d840958SMichal Hocko WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE)); 18661da177e4SLinus Torvalds } 18676d840958SMichal Hocko 1868c6018b4bSAneesh Kumar K.V if ((policy->mode == MPOL_BIND || 1869c6018b4bSAneesh Kumar K.V policy->mode == MPOL_PREFERRED_MANY) && 1870c6018b4bSAneesh Kumar K.V policy->home_node != NUMA_NO_NODE) 1871c6018b4bSAneesh Kumar K.V return policy->home_node; 1872c6018b4bSAneesh Kumar K.V 187304ec6264SVlastimil Babka return nd; 18741da177e4SLinus Torvalds } 18751da177e4SLinus Torvalds 18761da177e4SLinus Torvalds /* Do dynamic interleaving for a process */ 18771da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy) 18781da177e4SLinus Torvalds { 187945816682SVlastimil Babka unsigned next; 18801da177e4SLinus Torvalds struct task_struct *me = current; 18811da177e4SLinus Torvalds 1882269fbe72SBen Widawsky next = next_node_in(me->il_prev, policy->nodes); 1883f5b087b5SDavid Rientjes if (next < MAX_NUMNODES) 188445816682SVlastimil Babka me->il_prev = next; 188545816682SVlastimil Babka return next; 18861da177e4SLinus Torvalds } 18871da177e4SLinus Torvalds 1888dc85da15SChristoph Lameter /* 1889dc85da15SChristoph Lameter * Depending on the memory policy provide a node from which to allocate the 1890dc85da15SChristoph Lameter * next slab entry. 1891dc85da15SChristoph Lameter */ 18922a389610SDavid Rientjes unsigned int mempolicy_slab_node(void) 1893dc85da15SChristoph Lameter { 1894e7b691b0SAndi Kleen struct mempolicy *policy; 18952a389610SDavid Rientjes int node = numa_mem_id(); 1896e7b691b0SAndi Kleen 189738b031ddSVasily Averin if (!in_task()) 18982a389610SDavid Rientjes return node; 1899e7b691b0SAndi Kleen 1900e7b691b0SAndi Kleen policy = current->mempolicy; 19017858d7bcSFeng Tang if (!policy) 19022a389610SDavid Rientjes return node; 1903765c4507SChristoph Lameter 1904bea904d5SLee Schermerhorn switch (policy->mode) { 1905bea904d5SLee Schermerhorn case MPOL_PREFERRED: 1906269fbe72SBen Widawsky return first_node(policy->nodes); 1907bea904d5SLee Schermerhorn 1908dc85da15SChristoph Lameter case MPOL_INTERLEAVE: 1909dc85da15SChristoph Lameter return interleave_nodes(policy); 1910dc85da15SChristoph Lameter 1911b27abaccSDave Hansen case MPOL_BIND: 1912b27abaccSDave Hansen case MPOL_PREFERRED_MANY: 1913b27abaccSDave Hansen { 1914c33d6c06SMel Gorman struct zoneref *z; 1915c33d6c06SMel Gorman 1916dc85da15SChristoph Lameter /* 1917dc85da15SChristoph Lameter * Follow bind policy behavior and start allocation at the 1918dc85da15SChristoph Lameter * first node. 1919dc85da15SChristoph Lameter */ 192019770b32SMel Gorman struct zonelist *zonelist; 192119770b32SMel Gorman enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL); 1922c9634cf0SAneesh Kumar K.V zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK]; 1923c33d6c06SMel Gorman z = first_zones_zonelist(zonelist, highest_zoneidx, 1924269fbe72SBen Widawsky &policy->nodes); 1925c1093b74SPavel Tatashin return z->zone ? zone_to_nid(z->zone) : node; 1926dd1a239fSMel Gorman } 19277858d7bcSFeng Tang case MPOL_LOCAL: 19287858d7bcSFeng Tang return node; 1929dc85da15SChristoph Lameter 1930dc85da15SChristoph Lameter default: 1931bea904d5SLee Schermerhorn BUG(); 1932dc85da15SChristoph Lameter } 1933dc85da15SChristoph Lameter } 1934dc85da15SChristoph Lameter 1935fee83b3aSAndrew Morton /* 1936fee83b3aSAndrew Morton * Do static interleaving for a VMA with known offset @n. 
Returns the n'th 1937269fbe72SBen Widawsky * node in pol->nodes (starting from n=0), wrapping around if n exceeds the 1938fee83b3aSAndrew Morton * number of present nodes. 1939fee83b3aSAndrew Morton */ 194098c70baaSLaurent Dufour static unsigned offset_il_node(struct mempolicy *pol, unsigned long n) 19411da177e4SLinus Torvalds { 1942276aeee1Syanghui nodemask_t nodemask = pol->nodes; 1943276aeee1Syanghui unsigned int target, nnodes; 1944fee83b3aSAndrew Morton int i; 1945fee83b3aSAndrew Morton int nid; 1946276aeee1Syanghui /* 1947276aeee1Syanghui * The barrier will stabilize the nodemask in a register or on 1948276aeee1Syanghui * the stack so that it will stop changing under the code. 1949276aeee1Syanghui * 1950276aeee1Syanghui * Between first_node() and next_node(), pol->nodes could be changed 1951276aeee1Syanghui * by other threads. So we put pol->nodes in a local stack. 1952276aeee1Syanghui */ 1953276aeee1Syanghui barrier(); 19541da177e4SLinus Torvalds 1955276aeee1Syanghui nnodes = nodes_weight(nodemask); 1956f5b087b5SDavid Rientjes if (!nnodes) 1957f5b087b5SDavid Rientjes return numa_node_id(); 1958fee83b3aSAndrew Morton target = (unsigned int)n % nnodes; 1959276aeee1Syanghui nid = first_node(nodemask); 1960fee83b3aSAndrew Morton for (i = 0; i < target; i++) 1961276aeee1Syanghui nid = next_node(nid, nodemask); 19621da177e4SLinus Torvalds return nid; 19631da177e4SLinus Torvalds } 19641da177e4SLinus Torvalds 19655da7ca86SChristoph Lameter /* Determine a node number for interleave */ 19665da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol, 19675da7ca86SChristoph Lameter struct vm_area_struct *vma, unsigned long addr, int shift) 19685da7ca86SChristoph Lameter { 19695da7ca86SChristoph Lameter if (vma) { 19705da7ca86SChristoph Lameter unsigned long off; 19715da7ca86SChristoph Lameter 19723b98b087SNishanth Aravamudan /* 19733b98b087SNishanth Aravamudan * for small pages, there is no difference between 19743b98b087SNishanth Aravamudan * shift and PAGE_SHIFT, so the bit-shift is safe. 19753b98b087SNishanth Aravamudan * for huge pages, since vm_pgoff is in units of small 19763b98b087SNishanth Aravamudan * pages, we need to shift off the always 0 bits to get 19773b98b087SNishanth Aravamudan * a useful offset. 
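 *
 * Editorial example: with 4KiB base pages (PAGE_SHIFT == 12) and a 2MiB
 * hugepage VMA (shift == 21), the computation below becomes
 * off = (vm_pgoff >> 9) + ((addr - vm_start) >> 21), so the interleave
 * offset advances once per hugepage rather than once per base page.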
19783b98b087SNishanth Aravamudan */ 19793b98b087SNishanth Aravamudan BUG_ON(shift < PAGE_SHIFT); 19803b98b087SNishanth Aravamudan off = vma->vm_pgoff >> (shift - PAGE_SHIFT); 19815da7ca86SChristoph Lameter off += (addr - vma->vm_start) >> shift; 198298c70baaSLaurent Dufour return offset_il_node(pol, off); 19835da7ca86SChristoph Lameter } else 19845da7ca86SChristoph Lameter return interleave_nodes(pol); 19855da7ca86SChristoph Lameter } 19865da7ca86SChristoph Lameter 198700ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS 1988480eccf9SLee Schermerhorn /* 198904ec6264SVlastimil Babka * huge_node(@vma, @addr, @gfp_flags, @mpol) 1990b46e14acSFabian Frederick * @vma: virtual memory area whose policy is sought 1991b46e14acSFabian Frederick * @addr: address in @vma for shared policy lookup and interleave policy 1992b46e14acSFabian Frederick * @gfp_flags: for requested zone 1993b46e14acSFabian Frederick * @mpol: pointer to mempolicy pointer for reference counted mempolicy 1994b27abaccSDave Hansen * @nodemask: pointer to nodemask pointer for 'bind' and 'prefer-many' policy 1995480eccf9SLee Schermerhorn * 199604ec6264SVlastimil Babka * Returns a nid suitable for a huge page allocation and a pointer 199752cd3b07SLee Schermerhorn * to the struct mempolicy for conditional unref after allocation. 1998b27abaccSDave Hansen * If the effective policy is 'bind' or 'prefer-many', returns a pointer 1999b27abaccSDave Hansen * to the mempolicy's @nodemask for filtering the zonelist. 2000c0ff7453SMiao Xie * 2001d26914d1SMel Gorman * Must be protected by read_mems_allowed_begin() 2002480eccf9SLee Schermerhorn */ 200304ec6264SVlastimil Babka int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags, 200404ec6264SVlastimil Babka struct mempolicy **mpol, nodemask_t **nodemask) 20055da7ca86SChristoph Lameter { 200604ec6264SVlastimil Babka int nid; 2007b27abaccSDave Hansen int mode; 20085da7ca86SChristoph Lameter 2009dd6eecb9SOleg Nesterov *mpol = get_vma_policy(vma, addr); 2010b27abaccSDave Hansen *nodemask = NULL; 2011b27abaccSDave Hansen mode = (*mpol)->mode; 20125da7ca86SChristoph Lameter 2013b27abaccSDave Hansen if (unlikely(mode == MPOL_INTERLEAVE)) { 201404ec6264SVlastimil Babka nid = interleave_nid(*mpol, vma, addr, 201504ec6264SVlastimil Babka huge_page_shift(hstate_vma(vma))); 201652cd3b07SLee Schermerhorn } else { 201704ec6264SVlastimil Babka nid = policy_node(gfp_flags, *mpol, numa_node_id()); 2018b27abaccSDave Hansen if (mode == MPOL_BIND || mode == MPOL_PREFERRED_MANY) 2019269fbe72SBen Widawsky *nodemask = &(*mpol)->nodes; 2020480eccf9SLee Schermerhorn } 202104ec6264SVlastimil Babka return nid; 20225da7ca86SChristoph Lameter } 202306808b08SLee Schermerhorn 202406808b08SLee Schermerhorn /* 202506808b08SLee Schermerhorn * init_nodemask_of_mempolicy 202606808b08SLee Schermerhorn * 202706808b08SLee Schermerhorn * If the current task's mempolicy is "default" [NULL], return 'false' 202806808b08SLee Schermerhorn * to indicate default policy. Otherwise, extract the policy nodemask 202906808b08SLee Schermerhorn * for 'bind' or 'interleave' policy into the argument nodemask, or 203006808b08SLee Schermerhorn * initialize the argument nodemask to contain the single node for 203106808b08SLee Schermerhorn * 'preferred' or 'local' policy and return 'true' to indicate presence 203206808b08SLee Schermerhorn * of non-default mempolicy. 
203306808b08SLee Schermerhorn * 203406808b08SLee Schermerhorn * We don't bother with reference counting the mempolicy [mpol_get/put] 203506808b08SLee Schermerhorn * because the current task is examining its own mempolicy and a task's 203606808b08SLee Schermerhorn * mempolicy is only ever changed by the task itself. 203706808b08SLee Schermerhorn * 203806808b08SLee Schermerhorn * N.B., it is the caller's responsibility to free a returned nodemask. 203906808b08SLee Schermerhorn */ 204006808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask) 204106808b08SLee Schermerhorn { 204206808b08SLee Schermerhorn struct mempolicy *mempolicy; 204306808b08SLee Schermerhorn 204406808b08SLee Schermerhorn if (!(mask && current->mempolicy)) 204506808b08SLee Schermerhorn return false; 204606808b08SLee Schermerhorn 2047c0ff7453SMiao Xie task_lock(current); 204806808b08SLee Schermerhorn mempolicy = current->mempolicy; 204906808b08SLee Schermerhorn switch (mempolicy->mode) { 205006808b08SLee Schermerhorn case MPOL_PREFERRED: 2051b27abaccSDave Hansen case MPOL_PREFERRED_MANY: 205206808b08SLee Schermerhorn case MPOL_BIND: 205306808b08SLee Schermerhorn case MPOL_INTERLEAVE: 2054269fbe72SBen Widawsky *mask = mempolicy->nodes; 205506808b08SLee Schermerhorn break; 205606808b08SLee Schermerhorn 20577858d7bcSFeng Tang case MPOL_LOCAL: 2058269fbe72SBen Widawsky init_nodemask_of_node(mask, numa_node_id()); 20597858d7bcSFeng Tang break; 20607858d7bcSFeng Tang 206106808b08SLee Schermerhorn default: 206206808b08SLee Schermerhorn BUG(); 206306808b08SLee Schermerhorn } 2064c0ff7453SMiao Xie task_unlock(current); 206506808b08SLee Schermerhorn 206606808b08SLee Schermerhorn return true; 206706808b08SLee Schermerhorn } 206800ac59adSChen, Kenneth W #endif 20695da7ca86SChristoph Lameter 20706f48d0ebSDavid Rientjes /* 2071b26e517aSFeng Tang * mempolicy_in_oom_domain 20726f48d0ebSDavid Rientjes * 2073b26e517aSFeng Tang * If tsk's mempolicy is "bind", check for intersection between mask and 2074b26e517aSFeng Tang * the policy nodemask. Otherwise, return true for all other policies 2075b26e517aSFeng Tang * including "interleave", as a tsk with "interleave" policy may have 2076b26e517aSFeng Tang * memory allocated from all nodes in the system. 20776f48d0ebSDavid Rientjes * 20786f48d0ebSDavid Rientjes * Takes task_lock(tsk) to prevent freeing of its mempolicy. 20796f48d0ebSDavid Rientjes */ 2080b26e517aSFeng Tang bool mempolicy_in_oom_domain(struct task_struct *tsk, 20816f48d0ebSDavid Rientjes const nodemask_t *mask) 20826f48d0ebSDavid Rientjes { 20836f48d0ebSDavid Rientjes struct mempolicy *mempolicy; 20846f48d0ebSDavid Rientjes bool ret = true; 20856f48d0ebSDavid Rientjes 20866f48d0ebSDavid Rientjes if (!mask) 20876f48d0ebSDavid Rientjes return ret; 2088b26e517aSFeng Tang 20896f48d0ebSDavid Rientjes task_lock(tsk); 20906f48d0ebSDavid Rientjes mempolicy = tsk->mempolicy; 2091b26e517aSFeng Tang if (mempolicy && mempolicy->mode == MPOL_BIND) 2092269fbe72SBen Widawsky ret = nodes_intersects(mempolicy->nodes, *mask); 20936f48d0ebSDavid Rientjes task_unlock(tsk); 2094b26e517aSFeng Tang 20956f48d0ebSDavid Rientjes return ret; 20966f48d0ebSDavid Rientjes } 20976f48d0ebSDavid Rientjes 20981da177e4SLinus Torvalds /* Allocate a page in interleaved policy. 20991da177e4SLinus Torvalds Own path because it needs to do special accounting.
*/ 2100662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, 2101662f3a0bSAndi Kleen unsigned nid) 21021da177e4SLinus Torvalds { 21031da177e4SLinus Torvalds struct page *page; 21041da177e4SLinus Torvalds 210584172f4bSMatthew Wilcox (Oracle) page = __alloc_pages(gfp, order, nid, NULL); 21064518085eSKemi Wang /* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */ 21074518085eSKemi Wang if (!static_branch_likely(&vm_numa_stat_key)) 21084518085eSKemi Wang return page; 2109de55c8b2SAndrey Ryabinin if (page && page_to_nid(page) == nid) { 2110de55c8b2SAndrey Ryabinin preempt_disable(); 2111f19298b9SMel Gorman __count_numa_event(page_zone(page), NUMA_INTERLEAVE_HIT); 2112de55c8b2SAndrey Ryabinin preempt_enable(); 2113de55c8b2SAndrey Ryabinin } 21141da177e4SLinus Torvalds return page; 21151da177e4SLinus Torvalds } 21161da177e4SLinus Torvalds 21174c54d949SFeng Tang static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order, 21184c54d949SFeng Tang int nid, struct mempolicy *pol) 21194c54d949SFeng Tang { 21204c54d949SFeng Tang struct page *page; 21214c54d949SFeng Tang gfp_t preferred_gfp; 21224c54d949SFeng Tang 21234c54d949SFeng Tang /* 21244c54d949SFeng Tang * This is a two pass approach. The first pass will only try the 21254c54d949SFeng Tang * preferred nodes but skip the direct reclaim and allow the 21264c54d949SFeng Tang * allocation to fail, while the second pass will try all the 21274c54d949SFeng Tang * nodes in system. 21284c54d949SFeng Tang */ 21294c54d949SFeng Tang preferred_gfp = gfp | __GFP_NOWARN; 21304c54d949SFeng Tang preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL); 21314c54d949SFeng Tang page = __alloc_pages(preferred_gfp, order, nid, &pol->nodes); 21324c54d949SFeng Tang if (!page) 2133c0455116SAneesh Kumar K.V page = __alloc_pages(gfp, order, nid, NULL); 21344c54d949SFeng Tang 21354c54d949SFeng Tang return page; 21364c54d949SFeng Tang } 21374c54d949SFeng Tang 21381da177e4SLinus Torvalds /** 2139adf88aa8SMatthew Wilcox (Oracle) * vma_alloc_folio - Allocate a folio for a VMA. 2140eb350739SMatthew Wilcox (Oracle) * @gfp: GFP flags. 2141adf88aa8SMatthew Wilcox (Oracle) * @order: Order of the folio. 21421da177e4SLinus Torvalds * @vma: Pointer to VMA or NULL if not available. 2143eb350739SMatthew Wilcox (Oracle) * @addr: Virtual address of the allocation. Must be inside @vma. 2144eb350739SMatthew Wilcox (Oracle) * @hugepage: For hugepages try only the preferred node if possible. 21451da177e4SLinus Torvalds * 2146adf88aa8SMatthew Wilcox (Oracle) * Allocate a folio for a specific address in @vma, using the appropriate 2147eb350739SMatthew Wilcox (Oracle) * NUMA policy. When @vma is not NULL the caller must hold the mmap_lock 2148eb350739SMatthew Wilcox (Oracle) * of the mm_struct of the VMA to prevent it from going away. Should be 2149adf88aa8SMatthew Wilcox (Oracle) * used for all allocations for folios that will be mapped into user space. 2150eb350739SMatthew Wilcox (Oracle) * 2151adf88aa8SMatthew Wilcox (Oracle) * Return: The folio on success or NULL if allocation fails. 
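 *
 * Editorial example (not part of the original kerneldoc): an anonymous
 * page fault handler might allocate its backing folio with something
 * like vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vmf->address,
 * false), letting the VMA's or task's mempolicy choose the node.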
21521da177e4SLinus Torvalds */ 2153adf88aa8SMatthew Wilcox (Oracle) struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma, 2154be1a13ebSMichal Hocko unsigned long addr, bool hugepage) 21551da177e4SLinus Torvalds { 2156cc9a6c87SMel Gorman struct mempolicy *pol; 2157be1a13ebSMichal Hocko int node = numa_node_id(); 2158adf88aa8SMatthew Wilcox (Oracle) struct folio *folio; 215904ec6264SVlastimil Babka int preferred_nid; 2160be97a41bSVlastimil Babka nodemask_t *nmask; 21611da177e4SLinus Torvalds 2162dd6eecb9SOleg Nesterov pol = get_vma_policy(vma, addr); 2163cc9a6c87SMel Gorman 2164be97a41bSVlastimil Babka if (pol->mode == MPOL_INTERLEAVE) { 2165adf88aa8SMatthew Wilcox (Oracle) struct page *page; 21661da177e4SLinus Torvalds unsigned nid; 21675da7ca86SChristoph Lameter 21688eac563cSAndi Kleen nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order); 216952cd3b07SLee Schermerhorn mpol_cond_put(pol); 2170adf88aa8SMatthew Wilcox (Oracle) gfp |= __GFP_COMP; 21710bbbc0b3SAndrea Arcangeli page = alloc_page_interleave(gfp, order, nid); 2172adf88aa8SMatthew Wilcox (Oracle) if (page && order > 1) 2173adf88aa8SMatthew Wilcox (Oracle) prep_transhuge_page(page); 2174adf88aa8SMatthew Wilcox (Oracle) folio = (struct folio *)page; 2175be97a41bSVlastimil Babka goto out; 21761da177e4SLinus Torvalds } 21771da177e4SLinus Torvalds 21784c54d949SFeng Tang if (pol->mode == MPOL_PREFERRED_MANY) { 2179adf88aa8SMatthew Wilcox (Oracle) struct page *page; 2180adf88aa8SMatthew Wilcox (Oracle) 2181c0455116SAneesh Kumar K.V node = policy_node(gfp, pol, node); 2182adf88aa8SMatthew Wilcox (Oracle) gfp |= __GFP_COMP; 21834c54d949SFeng Tang page = alloc_pages_preferred_many(gfp, order, node, pol); 21844c54d949SFeng Tang mpol_cond_put(pol); 2185adf88aa8SMatthew Wilcox (Oracle) if (page && order > 1) 2186adf88aa8SMatthew Wilcox (Oracle) prep_transhuge_page(page); 2187adf88aa8SMatthew Wilcox (Oracle) folio = (struct folio *)page; 21884c54d949SFeng Tang goto out; 21894c54d949SFeng Tang } 21904c54d949SFeng Tang 219119deb769SDavid Rientjes if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) { 219219deb769SDavid Rientjes int hpage_node = node; 219319deb769SDavid Rientjes 219419deb769SDavid Rientjes /* 219519deb769SDavid Rientjes * For hugepage allocation and non-interleave policy which 219619deb769SDavid Rientjes * allows the current node (or other explicitly preferred 219719deb769SDavid Rientjes * node) we only try to allocate from the current/preferred 219819deb769SDavid Rientjes * node and don't fall back to other nodes, as the cost of 219919deb769SDavid Rientjes * remote accesses would likely offset THP benefits. 220019deb769SDavid Rientjes * 2201b27abaccSDave Hansen * If the policy is interleave or does not allow the current 220219deb769SDavid Rientjes * node in its nodemask, we allocate the standard way. 220319deb769SDavid Rientjes */ 22047858d7bcSFeng Tang if (pol->mode == MPOL_PREFERRED) 2205269fbe72SBen Widawsky hpage_node = first_node(pol->nodes); 220619deb769SDavid Rientjes 220719deb769SDavid Rientjes nmask = policy_nodemask(gfp, pol); 220819deb769SDavid Rientjes if (!nmask || node_isset(hpage_node, *nmask)) { 220919deb769SDavid Rientjes mpol_cond_put(pol); 2210cc638f32SVlastimil Babka /* 2211cc638f32SVlastimil Babka * First, try to allocate THP only on local node, but 2212cc638f32SVlastimil Babka * don't reclaim unnecessarily, just compact. 
2213cc638f32SVlastimil Babka */ 2214adf88aa8SMatthew Wilcox (Oracle) folio = __folio_alloc_node(gfp | __GFP_THISNODE | 2215adf88aa8SMatthew Wilcox (Oracle) __GFP_NORETRY, order, hpage_node); 221676e654ccSDavid Rientjes 221776e654ccSDavid Rientjes /* 221876e654ccSDavid Rientjes * If hugepage allocations are configured to always 221976e654ccSDavid Rientjes * synchronous compact or the vma has been madvised 222076e654ccSDavid Rientjes * to prefer hugepage backing, retry allowing remote 2221cc638f32SVlastimil Babka * memory with both reclaim and compact as well. 222276e654ccSDavid Rientjes */ 2223adf88aa8SMatthew Wilcox (Oracle) if (!folio && (gfp & __GFP_DIRECT_RECLAIM)) 2224adf88aa8SMatthew Wilcox (Oracle) folio = __folio_alloc(gfp, order, hpage_node, 2225adf88aa8SMatthew Wilcox (Oracle) nmask); 222676e654ccSDavid Rientjes 222719deb769SDavid Rientjes goto out; 222819deb769SDavid Rientjes } 222919deb769SDavid Rientjes } 223019deb769SDavid Rientjes 2231077fcf11SAneesh Kumar K.V nmask = policy_nodemask(gfp, pol); 223204ec6264SVlastimil Babka preferred_nid = policy_node(gfp, pol, node); 2233adf88aa8SMatthew Wilcox (Oracle) folio = __folio_alloc(gfp, order, preferred_nid, nmask); 2234d51e9894SVlastimil Babka mpol_cond_put(pol); 2235be97a41bSVlastimil Babka out: 2236f584b680SMatthew Wilcox (Oracle) return folio; 2237f584b680SMatthew Wilcox (Oracle) } 2238adf88aa8SMatthew Wilcox (Oracle) EXPORT_SYMBOL(vma_alloc_folio); 2239f584b680SMatthew Wilcox (Oracle) 22401da177e4SLinus Torvalds /** 2241d7f946d0SMatthew Wilcox (Oracle) * alloc_pages - Allocate pages. 22426421ec76SMatthew Wilcox (Oracle) * @gfp: GFP flags. 22436421ec76SMatthew Wilcox (Oracle) * @order: Power of two of number of pages to allocate. 22441da177e4SLinus Torvalds * 22456421ec76SMatthew Wilcox (Oracle) * Allocate 1 << @order contiguous pages. The physical address of the 22466421ec76SMatthew Wilcox (Oracle) * first page is naturally aligned (eg an order-3 allocation will be aligned 22476421ec76SMatthew Wilcox (Oracle) * to a multiple of 8 * PAGE_SIZE bytes). The NUMA policy of the current 22486421ec76SMatthew Wilcox (Oracle) * process is honoured when in process context. 22491da177e4SLinus Torvalds * 22506421ec76SMatthew Wilcox (Oracle) * Context: Can be called from any context, providing the appropriate GFP 22516421ec76SMatthew Wilcox (Oracle) * flags are used. 22526421ec76SMatthew Wilcox (Oracle) * Return: The page on success or NULL if allocation fails. 
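 *
 * Editorial example: alloc_pages(GFP_KERNEL, 2) returns four physically
 * contiguous, order-aligned pages (16KiB with 4KiB pages), placed
 * according to the calling process's NUMA policy when invoked from
 * process context.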
22531da177e4SLinus Torvalds */ 2254d7f946d0SMatthew Wilcox (Oracle) struct page *alloc_pages(gfp_t gfp, unsigned order) 22551da177e4SLinus Torvalds { 22568d90274bSOleg Nesterov struct mempolicy *pol = &default_policy; 2257c0ff7453SMiao Xie struct page *page; 22581da177e4SLinus Torvalds 22598d90274bSOleg Nesterov if (!in_interrupt() && !(gfp & __GFP_THISNODE)) 22608d90274bSOleg Nesterov pol = get_task_policy(current); 226152cd3b07SLee Schermerhorn 226252cd3b07SLee Schermerhorn /* 226352cd3b07SLee Schermerhorn * No reference counting needed for current->mempolicy 226452cd3b07SLee Schermerhorn * nor system default_policy 226552cd3b07SLee Schermerhorn */ 226645c4745aSLee Schermerhorn if (pol->mode == MPOL_INTERLEAVE) 2267c0ff7453SMiao Xie page = alloc_page_interleave(gfp, order, interleave_nodes(pol)); 22684c54d949SFeng Tang else if (pol->mode == MPOL_PREFERRED_MANY) 22694c54d949SFeng Tang page = alloc_pages_preferred_many(gfp, order, 2270c0455116SAneesh Kumar K.V policy_node(gfp, pol, numa_node_id()), pol); 2271c0ff7453SMiao Xie else 227284172f4bSMatthew Wilcox (Oracle) page = __alloc_pages(gfp, order, 227304ec6264SVlastimil Babka policy_node(gfp, pol, numa_node_id()), 22745c4b4be3SAndi Kleen policy_nodemask(gfp, pol)); 2275cc9a6c87SMel Gorman 2276c0ff7453SMiao Xie return page; 22771da177e4SLinus Torvalds } 2278d7f946d0SMatthew Wilcox (Oracle) EXPORT_SYMBOL(alloc_pages); 22791da177e4SLinus Torvalds 2280cc09cb13SMatthew Wilcox (Oracle) struct folio *folio_alloc(gfp_t gfp, unsigned order) 2281cc09cb13SMatthew Wilcox (Oracle) { 2282cc09cb13SMatthew Wilcox (Oracle) struct page *page = alloc_pages(gfp | __GFP_COMP, order); 2283cc09cb13SMatthew Wilcox (Oracle) 2284cc09cb13SMatthew Wilcox (Oracle) if (page && order > 1) 2285cc09cb13SMatthew Wilcox (Oracle) prep_transhuge_page(page); 2286cc09cb13SMatthew Wilcox (Oracle) return (struct folio *)page; 2287cc09cb13SMatthew Wilcox (Oracle) } 2288cc09cb13SMatthew Wilcox (Oracle) EXPORT_SYMBOL(folio_alloc); 2289cc09cb13SMatthew Wilcox (Oracle) 2290c00b6b96SChen Wandun static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp, 2291c00b6b96SChen Wandun struct mempolicy *pol, unsigned long nr_pages, 2292c00b6b96SChen Wandun struct page **page_array) 2293c00b6b96SChen Wandun { 2294c00b6b96SChen Wandun int nodes; 2295c00b6b96SChen Wandun unsigned long nr_pages_per_node; 2296c00b6b96SChen Wandun int delta; 2297c00b6b96SChen Wandun int i; 2298c00b6b96SChen Wandun unsigned long nr_allocated; 2299c00b6b96SChen Wandun unsigned long total_allocated = 0; 2300c00b6b96SChen Wandun 2301c00b6b96SChen Wandun nodes = nodes_weight(pol->nodes); 2302c00b6b96SChen Wandun nr_pages_per_node = nr_pages / nodes; 2303c00b6b96SChen Wandun delta = nr_pages - nodes * nr_pages_per_node; 2304c00b6b96SChen Wandun 2305c00b6b96SChen Wandun for (i = 0; i < nodes; i++) { 2306c00b6b96SChen Wandun if (delta) { 2307c00b6b96SChen Wandun nr_allocated = __alloc_pages_bulk(gfp, 2308c00b6b96SChen Wandun interleave_nodes(pol), NULL, 2309c00b6b96SChen Wandun nr_pages_per_node + 1, NULL, 2310c00b6b96SChen Wandun page_array); 2311c00b6b96SChen Wandun delta--; 2312c00b6b96SChen Wandun } else { 2313c00b6b96SChen Wandun nr_allocated = __alloc_pages_bulk(gfp, 2314c00b6b96SChen Wandun interleave_nodes(pol), NULL, 2315c00b6b96SChen Wandun nr_pages_per_node, NULL, page_array); 2316c00b6b96SChen Wandun } 2317c00b6b96SChen Wandun 2318c00b6b96SChen Wandun page_array += nr_allocated; 2319c00b6b96SChen Wandun total_allocated += nr_allocated; 2320c00b6b96SChen Wandun } 2321c00b6b96SChen Wandun 2322c00b6b96SChen 
Wandun return total_allocated; 2323c00b6b96SChen Wandun } 2324c00b6b96SChen Wandun 2325c00b6b96SChen Wandun static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid, 2326c00b6b96SChen Wandun struct mempolicy *pol, unsigned long nr_pages, 2327c00b6b96SChen Wandun struct page **page_array) 2328c00b6b96SChen Wandun { 2329c00b6b96SChen Wandun gfp_t preferred_gfp; 2330c00b6b96SChen Wandun unsigned long nr_allocated = 0; 2331c00b6b96SChen Wandun 2332c00b6b96SChen Wandun preferred_gfp = gfp | __GFP_NOWARN; 2333c00b6b96SChen Wandun preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL); 2334c00b6b96SChen Wandun 2335c00b6b96SChen Wandun nr_allocated = __alloc_pages_bulk(preferred_gfp, nid, &pol->nodes, 2336c00b6b96SChen Wandun nr_pages, NULL, page_array); 2337c00b6b96SChen Wandun 2338c00b6b96SChen Wandun if (nr_allocated < nr_pages) 2339c00b6b96SChen Wandun nr_allocated += __alloc_pages_bulk(gfp, numa_node_id(), NULL, 2340c00b6b96SChen Wandun nr_pages - nr_allocated, NULL, 2341c00b6b96SChen Wandun page_array + nr_allocated); 2342c00b6b96SChen Wandun return nr_allocated; 2343c00b6b96SChen Wandun } 2344c00b6b96SChen Wandun 2345c00b6b96SChen Wandun /* Bulk page allocation and the mempolicy should be considered at the 2346c00b6b96SChen Wandun * same time in some situations, such as vmalloc. 2347c00b6b96SChen Wandun * 2348c00b6b96SChen Wandun * It can accelerate memory allocation, especially for interleaved 2349c00b6b96SChen Wandun * allocations. 2350c00b6b96SChen Wandun */ 2351c00b6b96SChen Wandun unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp, 2352c00b6b96SChen Wandun unsigned long nr_pages, struct page **page_array) 2353c00b6b96SChen Wandun { 2354c00b6b96SChen Wandun struct mempolicy *pol = &default_policy; 2355c00b6b96SChen Wandun 2356c00b6b96SChen Wandun if (!in_interrupt() && !(gfp & __GFP_THISNODE)) 2357c00b6b96SChen Wandun pol = get_task_policy(current); 2358c00b6b96SChen Wandun 2359c00b6b96SChen Wandun if (pol->mode == MPOL_INTERLEAVE) 2360c00b6b96SChen Wandun return alloc_pages_bulk_array_interleave(gfp, pol, 2361c00b6b96SChen Wandun nr_pages, page_array); 2362c00b6b96SChen Wandun 2363c00b6b96SChen Wandun if (pol->mode == MPOL_PREFERRED_MANY) 2364c00b6b96SChen Wandun return alloc_pages_bulk_array_preferred_many(gfp, 2365c00b6b96SChen Wandun numa_node_id(), pol, nr_pages, page_array); 2366c00b6b96SChen Wandun 2367c00b6b96SChen Wandun return __alloc_pages_bulk(gfp, policy_node(gfp, pol, numa_node_id()), 2368c00b6b96SChen Wandun policy_nodemask(gfp, pol), nr_pages, NULL, 2369c00b6b96SChen Wandun page_array); 2370c00b6b96SChen Wandun } 2371c00b6b96SChen Wandun 2372ef0855d3SOleg Nesterov int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst) 2373ef0855d3SOleg Nesterov { 2374ef0855d3SOleg Nesterov struct mempolicy *pol = mpol_dup(vma_policy(src)); 2375ef0855d3SOleg Nesterov 2376ef0855d3SOleg Nesterov if (IS_ERR(pol)) 2377ef0855d3SOleg Nesterov return PTR_ERR(pol); 2378ef0855d3SOleg Nesterov dst->vm_policy = pol; 2379ef0855d3SOleg Nesterov return 0; 2380ef0855d3SOleg Nesterov } 2381ef0855d3SOleg Nesterov 23824225399aSPaul Jackson /* 2383846a16bfSLee Schermerhorn * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it 23844225399aSPaul Jackson * rebinds the mempolicy it's copying by calling mpol_rebind_policy() 23854225399aSPaul Jackson * with the mems_allowed returned by cpuset_mems_allowed(). This 23864225399aSPaul Jackson * keeps mempolicies cpuset relative after its cpuset moves.
See 23874225399aSPaul Jackson * further kernel/cpuset.c update_nodemask(). 2388708c1bbcSMiao Xie * 2389708c1bbcSMiao Xie * current's mempolicy may be rebound by another task (the task that changes 2390708c1bbcSMiao Xie * the cpuset's mems), so we needn't do the rebind work for the current task. 23914225399aSPaul Jackson */ 23924225399aSPaul Jackson 2393846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */ 2394846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old) 23951da177e4SLinus Torvalds { 23961da177e4SLinus Torvalds struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL); 23971da177e4SLinus Torvalds 23981da177e4SLinus Torvalds if (!new) 23991da177e4SLinus Torvalds return ERR_PTR(-ENOMEM); 2400708c1bbcSMiao Xie 2401708c1bbcSMiao Xie /* task's mempolicy is protected by alloc_lock */ 2402708c1bbcSMiao Xie if (old == current->mempolicy) { 2403708c1bbcSMiao Xie task_lock(current); 2404708c1bbcSMiao Xie *new = *old; 2405708c1bbcSMiao Xie task_unlock(current); 2406708c1bbcSMiao Xie } else 2407708c1bbcSMiao Xie *new = *old; 2408708c1bbcSMiao Xie 24094225399aSPaul Jackson if (current_cpuset_is_being_rebound()) { 24104225399aSPaul Jackson nodemask_t mems = cpuset_mems_allowed(current); 2411213980c0SVlastimil Babka mpol_rebind_policy(new, &mems); 24124225399aSPaul Jackson } 24131da177e4SLinus Torvalds atomic_set(&new->refcnt, 1); 24141da177e4SLinus Torvalds return new; 24151da177e4SLinus Torvalds } 24161da177e4SLinus Torvalds 24171da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */ 2418fcfb4dccSKOSAKI Motohiro bool __mpol_equal(struct mempolicy *a, struct mempolicy *b) 24191da177e4SLinus Torvalds { 24201da177e4SLinus Torvalds if (!a || !b) 2421fcfb4dccSKOSAKI Motohiro return false; 242245c4745aSLee Schermerhorn if (a->mode != b->mode) 2423fcfb4dccSKOSAKI Motohiro return false; 242419800502SBob Liu if (a->flags != b->flags) 2425fcfb4dccSKOSAKI Motohiro return false; 2426c6018b4bSAneesh Kumar K.V if (a->home_node != b->home_node) 2427c6018b4bSAneesh Kumar K.V return false; 242819800502SBob Liu if (mpol_store_user_nodemask(a)) 242919800502SBob Liu if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask)) 2430fcfb4dccSKOSAKI Motohiro return false; 243119800502SBob Liu 243245c4745aSLee Schermerhorn switch (a->mode) { 243319770b32SMel Gorman case MPOL_BIND: 24341da177e4SLinus Torvalds case MPOL_INTERLEAVE: 24351da177e4SLinus Torvalds case MPOL_PREFERRED: 2436b27abaccSDave Hansen case MPOL_PREFERRED_MANY: 2437269fbe72SBen Widawsky return !!nodes_equal(a->nodes, b->nodes); 24387858d7bcSFeng Tang case MPOL_LOCAL: 24397858d7bcSFeng Tang return true; 24401da177e4SLinus Torvalds default: 24411da177e4SLinus Torvalds BUG(); 2442fcfb4dccSKOSAKI Motohiro return false; 24431da177e4SLinus Torvalds } 24441da177e4SLinus Torvalds } 24451da177e4SLinus Torvalds 24461da177e4SLinus Torvalds /* 24471da177e4SLinus Torvalds * Shared memory backing store policy support. 24481da177e4SLinus Torvalds * 24491da177e4SLinus Torvalds * Remember policies even when nobody has shared memory mapped. 24501da177e4SLinus Torvalds * The policies are kept in a red-black tree linked from the inode. 24514a8c7bb5SNathan Zimmer * They are protected by the sp->lock rwlock, which should be held 24521da177e4SLinus Torvalds * for any accesses to the tree. 24531da177e4SLinus Torvalds */ 24541da177e4SLinus Torvalds 24554a8c7bb5SNathan Zimmer /* 24564a8c7bb5SNathan Zimmer * Lookup the first element intersecting start-end.
Caller holds sp->lock for 24574a8c7bb5SNathan Zimmer * reading or for writing 24584a8c7bb5SNathan Zimmer */ 24591da177e4SLinus Torvalds static struct sp_node * 24601da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end) 24611da177e4SLinus Torvalds { 24621da177e4SLinus Torvalds struct rb_node *n = sp->root.rb_node; 24631da177e4SLinus Torvalds 24641da177e4SLinus Torvalds while (n) { 24651da177e4SLinus Torvalds struct sp_node *p = rb_entry(n, struct sp_node, nd); 24661da177e4SLinus Torvalds 24671da177e4SLinus Torvalds if (start >= p->end) 24681da177e4SLinus Torvalds n = n->rb_right; 24691da177e4SLinus Torvalds else if (end <= p->start) 24701da177e4SLinus Torvalds n = n->rb_left; 24711da177e4SLinus Torvalds else 24721da177e4SLinus Torvalds break; 24731da177e4SLinus Torvalds } 24741da177e4SLinus Torvalds if (!n) 24751da177e4SLinus Torvalds return NULL; 24761da177e4SLinus Torvalds for (;;) { 24771da177e4SLinus Torvalds struct sp_node *w = NULL; 24781da177e4SLinus Torvalds struct rb_node *prev = rb_prev(n); 24791da177e4SLinus Torvalds if (!prev) 24801da177e4SLinus Torvalds break; 24811da177e4SLinus Torvalds w = rb_entry(prev, struct sp_node, nd); 24821da177e4SLinus Torvalds if (w->end <= start) 24831da177e4SLinus Torvalds break; 24841da177e4SLinus Torvalds n = prev; 24851da177e4SLinus Torvalds } 24861da177e4SLinus Torvalds return rb_entry(n, struct sp_node, nd); 24871da177e4SLinus Torvalds } 24881da177e4SLinus Torvalds 24894a8c7bb5SNathan Zimmer /* 24904a8c7bb5SNathan Zimmer * Insert a new shared policy into the list. Caller holds sp->lock for 24914a8c7bb5SNathan Zimmer * writing. 24924a8c7bb5SNathan Zimmer */ 24931da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new) 24941da177e4SLinus Torvalds { 24951da177e4SLinus Torvalds struct rb_node **p = &sp->root.rb_node; 24961da177e4SLinus Torvalds struct rb_node *parent = NULL; 24971da177e4SLinus Torvalds struct sp_node *nd; 24981da177e4SLinus Torvalds 24991da177e4SLinus Torvalds while (*p) { 25001da177e4SLinus Torvalds parent = *p; 25011da177e4SLinus Torvalds nd = rb_entry(parent, struct sp_node, nd); 25021da177e4SLinus Torvalds if (new->start < nd->start) 25031da177e4SLinus Torvalds p = &(*p)->rb_left; 25041da177e4SLinus Torvalds else if (new->end > nd->end) 25051da177e4SLinus Torvalds p = &(*p)->rb_right; 25061da177e4SLinus Torvalds else 25071da177e4SLinus Torvalds BUG(); 25081da177e4SLinus Torvalds } 25091da177e4SLinus Torvalds rb_link_node(&new->nd, parent, p); 25101da177e4SLinus Torvalds rb_insert_color(&new->nd, &sp->root); 2511140d5a49SPaul Mundt pr_debug("inserting %lx-%lx: %d\n", new->start, new->end, 251245c4745aSLee Schermerhorn new->policy ? 
new->policy->mode : 0); 25131da177e4SLinus Torvalds } 25141da177e4SLinus Torvalds 25151da177e4SLinus Torvalds /* Find shared policy intersecting idx */ 25161da177e4SLinus Torvalds struct mempolicy * 25171da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx) 25181da177e4SLinus Torvalds { 25191da177e4SLinus Torvalds struct mempolicy *pol = NULL; 25201da177e4SLinus Torvalds struct sp_node *sn; 25211da177e4SLinus Torvalds 25221da177e4SLinus Torvalds if (!sp->root.rb_node) 25231da177e4SLinus Torvalds return NULL; 25244a8c7bb5SNathan Zimmer read_lock(&sp->lock); 25251da177e4SLinus Torvalds sn = sp_lookup(sp, idx, idx+1); 25261da177e4SLinus Torvalds if (sn) { 25271da177e4SLinus Torvalds mpol_get(sn->policy); 25281da177e4SLinus Torvalds pol = sn->policy; 25291da177e4SLinus Torvalds } 25304a8c7bb5SNathan Zimmer read_unlock(&sp->lock); 25311da177e4SLinus Torvalds return pol; 25321da177e4SLinus Torvalds } 25331da177e4SLinus Torvalds 253463f74ca2SKOSAKI Motohiro static void sp_free(struct sp_node *n) 253563f74ca2SKOSAKI Motohiro { 253663f74ca2SKOSAKI Motohiro mpol_put(n->policy); 253763f74ca2SKOSAKI Motohiro kmem_cache_free(sn_cache, n); 253863f74ca2SKOSAKI Motohiro } 253963f74ca2SKOSAKI Motohiro 2540771fb4d8SLee Schermerhorn /** 2541771fb4d8SLee Schermerhorn * mpol_misplaced - check whether current page node is valid in policy 2542771fb4d8SLee Schermerhorn * 2543b46e14acSFabian Frederick * @page: page to be checked 2544b46e14acSFabian Frederick * @vma: vm area where page mapped 2545b46e14acSFabian Frederick * @addr: virtual address where page mapped 2546771fb4d8SLee Schermerhorn * 2547771fb4d8SLee Schermerhorn * Lookup current policy node id for vma,addr and "compare to" page's 25485f076944SMatthew Wilcox (Oracle) * node id. Policy determination "mimics" alloc_page_vma(). 2549771fb4d8SLee Schermerhorn * Called from fault path where we know the vma and faulting address. 25505f076944SMatthew Wilcox (Oracle) * 2551062db293SBaolin Wang * Return: NUMA_NO_NODE if the page is in a node that is valid for this 2552062db293SBaolin Wang * policy, or a suitable node ID to allocate a replacement page from. 
2553771fb4d8SLee Schermerhorn */ 2554771fb4d8SLee Schermerhorn int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr) 2555771fb4d8SLee Schermerhorn { 2556771fb4d8SLee Schermerhorn struct mempolicy *pol; 2557c33d6c06SMel Gorman struct zoneref *z; 2558771fb4d8SLee Schermerhorn int curnid = page_to_nid(page); 2559771fb4d8SLee Schermerhorn unsigned long pgoff; 256090572890SPeter Zijlstra int thiscpu = raw_smp_processor_id(); 256190572890SPeter Zijlstra int thisnid = cpu_to_node(thiscpu); 256298fa15f3SAnshuman Khandual int polnid = NUMA_NO_NODE; 2563062db293SBaolin Wang int ret = NUMA_NO_NODE; 2564771fb4d8SLee Schermerhorn 2565dd6eecb9SOleg Nesterov pol = get_vma_policy(vma, addr); 2566771fb4d8SLee Schermerhorn if (!(pol->flags & MPOL_F_MOF)) 2567771fb4d8SLee Schermerhorn goto out; 2568771fb4d8SLee Schermerhorn 2569771fb4d8SLee Schermerhorn switch (pol->mode) { 2570771fb4d8SLee Schermerhorn case MPOL_INTERLEAVE: 2571771fb4d8SLee Schermerhorn pgoff = vma->vm_pgoff; 2572771fb4d8SLee Schermerhorn pgoff += (addr - vma->vm_start) >> PAGE_SHIFT; 257398c70baaSLaurent Dufour polnid = offset_il_node(pol, pgoff); 2574771fb4d8SLee Schermerhorn break; 2575771fb4d8SLee Schermerhorn 2576771fb4d8SLee Schermerhorn case MPOL_PREFERRED: 2577b27abaccSDave Hansen if (node_isset(curnid, pol->nodes)) 2578b27abaccSDave Hansen goto out; 2579269fbe72SBen Widawsky polnid = first_node(pol->nodes); 2580771fb4d8SLee Schermerhorn break; 2581771fb4d8SLee Schermerhorn 25827858d7bcSFeng Tang case MPOL_LOCAL: 25837858d7bcSFeng Tang polnid = numa_node_id(); 25847858d7bcSFeng Tang break; 25857858d7bcSFeng Tang 2586771fb4d8SLee Schermerhorn case MPOL_BIND: 2587bda420b9SHuang Ying /* Optimize placement among multiple nodes via NUMA balancing */ 2588bda420b9SHuang Ying if (pol->flags & MPOL_F_MORON) { 2589269fbe72SBen Widawsky if (node_isset(thisnid, pol->nodes)) 2590bda420b9SHuang Ying break; 2591bda420b9SHuang Ying goto out; 2592bda420b9SHuang Ying } 2593b27abaccSDave Hansen fallthrough; 2594c33d6c06SMel Gorman 2595b27abaccSDave Hansen case MPOL_PREFERRED_MANY: 2596771fb4d8SLee Schermerhorn /* 2597771fb4d8SLee Schermerhorn * use current page if in policy nodemask, 2598771fb4d8SLee Schermerhorn * else select nearest allowed node, if any. 2599771fb4d8SLee Schermerhorn * If no allowed nodes, use current [!misplaced]. 
2600771fb4d8SLee Schermerhorn */ 2601269fbe72SBen Widawsky if (node_isset(curnid, pol->nodes)) 2602771fb4d8SLee Schermerhorn goto out; 2603c33d6c06SMel Gorman z = first_zones_zonelist( 2604771fb4d8SLee Schermerhorn node_zonelist(numa_node_id(), GFP_HIGHUSER), 2605771fb4d8SLee Schermerhorn gfp_zone(GFP_HIGHUSER), 2606269fbe72SBen Widawsky &pol->nodes); 2607c1093b74SPavel Tatashin polnid = zone_to_nid(z->zone); 2608771fb4d8SLee Schermerhorn break; 2609771fb4d8SLee Schermerhorn 2610771fb4d8SLee Schermerhorn default: 2611771fb4d8SLee Schermerhorn BUG(); 2612771fb4d8SLee Schermerhorn } 26135606e387SMel Gorman 26145606e387SMel Gorman /* Migrate the page towards the node whose CPU is referencing it */ 2615e42c8ff2SMel Gorman if (pol->flags & MPOL_F_MORON) { 261690572890SPeter Zijlstra polnid = thisnid; 26175606e387SMel Gorman 261810f39042SRik van Riel if (!should_numa_migrate_memory(current, page, curnid, thiscpu)) 2619de1c9ce6SRik van Riel goto out; 2620de1c9ce6SRik van Riel } 2621e42c8ff2SMel Gorman 2622771fb4d8SLee Schermerhorn if (curnid != polnid) 2623771fb4d8SLee Schermerhorn ret = polnid; 2624771fb4d8SLee Schermerhorn out: 2625771fb4d8SLee Schermerhorn mpol_cond_put(pol); 2626771fb4d8SLee Schermerhorn 2627771fb4d8SLee Schermerhorn return ret; 2628771fb4d8SLee Schermerhorn } 2629771fb4d8SLee Schermerhorn 2630c11600e4SDavid Rientjes /* 2631c11600e4SDavid Rientjes * Drop the (possibly final) reference to task->mempolicy. It needs to be 2632c11600e4SDavid Rientjes * dropped after task->mempolicy is set to NULL so that any allocation done as 2633c11600e4SDavid Rientjes * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed 2634c11600e4SDavid Rientjes * policy. 2635c11600e4SDavid Rientjes */ 2636c11600e4SDavid Rientjes void mpol_put_task_policy(struct task_struct *task) 2637c11600e4SDavid Rientjes { 2638c11600e4SDavid Rientjes struct mempolicy *pol; 2639c11600e4SDavid Rientjes 2640c11600e4SDavid Rientjes task_lock(task); 2641c11600e4SDavid Rientjes pol = task->mempolicy; 2642c11600e4SDavid Rientjes task->mempolicy = NULL; 2643c11600e4SDavid Rientjes task_unlock(task); 2644c11600e4SDavid Rientjes mpol_put(pol); 2645c11600e4SDavid Rientjes } 2646c11600e4SDavid Rientjes 26471da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n) 26481da177e4SLinus Torvalds { 2649140d5a49SPaul Mundt pr_debug("deleting %lx-%lx\n", n->start, n->end); 26501da177e4SLinus Torvalds rb_erase(&n->nd, &sp->root); 265163f74ca2SKOSAKI Motohiro sp_free(n); 26521da177e4SLinus Torvalds } 26531da177e4SLinus Torvalds 265442288fe3SMel Gorman static void sp_node_init(struct sp_node *node, unsigned long start, 265542288fe3SMel Gorman unsigned long end, struct mempolicy *pol) 265642288fe3SMel Gorman { 265742288fe3SMel Gorman node->start = start; 265842288fe3SMel Gorman node->end = end; 265942288fe3SMel Gorman node->policy = pol; 266042288fe3SMel Gorman } 266142288fe3SMel Gorman 2662dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end, 2663dbcb0f19SAdrian Bunk struct mempolicy *pol) 26641da177e4SLinus Torvalds { 2665869833f2SKOSAKI Motohiro struct sp_node *n; 2666869833f2SKOSAKI Motohiro struct mempolicy *newpol; 26671da177e4SLinus Torvalds 2668869833f2SKOSAKI Motohiro n = kmem_cache_alloc(sn_cache, GFP_KERNEL); 26691da177e4SLinus Torvalds if (!n) 26701da177e4SLinus Torvalds return NULL; 2671869833f2SKOSAKI Motohiro 2672869833f2SKOSAKI Motohiro newpol = mpol_dup(pol); 2673869833f2SKOSAKI Motohiro if (IS_ERR(newpol)) { 2674869833f2SKOSAKI
Motohiro kmem_cache_free(sn_cache, n); 2675869833f2SKOSAKI Motohiro return NULL; 2676869833f2SKOSAKI Motohiro } 2677869833f2SKOSAKI Motohiro newpol->flags |= MPOL_F_SHARED; 267842288fe3SMel Gorman sp_node_init(n, start, end, newpol); 2679869833f2SKOSAKI Motohiro 26801da177e4SLinus Torvalds return n; 26811da177e4SLinus Torvalds } 26821da177e4SLinus Torvalds 26831da177e4SLinus Torvalds /* Replace a policy range. */ 26841da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start, 26851da177e4SLinus Torvalds unsigned long end, struct sp_node *new) 26861da177e4SLinus Torvalds { 2687b22d127aSMel Gorman struct sp_node *n; 268842288fe3SMel Gorman struct sp_node *n_new = NULL; 268942288fe3SMel Gorman struct mempolicy *mpol_new = NULL; 2690b22d127aSMel Gorman int ret = 0; 26911da177e4SLinus Torvalds 269242288fe3SMel Gorman restart: 26934a8c7bb5SNathan Zimmer write_lock(&sp->lock); 26941da177e4SLinus Torvalds n = sp_lookup(sp, start, end); 26951da177e4SLinus Torvalds /* Take care of old policies in the same range. */ 26961da177e4SLinus Torvalds while (n && n->start < end) { 26971da177e4SLinus Torvalds struct rb_node *next = rb_next(&n->nd); 26981da177e4SLinus Torvalds if (n->start >= start) { 26991da177e4SLinus Torvalds if (n->end <= end) 27001da177e4SLinus Torvalds sp_delete(sp, n); 27011da177e4SLinus Torvalds else 27021da177e4SLinus Torvalds n->start = end; 27031da177e4SLinus Torvalds } else { 27041da177e4SLinus Torvalds /* Old policy spanning whole new range. */ 27051da177e4SLinus Torvalds if (n->end > end) { 270642288fe3SMel Gorman if (!n_new) 270742288fe3SMel Gorman goto alloc_new; 270842288fe3SMel Gorman 270942288fe3SMel Gorman *mpol_new = *n->policy; 271042288fe3SMel Gorman atomic_set(&mpol_new->refcnt, 1); 27117880639cSKOSAKI Motohiro sp_node_init(n_new, end, n->end, mpol_new); 27121da177e4SLinus Torvalds n->end = start; 27135ca39575SHillf Danton sp_insert(sp, n_new); 271442288fe3SMel Gorman n_new = NULL; 271542288fe3SMel Gorman mpol_new = NULL; 27161da177e4SLinus Torvalds break; 27171da177e4SLinus Torvalds } else 27181da177e4SLinus Torvalds n->end = start; 27191da177e4SLinus Torvalds } 27201da177e4SLinus Torvalds if (!next) 27211da177e4SLinus Torvalds break; 27221da177e4SLinus Torvalds n = rb_entry(next, struct sp_node, nd); 27231da177e4SLinus Torvalds } 27241da177e4SLinus Torvalds if (new) 27251da177e4SLinus Torvalds sp_insert(sp, new); 27264a8c7bb5SNathan Zimmer write_unlock(&sp->lock); 272742288fe3SMel Gorman ret = 0; 272842288fe3SMel Gorman 272942288fe3SMel Gorman err_out: 273042288fe3SMel Gorman if (mpol_new) 273142288fe3SMel Gorman mpol_put(mpol_new); 273242288fe3SMel Gorman if (n_new) 273342288fe3SMel Gorman kmem_cache_free(sn_cache, n_new); 273442288fe3SMel Gorman 2735b22d127aSMel Gorman return ret; 273642288fe3SMel Gorman 273742288fe3SMel Gorman alloc_new: 27384a8c7bb5SNathan Zimmer write_unlock(&sp->lock); 273942288fe3SMel Gorman ret = -ENOMEM; 274042288fe3SMel Gorman n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL); 274142288fe3SMel Gorman if (!n_new) 274242288fe3SMel Gorman goto err_out; 274342288fe3SMel Gorman mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL); 274442288fe3SMel Gorman if (!mpol_new) 274542288fe3SMel Gorman goto err_out; 27464ad09955SMiaohe Lin atomic_set(&mpol_new->refcnt, 1); 274742288fe3SMel Gorman goto restart; 27481da177e4SLinus Torvalds } 27491da177e4SLinus Torvalds 275071fe804bSLee Schermerhorn /** 275171fe804bSLee Schermerhorn * mpol_shared_policy_init - initialize shared policy for inode 275271fe804bSLee 
Schermerhorn * @sp: pointer to inode shared policy 275371fe804bSLee Schermerhorn * @mpol: struct mempolicy to install 275471fe804bSLee Schermerhorn * 275571fe804bSLee Schermerhorn * Install non-NULL @mpol in inode's shared policy rb-tree. 275671fe804bSLee Schermerhorn * On entry, the current task has a reference on a non-NULL @mpol. 275771fe804bSLee Schermerhorn * This must be released on exit. 27584bfc4495SKAMEZAWA Hiroyuki * This is called at get_inode() calls and we can use GFP_KERNEL. 275971fe804bSLee Schermerhorn */ 276071fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) 27617339ff83SRobin Holt { 276258568d2aSMiao Xie int ret; 276358568d2aSMiao Xie 276471fe804bSLee Schermerhorn sp->root = RB_ROOT; /* empty tree == default mempolicy */ 27654a8c7bb5SNathan Zimmer rwlock_init(&sp->lock); 27667339ff83SRobin Holt 276771fe804bSLee Schermerhorn if (mpol) { 27687339ff83SRobin Holt struct vm_area_struct pvma; 276971fe804bSLee Schermerhorn struct mempolicy *new; 27704bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 27717339ff83SRobin Holt 27724bfc4495SKAMEZAWA Hiroyuki if (!scratch) 27735c0c1654SLee Schermerhorn goto put_mpol; 277471fe804bSLee Schermerhorn /* contextualize the tmpfs mount point mempolicy */ 277571fe804bSLee Schermerhorn new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask); 277615d77835SLee Schermerhorn if (IS_ERR(new)) 27770cae3457SDan Carpenter goto free_scratch; /* no valid nodemask intersection */ 277858568d2aSMiao Xie 277958568d2aSMiao Xie task_lock(current); 27804bfc4495SKAMEZAWA Hiroyuki ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch); 278158568d2aSMiao Xie task_unlock(current); 278215d77835SLee Schermerhorn if (ret) 27835c0c1654SLee Schermerhorn goto put_new; 278471fe804bSLee Schermerhorn 278571fe804bSLee Schermerhorn /* Create pseudo-vma that contains just the policy */ 27862c4541e2SKirill A. Shutemov vma_init(&pvma, NULL); 278771fe804bSLee Schermerhorn pvma.vm_end = TASK_SIZE; /* policy covers entire file */ 278871fe804bSLee Schermerhorn mpol_set_shared_policy(sp, &pvma, new); /* adds ref */ 278915d77835SLee Schermerhorn 27905c0c1654SLee Schermerhorn put_new: 279171fe804bSLee Schermerhorn mpol_put(new); /* drop initial ref */ 27920cae3457SDan Carpenter free_scratch: 27934bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 27945c0c1654SLee Schermerhorn put_mpol: 27955c0c1654SLee Schermerhorn mpol_put(mpol); /* drop our incoming ref on sb mpol */ 27967339ff83SRobin Holt } 27977339ff83SRobin Holt } 27987339ff83SRobin Holt 27991da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info, 28001da177e4SLinus Torvalds struct vm_area_struct *vma, struct mempolicy *npol) 28011da177e4SLinus Torvalds { 28021da177e4SLinus Torvalds int err; 28031da177e4SLinus Torvalds struct sp_node *new = NULL; 28041da177e4SLinus Torvalds unsigned long sz = vma_pages(vma); 28051da177e4SLinus Torvalds 2806028fec41SDavid Rientjes pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n", 28071da177e4SLinus Torvalds vma->vm_pgoff, 280845c4745aSLee Schermerhorn sz, npol ? npol->mode : -1, 2809028fec41SDavid Rientjes npol ? npol->flags : -1, 2810269fbe72SBen Widawsky npol ? 
nodes_addr(npol->nodes)[0] : NUMA_NO_NODE); 28111da177e4SLinus Torvalds 28121da177e4SLinus Torvalds if (npol) { 28131da177e4SLinus Torvalds new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol); 28141da177e4SLinus Torvalds if (!new) 28151da177e4SLinus Torvalds return -ENOMEM; 28161da177e4SLinus Torvalds } 28171da177e4SLinus Torvalds err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new); 28181da177e4SLinus Torvalds if (err && new) 281963f74ca2SKOSAKI Motohiro sp_free(new); 28201da177e4SLinus Torvalds return err; 28211da177e4SLinus Torvalds } 28221da177e4SLinus Torvalds 28231da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */ 28241da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p) 28251da177e4SLinus Torvalds { 28261da177e4SLinus Torvalds struct sp_node *n; 28271da177e4SLinus Torvalds struct rb_node *next; 28281da177e4SLinus Torvalds 28291da177e4SLinus Torvalds if (!p->root.rb_node) 28301da177e4SLinus Torvalds return; 28314a8c7bb5SNathan Zimmer write_lock(&p->lock); 28321da177e4SLinus Torvalds next = rb_first(&p->root); 28331da177e4SLinus Torvalds while (next) { 28341da177e4SLinus Torvalds n = rb_entry(next, struct sp_node, nd); 28351da177e4SLinus Torvalds next = rb_next(&n->nd); 283663f74ca2SKOSAKI Motohiro sp_delete(p, n); 28371da177e4SLinus Torvalds } 28384a8c7bb5SNathan Zimmer write_unlock(&p->lock); 28391da177e4SLinus Torvalds } 28401da177e4SLinus Torvalds 28411a687c2eSMel Gorman #ifdef CONFIG_NUMA_BALANCING 2842c297663cSMel Gorman static int __initdata numabalancing_override; 28431a687c2eSMel Gorman 28441a687c2eSMel Gorman static void __init check_numabalancing_enable(void) 28451a687c2eSMel Gorman { 28461a687c2eSMel Gorman bool numabalancing_default = false; 28471a687c2eSMel Gorman 28481a687c2eSMel Gorman if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED)) 28491a687c2eSMel Gorman numabalancing_default = true; 28501a687c2eSMel Gorman 2851c297663cSMel Gorman /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */ 2852c297663cSMel Gorman if (numabalancing_override) 2853c297663cSMel Gorman set_numabalancing_state(numabalancing_override == 1); 2854c297663cSMel Gorman 2855b0dc2b9bSMel Gorman if (num_online_nodes() > 1 && !numabalancing_override) { 2856756a025fSJoe Perches pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n", 2857c297663cSMel Gorman numabalancing_default ? 
"Enabling" : "Disabling"); 28581a687c2eSMel Gorman set_numabalancing_state(numabalancing_default); 28591a687c2eSMel Gorman } 28601a687c2eSMel Gorman } 28611a687c2eSMel Gorman 28621a687c2eSMel Gorman static int __init setup_numabalancing(char *str) 28631a687c2eSMel Gorman { 28641a687c2eSMel Gorman int ret = 0; 28651a687c2eSMel Gorman if (!str) 28661a687c2eSMel Gorman goto out; 28671a687c2eSMel Gorman 28681a687c2eSMel Gorman if (!strcmp(str, "enable")) { 2869c297663cSMel Gorman numabalancing_override = 1; 28701a687c2eSMel Gorman ret = 1; 28711a687c2eSMel Gorman } else if (!strcmp(str, "disable")) { 2872c297663cSMel Gorman numabalancing_override = -1; 28731a687c2eSMel Gorman ret = 1; 28741a687c2eSMel Gorman } 28751a687c2eSMel Gorman out: 28761a687c2eSMel Gorman if (!ret) 28774a404beaSAndrew Morton pr_warn("Unable to parse numa_balancing=\n"); 28781a687c2eSMel Gorman 28791a687c2eSMel Gorman return ret; 28801a687c2eSMel Gorman } 28811a687c2eSMel Gorman __setup("numa_balancing=", setup_numabalancing); 28821a687c2eSMel Gorman #else 28831a687c2eSMel Gorman static inline void __init check_numabalancing_enable(void) 28841a687c2eSMel Gorman { 28851a687c2eSMel Gorman } 28861a687c2eSMel Gorman #endif /* CONFIG_NUMA_BALANCING */ 28871a687c2eSMel Gorman 28881da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */ 28891da177e4SLinus Torvalds void __init numa_policy_init(void) 28901da177e4SLinus Torvalds { 2891b71636e2SPaul Mundt nodemask_t interleave_nodes; 2892b71636e2SPaul Mundt unsigned long largest = 0; 2893b71636e2SPaul Mundt int nid, prefer = 0; 2894b71636e2SPaul Mundt 28951da177e4SLinus Torvalds policy_cache = kmem_cache_create("numa_policy", 28961da177e4SLinus Torvalds sizeof(struct mempolicy), 289720c2df83SPaul Mundt 0, SLAB_PANIC, NULL); 28981da177e4SLinus Torvalds 28991da177e4SLinus Torvalds sn_cache = kmem_cache_create("shared_policy_node", 29001da177e4SLinus Torvalds sizeof(struct sp_node), 290120c2df83SPaul Mundt 0, SLAB_PANIC, NULL); 29021da177e4SLinus Torvalds 29035606e387SMel Gorman for_each_node(nid) { 29045606e387SMel Gorman preferred_node_policy[nid] = (struct mempolicy) { 29055606e387SMel Gorman .refcnt = ATOMIC_INIT(1), 29065606e387SMel Gorman .mode = MPOL_PREFERRED, 29075606e387SMel Gorman .flags = MPOL_F_MOF | MPOL_F_MORON, 2908269fbe72SBen Widawsky .nodes = nodemask_of_node(nid), 29095606e387SMel Gorman }; 29105606e387SMel Gorman } 29115606e387SMel Gorman 2912b71636e2SPaul Mundt /* 2913b71636e2SPaul Mundt * Set interleaving policy for system init. Interleaving is only 2914b71636e2SPaul Mundt * enabled across suitably sized nodes (default is >= 16MB), or 2915b71636e2SPaul Mundt * fall back to the largest node if they're all smaller. 2916b71636e2SPaul Mundt */ 2917b71636e2SPaul Mundt nodes_clear(interleave_nodes); 291801f13bd6SLai Jiangshan for_each_node_state(nid, N_MEMORY) { 2919b71636e2SPaul Mundt unsigned long total_pages = node_present_pages(nid); 29201da177e4SLinus Torvalds 2921b71636e2SPaul Mundt /* Preserve the largest node */ 2922b71636e2SPaul Mundt if (largest < total_pages) { 2923b71636e2SPaul Mundt largest = total_pages; 2924b71636e2SPaul Mundt prefer = nid; 2925b71636e2SPaul Mundt } 2926b71636e2SPaul Mundt 2927b71636e2SPaul Mundt /* Interleave this node? 
*/ 2928b71636e2SPaul Mundt if ((total_pages << PAGE_SHIFT) >= (16 << 20)) 2929b71636e2SPaul Mundt node_set(nid, interleave_nodes); 2930b71636e2SPaul Mundt } 2931b71636e2SPaul Mundt 2932b71636e2SPaul Mundt /* All too small, use the largest */ 2933b71636e2SPaul Mundt if (unlikely(nodes_empty(interleave_nodes))) 2934b71636e2SPaul Mundt node_set(prefer, interleave_nodes); 2935b71636e2SPaul Mundt 2936028fec41SDavid Rientjes if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes)) 2937b1de0d13SMitchel Humpherys pr_err("%s: interleaving failed\n", __func__); 29381a687c2eSMel Gorman 29391a687c2eSMel Gorman check_numabalancing_enable(); 29401da177e4SLinus Torvalds } 29411da177e4SLinus Torvalds 29428bccd85fSChristoph Lameter /* Reset policy of current process to default */ 29431da177e4SLinus Torvalds void numa_default_policy(void) 29441da177e4SLinus Torvalds { 2945028fec41SDavid Rientjes do_set_mempolicy(MPOL_DEFAULT, 0, NULL); 29461da177e4SLinus Torvalds } 294768860ec1SPaul Jackson 29484225399aSPaul Jackson /* 2949095f1fc4SLee Schermerhorn * Parse and format mempolicy from/to strings 2950095f1fc4SLee Schermerhorn */ 2951095f1fc4SLee Schermerhorn 2952345ace9cSLee Schermerhorn static const char * const policy_modes[] = 2953345ace9cSLee Schermerhorn { 2954345ace9cSLee Schermerhorn [MPOL_DEFAULT] = "default", 2955345ace9cSLee Schermerhorn [MPOL_PREFERRED] = "prefer", 2956345ace9cSLee Schermerhorn [MPOL_BIND] = "bind", 2957345ace9cSLee Schermerhorn [MPOL_INTERLEAVE] = "interleave", 2958d3a71033SLee Schermerhorn [MPOL_LOCAL] = "local", 2959b27abaccSDave Hansen [MPOL_PREFERRED_MANY] = "prefer (many)", 2960345ace9cSLee Schermerhorn }; 29611a75a6c8SChristoph Lameter 2962095f1fc4SLee Schermerhorn 2963095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS 2964095f1fc4SLee Schermerhorn /** 2965f2a07f40SHugh Dickins * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option. 2966095f1fc4SLee Schermerhorn * @str: string containing mempolicy to parse 296771fe804bSLee Schermerhorn * @mpol: pointer to struct mempolicy pointer, returned on success. 
2968095f1fc4SLee Schermerhorn * 2969095f1fc4SLee Schermerhorn * Format of input: 2970095f1fc4SLee Schermerhorn * <mode>[=<flags>][:<nodelist>] 2971095f1fc4SLee Schermerhorn * 2972dad5b023SRandy Dunlap * Return: %0 on success, else %1 2973095f1fc4SLee Schermerhorn */ 2974a7a88b23SHugh Dickins int mpol_parse_str(char *str, struct mempolicy **mpol) 2975095f1fc4SLee Schermerhorn { 297671fe804bSLee Schermerhorn struct mempolicy *new = NULL; 2977f2a07f40SHugh Dickins unsigned short mode_flags; 297871fe804bSLee Schermerhorn nodemask_t nodes; 2979095f1fc4SLee Schermerhorn char *nodelist = strchr(str, ':'); 2980095f1fc4SLee Schermerhorn char *flags = strchr(str, '='); 2981dedf2c73Szhong jiang int err = 1, mode; 2982095f1fc4SLee Schermerhorn 2983c7a91bc7SDan Carpenter if (flags) 2984c7a91bc7SDan Carpenter *flags++ = '\0'; /* terminate mode string */ 2985c7a91bc7SDan Carpenter 2986095f1fc4SLee Schermerhorn if (nodelist) { 2987095f1fc4SLee Schermerhorn /* NUL-terminate mode or flags string */ 2988095f1fc4SLee Schermerhorn *nodelist++ = '\0'; 298971fe804bSLee Schermerhorn if (nodelist_parse(nodelist, nodes)) 2990095f1fc4SLee Schermerhorn goto out; 299101f13bd6SLai Jiangshan if (!nodes_subset(nodes, node_states[N_MEMORY])) 2992095f1fc4SLee Schermerhorn goto out; 299371fe804bSLee Schermerhorn } else 299471fe804bSLee Schermerhorn nodes_clear(nodes); 299571fe804bSLee Schermerhorn 2996dedf2c73Szhong jiang mode = match_string(policy_modes, MPOL_MAX, str); 2997dedf2c73Szhong jiang if (mode < 0) 2998095f1fc4SLee Schermerhorn goto out; 2999095f1fc4SLee Schermerhorn 300071fe804bSLee Schermerhorn switch (mode) { 3001095f1fc4SLee Schermerhorn case MPOL_PREFERRED: 300271fe804bSLee Schermerhorn /* 3003aa9f7d51SRandy Dunlap * Insist on a nodelist of one node only, although later 3004aa9f7d51SRandy Dunlap * we use first_node(nodes) to grab a single node, so here 3005aa9f7d51SRandy Dunlap * nodelist (or nodes) cannot be empty. 
300671fe804bSLee Schermerhorn */ 3007095f1fc4SLee Schermerhorn if (nodelist) { 3008095f1fc4SLee Schermerhorn char *rest = nodelist; 3009095f1fc4SLee Schermerhorn while (isdigit(*rest)) 3010095f1fc4SLee Schermerhorn rest++; 3011926f2ae0SKOSAKI Motohiro if (*rest) 3012926f2ae0SKOSAKI Motohiro goto out; 3013aa9f7d51SRandy Dunlap if (nodes_empty(nodes)) 3014aa9f7d51SRandy Dunlap goto out; 3015095f1fc4SLee Schermerhorn } 3016095f1fc4SLee Schermerhorn break; 3017095f1fc4SLee Schermerhorn case MPOL_INTERLEAVE: 3018095f1fc4SLee Schermerhorn /* 3019095f1fc4SLee Schermerhorn * Default to online nodes with memory if no nodelist 3020095f1fc4SLee Schermerhorn */ 3021095f1fc4SLee Schermerhorn if (!nodelist) 302201f13bd6SLai Jiangshan nodes = node_states[N_MEMORY]; 30233f226aa1SLee Schermerhorn break; 302471fe804bSLee Schermerhorn case MPOL_LOCAL: 30253f226aa1SLee Schermerhorn /* 302671fe804bSLee Schermerhorn * Don't allow a nodelist; mpol_new() checks flags 30273f226aa1SLee Schermerhorn */ 302871fe804bSLee Schermerhorn if (nodelist) 30293f226aa1SLee Schermerhorn goto out; 30303f226aa1SLee Schermerhorn break; 3031413b43deSRavikiran G Thirumalai case MPOL_DEFAULT: 3032413b43deSRavikiran G Thirumalai /* 3033413b43deSRavikiran G Thirumalai * Insist on a empty nodelist 3034413b43deSRavikiran G Thirumalai */ 3035413b43deSRavikiran G Thirumalai if (!nodelist) 3036413b43deSRavikiran G Thirumalai err = 0; 3037413b43deSRavikiran G Thirumalai goto out; 3038b27abaccSDave Hansen case MPOL_PREFERRED_MANY: 3039d69b2e63SKOSAKI Motohiro case MPOL_BIND: 304071fe804bSLee Schermerhorn /* 3041d69b2e63SKOSAKI Motohiro * Insist on a nodelist 304271fe804bSLee Schermerhorn */ 3043d69b2e63SKOSAKI Motohiro if (!nodelist) 3044d69b2e63SKOSAKI Motohiro goto out; 3045095f1fc4SLee Schermerhorn } 3046095f1fc4SLee Schermerhorn 304771fe804bSLee Schermerhorn mode_flags = 0; 3048095f1fc4SLee Schermerhorn if (flags) { 3049095f1fc4SLee Schermerhorn /* 3050095f1fc4SLee Schermerhorn * Currently, we only support two mutually exclusive 3051095f1fc4SLee Schermerhorn * mode flags. 3052095f1fc4SLee Schermerhorn */ 3053095f1fc4SLee Schermerhorn if (!strcmp(flags, "static")) 305471fe804bSLee Schermerhorn mode_flags |= MPOL_F_STATIC_NODES; 3055095f1fc4SLee Schermerhorn else if (!strcmp(flags, "relative")) 305671fe804bSLee Schermerhorn mode_flags |= MPOL_F_RELATIVE_NODES; 3057095f1fc4SLee Schermerhorn else 3058926f2ae0SKOSAKI Motohiro goto out; 3059095f1fc4SLee Schermerhorn } 306071fe804bSLee Schermerhorn 306171fe804bSLee Schermerhorn new = mpol_new(mode, mode_flags, &nodes); 306271fe804bSLee Schermerhorn if (IS_ERR(new)) 3063926f2ae0SKOSAKI Motohiro goto out; 3064926f2ae0SKOSAKI Motohiro 3065f2a07f40SHugh Dickins /* 3066f2a07f40SHugh Dickins * Save nodes for mpol_to_str() to show the tmpfs mount options 3067f2a07f40SHugh Dickins * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo. 3068f2a07f40SHugh Dickins */ 3069269fbe72SBen Widawsky if (mode != MPOL_PREFERRED) { 3070269fbe72SBen Widawsky new->nodes = nodes; 3071269fbe72SBen Widawsky } else if (nodelist) { 3072269fbe72SBen Widawsky nodes_clear(new->nodes); 3073269fbe72SBen Widawsky node_set(first_node(nodes), new->nodes); 3074269fbe72SBen Widawsky } else { 30757858d7bcSFeng Tang new->mode = MPOL_LOCAL; 3076269fbe72SBen Widawsky } 3077f2a07f40SHugh Dickins 3078f2a07f40SHugh Dickins /* 3079f2a07f40SHugh Dickins * Save nodes for contextualization: this will be used to "clone" 3080f2a07f40SHugh Dickins * the mempolicy in a specific context [cpuset] at a later time. 
3081f2a07f40SHugh Dickins */ 3082e17f74afSLee Schermerhorn new->w.user_nodemask = nodes; 3083f2a07f40SHugh Dickins 3084926f2ae0SKOSAKI Motohiro err = 0; 308571fe804bSLee Schermerhorn 3086095f1fc4SLee Schermerhorn out: 3087095f1fc4SLee Schermerhorn /* Restore string for error message */ 3088095f1fc4SLee Schermerhorn if (nodelist) 3089095f1fc4SLee Schermerhorn *--nodelist = ':'; 3090095f1fc4SLee Schermerhorn if (flags) 3091095f1fc4SLee Schermerhorn *--flags = '='; 309271fe804bSLee Schermerhorn if (!err) 309371fe804bSLee Schermerhorn *mpol = new; 3094095f1fc4SLee Schermerhorn return err; 3095095f1fc4SLee Schermerhorn } 3096095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */ 3097095f1fc4SLee Schermerhorn 309871fe804bSLee Schermerhorn /** 309971fe804bSLee Schermerhorn * mpol_to_str - format a mempolicy structure for printing 310071fe804bSLee Schermerhorn * @buffer: to contain formatted mempolicy string 310171fe804bSLee Schermerhorn * @maxlen: length of @buffer 310271fe804bSLee Schermerhorn * @pol: pointer to mempolicy to be formatted 310371fe804bSLee Schermerhorn * 3104948927eeSDavid Rientjes * Convert @pol into a string. If @buffer is too short, truncate the string. 3105948927eeSDavid Rientjes * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the 3106948927eeSDavid Rientjes * longest flag, "relative", and to display at least a few node ids. 31071a75a6c8SChristoph Lameter */ 3108948927eeSDavid Rientjes void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol) 31091a75a6c8SChristoph Lameter { 31101a75a6c8SChristoph Lameter char *p = buffer; 3111948927eeSDavid Rientjes nodemask_t nodes = NODE_MASK_NONE; 3112948927eeSDavid Rientjes unsigned short mode = MPOL_DEFAULT; 3113948927eeSDavid Rientjes unsigned short flags = 0; 31141a75a6c8SChristoph Lameter 31158790c71aSDavid Rientjes if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) { 3116bea904d5SLee Schermerhorn mode = pol->mode; 3117948927eeSDavid Rientjes flags = pol->flags; 3118948927eeSDavid Rientjes } 3119bea904d5SLee Schermerhorn 31201a75a6c8SChristoph Lameter switch (mode) { 31211a75a6c8SChristoph Lameter case MPOL_DEFAULT: 31227858d7bcSFeng Tang case MPOL_LOCAL: 31231a75a6c8SChristoph Lameter break; 31241a75a6c8SChristoph Lameter case MPOL_PREFERRED: 3125b27abaccSDave Hansen case MPOL_PREFERRED_MANY: 31261a75a6c8SChristoph Lameter case MPOL_BIND: 31271a75a6c8SChristoph Lameter case MPOL_INTERLEAVE: 3128269fbe72SBen Widawsky nodes = pol->nodes; 31291a75a6c8SChristoph Lameter break; 31301a75a6c8SChristoph Lameter default: 3131948927eeSDavid Rientjes WARN_ON_ONCE(1); 3132948927eeSDavid Rientjes snprintf(p, maxlen, "unknown"); 3133948927eeSDavid Rientjes return; 31341a75a6c8SChristoph Lameter } 31351a75a6c8SChristoph Lameter 3136b7a9f420SDavid Rientjes p += snprintf(p, maxlen, "%s", policy_modes[mode]); 31371a75a6c8SChristoph Lameter 3138fc36b8d3SLee Schermerhorn if (flags & MPOL_MODE_FLAGS) { 3139948927eeSDavid Rientjes p += snprintf(p, buffer + maxlen - p, "="); 3140f5b087b5SDavid Rientjes 31412291990aSLee Schermerhorn /* 31422291990aSLee Schermerhorn * Currently, the only defined flags are mutually exclusive 31432291990aSLee Schermerhorn */ 3144f5b087b5SDavid Rientjes if (flags & MPOL_F_STATIC_NODES) 31452291990aSLee Schermerhorn p += snprintf(p, buffer + maxlen - p, "static"); 31462291990aSLee Schermerhorn else if (flags & MPOL_F_RELATIVE_NODES) 31472291990aSLee Schermerhorn p += snprintf(p, buffer + maxlen - p, "relative"); 3148f5b087b5SDavid Rientjes } 3149f5b087b5SDavid Rientjes 
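/*
 * Finally append the nodelist, if any.  For instance, a bind policy over
 * nodes 0 and 2 carrying the static flag renders as "bind=static:0,2"
 * (the node numbers here are only an illustration).
 */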
31509e763e0fSTejun Heo if (!nodes_empty(nodes)) 31519e763e0fSTejun Heo p += scnprintf(p, buffer + maxlen - p, ":%*pbl", 31529e763e0fSTejun Heo nodemask_pr_args(&nodes)); 31531a75a6c8SChristoph Lameter } 3154
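/*
 * Illustrative sketch of how a tmpfs "mpol=" mount-option string of the
 * form "<mode>[=<flags>][:<nodelist>]" round-trips through
 * mpol_parse_str() and mpol_to_str() above.  The buffer size and node
 * numbers are hypothetical, and the nodes are assumed to be online with
 * memory; only functions defined in this file are used.
 *
 *	char str[] = "interleave:0-3";		(must be writable; the parser
 *						 temporarily NUL-terminates it)
 *	char buf[64];
 *	struct mempolicy *mpol;
 *
 *	if (!mpol_parse_str(str, &mpol)) {	(returns 0 on success)
 *		mpol_to_str(buf, sizeof(buf), mpol);	(buf: "interleave:0-3")
 *		mpol_put(mpol);
 *	}
 *
 * Other forms accepted by the parser above include "bind=static:0,2",
 * "prefer=relative:1" and "local"; "default" must not carry a nodelist.
 */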