/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave	Allocate memory interleaved over a set of nodes,
 *		with normal fallback if it fails.
 *		For VMA based allocations this interleaves based on the
 *		offset into the backing object or offset into the mapping
 *		for anonymous memory. For process policy a process counter
 *		is used.
 *
 * bind		Only allocate memory on a specific set of nodes,
 *		no fallback.
 *		FIXME: memory is allocated starting with the first node
 *		to the last. It would be better if bind would truly restrict
 *		the allocation to memory nodes instead
 *
 * preferred	Try a specific node first before normal fallback.
 *		As a special case NUMA_NO_NODE here means do the allocation
 *		on the local CPU. This is normally identical to default,
 *		but useful to set in a VMA when you have a non default
 *		process policy.
 *
 * default	Allocate on the local node first, or when on a VMA
 *		use the process policy. This is what Linux always did
 *		in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem, kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
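
/*
 * Illustrative userspace sketch (editorial, not part of the kernel build)
 * of how the policies above are requested through the set_mempolicy(2)
 * and mbind(2) syscalls that this file implements.  Assumes the libnuma
 * <numaif.h> syscall wrappers; node numbers are arbitrary examples and
 * error handling is omitted:
 *
 *	#include <numaif.h>
 *	#include <sys/mman.h>
 *
 *	unsigned long nodes = 0x3;	// nodemask bits: nodes 0 and 1
 *
 *	// process policy: interleave allocations across nodes 0 and 1
 *	set_mempolicy(MPOL_INTERLEAVE, &nodes, 8 * sizeof(nodes));
 *
 *	// VMA policy: bind one mapping to node 0 only, no fallback
 *	void *p = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	unsigned long node0 = 0x1;
 *	mbind(p, 1 << 20, MPOL_BIND, &node0, 8 * sizeof(node0), 0);
 *
 *	// back to the default policy: local allocation
 *	set_mempolicy(MPOL_DEFAULT, NULL, 0);
 */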

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>

#include <asm/tlbflush.h>
#include <linux/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

struct mempolicy *get_task_policy(struct task_struct *p)
{
	struct mempolicy *pol = p->mempolicy;
	int node;

	if (pol)
		return pol;

	node = numa_node_id();
	if (node != NUMA_NO_NODE) {
		pol = &preferred_node_policy[node];
		/* preferred_node_policy is not initialised early in boot */
		if (pol->mode)
			return pol;
	}

	return &default_policy;
}

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
} mpol_ops[MPOL_MAX];

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}
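
/*
 * Worked example (editorial): with MPOL_F_RELATIVE_NODES the user's
 * nodemask is interpreted relative to the set of allowed nodes.
 * nodes_fold() wraps the user's bit positions modulo the number of
 * allowed nodes, and nodes_onto() maps those positions onto the
 * allowed set:
 *
 *	orig (user)   = {0,2}
 *	rel (allowed) = {4,5,6}		nodes_weight(rel) = 3
 *	fold mod 3    -> {0,2}
 *	onto {4,5,6}  -> {4,6}		the 0th and 2nd allowed nodes
 */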

static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!nodes)
		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
	else if (nodes_empty(*nodes))
		return -EINVAL;			/* no allowed nodes */
	else
		pol->v.preferred_node = first_node(*nodes);
	return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy.  mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.  But, we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy.  May also be called holding the mmap_semaphore for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
	if (pol == NULL)
		return 0;
	/* Check N_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_MEMORY]);

	VM_BUG_ON(!nodes);
	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
		nodes = NULL;	/* explicit local allocation */
	else {
		if (pol->flags & MPOL_F_RELATIVE_NODES)
			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
		else
			nodes_and(nsc->mask2, *nodes, nsc->mask1);

		if (mpol_store_user_nodemask(pol))
			pol->w.user_nodemask = *nodes;
		else
			pol->w.cpuset_mems_allowed =
						cpuset_current_mems_allowed;
	}

	if (nodes)
		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	else
		ret = mpol_ops[pol->mode].create(pol, NULL);
	return ret;
}

/*
 * This function just creates a new policy, does some checks and simple
 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);
		}
	} else if (mode == MPOL_LOCAL) {
		if (!nodes_empty(*nodes) ||
		    (flags & MPOL_F_STATIC_NODES) ||
		    (flags & MPOL_F_RELATIVE_NODES))
			return ERR_PTR(-EINVAL);
		mode = MPOL_PREFERRED;
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	return policy;
}
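
/*
 * A minimal sketch (editorial) of the two-step construction protocol
 * described above: mpol_new() validates mode/flags and allocates, then
 * mpol_set_nodemask() contextualizes the nodemask under the caller's
 * cpuset.  This mirrors what do_set_mempolicy() below actually does;
 * error paths are elided here:
 *
 *	NODEMASK_SCRATCH(scratch);
 *	struct mempolicy *new = mpol_new(mode, flags, nodes);
 *	int err;
 *
 *	if (!IS_ERR(new)) {
 *		task_lock(current);
 *		err = mpol_set_nodemask(new, nodes, scratch);
 *		task_unlock(current);
 *	}
 *	NODEMASK_SCRATCH_FREE(scratch);
 */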

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
{
}

static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed,
			    *nodes);
		pol->w.cpuset_mems_allowed = tmp;
	}

	if (nodes_empty(tmp))
		tmp = *nodes;

	pol->v.nodes = tmp;
}

static void mpol_rebind_preferred(struct mempolicy *pol,
				  const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES) {
		int node = first_node(pol->w.user_nodemask);

		if (node_isset(node, *nodes)) {
			pol->v.preferred_node = node;
			pol->flags &= ~MPOL_F_LOCAL;
		} else
			pol->flags |= MPOL_F_LOCAL;
	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
		pol->v.preferred_node = first_node(tmp);
	} else if (!(pol->flags & MPOL_F_LOCAL)) {
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						   pol->w.cpuset_mems_allowed,
						   *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}
}
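
/*
 * Worked example (editorial) of the three rebind strategies above for
 * an MPOL_INTERLEAVE policy created on nodes {0,1} when the allowed
 * nodemask changes from {0,1} to {2,3}:
 *
 *	MPOL_F_STATIC_NODES:	user_nodemask & new = {0,1} & {2,3} = {}
 *				-> empty, so fall back to tmp = *nodes = {2,3}
 *	MPOL_F_RELATIVE_NODES:	{0,1} folded onto {2,3} = {2,3}
 *	default (remap):	{0,1} remapped {0,1} -> {2,3} = {2,3}
 */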

/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * Per-vma policies are protected by mmap_sem. Allocations using per-task
 * policies are protected by task->mems_allowed_seq to prevent a premature
 * OOM/allocation failure due to parallel nodemask modification.
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
{
	if (!pol)
		return;
	if (!mpol_store_user_nodemask(pol) &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;

	mpol_ops[pol->mode].rebind(pol, newmask);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */
void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
{
	mpol_rebind_policy(tsk->mempolicy, new);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
 */
void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new);
	up_write(&mm->mmap_sem);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_interleave,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,
	},
};

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

struct queue_pages {
	struct list_head *pagelist;
	unsigned long flags;
	nodemask_t *nmask;
	struct vm_area_struct *prev;
};

/*
 * Check if the page's nid is in qp->nmask.
 *
 * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
 * in the invert of qp->nmask.
 */
static inline bool queue_pages_required(struct page *page,
					struct queue_pages *qp)
{
	int nid = page_to_nid(page);
	unsigned long flags = qp->flags;

	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
}

/*
 * Scan through the pages, checking if they satisfy certain conditions,
 * and move them to the pagelist if they do.
 */
static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct page *page;
	struct queue_pages *qp = walk->private;
	unsigned long flags = qp->flags;
	int ret;
	pte_t *pte;
	spinlock_t *ptl;

	if (pmd_trans_huge(*pmd)) {
		ptl = pmd_lock(walk->mm, pmd);
		if (pmd_trans_huge(*pmd)) {
			page = pmd_page(*pmd);
			if (is_huge_zero_page(page)) {
				spin_unlock(ptl);
				__split_huge_pmd(vma, pmd, addr, false, NULL);
			} else {
				get_page(page);
				spin_unlock(ptl);
				lock_page(page);
				ret = split_huge_page(page);
				unlock_page(page);
				put_page(page);
				if (ret)
					return 0;
			}
		} else {
			spin_unlock(ptl);
		}
	}

	if (pmd_trans_unstable(pmd))
		return 0;
retry:
	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * vm_normal_page() filters out zero pages, but there might
		 * still be PageReserved pages to skip, perhaps in a VDSO.
		 */
		if (PageReserved(page))
			continue;
		if (!queue_pages_required(page, qp))
			continue;
		if (PageTransCompound(page)) {
			get_page(page);
			pte_unmap_unlock(pte, ptl);
			lock_page(page);
			ret = split_huge_page(page);
			unlock_page(page);
			put_page(page);
			/* Failed to split -- skip. */
			if (ret) {
				pte = pte_offset_map_lock(walk->mm, pmd,
						addr, &ptl);
				continue;
			}
			goto retry;
		}

		migrate_page_add(page, qp->pagelist, flags);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
			       unsigned long addr, unsigned long end,
			       struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
	struct queue_pages *qp = walk->private;
	unsigned long flags = qp->flags;
	struct page *page;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry))
		goto unlock;
	page = pte_page(entry);
	if (!queue_pages_required(page, qp))
		goto unlock;
	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
	if (flags & (MPOL_MF_MOVE_ALL) ||
	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
		isolate_huge_page(page, qp->pagelist);
unlock:
	spin_unlock(ptl);
#else
	BUG();
#endif
	return 0;
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * This is used to mark a range of virtual addresses to be inaccessible.
 * These are later cleared by a NUMA hinting fault. Depending on these
 * faults, pages may be migrated for better NUMA placement.
 *
 * This is assuming that NUMA faults are handled using PROT_NONE. If
 * an architecture makes a different choice, it will need further
 * changes to the core.
 */
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	int nr_updated;

	nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
	if (nr_updated)
		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);

	return nr_updated;
}
#else
static unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

static int queue_pages_test_walk(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct queue_pages *qp = walk->private;
	unsigned long endvma = vma->vm_end;
	unsigned long flags = qp->flags;

	if (!vma_migratable(vma))
		return 1;

	if (endvma > end)
		endvma = end;
	if (vma->vm_start > start)
		start = vma->vm_start;

	if (!(flags & MPOL_MF_DISCONTIG_OK)) {
		if (!vma->vm_next && vma->vm_end < end)
			return -EFAULT;
		if (qp->prev && qp->prev->vm_end < vma->vm_start)
			return -EFAULT;
	}

	qp->prev = vma;

	if (flags & MPOL_MF_LAZY) {
		/* Similar to task_numa_work, skip inaccessible VMAs */
		if (!is_vm_hugetlb_page(vma) &&
			(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) &&
			!(vma->vm_flags & VM_MIXEDMAP))
			change_prot_numa(vma, start, endvma);
		return 1;
	}

	/* queue pages from current vma */
	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
		return 0;
	return 1;
}

/*
 * Walk through page tables and collect pages to be migrated.
 *
 * If pages found in a given range are on a set of nodes (determined by
 * @nodes and @flags), they are isolated and queued to the pagelist, which
 * is passed via @private.
 */
static int
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		nodemask_t *nodes, unsigned long flags,
		struct list_head *pagelist)
{
	struct queue_pages qp = {
		.pagelist = pagelist,
		.flags = flags,
		.nmask = nodes,
		.prev = NULL,
	};
	struct mm_walk queue_pages_walk = {
		.hugetlb_entry = queue_pages_hugetlb,
		.pmd_entry = queue_pages_pte_range,
		.test_walk = queue_pages_test_walk,
		.mm = mm,
		.private = &qp,
	};

	return walk_page_range(start, end, &queue_pages_walk);
}

/*
 * Apply policy to a single VMA
 * This must be called with the mmap_sem held for writing.
 */
static int vma_replace_policy(struct vm_area_struct *vma,
				struct mempolicy *pol)
{
	int err;
	struct mempolicy *old;
	struct mempolicy *new;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	new = mpol_dup(pol);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (vma->vm_ops && vma->vm_ops->set_policy) {
		err = vma->vm_ops->set_policy(vma, new);
		if (err)
			goto err_out;
	}

	old = vma->vm_policy;
	vma->vm_policy = new; /* protected by mmap_sem */
	mpol_put(old);

	return 0;
 err_out:
	mpol_put(new);
	return err;
}
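
/*
 * Worked example (editorial) for mbind_range() below: applying a new
 * policy to [start, end) that covers the tail of one VMA and the head
 * of the next splits both at the range boundaries, so the policy is
 * replaced only on the pieces fully inside the range:
 *
 *	before:	|----vma1----|--------vma2--------|
 *	range:	        [start==========end)
 *	after:	|-vma1-|~new~|~~~~new~~~|---vma2---|
 *
 * Where neighbouring pieces end up with equal policies, vma_merge()
 * re-joins them instead of splitting.
 */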

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end, struct mempolicy *new_pol)
{
	struct vm_area_struct *next;
	struct vm_area_struct *prev;
	struct vm_area_struct *vma;
	int err = 0;
	pgoff_t pgoff;
	unsigned long vmstart;
	unsigned long vmend;

	vma = find_vma(mm, start);
	if (!vma || vma->vm_start > start)
		return -EFAULT;

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
		next = vma->vm_next;
		vmstart = max(start, vma->vm_start);
		vmend   = min(end, vma->vm_end);

		if (mpol_equal(vma_policy(vma), new_pol))
			continue;

		pgoff = vma->vm_pgoff +
			((vmstart - vma->vm_start) >> PAGE_SHIFT);
		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
				 vma->anon_vma, vma->vm_file, pgoff,
				 new_pol, vma->vm_userfaultfd_ctx);
		if (prev) {
			vma = prev;
			next = vma->vm_next;
			if (mpol_equal(vma_policy(vma), new_pol))
				continue;
			/* vma_merge() joined vma && vma->next, case 8 */
			goto replace;
		}
		if (vma->vm_start != vmstart) {
			err = split_vma(vma->vm_mm, vma, vmstart, 1);
			if (err)
				goto out;
		}
		if (vma->vm_end != vmend) {
			err = split_vma(vma->vm_mm, vma, vmend, 0);
			if (err)
				goto out;
		}
 replace:
		err = vma_replace_policy(vma, new_pol);
		if (err)
			goto out;
	}

 out:
	return err;
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new, *old;
	NODEMASK_SCRATCH(scratch);
	int ret;

	if (!scratch)
		return -ENOMEM;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new)) {
		ret = PTR_ERR(new);
		goto out;
	}

	task_lock(current);
	ret = mpol_set_nodemask(new, nodes, scratch);
	if (ret) {
		task_unlock(current);
		mpol_put(new);
		goto out;
	}
	old = current->mempolicy;
	current->mempolicy = new;
	if (new && new->mode == MPOL_INTERLEAVE)
		current->il_prev = MAX_NUMNODES-1;
	task_unlock(current);
	mpol_put(old);
	ret = 0;
out:
	NODEMASK_SCRATCH_FREE(scratch);
	return ret;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (p == &default_policy)
		return;

	switch (p->mode) {
	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		if (!(p->flags & MPOL_F_LOCAL))
			node_set(p->v.preferred_node, *nodes);
		/* else return empty node mask for local allocation */
		break;
	default:
		BUG();
	}
}

static int lookup_node(unsigned long addr)
{
	struct page *p;
	int err;

	err = get_user_pages(addr & PAGE_MASK, 1, 0, &p, NULL);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL.  We
		 * want to return MPOL_DEFAULT in this case.
		 */
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = next_node_in(current->il_prev, pol->v.nodes);
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {
			*nmask = pol->w.user_nodemask;
		} else {
			task_lock(current);
			get_policy_nodemask(pol, nmask);
			task_unlock(current);
		}
	}

 out:
	mpol_cond_put(pol);
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}
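
/*
 * Illustrative userspace queries (editorial) against do_get_mempolicy()
 * above, using the get_mempolicy(2) wrapper from libnuma's <numaif.h>;
 * error handling omitted:
 *
 *	int mode, node;
 *	unsigned long nodes;
 *
 *	// the calling thread's policy mode and nodemask
 *	get_mempolicy(&mode, &nodes, 8 * sizeof(nodes), NULL, 0);
 *
 *	// which node currently backs the page at addr
 *	get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR);
 *
 *	// the set of nodes the thread may allocate from
 *	get_mempolicy(NULL, &nodes, 8 * sizeof(nodes),
 *		      NULL, MPOL_F_MEMS_ALLOWED);
 */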

#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
		if (!isolate_lru_page(page)) {
			list_add_tail(&page->lru, pagelist);
			inc_node_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));
		}
	}
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
	if (PageHuge(page))
		return alloc_huge_page_node(page_hstate(compound_head(page)),
					node);
	else
		return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
						__GFP_THISNODE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;

	nodes_clear(nmask);
	node_set(source, nmask);

	/*
	 * This does not "check" the range but isolates all pages that
	 * need migration.  Between passing in the full user address
	 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
	 */
	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, new_node_page, NULL, dest,
					MIGRATE_SYNC, MR_SYSCALL);
		if (err)
			putback_movable_pages(&pagelist);
	}

	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	int busy = 0;
	int err;
	nodemask_t tmp;

	err = migrate_prep();
	if (err)
		return err;

	down_read(&mm->mmap_sem);

	/*
	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
	 * bit in 'tmp', and return that <source, dest> pair for migration.
	 * The pair of nodemasks 'to' and 'from' define the map.
	 *
	 * If no pair of bits is found that way, fallback to picking some
	 * pair of 'source' and 'dest' bits that are not the same.  If the
	 * 'source' and 'dest' bits are the same, this represents a node
	 * that will be migrating to itself, so no pages need move.
	 *
	 * If no bits are left in 'tmp', or if all remaining bits left
	 * in 'tmp' correspond to the same bit in 'to', return false
	 * (nothing left to migrate).
	 *
	 * This lets us pick a pair of nodes to migrate between, such that
	 * if possible the dest node is not already occupied by some other
	 * source node, minimizing the risk of overloading the memory on a
	 * node that would happen if we migrated incoming memory to a node
	 * before migrating outgoing memory from that same node.
	 *
	 * A single scan of tmp is sufficient.  As we go, we remember the
	 * most recent <s, d> pair that moved (s != d).  If we find a pair
If we find a pair 9987e2ab150SChristoph Lameter * that not only moved, but what's better, moved to an empty slot 9997e2ab150SChristoph Lameter * (d is not set in tmp), then we break out then, with that pair. 1000ae0e47f0SJustin P. Mattock * Otherwise when we finish scanning from_tmp, we at least have the 10017e2ab150SChristoph Lameter * most recent <s, d> pair that moved. If we get all the way through 10027e2ab150SChristoph Lameter * the scan of tmp without finding any node that moved, much less 10037e2ab150SChristoph Lameter * moved to an empty node, then there is nothing left worth migrating. 10047e2ab150SChristoph Lameter */ 10057e2ab150SChristoph Lameter 10060ce72d4fSAndrew Morton tmp = *from; 10077e2ab150SChristoph Lameter while (!nodes_empty(tmp)) { 10087e2ab150SChristoph Lameter int s,d; 1009b76ac7e7SJianguo Wu int source = NUMA_NO_NODE; 10107e2ab150SChristoph Lameter int dest = 0; 10117e2ab150SChristoph Lameter 10127e2ab150SChristoph Lameter for_each_node_mask(s, tmp) { 10134a5b18ccSLarry Woodman 10144a5b18ccSLarry Woodman /* 10154a5b18ccSLarry Woodman * do_migrate_pages() tries to maintain the relative 10164a5b18ccSLarry Woodman * node relationship of the pages established between 10174a5b18ccSLarry Woodman * threads and memory areas. 10184a5b18ccSLarry Woodman * 10194a5b18ccSLarry Woodman * However if the number of source nodes is not equal to 10204a5b18ccSLarry Woodman * the number of destination nodes we can not preserve 10214a5b18ccSLarry Woodman * this node relative relationship. In that case, skip 10224a5b18ccSLarry Woodman * copying memory from a node that is in the destination 10234a5b18ccSLarry Woodman * mask. 10244a5b18ccSLarry Woodman * 10254a5b18ccSLarry Woodman * Example: [2,3,4] -> [3,4,5] moves everything. 10264a5b18ccSLarry Woodman * [0-7] - > [3,4,5] moves only 0,1,2,6,7. 10274a5b18ccSLarry Woodman */ 10284a5b18ccSLarry Woodman 10290ce72d4fSAndrew Morton if ((nodes_weight(*from) != nodes_weight(*to)) && 10300ce72d4fSAndrew Morton (node_isset(s, *to))) 10314a5b18ccSLarry Woodman continue; 10324a5b18ccSLarry Woodman 10330ce72d4fSAndrew Morton d = node_remap(s, *from, *to); 10347e2ab150SChristoph Lameter if (s == d) 10357e2ab150SChristoph Lameter continue; 10367e2ab150SChristoph Lameter 10377e2ab150SChristoph Lameter source = s; /* Node moved. Memorize */ 10387e2ab150SChristoph Lameter dest = d; 10397e2ab150SChristoph Lameter 10407e2ab150SChristoph Lameter /* dest not in remaining from nodes? */ 10417e2ab150SChristoph Lameter if (!node_isset(dest, tmp)) 10427e2ab150SChristoph Lameter break; 10437e2ab150SChristoph Lameter } 1044b76ac7e7SJianguo Wu if (source == NUMA_NO_NODE) 10457e2ab150SChristoph Lameter break; 10467e2ab150SChristoph Lameter 10477e2ab150SChristoph Lameter node_clear(source, tmp); 10487e2ab150SChristoph Lameter err = migrate_to_node(mm, source, dest, flags); 10497e2ab150SChristoph Lameter if (err > 0) 10507e2ab150SChristoph Lameter busy += err; 10517e2ab150SChristoph Lameter if (err < 0) 10527e2ab150SChristoph Lameter break; 105339743889SChristoph Lameter } 105439743889SChristoph Lameter up_read(&mm->mmap_sem); 10557e2ab150SChristoph Lameter if (err < 0) 10567e2ab150SChristoph Lameter return err; 10577e2ab150SChristoph Lameter return busy; 1058b20a3503SChristoph Lameter 105939743889SChristoph Lameter } 106039743889SChristoph Lameter 10613ad33b24SLee Schermerhorn /* 10623ad33b24SLee Schermerhorn * Allocate a new page for page migration based on vma policy. 

/*
 * Allocate a new page for page migration based on vma policy.
 * Start by assuming the page is mapped by the same vma as contains @start.
 * Search forward from there, if not.  N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
 */
static struct page *new_page(struct page *page, unsigned long start, int **x)
{
	struct vm_area_struct *vma;
	unsigned long uninitialized_var(address);

	vma = find_vma(current->mm, start);
	while (vma) {
		address = page_address_in_vma(page, vma);
		if (address != -EFAULT)
			break;
		vma = vma->vm_next;
	}

	if (PageHuge(page)) {
		BUG_ON(!vma);
		return alloc_huge_page_noerr(vma, address, 1);
	}
	/*
	 * if !vma, alloc_page_vma() will use task or system default policy
	 */
	return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
			vma, address);
}
#else

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
}

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	return -ENOSYS;
}

static struct page *new_page(struct page *page, unsigned long start, int **x)
{
	return NULL;
}
#endif

static long do_mbind(unsigned long start, unsigned long len,
		     unsigned short mode, unsigned short mode_flags,
		     nodemask_t *nmask, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct mempolicy *new;
	unsigned long end;
	int err;
	LIST_HEAD(pagelist);

	if (flags & ~(unsigned long)MPOL_MF_VALID)
		return -EINVAL;
	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	if (start & ~PAGE_MASK)
		return -EINVAL;

	if (mode == MPOL_DEFAULT)
~MPOL_MF_STRICT; 11306ce3c4c0SChristoph Lameter 11316ce3c4c0SChristoph Lameter len = (len + PAGE_SIZE - 1) & PAGE_MASK; 11326ce3c4c0SChristoph Lameter end = start + len; 11336ce3c4c0SChristoph Lameter 11346ce3c4c0SChristoph Lameter if (end < start) 11356ce3c4c0SChristoph Lameter return -EINVAL; 11366ce3c4c0SChristoph Lameter if (end == start) 11376ce3c4c0SChristoph Lameter return 0; 11386ce3c4c0SChristoph Lameter 1139028fec41SDavid Rientjes new = mpol_new(mode, mode_flags, nmask); 11406ce3c4c0SChristoph Lameter if (IS_ERR(new)) 11416ce3c4c0SChristoph Lameter return PTR_ERR(new); 11426ce3c4c0SChristoph Lameter 1143b24f53a0SLee Schermerhorn if (flags & MPOL_MF_LAZY) 1144b24f53a0SLee Schermerhorn new->flags |= MPOL_F_MOF; 1145b24f53a0SLee Schermerhorn 11466ce3c4c0SChristoph Lameter /* 11476ce3c4c0SChristoph Lameter * If we are using the default policy then operation 11486ce3c4c0SChristoph Lameter * on discontinuous address spaces is okay after all 11496ce3c4c0SChristoph Lameter */ 11506ce3c4c0SChristoph Lameter if (!new) 11516ce3c4c0SChristoph Lameter flags |= MPOL_MF_DISCONTIG_OK; 11526ce3c4c0SChristoph Lameter 1153028fec41SDavid Rientjes pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n", 1154028fec41SDavid Rientjes start, start + len, mode, mode_flags, 115500ef2d2fSDavid Rientjes nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE); 11566ce3c4c0SChristoph Lameter 11570aedadf9SChristoph Lameter if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 11580aedadf9SChristoph Lameter 11590aedadf9SChristoph Lameter err = migrate_prep(); 11600aedadf9SChristoph Lameter if (err) 1161b05ca738SKOSAKI Motohiro goto mpol_out; 11620aedadf9SChristoph Lameter } 11634bfc4495SKAMEZAWA Hiroyuki { 11644bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 11654bfc4495SKAMEZAWA Hiroyuki if (scratch) { 11666ce3c4c0SChristoph Lameter down_write(&mm->mmap_sem); 116758568d2aSMiao Xie task_lock(current); 11684bfc4495SKAMEZAWA Hiroyuki err = mpol_set_nodemask(new, nmask, scratch); 116958568d2aSMiao Xie task_unlock(current); 11704bfc4495SKAMEZAWA Hiroyuki if (err) 117158568d2aSMiao Xie up_write(&mm->mmap_sem); 11724bfc4495SKAMEZAWA Hiroyuki } else 11734bfc4495SKAMEZAWA Hiroyuki err = -ENOMEM; 11744bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 11754bfc4495SKAMEZAWA Hiroyuki } 1176b05ca738SKOSAKI Motohiro if (err) 1177b05ca738SKOSAKI Motohiro goto mpol_out; 1178b05ca738SKOSAKI Motohiro 1179d05f0cdcSHugh Dickins err = queue_pages_range(mm, start, end, nmask, 11806ce3c4c0SChristoph Lameter flags | MPOL_MF_INVERT, &pagelist); 1181d05f0cdcSHugh Dickins if (!err) 11829d8cebd4SKOSAKI Motohiro err = mbind_range(mm, start, end, new); 11837e2ab150SChristoph Lameter 1184b24f53a0SLee Schermerhorn if (!err) { 1185b24f53a0SLee Schermerhorn int nr_failed = 0; 1186b24f53a0SLee Schermerhorn 1187cf608ac1SMinchan Kim if (!list_empty(&pagelist)) { 1188b24f53a0SLee Schermerhorn WARN_ON_ONCE(flags & MPOL_MF_LAZY); 1189d05f0cdcSHugh Dickins nr_failed = migrate_pages(&pagelist, new_page, NULL, 1190d05f0cdcSHugh Dickins start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND); 1191cf608ac1SMinchan Kim if (nr_failed) 119274060e4dSNaoya Horiguchi putback_movable_pages(&pagelist); 1193cf608ac1SMinchan Kim } 11946ce3c4c0SChristoph Lameter 1195b24f53a0SLee Schermerhorn if (nr_failed && (flags & MPOL_MF_STRICT)) 11966ce3c4c0SChristoph Lameter err = -EIO; 1197ab8a3e14SKOSAKI Motohiro } else 1198b0e5fd73SJoonsoo Kim putback_movable_pages(&pagelist); 1199b20a3503SChristoph Lameter 12006ce3c4c0SChristoph Lameter up_write(&mm->mmap_sem); 1201b05ca738SKOSAKI Motohiro 
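/*
 * Illustrative sketch, not from this file: the path above is what a
 * userspace caller exercises through the mbind(2) syscall, e.g.
 * binding a fresh anonymous mapping to node 2 (assuming such a node
 * exists) and migrating any misplaced pages:
 *
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	unsigned long mask = 1UL << 2;
 *	mbind(p, len, MPOL_BIND, &mask, 8 * sizeof(mask),
 *	      MPOL_MF_MOVE | MPOL_MF_STRICT);
 *
 * With MPOL_MF_STRICT set, pages that could not be migrated make the
 * call fail with -EIO, matching the nr_failed handling above.
 */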
mpol_out: 1202f0be3d32SLee Schermerhorn mpol_put(new); 12036ce3c4c0SChristoph Lameter return err; 12046ce3c4c0SChristoph Lameter } 12056ce3c4c0SChristoph Lameter 120639743889SChristoph Lameter /* 12078bccd85fSChristoph Lameter * User space interface with variable sized bitmaps for nodelists. 12088bccd85fSChristoph Lameter */ 12098bccd85fSChristoph Lameter 12108bccd85fSChristoph Lameter /* Copy a node mask from user space. */ 121139743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask, 12128bccd85fSChristoph Lameter unsigned long maxnode) 12138bccd85fSChristoph Lameter { 12148bccd85fSChristoph Lameter unsigned long k; 12158bccd85fSChristoph Lameter unsigned long nlongs; 12168bccd85fSChristoph Lameter unsigned long endmask; 12178bccd85fSChristoph Lameter 12188bccd85fSChristoph Lameter --maxnode; 12198bccd85fSChristoph Lameter nodes_clear(*nodes); 12208bccd85fSChristoph Lameter if (maxnode == 0 || !nmask) 12218bccd85fSChristoph Lameter return 0; 1222a9c930baSAndi Kleen if (maxnode > PAGE_SIZE*BITS_PER_BYTE) 1223636f13c1SChris Wright return -EINVAL; 12248bccd85fSChristoph Lameter 12258bccd85fSChristoph Lameter nlongs = BITS_TO_LONGS(maxnode); 12268bccd85fSChristoph Lameter if ((maxnode % BITS_PER_LONG) == 0) 12278bccd85fSChristoph Lameter endmask = ~0UL; 12288bccd85fSChristoph Lameter else 12298bccd85fSChristoph Lameter endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1; 12308bccd85fSChristoph Lameter 12318bccd85fSChristoph Lameter /* When the user specified more nodes than supported just check 12328bccd85fSChristoph Lameter if the non supported part is all zero. */ 12338bccd85fSChristoph Lameter if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) { 12348bccd85fSChristoph Lameter if (nlongs > PAGE_SIZE/sizeof(long)) 12358bccd85fSChristoph Lameter return -EINVAL; 12368bccd85fSChristoph Lameter for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) { 12378bccd85fSChristoph Lameter unsigned long t; 12388bccd85fSChristoph Lameter if (get_user(t, nmask + k)) 12398bccd85fSChristoph Lameter return -EFAULT; 12408bccd85fSChristoph Lameter if (k == nlongs - 1) { 12418bccd85fSChristoph Lameter if (t & endmask) 12428bccd85fSChristoph Lameter return -EINVAL; 12438bccd85fSChristoph Lameter } else if (t) 12448bccd85fSChristoph Lameter return -EINVAL; 12458bccd85fSChristoph Lameter } 12468bccd85fSChristoph Lameter nlongs = BITS_TO_LONGS(MAX_NUMNODES); 12478bccd85fSChristoph Lameter endmask = ~0UL; 12488bccd85fSChristoph Lameter } 12498bccd85fSChristoph Lameter 12508bccd85fSChristoph Lameter if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long))) 12518bccd85fSChristoph Lameter return -EFAULT; 12528bccd85fSChristoph Lameter nodes_addr(*nodes)[nlongs-1] &= endmask; 12538bccd85fSChristoph Lameter return 0; 12548bccd85fSChristoph Lameter } 12558bccd85fSChristoph Lameter 12568bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */ 12578bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode, 12588bccd85fSChristoph Lameter nodemask_t *nodes) 12598bccd85fSChristoph Lameter { 12608bccd85fSChristoph Lameter unsigned long copy = ALIGN(maxnode-1, 64) / 8; 12618bccd85fSChristoph Lameter const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long); 12628bccd85fSChristoph Lameter 12638bccd85fSChristoph Lameter if (copy > nbytes) { 12648bccd85fSChristoph Lameter if (copy > PAGE_SIZE) 12658bccd85fSChristoph Lameter return -EINVAL; 12668bccd85fSChristoph Lameter if (clear_user((char __user *)mask + nbytes, 
copy - nbytes)) 12678bccd85fSChristoph Lameter return -EFAULT; 12688bccd85fSChristoph Lameter copy = nbytes; 12698bccd85fSChristoph Lameter } 12708bccd85fSChristoph Lameter return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0; 12718bccd85fSChristoph Lameter } 12728bccd85fSChristoph Lameter 1273938bb9f5SHeiko Carstens SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len, 1274f7f28ca9SRasmus Villemoes unsigned long, mode, const unsigned long __user *, nmask, 1275938bb9f5SHeiko Carstens unsigned long, maxnode, unsigned, flags) 12768bccd85fSChristoph Lameter { 12778bccd85fSChristoph Lameter nodemask_t nodes; 12788bccd85fSChristoph Lameter int err; 1279028fec41SDavid Rientjes unsigned short mode_flags; 12808bccd85fSChristoph Lameter 1281028fec41SDavid Rientjes mode_flags = mode & MPOL_MODE_FLAGS; 1282028fec41SDavid Rientjes mode &= ~MPOL_MODE_FLAGS; 1283a3b51e01SDavid Rientjes if (mode >= MPOL_MAX) 1284a3b51e01SDavid Rientjes return -EINVAL; 12854c50bc01SDavid Rientjes if ((mode_flags & MPOL_F_STATIC_NODES) && 12864c50bc01SDavid Rientjes (mode_flags & MPOL_F_RELATIVE_NODES)) 12874c50bc01SDavid Rientjes return -EINVAL; 12888bccd85fSChristoph Lameter err = get_nodes(&nodes, nmask, maxnode); 12898bccd85fSChristoph Lameter if (err) 12908bccd85fSChristoph Lameter return err; 1291028fec41SDavid Rientjes return do_mbind(start, len, mode, mode_flags, &nodes, flags); 12928bccd85fSChristoph Lameter } 12938bccd85fSChristoph Lameter 12948bccd85fSChristoph Lameter /* Set the process memory policy */ 129523c8902dSRasmus Villemoes SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask, 1296938bb9f5SHeiko Carstens unsigned long, maxnode) 12978bccd85fSChristoph Lameter { 12988bccd85fSChristoph Lameter int err; 12998bccd85fSChristoph Lameter nodemask_t nodes; 1300028fec41SDavid Rientjes unsigned short flags; 13018bccd85fSChristoph Lameter 1302028fec41SDavid Rientjes flags = mode & MPOL_MODE_FLAGS; 1303028fec41SDavid Rientjes mode &= ~MPOL_MODE_FLAGS; 1304028fec41SDavid Rientjes if ((unsigned int)mode >= MPOL_MAX) 13058bccd85fSChristoph Lameter return -EINVAL; 13064c50bc01SDavid Rientjes if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES)) 13074c50bc01SDavid Rientjes return -EINVAL; 13088bccd85fSChristoph Lameter err = get_nodes(&nodes, nmask, maxnode); 13098bccd85fSChristoph Lameter if (err) 13108bccd85fSChristoph Lameter return err; 1311028fec41SDavid Rientjes return do_set_mempolicy(mode, flags, &nodes); 13128bccd85fSChristoph Lameter } 13138bccd85fSChristoph Lameter 1314938bb9f5SHeiko Carstens SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode, 1315938bb9f5SHeiko Carstens const unsigned long __user *, old_nodes, 1316938bb9f5SHeiko Carstens const unsigned long __user *, new_nodes) 131739743889SChristoph Lameter { 1318c69e8d9cSDavid Howells const struct cred *cred = current_cred(), *tcred; 1319596d7cfaSKOSAKI Motohiro struct mm_struct *mm = NULL; 132039743889SChristoph Lameter struct task_struct *task; 132139743889SChristoph Lameter nodemask_t task_nodes; 132239743889SChristoph Lameter int err; 1323596d7cfaSKOSAKI Motohiro nodemask_t *old; 1324596d7cfaSKOSAKI Motohiro nodemask_t *new; 1325596d7cfaSKOSAKI Motohiro NODEMASK_SCRATCH(scratch); 132639743889SChristoph Lameter 1327596d7cfaSKOSAKI Motohiro if (!scratch) 1328596d7cfaSKOSAKI Motohiro return -ENOMEM; 132939743889SChristoph Lameter 1330596d7cfaSKOSAKI Motohiro old = &scratch->mask1; 1331596d7cfaSKOSAKI Motohiro new = &scratch->mask2; 1332596d7cfaSKOSAKI Motohiro 
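/*
 * Illustrative sketch, not from this file: userspace typically drives
 * this syscall via the raw syscall interface or libnuma's
 * numa_migrate_pages() wrapper, e.g. moving a task's pages from
 * node 0 to node 1:
 *
 *	unsigned long from = 1UL << 0;
 *	unsigned long to = 1UL << 1;
 *	syscall(__NR_migrate_pages, pid, 8 * sizeof(from), &from, &to);
 *
 * The credential and cpuset checks below decide whether the caller is
 * allowed to move the target task's pages at all.
 */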
1333596d7cfaSKOSAKI Motohiro err = get_nodes(old, old_nodes, maxnode); 133439743889SChristoph Lameter if (err) 1335596d7cfaSKOSAKI Motohiro goto out; 1336596d7cfaSKOSAKI Motohiro 1337596d7cfaSKOSAKI Motohiro err = get_nodes(new, new_nodes, maxnode); 1338596d7cfaSKOSAKI Motohiro if (err) 1339596d7cfaSKOSAKI Motohiro goto out; 134039743889SChristoph Lameter 134139743889SChristoph Lameter /* Find the mm_struct */ 134255cfaa3cSZeng Zhaoming rcu_read_lock(); 1343228ebcbeSPavel Emelyanov task = pid ? find_task_by_vpid(pid) : current; 134439743889SChristoph Lameter if (!task) { 134555cfaa3cSZeng Zhaoming rcu_read_unlock(); 1346596d7cfaSKOSAKI Motohiro err = -ESRCH; 1347596d7cfaSKOSAKI Motohiro goto out; 134839743889SChristoph Lameter } 13493268c63eSChristoph Lameter get_task_struct(task); 135039743889SChristoph Lameter 1351596d7cfaSKOSAKI Motohiro err = -EINVAL; 135239743889SChristoph Lameter 135339743889SChristoph Lameter /* 135439743889SChristoph Lameter * Check if this process has the right to modify the specified 135539743889SChristoph Lameter * process. The right exists if the process has administrative 13567f927fccSAlexey Dobriyan * capabilities, superuser privileges or the same 135739743889SChristoph Lameter * userid as the target process. 135839743889SChristoph Lameter */ 1359c69e8d9cSDavid Howells tcred = __task_cred(task); 1360b38a86ebSEric W. Biederman if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) && 1361b38a86ebSEric W. Biederman !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) && 136274c00241SChristoph Lameter !capable(CAP_SYS_NICE)) { 1363c69e8d9cSDavid Howells rcu_read_unlock(); 136439743889SChristoph Lameter err = -EPERM; 13653268c63eSChristoph Lameter goto out_put; 136639743889SChristoph Lameter } 1367c69e8d9cSDavid Howells rcu_read_unlock(); 136839743889SChristoph Lameter 136939743889SChristoph Lameter task_nodes = cpuset_mems_allowed(task); 137039743889SChristoph Lameter /* Is the user allowed to access the target nodes? */ 1371596d7cfaSKOSAKI Motohiro if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) { 137239743889SChristoph Lameter err = -EPERM; 13733268c63eSChristoph Lameter goto out_put; 137439743889SChristoph Lameter } 137539743889SChristoph Lameter 137601f13bd6SLai Jiangshan if (!nodes_subset(*new, node_states[N_MEMORY])) { 13773b42d28bSChristoph Lameter err = -EINVAL; 13783268c63eSChristoph Lameter goto out_put; 13793b42d28bSChristoph Lameter } 13803b42d28bSChristoph Lameter 138186c3a764SDavid Quigley err = security_task_movememory(task); 138286c3a764SDavid Quigley if (err) 13833268c63eSChristoph Lameter goto out_put; 138486c3a764SDavid Quigley 13853268c63eSChristoph Lameter mm = get_task_mm(task); 13863268c63eSChristoph Lameter put_task_struct(task); 1387f2a9ef88SSasha Levin 1388f2a9ef88SSasha Levin if (!mm) { 1389f2a9ef88SSasha Levin err = -EINVAL; 1390f2a9ef88SSasha Levin goto out; 1391f2a9ef88SSasha Levin } 1392f2a9ef88SSasha Levin 1393596d7cfaSKOSAKI Motohiro err = do_migrate_pages(mm, old, new, 139474c00241SChristoph Lameter capable(CAP_SYS_NICE) ? 
MPOL_MF_MOVE_ALL : MPOL_MF_MOVE); 13953268c63eSChristoph Lameter 139639743889SChristoph Lameter mmput(mm); 13973268c63eSChristoph Lameter out: 1398596d7cfaSKOSAKI Motohiro NODEMASK_SCRATCH_FREE(scratch); 1399596d7cfaSKOSAKI Motohiro 140039743889SChristoph Lameter return err; 14013268c63eSChristoph Lameter 14023268c63eSChristoph Lameter out_put: 14033268c63eSChristoph Lameter put_task_struct(task); 14043268c63eSChristoph Lameter goto out; 14053268c63eSChristoph Lameter 140639743889SChristoph Lameter } 140739743889SChristoph Lameter 140839743889SChristoph Lameter 14098bccd85fSChristoph Lameter /* Retrieve NUMA policy */ 1410938bb9f5SHeiko Carstens SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, 1411938bb9f5SHeiko Carstens unsigned long __user *, nmask, unsigned long, maxnode, 1412938bb9f5SHeiko Carstens unsigned long, addr, unsigned long, flags) 14138bccd85fSChristoph Lameter { 1414dbcb0f19SAdrian Bunk int err; 1415dbcb0f19SAdrian Bunk int uninitialized_var(pval); 14168bccd85fSChristoph Lameter nodemask_t nodes; 14178bccd85fSChristoph Lameter 14188bccd85fSChristoph Lameter if (nmask != NULL && maxnode < MAX_NUMNODES) 14198bccd85fSChristoph Lameter return -EINVAL; 14208bccd85fSChristoph Lameter 14218bccd85fSChristoph Lameter err = do_get_mempolicy(&pval, &nodes, addr, flags); 14228bccd85fSChristoph Lameter 14238bccd85fSChristoph Lameter if (err) 14248bccd85fSChristoph Lameter return err; 14258bccd85fSChristoph Lameter 14268bccd85fSChristoph Lameter if (policy && put_user(pval, policy)) 14278bccd85fSChristoph Lameter return -EFAULT; 14288bccd85fSChristoph Lameter 14298bccd85fSChristoph Lameter if (nmask) 14308bccd85fSChristoph Lameter err = copy_nodes_to_user(nmask, maxnode, &nodes); 14318bccd85fSChristoph Lameter 14328bccd85fSChristoph Lameter return err; 14338bccd85fSChristoph Lameter } 14348bccd85fSChristoph Lameter 14351da177e4SLinus Torvalds #ifdef CONFIG_COMPAT 14361da177e4SLinus Torvalds 1437c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, 1438c93e0f6cSHeiko Carstens compat_ulong_t __user *, nmask, 1439c93e0f6cSHeiko Carstens compat_ulong_t, maxnode, 1440c93e0f6cSHeiko Carstens compat_ulong_t, addr, compat_ulong_t, flags) 14411da177e4SLinus Torvalds { 14421da177e4SLinus Torvalds long err; 14431da177e4SLinus Torvalds unsigned long __user *nm = NULL; 14441da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 14451da177e4SLinus Torvalds DECLARE_BITMAP(bm, MAX_NUMNODES); 14461da177e4SLinus Torvalds 14471da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 14481da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 14491da177e4SLinus Torvalds 14501da177e4SLinus Torvalds if (nmask) 14511da177e4SLinus Torvalds nm = compat_alloc_user_space(alloc_size); 14521da177e4SLinus Torvalds 14531da177e4SLinus Torvalds err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags); 14541da177e4SLinus Torvalds 14551da177e4SLinus Torvalds if (!err && nmask) { 14562bbff6c7SKAMEZAWA Hiroyuki unsigned long copy_size; 14572bbff6c7SKAMEZAWA Hiroyuki copy_size = min_t(unsigned long, sizeof(bm), alloc_size); 14582bbff6c7SKAMEZAWA Hiroyuki err = copy_from_user(bm, nm, copy_size); 14591da177e4SLinus Torvalds /* ensure entire bitmap is zeroed */ 14601da177e4SLinus Torvalds err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8); 14611da177e4SLinus Torvalds err |= compat_put_bitmap(nmask, bm, nr_bits); 14621da177e4SLinus Torvalds } 14631da177e4SLinus Torvalds 14641da177e4SLinus Torvalds return err; 14651da177e4SLinus Torvalds } 
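/*
 * Worked example of the nr_bits/alloc_size computation shared by these
 * compat wrappers (illustrative, assuming MAX_NUMNODES == 1024):
 * maxnode == 1025 yields nr_bits == 1024 and alloc_size == 128 bytes,
 * i.e. sixteen native 64-bit longs covering the same bits as
 * thirty-two compat 32-bit words.
 */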
14661da177e4SLinus Torvalds 1467c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask, 1468c93e0f6cSHeiko Carstens compat_ulong_t, maxnode) 14691da177e4SLinus Torvalds { 14701da177e4SLinus Torvalds unsigned long __user *nm = NULL; 14711da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 14721da177e4SLinus Torvalds DECLARE_BITMAP(bm, MAX_NUMNODES); 14731da177e4SLinus Torvalds 14741da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 14751da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 14761da177e4SLinus Torvalds 14771da177e4SLinus Torvalds if (nmask) { 1478cf01fb99SChris Salls if (compat_get_bitmap(bm, nmask, nr_bits)) 14791da177e4SLinus Torvalds return -EFAULT; 1480cf01fb99SChris Salls nm = compat_alloc_user_space(alloc_size); 1481cf01fb99SChris Salls if (copy_to_user(nm, bm, alloc_size)) 1482cf01fb99SChris Salls return -EFAULT; 1483cf01fb99SChris Salls } 14841da177e4SLinus Torvalds 14851da177e4SLinus Torvalds return sys_set_mempolicy(mode, nm, nr_bits+1); 14861da177e4SLinus Torvalds } 14871da177e4SLinus Torvalds 1488c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len, 1489c93e0f6cSHeiko Carstens compat_ulong_t, mode, compat_ulong_t __user *, nmask, 1490c93e0f6cSHeiko Carstens compat_ulong_t, maxnode, compat_ulong_t, flags) 14911da177e4SLinus Torvalds { 14921da177e4SLinus Torvalds unsigned long __user *nm = NULL; 14931da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 1494dfcd3c0dSAndi Kleen nodemask_t bm; 14951da177e4SLinus Torvalds 14961da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 14971da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 14981da177e4SLinus Torvalds 14991da177e4SLinus Torvalds if (nmask) { 1500cf01fb99SChris Salls if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits)) 15011da177e4SLinus Torvalds return -EFAULT; 1502cf01fb99SChris Salls nm = compat_alloc_user_space(alloc_size); 1503cf01fb99SChris Salls if (copy_to_user(nm, nodes_addr(bm), alloc_size)) 1504cf01fb99SChris Salls return -EFAULT; 1505cf01fb99SChris Salls } 15061da177e4SLinus Torvalds 15071da177e4SLinus Torvalds return sys_mbind(start, len, mode, nm, nr_bits+1, flags); 15081da177e4SLinus Torvalds } 15091da177e4SLinus Torvalds 15101da177e4SLinus Torvalds #endif 15111da177e4SLinus Torvalds 151274d2c3a0SOleg Nesterov struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, 151374d2c3a0SOleg Nesterov unsigned long addr) 15141da177e4SLinus Torvalds { 15158d90274bSOleg Nesterov struct mempolicy *pol = NULL; 15161da177e4SLinus Torvalds 15171da177e4SLinus Torvalds if (vma) { 1518480eccf9SLee Schermerhorn if (vma->vm_ops && vma->vm_ops->get_policy) { 15198d90274bSOleg Nesterov pol = vma->vm_ops->get_policy(vma, addr); 152000442ad0SMel Gorman } else if (vma->vm_policy) { 15211da177e4SLinus Torvalds pol = vma->vm_policy; 152200442ad0SMel Gorman 152300442ad0SMel Gorman /* 152400442ad0SMel Gorman * shmem_alloc_page() passes MPOL_F_SHARED policy with 152500442ad0SMel Gorman * a pseudo vma whose vma->vm_ops=NULL. 
Take a reference 152600442ad0SMel Gorman * count on these policies which will be dropped by 152700442ad0SMel Gorman * mpol_cond_put() later 152800442ad0SMel Gorman */ 152900442ad0SMel Gorman if (mpol_needs_cond_ref(pol)) 153000442ad0SMel Gorman mpol_get(pol); 153100442ad0SMel Gorman } 15321da177e4SLinus Torvalds } 1533f15ca78eSOleg Nesterov 153474d2c3a0SOleg Nesterov return pol; 153574d2c3a0SOleg Nesterov } 153674d2c3a0SOleg Nesterov 153774d2c3a0SOleg Nesterov /* 1538dd6eecb9SOleg Nesterov * get_vma_policy(@vma, @addr) 153974d2c3a0SOleg Nesterov * @vma: virtual memory area whose policy is sought 154074d2c3a0SOleg Nesterov * @addr: address in @vma for shared policy lookup 154174d2c3a0SOleg Nesterov * 154274d2c3a0SOleg Nesterov * Returns effective policy for a VMA at specified address. 1543dd6eecb9SOleg Nesterov * Falls back to current->mempolicy or system default policy, as necessary. 154474d2c3a0SOleg Nesterov * Shared policies [those marked as MPOL_F_SHARED] require an extra reference 154574d2c3a0SOleg Nesterov * count--added by the get_policy() vm_op, as appropriate--to protect against 154674d2c3a0SOleg Nesterov * freeing by another task. It is the caller's responsibility to free the 154774d2c3a0SOleg Nesterov * extra reference for shared policies. 154874d2c3a0SOleg Nesterov */ 1549dd6eecb9SOleg Nesterov static struct mempolicy *get_vma_policy(struct vm_area_struct *vma, 1550dd6eecb9SOleg Nesterov unsigned long addr) 155174d2c3a0SOleg Nesterov { 155274d2c3a0SOleg Nesterov struct mempolicy *pol = __get_vma_policy(vma, addr); 155374d2c3a0SOleg Nesterov 15548d90274bSOleg Nesterov if (!pol) 1555dd6eecb9SOleg Nesterov pol = get_task_policy(current); 15568d90274bSOleg Nesterov 15571da177e4SLinus Torvalds return pol; 15581da177e4SLinus Torvalds } 15591da177e4SLinus Torvalds 15606b6482bbSOleg Nesterov bool vma_policy_mof(struct vm_area_struct *vma) 1561fc314724SMel Gorman { 15626b6482bbSOleg Nesterov struct mempolicy *pol; 1563f15ca78eSOleg Nesterov 1564fc314724SMel Gorman if (vma->vm_ops && vma->vm_ops->get_policy) { 1565fc314724SMel Gorman bool ret = false; 1566fc314724SMel Gorman 1567fc314724SMel Gorman pol = vma->vm_ops->get_policy(vma, vma->vm_start); 1568fc314724SMel Gorman if (pol && (pol->flags & MPOL_F_MOF)) 1569fc314724SMel Gorman ret = true; 1570fc314724SMel Gorman mpol_cond_put(pol); 1571fc314724SMel Gorman 1572fc314724SMel Gorman return ret; 15738d90274bSOleg Nesterov } 15748d90274bSOleg Nesterov 1575fc314724SMel Gorman pol = vma->vm_policy; 15768d90274bSOleg Nesterov if (!pol) 15776b6482bbSOleg Nesterov pol = get_task_policy(current); 1578fc314724SMel Gorman 1579fc314724SMel Gorman return pol->flags & MPOL_F_MOF; 1580fc314724SMel Gorman } 1581fc314724SMel Gorman 1582d3eb1570SLai Jiangshan static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone) 1583d3eb1570SLai Jiangshan { 1584d3eb1570SLai Jiangshan enum zone_type dynamic_policy_zone = policy_zone; 1585d3eb1570SLai Jiangshan 1586d3eb1570SLai Jiangshan BUG_ON(dynamic_policy_zone == ZONE_MOVABLE); 1587d3eb1570SLai Jiangshan 1588d3eb1570SLai Jiangshan /* 1589d3eb1570SLai Jiangshan * if policy->v.nodes has movable memory only, 1590d3eb1570SLai Jiangshan * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only. 1591d3eb1570SLai Jiangshan * 1592d3eb1570SLai Jiangshan * policy->v.nodes has been intersected with node_states[N_MEMORY], 1593d3eb1570SLai Jiangshan * so if the following test fails, it implies 1594d3eb1570SLai Jiangshan * policy->v.nodes has movable memory only.
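 *
 * Illustrative example, assuming the common policy_zone == ZONE_NORMAL
 * setup: if every node in the mask carries only ZONE_MOVABLE memory,
 * the test below fails, dynamic_policy_zone becomes ZONE_MOVABLE, and
 * a GFP_KERNEL allocation ignores the bind nodemask while a
 * GFP_HIGHUSER_MOVABLE allocation honours it.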
1595d3eb1570SLai Jiangshan */ 1596d3eb1570SLai Jiangshan if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY])) 1597d3eb1570SLai Jiangshan dynamic_policy_zone = ZONE_MOVABLE; 1598d3eb1570SLai Jiangshan 1599d3eb1570SLai Jiangshan return zone >= dynamic_policy_zone; 1600d3eb1570SLai Jiangshan } 1601d3eb1570SLai Jiangshan 160252cd3b07SLee Schermerhorn /* 160352cd3b07SLee Schermerhorn * Return a nodemask representing a mempolicy for filtering nodes for 160452cd3b07SLee Schermerhorn * page allocation 160552cd3b07SLee Schermerhorn */ 160652cd3b07SLee Schermerhorn static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy) 160719770b32SMel Gorman { 160819770b32SMel Gorman /* Lower zones don't get a nodemask applied for MPOL_BIND */ 160945c4745aSLee Schermerhorn if (unlikely(policy->mode == MPOL_BIND) && 1610d3eb1570SLai Jiangshan apply_policy_zone(policy, gfp_zone(gfp)) && 161119770b32SMel Gorman cpuset_nodemask_valid_mems_allowed(&policy->v.nodes)) 161219770b32SMel Gorman return &policy->v.nodes; 161319770b32SMel Gorman 161419770b32SMel Gorman return NULL; 161519770b32SMel Gorman } 161619770b32SMel Gorman 161704ec6264SVlastimil Babka /* Return the node id preferred by the given mempolicy, or the given id */ 161804ec6264SVlastimil Babka static int policy_node(gfp_t gfp, struct mempolicy *policy, 16192f5f9486SAndi Kleen int nd) 16201da177e4SLinus Torvalds { 16216d840958SMichal Hocko if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL)) 16221da177e4SLinus Torvalds nd = policy->v.preferred_node; 16236d840958SMichal Hocko else { 162419770b32SMel Gorman /* 16256d840958SMichal Hocko * __GFP_THISNODE shouldn't even be used with the bind policy 16266d840958SMichal Hocko * because we might easily break the expectation to stay on the 16276d840958SMichal Hocko * requested node and not break the policy. 162819770b32SMel Gorman */ 16296d840958SMichal Hocko WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE)); 16301da177e4SLinus Torvalds } 16316d840958SMichal Hocko 163204ec6264SVlastimil Babka return nd; 16331da177e4SLinus Torvalds } 16341da177e4SLinus Torvalds 16351da177e4SLinus Torvalds /* Do dynamic interleaving for a process */ 16361da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy) 16371da177e4SLinus Torvalds { 163845816682SVlastimil Babka unsigned next; 16391da177e4SLinus Torvalds struct task_struct *me = current; 16401da177e4SLinus Torvalds 164145816682SVlastimil Babka next = next_node_in(me->il_prev, policy->v.nodes); 1642f5b087b5SDavid Rientjes if (next < MAX_NUMNODES) 164345816682SVlastimil Babka me->il_prev = next; 164445816682SVlastimil Babka return next; 16451da177e4SLinus Torvalds } 16461da177e4SLinus Torvalds 1647dc85da15SChristoph Lameter /* 1648dc85da15SChristoph Lameter * Depending on the memory policy provide a node from which to allocate the 1649dc85da15SChristoph Lameter * next slab entry. 
1650dc85da15SChristoph Lameter */ 16512a389610SDavid Rientjes unsigned int mempolicy_slab_node(void) 1652dc85da15SChristoph Lameter { 1653e7b691b0SAndi Kleen struct mempolicy *policy; 16542a389610SDavid Rientjes int node = numa_mem_id(); 1655e7b691b0SAndi Kleen 1656e7b691b0SAndi Kleen if (in_interrupt()) 16572a389610SDavid Rientjes return node; 1658e7b691b0SAndi Kleen 1659e7b691b0SAndi Kleen policy = current->mempolicy; 1660fc36b8d3SLee Schermerhorn if (!policy || policy->flags & MPOL_F_LOCAL) 16612a389610SDavid Rientjes return node; 1662765c4507SChristoph Lameter 1663bea904d5SLee Schermerhorn switch (policy->mode) { 1664bea904d5SLee Schermerhorn case MPOL_PREFERRED: 1665fc36b8d3SLee Schermerhorn /* 1666fc36b8d3SLee Schermerhorn * handled MPOL_F_LOCAL above 1667fc36b8d3SLee Schermerhorn */ 1668bea904d5SLee Schermerhorn return policy->v.preferred_node; 1669bea904d5SLee Schermerhorn 1670dc85da15SChristoph Lameter case MPOL_INTERLEAVE: 1671dc85da15SChristoph Lameter return interleave_nodes(policy); 1672dc85da15SChristoph Lameter 1673dd1a239fSMel Gorman case MPOL_BIND: { 1674c33d6c06SMel Gorman struct zoneref *z; 1675c33d6c06SMel Gorman 1676dc85da15SChristoph Lameter /* 1677dc85da15SChristoph Lameter * Follow bind policy behavior and start allocation at the 1678dc85da15SChristoph Lameter * first node. 1679dc85da15SChristoph Lameter */ 168019770b32SMel Gorman struct zonelist *zonelist; 168119770b32SMel Gorman enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL); 1682c9634cf0SAneesh Kumar K.V zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK]; 1683c33d6c06SMel Gorman z = first_zones_zonelist(zonelist, highest_zoneidx, 1684c33d6c06SMel Gorman &policy->v.nodes); 1685c33d6c06SMel Gorman return z->zone ? z->zone->node : node; 1686dd1a239fSMel Gorman } 1687dc85da15SChristoph Lameter 1688dc85da15SChristoph Lameter default: 1689bea904d5SLee Schermerhorn BUG(); 1690dc85da15SChristoph Lameter } 1691dc85da15SChristoph Lameter } 1692dc85da15SChristoph Lameter 1693fee83b3aSAndrew Morton /* 1694fee83b3aSAndrew Morton * Do static interleaving for a VMA with known offset @n. Returns the n'th 1695fee83b3aSAndrew Morton * node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the 1696fee83b3aSAndrew Morton * number of present nodes. 
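 *
 * For example (illustrative): with pol->v.nodes == {0,5,7} and n == 4,
 * nnodes == 3 and target == 4 % 3 == 1, so the walk below stops at
 * node 5, the second node of the mask.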
1697fee83b3aSAndrew Morton */ 16981da177e4SLinus Torvalds static unsigned offset_il_node(struct mempolicy *pol, 1699fee83b3aSAndrew Morton struct vm_area_struct *vma, unsigned long n) 17001da177e4SLinus Torvalds { 1701dfcd3c0dSAndi Kleen unsigned nnodes = nodes_weight(pol->v.nodes); 1702f5b087b5SDavid Rientjes unsigned target; 1703fee83b3aSAndrew Morton int i; 1704fee83b3aSAndrew Morton int nid; 17051da177e4SLinus Torvalds 1706f5b087b5SDavid Rientjes if (!nnodes) 1707f5b087b5SDavid Rientjes return numa_node_id(); 1708fee83b3aSAndrew Morton target = (unsigned int)n % nnodes; 1709fee83b3aSAndrew Morton nid = first_node(pol->v.nodes); 1710fee83b3aSAndrew Morton for (i = 0; i < target; i++) 1711dfcd3c0dSAndi Kleen nid = next_node(nid, pol->v.nodes); 17121da177e4SLinus Torvalds return nid; 17131da177e4SLinus Torvalds } 17141da177e4SLinus Torvalds 17155da7ca86SChristoph Lameter /* Determine a node number for interleave */ 17165da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol, 17175da7ca86SChristoph Lameter struct vm_area_struct *vma, unsigned long addr, int shift) 17185da7ca86SChristoph Lameter { 17195da7ca86SChristoph Lameter if (vma) { 17205da7ca86SChristoph Lameter unsigned long off; 17215da7ca86SChristoph Lameter 17223b98b087SNishanth Aravamudan /* 17233b98b087SNishanth Aravamudan * for small pages, there is no difference between 17243b98b087SNishanth Aravamudan * shift and PAGE_SHIFT, so the bit-shift is safe. 17253b98b087SNishanth Aravamudan * for huge pages, since vm_pgoff is in units of small 17263b98b087SNishanth Aravamudan * pages, we need to shift off the always 0 bits to get 17273b98b087SNishanth Aravamudan * a useful offset. 17283b98b087SNishanth Aravamudan */ 17293b98b087SNishanth Aravamudan BUG_ON(shift < PAGE_SHIFT); 17303b98b087SNishanth Aravamudan off = vma->vm_pgoff >> (shift - PAGE_SHIFT); 17315da7ca86SChristoph Lameter off += (addr - vma->vm_start) >> shift; 17325da7ca86SChristoph Lameter return offset_il_node(pol, vma, off); 17335da7ca86SChristoph Lameter } else 17345da7ca86SChristoph Lameter return interleave_nodes(pol); 17355da7ca86SChristoph Lameter } 17365da7ca86SChristoph Lameter 173700ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS 1738480eccf9SLee Schermerhorn /* 173904ec6264SVlastimil Babka * huge_node(@vma, @addr, @gfp_flags, @mpol) 1740b46e14acSFabian Frederick * @vma: virtual memory area whose policy is sought 1741b46e14acSFabian Frederick * @addr: address in @vma for shared policy lookup and interleave policy 1742b46e14acSFabian Frederick * @gfp_flags: for requested zone 1743b46e14acSFabian Frederick * @mpol: pointer to mempolicy pointer for reference counted mempolicy 1744b46e14acSFabian Frederick * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask 1745480eccf9SLee Schermerhorn * 174604ec6264SVlastimil Babka * Returns a nid suitable for a huge page allocation and a pointer 174752cd3b07SLee Schermerhorn * to the struct mempolicy for conditional unref after allocation. 174852cd3b07SLee Schermerhorn * If the effective policy is 'BIND', returns a pointer to the mempolicy's 174952cd3b07SLee Schermerhorn * @nodemask for filtering the zonelist.
1750c0ff7453SMiao Xie * 1751d26914d1SMel Gorman * Must be protected by read_mems_allowed_begin() 1752480eccf9SLee Schermerhorn */ 175304ec6264SVlastimil Babka int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags, 175304ec6264SVlastimil Babka struct mempolicy **mpol, nodemask_t **nodemask) 17555da7ca86SChristoph Lameter { 175604ec6264SVlastimil Babka int nid; 17575da7ca86SChristoph Lameter 1758dd6eecb9SOleg Nesterov *mpol = get_vma_policy(vma, addr); 175919770b32SMel Gorman *nodemask = NULL; /* assume !MPOL_BIND */ 17605da7ca86SChristoph Lameter 176152cd3b07SLee Schermerhorn if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) { 176204ec6264SVlastimil Babka nid = interleave_nid(*mpol, vma, addr, 176304ec6264SVlastimil Babka huge_page_shift(hstate_vma(vma))); 176452cd3b07SLee Schermerhorn } else { 176504ec6264SVlastimil Babka nid = policy_node(gfp_flags, *mpol, numa_node_id()); 176652cd3b07SLee Schermerhorn if ((*mpol)->mode == MPOL_BIND) 176752cd3b07SLee Schermerhorn *nodemask = &(*mpol)->v.nodes; 1768480eccf9SLee Schermerhorn } 176904ec6264SVlastimil Babka return nid; 17705da7ca86SChristoph Lameter } 177106808b08SLee Schermerhorn 177206808b08SLee Schermerhorn /* 177306808b08SLee Schermerhorn * init_nodemask_of_mempolicy 177406808b08SLee Schermerhorn * 177506808b08SLee Schermerhorn * If the current task's mempolicy is "default" [NULL], return 'false' 177606808b08SLee Schermerhorn * to indicate default policy. Otherwise, extract the policy nodemask 177706808b08SLee Schermerhorn * for 'bind' or 'interleave' policy into the argument nodemask, or 177806808b08SLee Schermerhorn * initialize the argument nodemask to contain the single node for 177906808b08SLee Schermerhorn * 'preferred' or 'local' policy and return 'true' to indicate presence 178006808b08SLee Schermerhorn * of non-default mempolicy. 178106808b08SLee Schermerhorn * 178206808b08SLee Schermerhorn * We don't bother with reference counting the mempolicy [mpol_get/put] 178306808b08SLee Schermerhorn * because the current task is examining its own mempolicy and a task's 178406808b08SLee Schermerhorn * mempolicy is only ever changed by the task itself. 178506808b08SLee Schermerhorn * 178606808b08SLee Schermerhorn * N.B., it is the caller's responsibility to free a returned nodemask.
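 *
 * (Illustrative: hugetlb's nr_hugepages_mempolicy handling is a
 * typical caller of this helper.)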
178706808b08SLee Schermerhorn */ 178806808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask) 178906808b08SLee Schermerhorn { 179006808b08SLee Schermerhorn struct mempolicy *mempolicy; 179106808b08SLee Schermerhorn int nid; 179206808b08SLee Schermerhorn 179306808b08SLee Schermerhorn if (!(mask && current->mempolicy)) 179406808b08SLee Schermerhorn return false; 179506808b08SLee Schermerhorn 1796c0ff7453SMiao Xie task_lock(current); 179706808b08SLee Schermerhorn mempolicy = current->mempolicy; 179806808b08SLee Schermerhorn switch (mempolicy->mode) { 179906808b08SLee Schermerhorn case MPOL_PREFERRED: 180006808b08SLee Schermerhorn if (mempolicy->flags & MPOL_F_LOCAL) 180106808b08SLee Schermerhorn nid = numa_node_id(); 180206808b08SLee Schermerhorn else 180306808b08SLee Schermerhorn nid = mempolicy->v.preferred_node; 180406808b08SLee Schermerhorn init_nodemask_of_node(mask, nid); 180506808b08SLee Schermerhorn break; 180606808b08SLee Schermerhorn 180706808b08SLee Schermerhorn case MPOL_BIND: 180806808b08SLee Schermerhorn /* Fall through */ 180906808b08SLee Schermerhorn case MPOL_INTERLEAVE: 181006808b08SLee Schermerhorn *mask = mempolicy->v.nodes; 181106808b08SLee Schermerhorn break; 181206808b08SLee Schermerhorn 181306808b08SLee Schermerhorn default: 181406808b08SLee Schermerhorn BUG(); 181506808b08SLee Schermerhorn } 1816c0ff7453SMiao Xie task_unlock(current); 181706808b08SLee Schermerhorn 181806808b08SLee Schermerhorn return true; 181906808b08SLee Schermerhorn } 182000ac59adSChen, Kenneth W #endif 18215da7ca86SChristoph Lameter 18226f48d0ebSDavid Rientjes /* 18236f48d0ebSDavid Rientjes * mempolicy_nodemask_intersects 18246f48d0ebSDavid Rientjes * 18256f48d0ebSDavid Rientjes * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default 18266f48d0ebSDavid Rientjes * policy. Otherwise, check for intersection between mask and the policy 18276f48d0ebSDavid Rientjes * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local' 18286f48d0ebSDavid Rientjes * policy, always return true since it may allocate elsewhere on fallback. 18296f48d0ebSDavid Rientjes * 18306f48d0ebSDavid Rientjes * Takes task_lock(tsk) to prevent freeing of its mempolicy. 18316f48d0ebSDavid Rientjes */ 18326f48d0ebSDavid Rientjes bool mempolicy_nodemask_intersects(struct task_struct *tsk, 18336f48d0ebSDavid Rientjes const nodemask_t *mask) 18346f48d0ebSDavid Rientjes { 18356f48d0ebSDavid Rientjes struct mempolicy *mempolicy; 18366f48d0ebSDavid Rientjes bool ret = true; 18376f48d0ebSDavid Rientjes 18386f48d0ebSDavid Rientjes if (!mask) 18396f48d0ebSDavid Rientjes return ret; 18406f48d0ebSDavid Rientjes task_lock(tsk); 18416f48d0ebSDavid Rientjes mempolicy = tsk->mempolicy; 18426f48d0ebSDavid Rientjes if (!mempolicy) 18436f48d0ebSDavid Rientjes goto out; 18446f48d0ebSDavid Rientjes 18456f48d0ebSDavid Rientjes switch (mempolicy->mode) { 18466f48d0ebSDavid Rientjes case MPOL_PREFERRED: 18476f48d0ebSDavid Rientjes /* 18486f48d0ebSDavid Rientjes * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to 18496f48d0ebSDavid Rientjes * allocate from, they may fall back to other nodes when OOM. 18506f48d0ebSDavid Rientjes * Thus, it's possible for tsk to have allocated memory from 18516f48d0ebSDavid Rientjes * nodes in mask.
18526f48d0ebSDavid Rientjes */ 18536f48d0ebSDavid Rientjes break; 18546f48d0ebSDavid Rientjes case MPOL_BIND: 18556f48d0ebSDavid Rientjes case MPOL_INTERLEAVE: 18566f48d0ebSDavid Rientjes ret = nodes_intersects(mempolicy->v.nodes, *mask); 18576f48d0ebSDavid Rientjes break; 18586f48d0ebSDavid Rientjes default: 18596f48d0ebSDavid Rientjes BUG(); 18606f48d0ebSDavid Rientjes } 18616f48d0ebSDavid Rientjes out: 18626f48d0ebSDavid Rientjes task_unlock(tsk); 18636f48d0ebSDavid Rientjes return ret; 18646f48d0ebSDavid Rientjes } 18656f48d0ebSDavid Rientjes 18661da177e4SLinus Torvalds /* Allocate a page in interleaved policy. 18671da177e4SLinus Torvalds Own path because it needs to do special accounting. */ 1868662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, 1869662f3a0bSAndi Kleen unsigned nid) 18701da177e4SLinus Torvalds { 18711da177e4SLinus Torvalds struct page *page; 18721da177e4SLinus Torvalds 187304ec6264SVlastimil Babka page = __alloc_pages(gfp, order, nid); 187404ec6264SVlastimil Babka if (page && page_to_nid(page) == nid) 1875ca889e6cSChristoph Lameter inc_zone_page_state(page, NUMA_INTERLEAVE_HIT); 18761da177e4SLinus Torvalds return page; 18771da177e4SLinus Torvalds } 18781da177e4SLinus Torvalds 18791da177e4SLinus Torvalds /** 18800bbbc0b3SAndrea Arcangeli * alloc_pages_vma - Allocate a page for a VMA. 18811da177e4SLinus Torvalds * 18821da177e4SLinus Torvalds * @gfp: 18831da177e4SLinus Torvalds * %GFP_USER user allocation. 18841da177e4SLinus Torvalds * %GFP_KERNEL kernel allocations, 18851da177e4SLinus Torvalds * %GFP_HIGHMEM highmem/user allocations, 18861da177e4SLinus Torvalds * %GFP_FS allocation should not call back into a file system. 18871da177e4SLinus Torvalds * %GFP_ATOMIC don't sleep. 18881da177e4SLinus Torvalds * 18890bbbc0b3SAndrea Arcangeli * @order:Order of the GFP allocation. 18901da177e4SLinus Torvalds * @vma: Pointer to VMA or NULL if not available. 18911da177e4SLinus Torvalds * @addr: Virtual Address of the allocation. Must be inside the VMA. 1892be97a41bSVlastimil Babka * @node: Which node to prefer for allocation (modulo policy). 1893be97a41bSVlastimil Babka * @hugepage: for hugepages try only the preferred node if possible 18941da177e4SLinus Torvalds * 18951da177e4SLinus Torvalds * This function allocates a page from the kernel page pool and applies 18961da177e4SLinus Torvalds * a NUMA policy associated with the VMA or the current process. 18971da177e4SLinus Torvalds * When VMA is not NULL caller must hold down_read on the mmap_sem of the 18981da177e4SLinus Torvalds * mm_struct of the VMA to prevent it from going away. Should be used for 1899be97a41bSVlastimil Babka * all allocations for pages that will be mapped into user space. Returns 1900be97a41bSVlastimil Babka * NULL when no page can be allocated. 
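 *
 * (Illustrative: order-0 callers such as the anonymous fault path
 * normally reach this through the alloc_page_vma() wrapper.)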
19011da177e4SLinus Torvalds */ 19021da177e4SLinus Torvalds struct page * 19030bbbc0b3SAndrea Arcangeli alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, 1904be97a41bSVlastimil Babka unsigned long addr, int node, bool hugepage) 19051da177e4SLinus Torvalds { 1906cc9a6c87SMel Gorman struct mempolicy *pol; 1907c0ff7453SMiao Xie struct page *page; 190804ec6264SVlastimil Babka int preferred_nid; 1909be97a41bSVlastimil Babka nodemask_t *nmask; 19101da177e4SLinus Torvalds 1911dd6eecb9SOleg Nesterov pol = get_vma_policy(vma, addr); 1912cc9a6c87SMel Gorman 1913be97a41bSVlastimil Babka if (pol->mode == MPOL_INTERLEAVE) { 19141da177e4SLinus Torvalds unsigned nid; 19155da7ca86SChristoph Lameter 19168eac563cSAndi Kleen nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order); 191752cd3b07SLee Schermerhorn mpol_cond_put(pol); 19180bbbc0b3SAndrea Arcangeli page = alloc_page_interleave(gfp, order, nid); 1919be97a41bSVlastimil Babka goto out; 19201da177e4SLinus Torvalds } 19211da177e4SLinus Torvalds 19220867a57cSVlastimil Babka if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) { 19230867a57cSVlastimil Babka int hpage_node = node; 19240867a57cSVlastimil Babka 19250867a57cSVlastimil Babka /* 19260867a57cSVlastimil Babka * For hugepage allocation and non-interleave policy which 19270867a57cSVlastimil Babka * allows the current node (or other explicitly preferred 19280867a57cSVlastimil Babka * node) we only try to allocate from the current/preferred 19290867a57cSVlastimil Babka * node and don't fall back to other nodes, as the cost of 19300867a57cSVlastimil Babka * remote accesses would likely offset THP benefits. 19310867a57cSVlastimil Babka * 19320867a57cSVlastimil Babka * If the policy is interleave, or does not allow the current 19330867a57cSVlastimil Babka * node in its nodemask, we allocate the standard way. 19340867a57cSVlastimil Babka */ 19350867a57cSVlastimil Babka if (pol->mode == MPOL_PREFERRED && 19360867a57cSVlastimil Babka !(pol->flags & MPOL_F_LOCAL)) 19370867a57cSVlastimil Babka hpage_node = pol->v.preferred_node; 19380867a57cSVlastimil Babka 19390867a57cSVlastimil Babka nmask = policy_nodemask(gfp, pol); 19400867a57cSVlastimil Babka if (!nmask || node_isset(hpage_node, *nmask)) { 19410867a57cSVlastimil Babka mpol_cond_put(pol); 194296db800fSVlastimil Babka page = __alloc_pages_node(hpage_node, 19430867a57cSVlastimil Babka gfp | __GFP_THISNODE, order); 19440867a57cSVlastimil Babka goto out; 19450867a57cSVlastimil Babka } 19460867a57cSVlastimil Babka } 19470867a57cSVlastimil Babka 1948077fcf11SAneesh Kumar K.V nmask = policy_nodemask(gfp, pol); 194904ec6264SVlastimil Babka preferred_nid = policy_node(gfp, pol, node); 195004ec6264SVlastimil Babka page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask); 1951d51e9894SVlastimil Babka mpol_cond_put(pol); 1952be97a41bSVlastimil Babka out: 1953077fcf11SAneesh Kumar K.V return page; 1954077fcf11SAneesh Kumar K.V } 1955077fcf11SAneesh Kumar K.V 19561da177e4SLinus Torvalds /** 19571da177e4SLinus Torvalds * alloc_pages_current - Allocate pages. 19581da177e4SLinus Torvalds * 19591da177e4SLinus Torvalds * @gfp: 19601da177e4SLinus Torvalds * %GFP_USER user allocation, 19611da177e4SLinus Torvalds * %GFP_KERNEL kernel allocation, 19621da177e4SLinus Torvalds * %GFP_HIGHMEM highmem allocation, 19631da177e4SLinus Torvalds * %GFP_FS don't call back into a file system. 19641da177e4SLinus Torvalds * %GFP_ATOMIC don't sleep. 19651da177e4SLinus Torvalds * @order: Power of two of allocation size in pages. 0 is a single page. 
19661da177e4SLinus Torvalds * 19671da177e4SLinus Torvalds * Allocate a page from the kernel page pool. When not in 19681da177e4SLinus Torvalds * interrupt context, apply the current process NUMA policy. 19691da177e4SLinus Torvalds * Returns NULL when no page can be allocated. 19701da177e4SLinus Torvalds */ 1971dd0fc66fSAl Viro struct page *alloc_pages_current(gfp_t gfp, unsigned order) 19721da177e4SLinus Torvalds { 19738d90274bSOleg Nesterov struct mempolicy *pol = &default_policy; 1974c0ff7453SMiao Xie struct page *page; 19751da177e4SLinus Torvalds 19768d90274bSOleg Nesterov if (!in_interrupt() && !(gfp & __GFP_THISNODE)) 19778d90274bSOleg Nesterov pol = get_task_policy(current); 197852cd3b07SLee Schermerhorn 197952cd3b07SLee Schermerhorn /* 198052cd3b07SLee Schermerhorn * No reference counting needed for current->mempolicy 198152cd3b07SLee Schermerhorn * nor system default_policy 198252cd3b07SLee Schermerhorn */ 198345c4745aSLee Schermerhorn if (pol->mode == MPOL_INTERLEAVE) 1984c0ff7453SMiao Xie page = alloc_page_interleave(gfp, order, interleave_nodes(pol)); 1985c0ff7453SMiao Xie else 1986c0ff7453SMiao Xie page = __alloc_pages_nodemask(gfp, order, 198704ec6264SVlastimil Babka policy_node(gfp, pol, numa_node_id()), 19885c4b4be3SAndi Kleen policy_nodemask(gfp, pol)); 1989cc9a6c87SMel Gorman 1990c0ff7453SMiao Xie return page; 19911da177e4SLinus Torvalds } 19921da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_pages_current); 19931da177e4SLinus Torvalds 1994ef0855d3SOleg Nesterov int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst) 1995ef0855d3SOleg Nesterov { 1996ef0855d3SOleg Nesterov struct mempolicy *pol = mpol_dup(vma_policy(src)); 1997ef0855d3SOleg Nesterov 1998ef0855d3SOleg Nesterov if (IS_ERR(pol)) 1999ef0855d3SOleg Nesterov return PTR_ERR(pol); 2000ef0855d3SOleg Nesterov dst->vm_policy = pol; 2001ef0855d3SOleg Nesterov return 0; 2002ef0855d3SOleg Nesterov } 2003ef0855d3SOleg Nesterov 20044225399aSPaul Jackson /* 2005846a16bfSLee Schermerhorn * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it 20064225399aSPaul Jackson * rebinds the mempolicy it is copying by calling mpol_rebind_policy() 20074225399aSPaul Jackson * with the mems_allowed returned by cpuset_mems_allowed(). This 20084225399aSPaul Jackson * keeps mempolicies cpuset-relative after its cpuset moves. See 20094225399aSPaul Jackson * further kernel/cpuset.c update_nodemask(). 2010708c1bbcSMiao Xie * 2011708c1bbcSMiao Xie * current's mempolicy may be rebound by another task (the task that changes 2012708c1bbcSMiao Xie * cpuset's mems), so we needn't do rebind work for current task.
20134225399aSPaul Jackson */ 20144225399aSPaul Jackson 2015846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */ 2016846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old) 20171da177e4SLinus Torvalds { 20181da177e4SLinus Torvalds struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL); 20191da177e4SLinus Torvalds 20201da177e4SLinus Torvalds if (!new) 20211da177e4SLinus Torvalds return ERR_PTR(-ENOMEM); 2022708c1bbcSMiao Xie 2023708c1bbcSMiao Xie /* task's mempolicy is protected by alloc_lock */ 2024708c1bbcSMiao Xie if (old == current->mempolicy) { 2025708c1bbcSMiao Xie task_lock(current); 2026708c1bbcSMiao Xie *new = *old; 2027708c1bbcSMiao Xie task_unlock(current); 2028708c1bbcSMiao Xie } else 2029708c1bbcSMiao Xie *new = *old; 2030708c1bbcSMiao Xie 20314225399aSPaul Jackson if (current_cpuset_is_being_rebound()) { 20324225399aSPaul Jackson nodemask_t mems = cpuset_mems_allowed(current); 2033213980c0SVlastimil Babka mpol_rebind_policy(new, &mems); 20344225399aSPaul Jackson } 20351da177e4SLinus Torvalds atomic_set(&new->refcnt, 1); 20361da177e4SLinus Torvalds return new; 20371da177e4SLinus Torvalds } 20381da177e4SLinus Torvalds 20391da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */ 2040fcfb4dccSKOSAKI Motohiro bool __mpol_equal(struct mempolicy *a, struct mempolicy *b) 20411da177e4SLinus Torvalds { 20421da177e4SLinus Torvalds if (!a || !b) 2043fcfb4dccSKOSAKI Motohiro return false; 204445c4745aSLee Schermerhorn if (a->mode != b->mode) 2045fcfb4dccSKOSAKI Motohiro return false; 204619800502SBob Liu if (a->flags != b->flags) 2047fcfb4dccSKOSAKI Motohiro return false; 204819800502SBob Liu if (mpol_store_user_nodemask(a)) 204919800502SBob Liu if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask)) 2050fcfb4dccSKOSAKI Motohiro return false; 205119800502SBob Liu 205245c4745aSLee Schermerhorn switch (a->mode) { 205319770b32SMel Gorman case MPOL_BIND: 205419770b32SMel Gorman /* Fall through */ 20551da177e4SLinus Torvalds case MPOL_INTERLEAVE: 2056fcfb4dccSKOSAKI Motohiro return !!nodes_equal(a->v.nodes, b->v.nodes); 20571da177e4SLinus Torvalds case MPOL_PREFERRED: 205875719661SNamhyung Kim return a->v.preferred_node == b->v.preferred_node; 20591da177e4SLinus Torvalds default: 20601da177e4SLinus Torvalds BUG(); 2061fcfb4dccSKOSAKI Motohiro return false; 20621da177e4SLinus Torvalds } 20631da177e4SLinus Torvalds } 20641da177e4SLinus Torvalds 20651da177e4SLinus Torvalds /* 20661da177e4SLinus Torvalds * Shared memory backing store policy support. 20671da177e4SLinus Torvalds * 20681da177e4SLinus Torvalds * Remember policies even when nobody has shared memory mapped. 20691da177e4SLinus Torvalds * The policies are kept in Red-Black tree linked from the inode. 20704a8c7bb5SNathan Zimmer * They are protected by the sp->lock rwlock, which should be held 20711da177e4SLinus Torvalds * for any accesses to the tree. 20721da177e4SLinus Torvalds */ 20731da177e4SLinus Torvalds 20744a8c7bb5SNathan Zimmer /* 20754a8c7bb5SNathan Zimmer * lookup first element intersecting start-end. 
Caller holds sp->lock for 20764a8c7bb5SNathan Zimmer * reading or for writing 20774a8c7bb5SNathan Zimmer */ 20781da177e4SLinus Torvalds static struct sp_node * 20791da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end) 20801da177e4SLinus Torvalds { 20811da177e4SLinus Torvalds struct rb_node *n = sp->root.rb_node; 20821da177e4SLinus Torvalds 20831da177e4SLinus Torvalds while (n) { 20841da177e4SLinus Torvalds struct sp_node *p = rb_entry(n, struct sp_node, nd); 20851da177e4SLinus Torvalds 20861da177e4SLinus Torvalds if (start >= p->end) 20871da177e4SLinus Torvalds n = n->rb_right; 20881da177e4SLinus Torvalds else if (end <= p->start) 20891da177e4SLinus Torvalds n = n->rb_left; 20901da177e4SLinus Torvalds else 20911da177e4SLinus Torvalds break; 20921da177e4SLinus Torvalds } 20931da177e4SLinus Torvalds if (!n) 20941da177e4SLinus Torvalds return NULL; 20951da177e4SLinus Torvalds for (;;) { 20961da177e4SLinus Torvalds struct sp_node *w = NULL; 20971da177e4SLinus Torvalds struct rb_node *prev = rb_prev(n); 20981da177e4SLinus Torvalds if (!prev) 20991da177e4SLinus Torvalds break; 21001da177e4SLinus Torvalds w = rb_entry(prev, struct sp_node, nd); 21011da177e4SLinus Torvalds if (w->end <= start) 21021da177e4SLinus Torvalds break; 21031da177e4SLinus Torvalds n = prev; 21041da177e4SLinus Torvalds } 21051da177e4SLinus Torvalds return rb_entry(n, struct sp_node, nd); 21061da177e4SLinus Torvalds } 21071da177e4SLinus Torvalds 21084a8c7bb5SNathan Zimmer /* 21094a8c7bb5SNathan Zimmer * Insert a new shared policy into the list. Caller holds sp->lock for 21104a8c7bb5SNathan Zimmer * writing. 21114a8c7bb5SNathan Zimmer */ 21121da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new) 21131da177e4SLinus Torvalds { 21141da177e4SLinus Torvalds struct rb_node **p = &sp->root.rb_node; 21151da177e4SLinus Torvalds struct rb_node *parent = NULL; 21161da177e4SLinus Torvalds struct sp_node *nd; 21171da177e4SLinus Torvalds 21181da177e4SLinus Torvalds while (*p) { 21191da177e4SLinus Torvalds parent = *p; 21201da177e4SLinus Torvalds nd = rb_entry(parent, struct sp_node, nd); 21211da177e4SLinus Torvalds if (new->start < nd->start) 21221da177e4SLinus Torvalds p = &(*p)->rb_left; 21231da177e4SLinus Torvalds else if (new->end > nd->end) 21241da177e4SLinus Torvalds p = &(*p)->rb_right; 21251da177e4SLinus Torvalds else 21261da177e4SLinus Torvalds BUG(); 21271da177e4SLinus Torvalds } 21281da177e4SLinus Torvalds rb_link_node(&new->nd, parent, p); 21291da177e4SLinus Torvalds rb_insert_color(&new->nd, &sp->root); 2130140d5a49SPaul Mundt pr_debug("inserting %lx-%lx: %d\n", new->start, new->end, 213145c4745aSLee Schermerhorn new->policy ? 
new->policy->mode : 0); 21321da177e4SLinus Torvalds } 21331da177e4SLinus Torvalds 21341da177e4SLinus Torvalds /* Find shared policy intersecting idx */ 21351da177e4SLinus Torvalds struct mempolicy * 21361da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx) 21371da177e4SLinus Torvalds { 21381da177e4SLinus Torvalds struct mempolicy *pol = NULL; 21391da177e4SLinus Torvalds struct sp_node *sn; 21401da177e4SLinus Torvalds 21411da177e4SLinus Torvalds if (!sp->root.rb_node) 21421da177e4SLinus Torvalds return NULL; 21434a8c7bb5SNathan Zimmer read_lock(&sp->lock); 21441da177e4SLinus Torvalds sn = sp_lookup(sp, idx, idx+1); 21451da177e4SLinus Torvalds if (sn) { 21461da177e4SLinus Torvalds mpol_get(sn->policy); 21471da177e4SLinus Torvalds pol = sn->policy; 21481da177e4SLinus Torvalds } 21494a8c7bb5SNathan Zimmer read_unlock(&sp->lock); 21501da177e4SLinus Torvalds return pol; 21511da177e4SLinus Torvalds } 21521da177e4SLinus Torvalds 215363f74ca2SKOSAKI Motohiro static void sp_free(struct sp_node *n) 215463f74ca2SKOSAKI Motohiro { 215563f74ca2SKOSAKI Motohiro mpol_put(n->policy); 215663f74ca2SKOSAKI Motohiro kmem_cache_free(sn_cache, n); 215763f74ca2SKOSAKI Motohiro } 215863f74ca2SKOSAKI Motohiro 2159771fb4d8SLee Schermerhorn /** 2160771fb4d8SLee Schermerhorn * mpol_misplaced - check whether current page node is valid in policy 2161771fb4d8SLee Schermerhorn * 2162b46e14acSFabian Frederick * @page: page to be checked 2163b46e14acSFabian Frederick * @vma: vm area where page mapped 2164b46e14acSFabian Frederick * @addr: virtual address where page mapped 2165771fb4d8SLee Schermerhorn * 2166771fb4d8SLee Schermerhorn * Lookup current policy node id for vma,addr and "compare to" page's 2167771fb4d8SLee Schermerhorn * node id. 2168771fb4d8SLee Schermerhorn * 2169771fb4d8SLee Schermerhorn * Returns: 2170771fb4d8SLee Schermerhorn * -1 - not misplaced, page is in the right node 2171771fb4d8SLee Schermerhorn * node - node id where the page should be 2172771fb4d8SLee Schermerhorn * 2173771fb4d8SLee Schermerhorn * Policy determination "mimics" alloc_page_vma(). 2174771fb4d8SLee Schermerhorn * Called from fault path where we know the vma and faulting address. 
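 *
 * (Illustrative: the NUMA-balancing hinting-fault handlers, such as
 * do_numa_page(), are the main callers.)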
2175771fb4d8SLee Schermerhorn */ 2176771fb4d8SLee Schermerhorn int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr) 2177771fb4d8SLee Schermerhorn { 2178771fb4d8SLee Schermerhorn struct mempolicy *pol; 2179c33d6c06SMel Gorman struct zoneref *z; 2180771fb4d8SLee Schermerhorn int curnid = page_to_nid(page); 2181771fb4d8SLee Schermerhorn unsigned long pgoff; 218290572890SPeter Zijlstra int thiscpu = raw_smp_processor_id(); 218390572890SPeter Zijlstra int thisnid = cpu_to_node(thiscpu); 2184771fb4d8SLee Schermerhorn int polnid = -1; 2185771fb4d8SLee Schermerhorn int ret = -1; 2186771fb4d8SLee Schermerhorn 2187771fb4d8SLee Schermerhorn BUG_ON(!vma); 2188771fb4d8SLee Schermerhorn 2189dd6eecb9SOleg Nesterov pol = get_vma_policy(vma, addr); 2190771fb4d8SLee Schermerhorn if (!(pol->flags & MPOL_F_MOF)) 2191771fb4d8SLee Schermerhorn goto out; 2192771fb4d8SLee Schermerhorn 2193771fb4d8SLee Schermerhorn switch (pol->mode) { 2194771fb4d8SLee Schermerhorn case MPOL_INTERLEAVE: 2195771fb4d8SLee Schermerhorn BUG_ON(addr >= vma->vm_end); 2196771fb4d8SLee Schermerhorn BUG_ON(addr < vma->vm_start); 2197771fb4d8SLee Schermerhorn 2198771fb4d8SLee Schermerhorn pgoff = vma->vm_pgoff; 2199771fb4d8SLee Schermerhorn pgoff += (addr - vma->vm_start) >> PAGE_SHIFT; 2200771fb4d8SLee Schermerhorn polnid = offset_il_node(pol, vma, pgoff); 2201771fb4d8SLee Schermerhorn break; 2202771fb4d8SLee Schermerhorn 2203771fb4d8SLee Schermerhorn case MPOL_PREFERRED: 2204771fb4d8SLee Schermerhorn if (pol->flags & MPOL_F_LOCAL) 2205771fb4d8SLee Schermerhorn polnid = numa_node_id(); 2206771fb4d8SLee Schermerhorn else 2207771fb4d8SLee Schermerhorn polnid = pol->v.preferred_node; 2208771fb4d8SLee Schermerhorn break; 2209771fb4d8SLee Schermerhorn 2210771fb4d8SLee Schermerhorn case MPOL_BIND: 2211c33d6c06SMel Gorman 2212771fb4d8SLee Schermerhorn /* 2213771fb4d8SLee Schermerhorn * allows binding to multiple nodes. 2214771fb4d8SLee Schermerhorn * use current page if in policy nodemask, 2215771fb4d8SLee Schermerhorn * else select nearest allowed node, if any. 2216771fb4d8SLee Schermerhorn * If no allowed nodes, use current [!misplaced]. 
2217771fb4d8SLee Schermerhorn */ 2218771fb4d8SLee Schermerhorn if (node_isset(curnid, pol->v.nodes)) 2219771fb4d8SLee Schermerhorn goto out; 2220c33d6c06SMel Gorman z = first_zones_zonelist( 2221771fb4d8SLee Schermerhorn node_zonelist(numa_node_id(), GFP_HIGHUSER), 2222771fb4d8SLee Schermerhorn gfp_zone(GFP_HIGHUSER), 2223c33d6c06SMel Gorman &pol->v.nodes); 2224c33d6c06SMel Gorman polnid = z->zone->node; 2225771fb4d8SLee Schermerhorn break; 2226771fb4d8SLee Schermerhorn 2227771fb4d8SLee Schermerhorn default: 2228771fb4d8SLee Schermerhorn BUG(); 2229771fb4d8SLee Schermerhorn } 22305606e387SMel Gorman 22315606e387SMel Gorman /* Migrate the page towards the node whose CPU is referencing it */ 2232e42c8ff2SMel Gorman if (pol->flags & MPOL_F_MORON) { 223390572890SPeter Zijlstra polnid = thisnid; 22345606e387SMel Gorman 223510f39042SRik van Riel if (!should_numa_migrate_memory(current, page, curnid, thiscpu)) 2236de1c9ce6SRik van Riel goto out; 2237de1c9ce6SRik van Riel } 2238e42c8ff2SMel Gorman 2239771fb4d8SLee Schermerhorn if (curnid != polnid) 2240771fb4d8SLee Schermerhorn ret = polnid; 2241771fb4d8SLee Schermerhorn out: 2242771fb4d8SLee Schermerhorn mpol_cond_put(pol); 2243771fb4d8SLee Schermerhorn 2244771fb4d8SLee Schermerhorn return ret; 2245771fb4d8SLee Schermerhorn } 2246771fb4d8SLee Schermerhorn 2247c11600e4SDavid Rientjes /* 2248c11600e4SDavid Rientjes * Drop the (possibly final) reference to task->mempolicy. It needs to be 2249c11600e4SDavid Rientjes * dropped after task->mempolicy is set to NULL so that any allocation done as 2250c11600e4SDavid Rientjes * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed 2251c11600e4SDavid Rientjes * policy. 2252c11600e4SDavid Rientjes */ 2253c11600e4SDavid Rientjes void mpol_put_task_policy(struct task_struct *task) 2254c11600e4SDavid Rientjes { 2255c11600e4SDavid Rientjes struct mempolicy *pol; 2256c11600e4SDavid Rientjes 2257c11600e4SDavid Rientjes task_lock(task); 2258c11600e4SDavid Rientjes pol = task->mempolicy; 2259c11600e4SDavid Rientjes task->mempolicy = NULL; 2260c11600e4SDavid Rientjes task_unlock(task); 2261c11600e4SDavid Rientjes mpol_put(pol); 2262c11600e4SDavid Rientjes } 2263c11600e4SDavid Rientjes 22641da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n) 22651da177e4SLinus Torvalds { 2266140d5a49SPaul Mundt pr_debug("deleting %lx-%lx\n", n->start, n->end); 22671da177e4SLinus Torvalds rb_erase(&n->nd, &sp->root); 226863f74ca2SKOSAKI Motohiro sp_free(n); 22691da177e4SLinus Torvalds } 22701da177e4SLinus Torvalds 227142288fe3SMel Gorman static void sp_node_init(struct sp_node *node, unsigned long start, 227242288fe3SMel Gorman unsigned long end, struct mempolicy *pol) 227342288fe3SMel Gorman { 227442288fe3SMel Gorman node->start = start; 227542288fe3SMel Gorman node->end = end; 227642288fe3SMel Gorman node->policy = pol; 227742288fe3SMel Gorman } 227842288fe3SMel Gorman 2279dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end, 2280dbcb0f19SAdrian Bunk struct mempolicy *pol) 22811da177e4SLinus Torvalds { 2282869833f2SKOSAKI Motohiro struct sp_node *n; 2283869833f2SKOSAKI Motohiro struct mempolicy *newpol; 22841da177e4SLinus Torvalds 2285869833f2SKOSAKI Motohiro n = kmem_cache_alloc(sn_cache, GFP_KERNEL); 22861da177e4SLinus Torvalds if (!n) 22871da177e4SLinus Torvalds return NULL; 2288869833f2SKOSAKI Motohiro 2289869833f2SKOSAKI Motohiro newpol = mpol_dup(pol); 2290869833f2SKOSAKI Motohiro if (IS_ERR(newpol)) { 2291869833f2SKOSAKI Motohiro
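/* mpol_dup() failed (typically -ENOMEM): undo the node allocation */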
kmem_cache_free(sn_cache, n); 2292869833f2SKOSAKI Motohiro return NULL; 2293869833f2SKOSAKI Motohiro } 2294869833f2SKOSAKI Motohiro newpol->flags |= MPOL_F_SHARED; 229542288fe3SMel Gorman sp_node_init(n, start, end, newpol); 2296869833f2SKOSAKI Motohiro 22971da177e4SLinus Torvalds return n; 22981da177e4SLinus Torvalds } 22991da177e4SLinus Torvalds 23001da177e4SLinus Torvalds /* Replace a policy range. */ 23011da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start, 23021da177e4SLinus Torvalds unsigned long end, struct sp_node *new) 23031da177e4SLinus Torvalds { 2304b22d127aSMel Gorman struct sp_node *n; 230542288fe3SMel Gorman struct sp_node *n_new = NULL; 230642288fe3SMel Gorman struct mempolicy *mpol_new = NULL; 2307b22d127aSMel Gorman int ret = 0; 23081da177e4SLinus Torvalds 230942288fe3SMel Gorman restart: 23104a8c7bb5SNathan Zimmer write_lock(&sp->lock); 23111da177e4SLinus Torvalds n = sp_lookup(sp, start, end); 23121da177e4SLinus Torvalds /* Take care of old policies in the same range. */ 23131da177e4SLinus Torvalds while (n && n->start < end) { 23141da177e4SLinus Torvalds struct rb_node *next = rb_next(&n->nd); 23151da177e4SLinus Torvalds if (n->start >= start) { 23161da177e4SLinus Torvalds if (n->end <= end) 23171da177e4SLinus Torvalds sp_delete(sp, n); 23181da177e4SLinus Torvalds else 23191da177e4SLinus Torvalds n->start = end; 23201da177e4SLinus Torvalds } else { 23211da177e4SLinus Torvalds /* Old policy spanning whole new range. */ 23221da177e4SLinus Torvalds if (n->end > end) { 232342288fe3SMel Gorman if (!n_new) 232442288fe3SMel Gorman goto alloc_new; 232542288fe3SMel Gorman 232642288fe3SMel Gorman *mpol_new = *n->policy; 232742288fe3SMel Gorman atomic_set(&mpol_new->refcnt, 1); 23287880639cSKOSAKI Motohiro sp_node_init(n_new, end, n->end, mpol_new); 23291da177e4SLinus Torvalds n->end = start; 23305ca39575SHillf Danton sp_insert(sp, n_new); 233142288fe3SMel Gorman n_new = NULL; 233242288fe3SMel Gorman mpol_new = NULL; 23331da177e4SLinus Torvalds break; 23341da177e4SLinus Torvalds } else 23351da177e4SLinus Torvalds n->end = start; 23361da177e4SLinus Torvalds } 23371da177e4SLinus Torvalds if (!next) 23381da177e4SLinus Torvalds break; 23391da177e4SLinus Torvalds n = rb_entry(next, struct sp_node, nd); 23401da177e4SLinus Torvalds } 23411da177e4SLinus Torvalds if (new) 23421da177e4SLinus Torvalds sp_insert(sp, new); 23434a8c7bb5SNathan Zimmer write_unlock(&sp->lock); 234442288fe3SMel Gorman ret = 0; 234542288fe3SMel Gorman 234642288fe3SMel Gorman err_out: 234742288fe3SMel Gorman if (mpol_new) 234842288fe3SMel Gorman mpol_put(mpol_new); 234942288fe3SMel Gorman if (n_new) 235042288fe3SMel Gorman kmem_cache_free(sn_cache, n_new); 235142288fe3SMel Gorman 2352b22d127aSMel Gorman return ret; 235342288fe3SMel Gorman 235442288fe3SMel Gorman alloc_new: 23554a8c7bb5SNathan Zimmer write_unlock(&sp->lock); 235642288fe3SMel Gorman ret = -ENOMEM; 235742288fe3SMel Gorman n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL); 235842288fe3SMel Gorman if (!n_new) 235942288fe3SMel Gorman goto err_out; 236042288fe3SMel Gorman mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL); 236142288fe3SMel Gorman if (!mpol_new) 236242288fe3SMel Gorman goto err_out; 236342288fe3SMel Gorman goto restart; 23641da177e4SLinus Torvalds } 23651da177e4SLinus Torvalds 236671fe804bSLee Schermerhorn /** 236771fe804bSLee Schermerhorn * mpol_shared_policy_init - initialize shared policy for inode 236871fe804bSLee Schermerhorn * @sp: pointer to inode shared policy 236971fe804bSLee 
Schermerhorn * @mpol: struct mempolicy to install 237071fe804bSLee Schermerhorn * 237171fe804bSLee Schermerhorn * Install non-NULL @mpol in inode's shared policy rb-tree. 237271fe804bSLee Schermerhorn * On entry, the current task has a reference on a non-NULL @mpol. 237371fe804bSLee Schermerhorn * That reference is dropped on exit. 23744bfc4495SKAMEZAWA Hiroyuki * This is called from get_inode(), so we can use GFP_KERNEL. 237571fe804bSLee Schermerhorn */ 237671fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) 23777339ff83SRobin Holt { 237858568d2aSMiao Xie int ret; 237958568d2aSMiao Xie 238071fe804bSLee Schermerhorn sp->root = RB_ROOT; /* empty tree == default mempolicy */ 23814a8c7bb5SNathan Zimmer rwlock_init(&sp->lock); 23827339ff83SRobin Holt 238371fe804bSLee Schermerhorn if (mpol) { 23847339ff83SRobin Holt struct vm_area_struct pvma; 238571fe804bSLee Schermerhorn struct mempolicy *new; 23864bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 23877339ff83SRobin Holt 23884bfc4495SKAMEZAWA Hiroyuki if (!scratch) 23895c0c1654SLee Schermerhorn goto put_mpol; 239071fe804bSLee Schermerhorn /* contextualize the tmpfs mount point mempolicy */ 239171fe804bSLee Schermerhorn new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask); 239215d77835SLee Schermerhorn if (IS_ERR(new)) 23930cae3457SDan Carpenter goto free_scratch; /* no valid nodemask intersection */ 239458568d2aSMiao Xie 239558568d2aSMiao Xie task_lock(current); 23964bfc4495SKAMEZAWA Hiroyuki ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch); 239758568d2aSMiao Xie task_unlock(current); 239815d77835SLee Schermerhorn if (ret) 23995c0c1654SLee Schermerhorn goto put_new; 240071fe804bSLee Schermerhorn 240171fe804bSLee Schermerhorn /* Create pseudo-vma that contains just the policy */ 24027339ff83SRobin Holt memset(&pvma, 0, sizeof(struct vm_area_struct)); 240371fe804bSLee Schermerhorn pvma.vm_end = TASK_SIZE; /* policy covers entire file */ 240471fe804bSLee Schermerhorn mpol_set_shared_policy(sp, &pvma, new); /* adds ref */ 240515d77835SLee Schermerhorn 24065c0c1654SLee Schermerhorn put_new: 240771fe804bSLee Schermerhorn mpol_put(new); /* drop initial ref */ 24080cae3457SDan Carpenter free_scratch: 24094bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 24105c0c1654SLee Schermerhorn put_mpol: 24115c0c1654SLee Schermerhorn mpol_put(mpol); /* drop our incoming ref on sb mpol */ 24127339ff83SRobin Holt } 24137339ff83SRobin Holt } 24147339ff83SRobin Holt 24151da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info, 24161da177e4SLinus Torvalds struct vm_area_struct *vma, struct mempolicy *npol) 24171da177e4SLinus Torvalds { 24181da177e4SLinus Torvalds int err; 24191da177e4SLinus Torvalds struct sp_node *new = NULL; 24201da177e4SLinus Torvalds unsigned long sz = vma_pages(vma); 24211da177e4SLinus Torvalds 2422028fec41SDavid Rientjes pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n", 24231da177e4SLinus Torvalds vma->vm_pgoff, 242445c4745aSLee Schermerhorn sz, npol ? npol->mode : -1, 2425028fec41SDavid Rientjes npol ? npol->flags : -1, 242600ef2d2fSDavid Rientjes npol ?
nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE); 24271da177e4SLinus Torvalds 24281da177e4SLinus Torvalds if (npol) { 24291da177e4SLinus Torvalds new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol); 24301da177e4SLinus Torvalds if (!new) 24311da177e4SLinus Torvalds return -ENOMEM; 24321da177e4SLinus Torvalds } 24331da177e4SLinus Torvalds err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new); 24341da177e4SLinus Torvalds if (err && new) 243563f74ca2SKOSAKI Motohiro sp_free(new); 24361da177e4SLinus Torvalds return err; 24371da177e4SLinus Torvalds } 24381da177e4SLinus Torvalds 24391da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */ 24401da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p) 24411da177e4SLinus Torvalds { 24421da177e4SLinus Torvalds struct sp_node *n; 24431da177e4SLinus Torvalds struct rb_node *next; 24441da177e4SLinus Torvalds 24451da177e4SLinus Torvalds if (!p->root.rb_node) 24461da177e4SLinus Torvalds return; 24474a8c7bb5SNathan Zimmer write_lock(&p->lock); 24481da177e4SLinus Torvalds next = rb_first(&p->root); 24491da177e4SLinus Torvalds while (next) { 24501da177e4SLinus Torvalds n = rb_entry(next, struct sp_node, nd); 24511da177e4SLinus Torvalds next = rb_next(&n->nd); 245263f74ca2SKOSAKI Motohiro sp_delete(p, n); 24531da177e4SLinus Torvalds } 24544a8c7bb5SNathan Zimmer write_unlock(&p->lock); 24551da177e4SLinus Torvalds } 24561da177e4SLinus Torvalds 24571a687c2eSMel Gorman #ifdef CONFIG_NUMA_BALANCING 2458c297663cSMel Gorman static int __initdata numabalancing_override; 24591a687c2eSMel Gorman 24601a687c2eSMel Gorman static void __init check_numabalancing_enable(void) 24611a687c2eSMel Gorman { 24621a687c2eSMel Gorman bool numabalancing_default = false; 24631a687c2eSMel Gorman 24641a687c2eSMel Gorman if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED)) 24651a687c2eSMel Gorman numabalancing_default = true; 24661a687c2eSMel Gorman 2467c297663cSMel Gorman /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */ 2468c297663cSMel Gorman if (numabalancing_override) 2469c297663cSMel Gorman set_numabalancing_state(numabalancing_override == 1); 2470c297663cSMel Gorman 2471b0dc2b9bSMel Gorman if (num_online_nodes() > 1 && !numabalancing_override) { 2472756a025fSJoe Perches pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n", 2473c297663cSMel Gorman numabalancing_default ? 
"Enabling" : "Disabling"); 24741a687c2eSMel Gorman set_numabalancing_state(numabalancing_default); 24751a687c2eSMel Gorman } 24761a687c2eSMel Gorman } 24771a687c2eSMel Gorman 24781a687c2eSMel Gorman static int __init setup_numabalancing(char *str) 24791a687c2eSMel Gorman { 24801a687c2eSMel Gorman int ret = 0; 24811a687c2eSMel Gorman if (!str) 24821a687c2eSMel Gorman goto out; 24831a687c2eSMel Gorman 24841a687c2eSMel Gorman if (!strcmp(str, "enable")) { 2485c297663cSMel Gorman numabalancing_override = 1; 24861a687c2eSMel Gorman ret = 1; 24871a687c2eSMel Gorman } else if (!strcmp(str, "disable")) { 2488c297663cSMel Gorman numabalancing_override = -1; 24891a687c2eSMel Gorman ret = 1; 24901a687c2eSMel Gorman } 24911a687c2eSMel Gorman out: 24921a687c2eSMel Gorman if (!ret) 24934a404beaSAndrew Morton pr_warn("Unable to parse numa_balancing=\n"); 24941a687c2eSMel Gorman 24951a687c2eSMel Gorman return ret; 24961a687c2eSMel Gorman } 24971a687c2eSMel Gorman __setup("numa_balancing=", setup_numabalancing); 24981a687c2eSMel Gorman #else 24991a687c2eSMel Gorman static inline void __init check_numabalancing_enable(void) 25001a687c2eSMel Gorman { 25011a687c2eSMel Gorman } 25021a687c2eSMel Gorman #endif /* CONFIG_NUMA_BALANCING */ 25031a687c2eSMel Gorman 25041da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */ 25051da177e4SLinus Torvalds void __init numa_policy_init(void) 25061da177e4SLinus Torvalds { 2507b71636e2SPaul Mundt nodemask_t interleave_nodes; 2508b71636e2SPaul Mundt unsigned long largest = 0; 2509b71636e2SPaul Mundt int nid, prefer = 0; 2510b71636e2SPaul Mundt 25111da177e4SLinus Torvalds policy_cache = kmem_cache_create("numa_policy", 25121da177e4SLinus Torvalds sizeof(struct mempolicy), 251320c2df83SPaul Mundt 0, SLAB_PANIC, NULL); 25141da177e4SLinus Torvalds 25151da177e4SLinus Torvalds sn_cache = kmem_cache_create("shared_policy_node", 25161da177e4SLinus Torvalds sizeof(struct sp_node), 251720c2df83SPaul Mundt 0, SLAB_PANIC, NULL); 25181da177e4SLinus Torvalds 25195606e387SMel Gorman for_each_node(nid) { 25205606e387SMel Gorman preferred_node_policy[nid] = (struct mempolicy) { 25215606e387SMel Gorman .refcnt = ATOMIC_INIT(1), 25225606e387SMel Gorman .mode = MPOL_PREFERRED, 25235606e387SMel Gorman .flags = MPOL_F_MOF | MPOL_F_MORON, 25245606e387SMel Gorman .v = { .preferred_node = nid, }, 25255606e387SMel Gorman }; 25265606e387SMel Gorman } 25275606e387SMel Gorman 2528b71636e2SPaul Mundt /* 2529b71636e2SPaul Mundt * Set interleaving policy for system init. Interleaving is only 2530b71636e2SPaul Mundt * enabled across suitably sized nodes (default is >= 16MB), or 2531b71636e2SPaul Mundt * fall back to the largest node if they're all smaller. 2532b71636e2SPaul Mundt */ 2533b71636e2SPaul Mundt nodes_clear(interleave_nodes); 253401f13bd6SLai Jiangshan for_each_node_state(nid, N_MEMORY) { 2535b71636e2SPaul Mundt unsigned long total_pages = node_present_pages(nid); 25361da177e4SLinus Torvalds 2537b71636e2SPaul Mundt /* Preserve the largest node */ 2538b71636e2SPaul Mundt if (largest < total_pages) { 2539b71636e2SPaul Mundt largest = total_pages; 2540b71636e2SPaul Mundt prefer = nid; 2541b71636e2SPaul Mundt } 2542b71636e2SPaul Mundt 2543b71636e2SPaul Mundt /* Interleave this node? 
*/ 2544b71636e2SPaul Mundt if ((total_pages << PAGE_SHIFT) >= (16 << 20)) 2545b71636e2SPaul Mundt node_set(nid, interleave_nodes); 2546b71636e2SPaul Mundt } 2547b71636e2SPaul Mundt 2548b71636e2SPaul Mundt /* All too small, use the largest */ 2549b71636e2SPaul Mundt if (unlikely(nodes_empty(interleave_nodes))) 2550b71636e2SPaul Mundt node_set(prefer, interleave_nodes); 2551b71636e2SPaul Mundt 2552028fec41SDavid Rientjes if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes)) 2553b1de0d13SMitchel Humpherys pr_err("%s: interleaving failed\n", __func__); 25541a687c2eSMel Gorman 25551a687c2eSMel Gorman check_numabalancing_enable(); 25561da177e4SLinus Torvalds } 25571da177e4SLinus Torvalds 25588bccd85fSChristoph Lameter /* Reset policy of current process to default */ 25591da177e4SLinus Torvalds void numa_default_policy(void) 25601da177e4SLinus Torvalds { 2561028fec41SDavid Rientjes do_set_mempolicy(MPOL_DEFAULT, 0, NULL); 25621da177e4SLinus Torvalds } 256368860ec1SPaul Jackson 25644225399aSPaul Jackson /* 2565095f1fc4SLee Schermerhorn * Parse and format mempolicy from/to strings 2566095f1fc4SLee Schermerhorn */ 2567095f1fc4SLee Schermerhorn 2568095f1fc4SLee Schermerhorn /* 2569f2a07f40SHugh Dickins * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag. 25701a75a6c8SChristoph Lameter */ 2571345ace9cSLee Schermerhorn static const char * const policy_modes[] = 2572345ace9cSLee Schermerhorn { 2573345ace9cSLee Schermerhorn [MPOL_DEFAULT] = "default", 2574345ace9cSLee Schermerhorn [MPOL_PREFERRED] = "prefer", 2575345ace9cSLee Schermerhorn [MPOL_BIND] = "bind", 2576345ace9cSLee Schermerhorn [MPOL_INTERLEAVE] = "interleave", 2577d3a71033SLee Schermerhorn [MPOL_LOCAL] = "local", 2578345ace9cSLee Schermerhorn }; 25791a75a6c8SChristoph Lameter 2580095f1fc4SLee Schermerhorn 2581095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS 2582095f1fc4SLee Schermerhorn /** 2583f2a07f40SHugh Dickins * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option. 2584095f1fc4SLee Schermerhorn * @str: string containing mempolicy to parse 258571fe804bSLee Schermerhorn * @mpol: pointer to struct mempolicy pointer, returned on success. 
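 *	(for illustration, strings such as "bind:0-3",
 *	"interleave=static:0,2" or "local" match the format below)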
2586095f1fc4SLee Schermerhorn * 2587095f1fc4SLee Schermerhorn * Format of input: 2588095f1fc4SLee Schermerhorn * <mode>[=<flags>][:<nodelist>] 2589095f1fc4SLee Schermerhorn * 259071fe804bSLee Schermerhorn * On success, returns 0, else 1 2591095f1fc4SLee Schermerhorn */ 2592a7a88b23SHugh Dickins int mpol_parse_str(char *str, struct mempolicy **mpol) 2593095f1fc4SLee Schermerhorn { 259471fe804bSLee Schermerhorn struct mempolicy *new = NULL; 2595b4652e84SLee Schermerhorn unsigned short mode; 2596f2a07f40SHugh Dickins unsigned short mode_flags; 259771fe804bSLee Schermerhorn nodemask_t nodes; 2598095f1fc4SLee Schermerhorn char *nodelist = strchr(str, ':'); 2599095f1fc4SLee Schermerhorn char *flags = strchr(str, '='); 2600095f1fc4SLee Schermerhorn int err = 1; 2601095f1fc4SLee Schermerhorn 2602095f1fc4SLee Schermerhorn if (nodelist) { 2603095f1fc4SLee Schermerhorn /* NUL-terminate mode or flags string */ 2604095f1fc4SLee Schermerhorn *nodelist++ = '\0'; 260571fe804bSLee Schermerhorn if (nodelist_parse(nodelist, nodes)) 2606095f1fc4SLee Schermerhorn goto out; 260701f13bd6SLai Jiangshan if (!nodes_subset(nodes, node_states[N_MEMORY])) 2608095f1fc4SLee Schermerhorn goto out; 260971fe804bSLee Schermerhorn } else 261071fe804bSLee Schermerhorn nodes_clear(nodes); 261171fe804bSLee Schermerhorn 2612095f1fc4SLee Schermerhorn if (flags) 2613095f1fc4SLee Schermerhorn *flags++ = '\0'; /* terminate mode string */ 2614095f1fc4SLee Schermerhorn 2615479e2802SPeter Zijlstra for (mode = 0; mode < MPOL_MAX; mode++) { 2616345ace9cSLee Schermerhorn if (!strcmp(str, policy_modes[mode])) { 2617095f1fc4SLee Schermerhorn break; 2618095f1fc4SLee Schermerhorn } 2619095f1fc4SLee Schermerhorn } 2620a720094dSMel Gorman if (mode >= MPOL_MAX) 2621095f1fc4SLee Schermerhorn goto out; 2622095f1fc4SLee Schermerhorn 262371fe804bSLee Schermerhorn switch (mode) { 2624095f1fc4SLee Schermerhorn case MPOL_PREFERRED: 262571fe804bSLee Schermerhorn /* 262671fe804bSLee Schermerhorn * Insist on a nodelist of one node only 262771fe804bSLee Schermerhorn */ 2628095f1fc4SLee Schermerhorn if (nodelist) { 2629095f1fc4SLee Schermerhorn char *rest = nodelist; 2630095f1fc4SLee Schermerhorn while (isdigit(*rest)) 2631095f1fc4SLee Schermerhorn rest++; 2632926f2ae0SKOSAKI Motohiro if (*rest) 2633926f2ae0SKOSAKI Motohiro goto out; 2634095f1fc4SLee Schermerhorn } 2635095f1fc4SLee Schermerhorn break; 2636095f1fc4SLee Schermerhorn case MPOL_INTERLEAVE: 2637095f1fc4SLee Schermerhorn /* 2638095f1fc4SLee Schermerhorn * Default to online nodes with memory if no nodelist 2639095f1fc4SLee Schermerhorn */ 2640095f1fc4SLee Schermerhorn if (!nodelist) 264101f13bd6SLai Jiangshan nodes = node_states[N_MEMORY]; 26423f226aa1SLee Schermerhorn break; 264371fe804bSLee Schermerhorn case MPOL_LOCAL: 26443f226aa1SLee Schermerhorn /* 264571fe804bSLee Schermerhorn * Don't allow a nodelist; mpol_new() checks flags 26463f226aa1SLee Schermerhorn */ 264771fe804bSLee Schermerhorn if (nodelist) 26483f226aa1SLee Schermerhorn goto out; 264971fe804bSLee Schermerhorn mode = MPOL_PREFERRED; 26503f226aa1SLee Schermerhorn break; 2651413b43deSRavikiran G Thirumalai case MPOL_DEFAULT: 2652413b43deSRavikiran G Thirumalai /* 2653413b43deSRavikiran G Thirumalai * Insist on an empty nodelist 2654413b43deSRavikiran G Thirumalai */ 2655413b43deSRavikiran G Thirumalai if (!nodelist) 2656413b43deSRavikiran G Thirumalai err = 0; 2657413b43deSRavikiran G Thirumalai goto out; 2658d69b2e63SKOSAKI Motohiro case MPOL_BIND: 265971fe804bSLee Schermerhorn /* 2660d69b2e63SKOSAKI Motohiro * Insist on a nodelist
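 * (e.g. a bare "bind" with no nodelist is rejected here,
 * while "bind:0-3" is accepted)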
266171fe804bSLee Schermerhorn */ 2662d69b2e63SKOSAKI Motohiro if (!nodelist) 2663d69b2e63SKOSAKI Motohiro goto out; 2664095f1fc4SLee Schermerhorn } 2665095f1fc4SLee Schermerhorn 266671fe804bSLee Schermerhorn mode_flags = 0; 2667095f1fc4SLee Schermerhorn if (flags) { 2668095f1fc4SLee Schermerhorn /* 2669095f1fc4SLee Schermerhorn * Currently, we only support two mutually exclusive 2670095f1fc4SLee Schermerhorn * mode flags. 2671095f1fc4SLee Schermerhorn */ 2672095f1fc4SLee Schermerhorn if (!strcmp(flags, "static")) 267371fe804bSLee Schermerhorn mode_flags |= MPOL_F_STATIC_NODES; 2674095f1fc4SLee Schermerhorn else if (!strcmp(flags, "relative")) 267571fe804bSLee Schermerhorn mode_flags |= MPOL_F_RELATIVE_NODES; 2676095f1fc4SLee Schermerhorn else 2677926f2ae0SKOSAKI Motohiro goto out; 2678095f1fc4SLee Schermerhorn } 267971fe804bSLee Schermerhorn 268071fe804bSLee Schermerhorn new = mpol_new(mode, mode_flags, &nodes); 268171fe804bSLee Schermerhorn if (IS_ERR(new)) 2682926f2ae0SKOSAKI Motohiro goto out; 2683926f2ae0SKOSAKI Motohiro 2684f2a07f40SHugh Dickins /* 2685f2a07f40SHugh Dickins * Save nodes for mpol_to_str() to show the tmpfs mount options 2686f2a07f40SHugh Dickins * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo. 2687f2a07f40SHugh Dickins */ 2688f2a07f40SHugh Dickins if (mode != MPOL_PREFERRED) 2689f2a07f40SHugh Dickins new->v.nodes = nodes; 2690f2a07f40SHugh Dickins else if (nodelist) 2691f2a07f40SHugh Dickins new->v.preferred_node = first_node(nodes); 2692f2a07f40SHugh Dickins else 2693f2a07f40SHugh Dickins new->flags |= MPOL_F_LOCAL; 2694f2a07f40SHugh Dickins 2695f2a07f40SHugh Dickins /* 2696f2a07f40SHugh Dickins * Save nodes for contextualization: this will be used to "clone" 2697f2a07f40SHugh Dickins * the mempolicy in a specific context [cpuset] at a later time. 2698f2a07f40SHugh Dickins */ 2699e17f74afSLee Schermerhorn new->w.user_nodemask = nodes; 2700f2a07f40SHugh Dickins 2701926f2ae0SKOSAKI Motohiro err = 0; 270271fe804bSLee Schermerhorn 2703095f1fc4SLee Schermerhorn out: 2704095f1fc4SLee Schermerhorn /* Restore string for error message */ 2705095f1fc4SLee Schermerhorn if (nodelist) 2706095f1fc4SLee Schermerhorn *--nodelist = ':'; 2707095f1fc4SLee Schermerhorn if (flags) 2708095f1fc4SLee Schermerhorn *--flags = '='; 270971fe804bSLee Schermerhorn if (!err) 271071fe804bSLee Schermerhorn *mpol = new; 2711095f1fc4SLee Schermerhorn return err; 2712095f1fc4SLee Schermerhorn } 2713095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */ 2714095f1fc4SLee Schermerhorn 271571fe804bSLee Schermerhorn /** 271671fe804bSLee Schermerhorn * mpol_to_str - format a mempolicy structure for printing 271771fe804bSLee Schermerhorn * @buffer: to contain formatted mempolicy string 271871fe804bSLee Schermerhorn * @maxlen: length of @buffer 271971fe804bSLee Schermerhorn * @pol: pointer to mempolicy to be formatted 272071fe804bSLee Schermerhorn * 2721948927eeSDavid Rientjes * Convert @pol into a string. If @buffer is too short, truncate the string. 2722948927eeSDavid Rientjes * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the 2723948927eeSDavid Rientjes * longest flag, "relative", and to display at least a few node ids. 
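 *
 * Illustrative outputs: "default", "prefer:1", "bind=static:0-3",
 * "interleave:0-3", "local".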
27241a75a6c8SChristoph Lameter */ 2725948927eeSDavid Rientjes void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol) 27261a75a6c8SChristoph Lameter { 27271a75a6c8SChristoph Lameter char *p = buffer; 2728948927eeSDavid Rientjes nodemask_t nodes = NODE_MASK_NONE; 2729948927eeSDavid Rientjes unsigned short mode = MPOL_DEFAULT; 2730948927eeSDavid Rientjes unsigned short flags = 0; 27311a75a6c8SChristoph Lameter 27328790c71aSDavid Rientjes if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) { 2733bea904d5SLee Schermerhorn mode = pol->mode; 2734948927eeSDavid Rientjes flags = pol->flags; 2735948927eeSDavid Rientjes } 2736bea904d5SLee Schermerhorn 27371a75a6c8SChristoph Lameter switch (mode) { 27381a75a6c8SChristoph Lameter case MPOL_DEFAULT: 27391a75a6c8SChristoph Lameter break; 27401a75a6c8SChristoph Lameter case MPOL_PREFERRED: 2741fc36b8d3SLee Schermerhorn if (flags & MPOL_F_LOCAL) 2742f2a07f40SHugh Dickins mode = MPOL_LOCAL; 274353f2556bSLee Schermerhorn else 2744fc36b8d3SLee Schermerhorn node_set(pol->v.preferred_node, nodes); 27451a75a6c8SChristoph Lameter break; 27461a75a6c8SChristoph Lameter case MPOL_BIND: 27471a75a6c8SChristoph Lameter case MPOL_INTERLEAVE: 27481a75a6c8SChristoph Lameter nodes = pol->v.nodes; 27491a75a6c8SChristoph Lameter break; 27501a75a6c8SChristoph Lameter default: 2751948927eeSDavid Rientjes WARN_ON_ONCE(1); 2752948927eeSDavid Rientjes snprintf(p, maxlen, "unknown"); 2753948927eeSDavid Rientjes return; 27541a75a6c8SChristoph Lameter } 27551a75a6c8SChristoph Lameter 2756b7a9f420SDavid Rientjes p += snprintf(p, maxlen, "%s", policy_modes[mode]); 27571a75a6c8SChristoph Lameter 2758fc36b8d3SLee Schermerhorn if (flags & MPOL_MODE_FLAGS) { 2759948927eeSDavid Rientjes p += snprintf(p, buffer + maxlen - p, "="); 2760f5b087b5SDavid Rientjes 27612291990aSLee Schermerhorn /* 27622291990aSLee Schermerhorn * Currently, the only defined flags are mutually exclusive 27632291990aSLee Schermerhorn */ 2764f5b087b5SDavid Rientjes if (flags & MPOL_F_STATIC_NODES) 27652291990aSLee Schermerhorn p += snprintf(p, buffer + maxlen - p, "static"); 27662291990aSLee Schermerhorn else if (flags & MPOL_F_RELATIVE_NODES) 27672291990aSLee Schermerhorn p += snprintf(p, buffer + maxlen - p, "relative"); 2768f5b087b5SDavid Rientjes } 2769f5b087b5SDavid Rientjes 27709e763e0fSTejun Heo if (!nodes_empty(nodes)) 27719e763e0fSTejun Heo p += scnprintf(p, buffer + maxlen - p, ":%*pbl", 27729e763e0fSTejun Heo nodemask_pr_args(&nodes)); 27731a75a6c8SChristoph Lameter } 2774
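
/*
 * Usage sketch (illustrative, not part of the original file): format
 * a task's policy roughly the way the /proc/<pid>/numa_maps code
 * does.  The function name, the seq_file argument and the 64-byte
 * buffer are assumptions of this example; mempolicy reference
 * counting and task locking are deliberately elided.
 */
static void example_show_task_policy(struct seq_file *m,
				     struct task_struct *task)
{
	char buf[64];	/* >= 32 recommended above; 64 leaves nodelist room */

	/* mpol_to_str() copes with a NULL policy by printing "default" */
	mpol_to_str(buf, sizeof(buf), task->mempolicy);
	seq_printf(m, "policy %s\n", buf);
}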