/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave	Allocate memory interleaved over a set of nodes,
 *		with normal fallback if it fails.
 *		For VMA based allocations this interleaves based on the
 *		offset into the backing object or offset into the mapping
 *		for anonymous memory. For process policy a process counter
 *		is used.
 *
 * bind		Only allocate memory on a specific set of nodes,
 *		no fallback.
 *		FIXME: memory is allocated starting with the first node
 *		to the last. It would be better if bind would truly restrict
 *		the allocation to memory nodes instead
 *
 * preferred	Try a specific node first before normal fallback.
 *		As a special case node -1 here means do the allocation
 *		on the local CPU. This is normally identical to default,
 *		but useful to set in a VMA when you have a non default
 *		process policy.
 *
 * default	Allocate on the local node first, or when on a VMA
 *		use the process policy. This is what Linux always did
 *		in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem, kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
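/*
 * Illustrative userspace view of the policies above (a sketch, not part
 * of the kernel build; assumes the <numaif.h> syscall wrappers):
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 1);
 *
 *	// interleave all future allocations of this process
 *	// across nodes 0 and 1
 *	set_mempolicy(MPOL_INTERLEAVE, &mask, sizeof(mask) * 8);
 */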
/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
   could replace all the switch()es with a mempolicy_ops structure.
*/

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/rmap.h>
#include <linux/security.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
#define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2)		/* Gather statistics */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.policy = MPOL_DEFAULT,
};

/* Do sanity checking on a policy */
static int mpol_check_policy(int mode, nodemask_t *nodes)
{
	int empty = nodes_empty(*nodes);

	switch (mode) {
	case MPOL_DEFAULT:
		if (!empty)
			return -EINVAL;
		break;
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
		/* Preferred will only use the first bit, but allow
		   more for now. */
		if (empty)
			return -EINVAL;
		break;
	}
	return nodes_subset(*nodes, node_online_map) ? 0 : -EINVAL;
}

/* Generate a custom zonelist for the BIND policy. */
static struct zonelist *bind_zonelist(nodemask_t *nodes)
{
	struct zonelist *zl;
	int num, max, nd;
	enum zone_type k;

	max = 1 + MAX_NR_ZONES * nodes_weight(*nodes);
	max++;			/* space for zlcache_ptr (see mmzone.h) */
	zl = kmalloc(sizeof(struct zone *) * max, GFP_KERNEL);
	if (!zl)
		return ERR_PTR(-ENOMEM);
	zl->zlcache_ptr = NULL;
	num = 0;
	/* First put in the highest zones from all nodes, then all the next
	   lower zones etc. Avoid empty zones because the memory allocator
	   doesn't like them. If you implement node hot removal you
	   have to fix that. */
	k = MAX_NR_ZONES - 1;
	while (1) {
		for_each_node_mask(nd, *nodes) {
			struct zone *z = &NODE_DATA(nd)->node_zones[k];
			if (z->present_pages > 0)
				zl->zones[num++] = z;
		}
		if (k == 0)
			break;
		k--;
	}
	if (num == 0) {
		kfree(zl);
		return ERR_PTR(-EINVAL);
	}
	zl->zones[num] = NULL;
	return zl;
}

/* Create a new policy */
static struct mempolicy *mpol_new(int mode, nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d nodes[0] %lx\n",
		 mode, nodes ? nodes_addr(*nodes)[0] : -1);
	if (mode == MPOL_DEFAULT)
		return NULL;
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	switch (mode) {
	case MPOL_INTERLEAVE:
		policy->v.nodes = *nodes;
		if (nodes_weight(*nodes) == 0) {
			kmem_cache_free(policy_cache, policy);
			return ERR_PTR(-EINVAL);
		}
		break;
	case MPOL_PREFERRED:
		policy->v.preferred_node = first_node(*nodes);
		if (policy->v.preferred_node >= MAX_NUMNODES)
			policy->v.preferred_node = -1;
		break;
	case MPOL_BIND:
		policy->v.zonelist = bind_zonelist(nodes);
		if (IS_ERR(policy->v.zonelist)) {
			void *error_code = policy->v.zonelist;
			kmem_cache_free(policy_cache, policy);
			return error_code;
		}
		break;
	}
	policy->policy = mode;
	policy->cpuset_mems_allowed = cpuset_mems_allowed(current);
	return policy;
}
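/*
 * Note on the MPOL_PREFERRED case above: an empty nodemask is accepted
 * (mpol_check_policy() only rejects empty masks for BIND/INTERLEAVE),
 * and first_node() on an empty mask returns MAX_NUMNODES, so
 * preferred_node ends up as -1, which the allocation paths treat as
 * "allocate on the local node".
 */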
static void gather_stats(struct page *, void *, int pte_dirty);
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

/* Scan through pages checking if pages follow certain conditions. */
static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pte_t *orig_pte;
	pte_t *pte;
	spinlock_t *ptl;

	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		struct page *page;
		int nid;

		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * The check for PageReserved here is important to avoid
		 * handling zero pages and other pages that may have been
		 * marked special by the system.
		 *
		 * If the PageReserved would not be checked here then e.g.
		 * the location of the zero page could have an influence
		 * on MPOL_MF_STRICT, zero pages would be counted for
		 * the per node stats, and there would be useless attempts
		 * to put zero pages on the migration list.
		 */
		if (PageReserved(page))
			continue;
		nid = page_to_nid(page);
		if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
			continue;

		if (flags & MPOL_MF_STATS)
			gather_stats(page, private, pte_dirty(*pte));
		else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
			migrate_page_add(page, private, flags);
		else
			break;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return addr != end;
}

static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		if (check_pte_range(vma, pmd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		if (check_pmd_range(vma, pud, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pud++, addr = next, addr != end);
	return 0;
}
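/*
 * Top level of the page table walk: pgd -> pud -> pmd -> pte. Each level
 * iterates over its entries covering [addr, end), skips empty entries,
 * and descends one level; the actual per-page checks happen in
 * check_pte_range() above.
 */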
static inline int check_pgd_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		if (check_pud_range(vma, pgd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pgd++, addr = next, addr != end);
	return 0;
}

/*
 * Check if all pages in a range are on a set of nodes.
 * If pagelist != NULL then isolate pages from the LRU and
 * put them on the pagelist.
 */
static struct vm_area_struct *
check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		const nodemask_t *nodes, unsigned long flags, void *private)
{
	int err;
	struct vm_area_struct *first, *vma, *prev;

	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
		err = migrate_prep();
		if (err)
			return ERR_PTR(err);
	}

	first = find_vma(mm, start);
	if (!first)
		return ERR_PTR(-EFAULT);
	prev = NULL;
	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
			if (!vma->vm_next && vma->vm_end < end)
				return ERR_PTR(-EFAULT);
			if (prev && prev->vm_end < vma->vm_start)
				return ERR_PTR(-EFAULT);
		}
		if (!is_vm_hugetlb_page(vma) &&
		    ((flags & MPOL_MF_STRICT) ||
		     ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
		      vma_migratable(vma)))) {
			unsigned long endvma = vma->vm_end;

			if (endvma > end)
				endvma = end;
			if (vma->vm_start > start)
				start = vma->vm_start;
			err = check_pgd_range(vma, start, endvma, nodes,
					      flags, private);
			if (err) {
				first = ERR_PTR(err);
				break;
			}
		}
		prev = vma;
	}
	return first;
}
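/*
 * Note that do_mbind() below calls check_range() with MPOL_MF_INVERT set:
 * combined with the node_isset() test in check_pte_range() this selects
 * exactly the pages that are NOT on one of the requested nodes, i.e. the
 * misplaced pages that may need migrating.
 */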
/* Apply policy to a single VMA */
static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
{
	int err = 0;
	struct mempolicy *old = vma->vm_policy;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	if (vma->vm_ops && vma->vm_ops->set_policy)
		err = vma->vm_ops->set_policy(vma, new);
	if (!err) {
		mpol_get(new);
		vma->vm_policy = new;
		mpol_free(old);
	}
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, struct mempolicy *new)
{
	struct vm_area_struct *next;
	int err;

	err = 0;
	for (; vma && vma->vm_start < end; vma = next) {
		next = vma->vm_next;
		if (vma->vm_start < start)
			err = split_vma(vma->vm_mm, vma, start, 1);
		if (!err && vma->vm_end > end)
			err = split_vma(vma->vm_mm, vma, end, 0);
		if (!err)
			err = policy_vma(vma, new);
		if (err)
			break;
	}
	return err;
}

static int contextualize_policy(int mode, nodemask_t *nodes)
{
	if (!nodes)
		return 0;

	cpuset_update_task_memory_state();
	if (!cpuset_nodes_subset_current_mems_allowed(*nodes))
		return -EINVAL;
	return mpol_check_policy(mode, nodes);
}

/*
 * Update task->flags PF_MEMPOLICY bit: set iff non-default
 * mempolicy. Allows more rapid checking of this (combined perhaps
 * with other PF_* flag bits) on memory allocation hot code paths.
 *
 * If called from outside this file, the task 'p' should -only- be
 * a newly forked child not yet visible on the task list, because
 * manipulating the task flags of a visible task is not safe.
 *
 * The above limitation is why this routine has the funny name
 * mpol_fix_fork_child_flag().
 *
 * It is also safe to call this with a task pointer of current,
 * which the static wrapper mpol_set_task_struct_flag() does,
 * for use within this file.
 */
void mpol_fix_fork_child_flag(struct task_struct *p)
{
	if (p->mempolicy)
		p->flags |= PF_MEMPOLICY;
	else
		p->flags &= ~PF_MEMPOLICY;
}

static void mpol_set_task_struct_flag(void)
{
	mpol_fix_fork_child_flag(current);
}

/* Set the process memory policy */
long do_set_mempolicy(int mode, nodemask_t *nodes)
{
	struct mempolicy *new;

	if (contextualize_policy(mode, nodes))
		return -EINVAL;
	new = mpol_new(mode, nodes);
	if (IS_ERR(new))
		return PTR_ERR(new);
	mpol_free(current->mempolicy);
	current->mempolicy = new;
	mpol_set_task_struct_flag();
	if (new && new->policy == MPOL_INTERLEAVE)
		current->il_next = first_node(new->v.nodes);
	return 0;
}

/* Fill a zone bitmap for a policy */
static void get_zonemask(struct mempolicy *p, nodemask_t *nodes)
{
	int i;

	nodes_clear(*nodes);
	switch (p->policy) {
	case MPOL_BIND:
		for (i = 0; p->v.zonelist->zones[i]; i++)
			node_set(zone_to_nid(p->v.zonelist->zones[i]),
				 *nodes);
		break;
	case MPOL_DEFAULT:
		break;
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		/* or use current node instead of online map? */
		if (p->v.preferred_node < 0)
			*nodes = node_online_map;
		else
			node_set(p->v.preferred_node, *nodes);
		break;
	default:
		BUG();
	}
}
static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p;
	int err;

	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	return err;
}

/* Retrieve NUMA policy */
long do_get_mempolicy(int *policy, nodemask_t *nmask,
		      unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	cpuset_update_task_memory_state();
	if (flags & ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR))
		return -EINVAL;
	if (flags & MPOL_F_ADDR) {
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
			   pol->policy == MPOL_INTERLEAVE) {
			*policy = current->il_next;
		} else {
			err = -EINVAL;
			goto out;
		}
	} else
		*policy = pol->policy;

	if (vma) {
		up_read(&current->mm->mmap_sem);
		vma = NULL;
	}

	err = 0;
	if (nmask)
		get_zonemask(pol, nmask);

 out:
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}
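/*
 * Illustrative userspace counterpart (a sketch; assumes <numaif.h>):
 *
 *	int node;
 *
 *	// ask which node currently backs the page at addr
 *	get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR);
 */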
#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
			     unsigned long flags)
{
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1)
		isolate_lru_page(page, pagelist);
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
	return alloc_pages_node(node, GFP_HIGHUSER_MOVABLE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
int migrate_to_node(struct mm_struct *mm, int source, int dest, int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;

	nodes_clear(nmask);
	node_set(source, nmask);

	check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nmask,
		    flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist))
		err = migrate_pages(&pagelist, new_node_page, dest);

	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	LIST_HEAD(pagelist);
	int busy = 0;
	int err = 0;
	nodemask_t tmp;

	down_read(&mm->mmap_sem);

	err = migrate_vmas(mm, from_nodes, to_nodes, flags);
	if (err)
		goto out;

	/*
	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
	 * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
	 * bit in 'tmp', and return that <source, dest> pair for migration.
	 * The pair of nodemasks 'to' and 'from' define the map.
	 *
	 * If no pair of bits is found that way, fallback to picking some
	 * pair of 'source' and 'dest' bits that are not the same. If the
	 * 'source' and 'dest' bits are the same, this represents a node
	 * that will be migrating to itself, so no pages need move.
	 *
	 * If no bits are left in 'tmp', or if all remaining bits left
	 * in 'tmp' correspond to the same bit in 'to', return false
	 * (nothing left to migrate).
	 *
	 * This lets us pick a pair of nodes to migrate between, such that
	 * if possible the dest node is not already occupied by some other
	 * source node, minimizing the risk of overloading the memory on a
	 * node that would happen if we migrated incoming memory to a node
	 * before migrating outgoing memory source that same node.
	 *
	 * A single scan of tmp is sufficient. As we go, we remember the
	 * most recent <s, d> pair that moved (s != d). If we find a pair
	 * that not only moved, but what's better, moved to an empty slot
	 * (d is not set in tmp), then we break out then, with that pair.
	 * Otherwise when we finish scanning tmp, we at least have the
	 * most recent <s, d> pair that moved. If we get all the way through
	 * the scan of tmp without finding any node that moved, much less
	 * moved to an empty node, then there is nothing left worth migrating.
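	 *
	 * A worked example: from = {0,1}, to = {1,2}. On the first pass
	 * tmp = {0,1}; s = 0 maps to d = 1, but node 1 is still a pending
	 * source, so keep scanning; s = 1 maps to d = 2, which is not in
	 * tmp, so migrate 1 -> 2 first. On the next pass tmp = {0} and
	 * 0 -> 1 proceeds, now that node 1 has been drained rather than
	 * filled further.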
	 */

	tmp = *from_nodes;
	while (!nodes_empty(tmp)) {
		int s, d;
		int source = -1;
		int dest = 0;

		for_each_node_mask(s, tmp) {
			d = node_remap(s, *from_nodes, *to_nodes);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
				break;
		}
		if (source == -1)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;
		if (err < 0)
			break;
	}
out:
	up_read(&mm->mmap_sem);
	if (err < 0)
		return err;
	return busy;
}

static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	struct vm_area_struct *vma = (struct vm_area_struct *)private;

	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
			      page_address_in_vma(page, vma));
}
#else

static void migrate_page_add(struct page *page, struct list_head *pagelist,
			     unsigned long flags)
{
}

int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	return -ENOSYS;
}

static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	return NULL;
}
#endif
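/*
 * Illustrative userspace call into do_mbind() below (a sketch; assumes
 * <numaif.h> and <sys/mman.h>):
 *
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	unsigned long mask = 1UL << 1;
 *
 *	// bind this range to node 1 and fail hard on misplaced pages
 *	mbind(p, len, MPOL_BIND, &mask, sizeof(mask) * 8, MPOL_MF_STRICT);
 */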
long do_mbind(unsigned long start, unsigned long len,
	      unsigned long mode, nodemask_t *nmask, unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	struct mempolicy *new;
	unsigned long end;
	int err;
	LIST_HEAD(pagelist);

	if ((flags & ~(unsigned long)(MPOL_MF_STRICT |
				      MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
	    || mode > MPOL_MAX)
		return -EINVAL;
	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	if (start & ~PAGE_MASK)
		return -EINVAL;

	if (mode == MPOL_DEFAULT)
		flags &= ~MPOL_MF_STRICT;

	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
	end = start + len;

	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;

	if (mpol_check_policy(mode, nmask))
		return -EINVAL;

	new = mpol_new(mode, nmask);
	if (IS_ERR(new))
		return PTR_ERR(new);

	/*
	 * If we are using the default policy then operation
	 * on discontinuous address spaces is okay after all
	 */
	if (!new)
		flags |= MPOL_MF_DISCONTIG_OK;

	pr_debug("mbind %lx-%lx mode:%ld nodes:%lx\n", start, start + len,
		 mode, nmask ? nodes_addr(*nmask)[0] : -1);

	down_write(&mm->mmap_sem);
	vma = check_range(mm, start, end, nmask,
			  flags | MPOL_MF_INVERT, &pagelist);

	err = PTR_ERR(vma);
	if (!IS_ERR(vma)) {
		int nr_failed = 0;

		err = mbind_range(vma, start, end, new);

		if (!list_empty(&pagelist))
			nr_failed = migrate_pages(&pagelist, new_vma_page,
						  (unsigned long)vma);

		if (!err && nr_failed && (flags & MPOL_MF_STRICT))
			err = -EIO;
	}

	up_write(&mm->mmap_sem);
	mpol_free(new);
	return err;
}

/*
 * User space interface with variable sized bitmaps for nodelists.
 */

/* Copy a node mask from user space. */
static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
		     unsigned long maxnode)
{
	unsigned long k;
	unsigned long nlongs;
	unsigned long endmask;

	--maxnode;
	nodes_clear(*nodes);
	if (maxnode == 0 || !nmask)
		return 0;
	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
		return -EINVAL;

	nlongs = BITS_TO_LONGS(maxnode);
	if ((maxnode % BITS_PER_LONG) == 0)
		endmask = ~0UL;
	else
		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;

	/* When the user specified more nodes than supported just check
	   if the non supported part is all zero. */
	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
		if (nlongs > PAGE_SIZE/sizeof(long))
			return -EINVAL;
		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
			unsigned long t;
			if (get_user(t, nmask + k))
				return -EFAULT;
			if (k == nlongs - 1) {
				if (t & endmask)
					return -EINVAL;
			} else if (t)
				return -EINVAL;
		}
		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
		endmask = ~0UL;
	}

	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
		return -EFAULT;
	nodes_addr(*nodes)[nlongs-1] &= endmask;
	return 0;
}
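/*
 * Example of the endmask arithmetic above, on a 64-bit kernel: a caller
 * passing maxnode = 65 means "bits 0..63 are valid", so after --maxnode
 * we have maxnode = 64, nlongs = 1, and 64 % BITS_PER_LONG == 0 gives
 * endmask = ~0UL: the single long is copied unmasked. With maxnode = 17,
 * endmask = (1UL << 16) - 1 clears the bits beyond node 15.
 */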
/* Copy a kernel node mask to user space */
static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
			      nodemask_t *nodes)
{
	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);

	if (copy > nbytes) {
		if (copy > PAGE_SIZE)
			return -EINVAL;
		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
			return -EFAULT;
		copy = nbytes;
	}
	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
}

asmlinkage long sys_mbind(unsigned long start, unsigned long len,
			  unsigned long mode,
			  unsigned long __user *nmask, unsigned long maxnode,
			  unsigned flags)
{
	nodemask_t nodes;
	int err;

	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
#ifdef CONFIG_CPUSETS
	/* Restrict the nodes to the allowed nodes in the cpuset */
	nodes_and(nodes, nodes, current->mems_allowed);
#endif
	return do_mbind(start, len, mode, &nodes, flags);
}

/* Set the process memory policy */
asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
				  unsigned long maxnode)
{
	int err;
	nodemask_t nodes;

	if (mode < 0 || mode > MPOL_MAX)
		return -EINVAL;
	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_set_mempolicy(mode, &nodes);
}

asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
				  const unsigned long __user *old_nodes,
				  const unsigned long __user *new_nodes)
{
	struct mm_struct *mm;
	struct task_struct *task;
	nodemask_t old;
	nodemask_t new;
	nodemask_t task_nodes;
	int err;

	err = get_nodes(&old, old_nodes, maxnode);
	if (err)
		return err;

	err = get_nodes(&new, new_nodes, maxnode);
	if (err)
		return err;

	/* Find the mm_struct */
	read_lock(&tasklist_lock);
	task = pid ? find_task_by_pid(pid) : current;
	if (!task) {
		read_unlock(&tasklist_lock);
		return -ESRCH;
	}
	mm = get_task_mm(task);
	read_unlock(&tasklist_lock);

	if (!mm)
		return -EINVAL;

	/*
	 * Check if this process has the right to modify the specified
	 * process. The right exists if the process has administrative
	 * capabilities, superuser privileges or the same
	 * userid as the target process.
	 */
	if ((current->euid != task->suid) && (current->euid != task->uid) &&
	    (current->uid != task->suid) && (current->uid != task->uid) &&
	    !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out;
	}

	task_nodes = cpuset_mems_allowed(task);
	/* Is the user allowed to access the target nodes? */
	if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out;
	}

	err = security_task_movememory(task);
	if (err)
		goto out;

	err = do_migrate_pages(mm, &old, &new,
		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
out:
	mmput(mm);
	return err;
}

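/*
 * Illustrative userspace call (a sketch; assumes <numaif.h>): move all of
 * the calling process' pages from node 0 to node 1. A pid of 0 selects
 * the current task, per the code above.
 *
 *	unsigned long from = 1UL << 0, to = 1UL << 1;
 *
 *	migrate_pages(0, sizeof(from) * 8 + 1, &from, &to);
 */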
/* Retrieve NUMA policy */
asmlinkage long sys_get_mempolicy(int __user *policy,
				  unsigned long __user *nmask,
				  unsigned long maxnode,
				  unsigned long addr, unsigned long flags)
{
	int err, pval;
	nodemask_t nodes;

	if (nmask != NULL && maxnode < MAX_NUMNODES)
		return -EINVAL;

	err = do_get_mempolicy(&pval, &nodes, addr, flags);

	if (err)
		return err;

	if (policy && put_user(pval, policy))
		return -EFAULT;

	if (nmask)
		err = copy_nodes_to_user(nmask, maxnode, &nodes);

	return err;
}

#ifdef CONFIG_COMPAT

asmlinkage long compat_sys_get_mempolicy(int __user *policy,
					 compat_ulong_t __user *nmask,
					 compat_ulong_t maxnode,
					 compat_ulong_t addr, compat_ulong_t flags)
{
	long err;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask)
		nm = compat_alloc_user_space(alloc_size);

	err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);

	if (!err && nmask) {
		err = copy_from_user(bm, nm, alloc_size);
		/* ensure entire bitmap is zeroed */
		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
		err |= compat_put_bitmap(nmask, bm, nr_bits);
	}

	return err;
}
asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
					 compat_ulong_t maxnode)
{
	long err = 0;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		err = compat_get_bitmap(bm, nmask, nr_bits);
		nm = compat_alloc_user_space(alloc_size);
		err |= copy_to_user(nm, bm, alloc_size);
	}

	if (err)
		return -EFAULT;

	return sys_set_mempolicy(mode, nm, nr_bits+1);
}

asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
				 compat_ulong_t mode, compat_ulong_t __user *nmask,
				 compat_ulong_t maxnode, compat_ulong_t flags)
{
	long err = 0;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	nodemask_t bm;

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
		nm = compat_alloc_user_space(alloc_size);
		err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
	}

	if (err)
		return -EFAULT;

	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
}

#endif

/* Return effective policy for a VMA */
static struct mempolicy *get_vma_policy(struct task_struct *task,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct mempolicy *pol = task->mempolicy;

	if (vma) {
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else if (vma->vm_policy &&
			 vma->vm_policy->policy != MPOL_DEFAULT)
			pol = vma->vm_policy;
	}
	if (!pol)
		pol = &default_policy;
	return pol;
}

/* Return a zonelist representing a mempolicy */
static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy)
{
	int nd;

	switch (policy->policy) {
	case MPOL_PREFERRED:
		nd = policy->v.preferred_node;
		if (nd < 0)
			nd = numa_node_id();
break; 11041da177e4SLinus Torvalds case MPOL_BIND: 11051da177e4SLinus Torvalds /* Lower zones don't get a policy applied */ 11061da177e4SLinus Torvalds /* Careful: current->mems_allowed might have moved */ 110719655d34SChristoph Lameter if (gfp_zone(gfp) >= policy_zone) 11081da177e4SLinus Torvalds if (cpuset_zonelist_valid_mems_allowed(policy->v.zonelist)) 11091da177e4SLinus Torvalds return policy->v.zonelist; 11101da177e4SLinus Torvalds /*FALL THROUGH*/ 11111da177e4SLinus Torvalds case MPOL_INTERLEAVE: /* should not happen */ 11121da177e4SLinus Torvalds case MPOL_DEFAULT: 11131da177e4SLinus Torvalds nd = numa_node_id(); 11141da177e4SLinus Torvalds break; 11151da177e4SLinus Torvalds default: 11161da177e4SLinus Torvalds nd = 0; 11171da177e4SLinus Torvalds BUG(); 11181da177e4SLinus Torvalds } 1119af4ca457SAl Viro return NODE_DATA(nd)->node_zonelists + gfp_zone(gfp); 11201da177e4SLinus Torvalds } 11211da177e4SLinus Torvalds 11221da177e4SLinus Torvalds /* Do dynamic interleaving for a process */ 11231da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy) 11241da177e4SLinus Torvalds { 11251da177e4SLinus Torvalds unsigned nid, next; 11261da177e4SLinus Torvalds struct task_struct *me = current; 11271da177e4SLinus Torvalds 11281da177e4SLinus Torvalds nid = me->il_next; 1129dfcd3c0dSAndi Kleen next = next_node(nid, policy->v.nodes); 11301da177e4SLinus Torvalds if (next >= MAX_NUMNODES) 1131dfcd3c0dSAndi Kleen next = first_node(policy->v.nodes); 11321da177e4SLinus Torvalds me->il_next = next; 11331da177e4SLinus Torvalds return nid; 11341da177e4SLinus Torvalds } 11351da177e4SLinus Torvalds 1136dc85da15SChristoph Lameter /* 1137dc85da15SChristoph Lameter * Depending on the memory policy provide a node from which to allocate the 1138dc85da15SChristoph Lameter * next slab entry. 1139dc85da15SChristoph Lameter */ 1140dc85da15SChristoph Lameter unsigned slab_node(struct mempolicy *policy) 1141dc85da15SChristoph Lameter { 1142765c4507SChristoph Lameter int pol = policy ? policy->policy : MPOL_DEFAULT; 1143765c4507SChristoph Lameter 1144765c4507SChristoph Lameter switch (pol) { 1145dc85da15SChristoph Lameter case MPOL_INTERLEAVE: 1146dc85da15SChristoph Lameter return interleave_nodes(policy); 1147dc85da15SChristoph Lameter 1148dc85da15SChristoph Lameter case MPOL_BIND: 1149dc85da15SChristoph Lameter /* 1150dc85da15SChristoph Lameter * Follow bind policy behavior and start allocation at the 1151dc85da15SChristoph Lameter * first node. 1152dc85da15SChristoph Lameter */ 115389fa3024SChristoph Lameter return zone_to_nid(policy->v.zonelist->zones[0]); 1154dc85da15SChristoph Lameter 1155dc85da15SChristoph Lameter case MPOL_PREFERRED: 1156dc85da15SChristoph Lameter if (policy->v.preferred_node >= 0) 1157dc85da15SChristoph Lameter return policy->v.preferred_node; 1158dc85da15SChristoph Lameter /* Fall through */ 1159dc85da15SChristoph Lameter 1160dc85da15SChristoph Lameter default: 1161dc85da15SChristoph Lameter return numa_node_id(); 1162dc85da15SChristoph Lameter } 1163dc85da15SChristoph Lameter } 1164dc85da15SChristoph Lameter 11651da177e4SLinus Torvalds /* Do static interleaving for a VMA with known offset. 
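   Illustrative example (not in the original source), assuming an
   interleave mask of nodes {0,2,3}: nnodes is 3, so an offset of 7
   yields target 7 % 3 == 1, and the do/while walk below returns the
   second set node, i.e. node 2.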
*/ 11661da177e4SLinus Torvalds static unsigned offset_il_node(struct mempolicy *pol, 11671da177e4SLinus Torvalds struct vm_area_struct *vma, unsigned long off) 11681da177e4SLinus Torvalds { 1169dfcd3c0dSAndi Kleen unsigned nnodes = nodes_weight(pol->v.nodes); 11701da177e4SLinus Torvalds unsigned target = (unsigned)off % nnodes; 11711da177e4SLinus Torvalds int c; 11721da177e4SLinus Torvalds int nid = -1; 11731da177e4SLinus Torvalds 11741da177e4SLinus Torvalds c = 0; 11751da177e4SLinus Torvalds do { 1176dfcd3c0dSAndi Kleen nid = next_node(nid, pol->v.nodes); 11771da177e4SLinus Torvalds c++; 11781da177e4SLinus Torvalds } while (c <= target); 11791da177e4SLinus Torvalds return nid; 11801da177e4SLinus Torvalds } 11811da177e4SLinus Torvalds 11825da7ca86SChristoph Lameter /* Determine a node number for interleave */ 11835da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol, 11845da7ca86SChristoph Lameter struct vm_area_struct *vma, unsigned long addr, int shift) 11855da7ca86SChristoph Lameter { 11865da7ca86SChristoph Lameter if (vma) { 11875da7ca86SChristoph Lameter unsigned long off; 11885da7ca86SChristoph Lameter 11893b98b087SNishanth Aravamudan /* 11903b98b087SNishanth Aravamudan * for small pages, there is no difference between 11913b98b087SNishanth Aravamudan * shift and PAGE_SHIFT, so the bit-shift is safe. 11923b98b087SNishanth Aravamudan * for huge pages, since vm_pgoff is in units of small 11933b98b087SNishanth Aravamudan * pages, we need to shift off the always 0 bits to get 11943b98b087SNishanth Aravamudan * a useful offset. 11953b98b087SNishanth Aravamudan */ 11963b98b087SNishanth Aravamudan BUG_ON(shift < PAGE_SHIFT); 11973b98b087SNishanth Aravamudan off = vma->vm_pgoff >> (shift - PAGE_SHIFT); 11985da7ca86SChristoph Lameter off += (addr - vma->vm_start) >> shift; 11995da7ca86SChristoph Lameter return offset_il_node(pol, vma, off); 12005da7ca86SChristoph Lameter } else 12015da7ca86SChristoph Lameter return interleave_nodes(pol); 12025da7ca86SChristoph Lameter } 12035da7ca86SChristoph Lameter 120400ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS 12055da7ca86SChristoph Lameter /* Return a zonelist suitable for a huge page allocation. */ 1206396faf03SMel Gorman struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr, 1207396faf03SMel Gorman gfp_t gfp_flags) 12085da7ca86SChristoph Lameter { 12095da7ca86SChristoph Lameter struct mempolicy *pol = get_vma_policy(current, vma, addr); 12105da7ca86SChristoph Lameter 12115da7ca86SChristoph Lameter if (pol->policy == MPOL_INTERLEAVE) { 12125da7ca86SChristoph Lameter unsigned nid; 12135da7ca86SChristoph Lameter 12145da7ca86SChristoph Lameter nid = interleave_nid(pol, vma, addr, HPAGE_SHIFT); 1215396faf03SMel Gorman return NODE_DATA(nid)->node_zonelists + gfp_zone(gfp_flags); 12165da7ca86SChristoph Lameter } 12175da7ca86SChristoph Lameter return zonelist_policy(GFP_HIGHUSER, pol); 12185da7ca86SChristoph Lameter } 121900ac59adSChen, Kenneth W #endif 12205da7ca86SChristoph Lameter 12211da177e4SLinus Torvalds /* Allocate a page in interleaved policy. 12221da177e4SLinus Torvalds Own path because it needs to do special accounting. 
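   (Illustrative note, not in the original source: the "special
   accounting" is the NUMA_INTERLEAVE_HIT counter below, which is only
   bumped when the page really came from the first zone of the
   requested node rather than from fallback.)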
*/ 1223662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, 1224662f3a0bSAndi Kleen unsigned nid) 12251da177e4SLinus Torvalds { 12261da177e4SLinus Torvalds struct zonelist *zl; 12271da177e4SLinus Torvalds struct page *page; 12281da177e4SLinus Torvalds 1229af4ca457SAl Viro zl = NODE_DATA(nid)->node_zonelists + gfp_zone(gfp); 12301da177e4SLinus Torvalds page = __alloc_pages(gfp, order, zl); 1231ca889e6cSChristoph Lameter if (page && page_zone(page) == zl->zones[0]) 1232ca889e6cSChristoph Lameter inc_zone_page_state(page, NUMA_INTERLEAVE_HIT); 12331da177e4SLinus Torvalds return page; 12341da177e4SLinus Torvalds } 12351da177e4SLinus Torvalds 12361da177e4SLinus Torvalds /** 12371da177e4SLinus Torvalds * alloc_page_vma - Allocate a page for a VMA. 12381da177e4SLinus Torvalds * 12391da177e4SLinus Torvalds * @gfp: 12401da177e4SLinus Torvalds * %GFP_USER user allocation. 12411da177e4SLinus Torvalds * %GFP_KERNEL kernel allocations, 12421da177e4SLinus Torvalds * %GFP_HIGHMEM highmem/user allocations, 12431da177e4SLinus Torvalds * %GFP_FS allocation should not call back into a file system. 12441da177e4SLinus Torvalds * %GFP_ATOMIC don't sleep. 12451da177e4SLinus Torvalds * 12461da177e4SLinus Torvalds * @vma: Pointer to VMA or NULL if not available. 12471da177e4SLinus Torvalds * @addr: Virtual Address of the allocation. Must be inside the VMA. 12481da177e4SLinus Torvalds * 12491da177e4SLinus Torvalds * This function allocates a page from the kernel page pool and applies 12501da177e4SLinus Torvalds * a NUMA policy associated with the VMA or the current process. 12511da177e4SLinus Torvalds * When VMA is not NULL caller must hold down_read on the mmap_sem of the 12521da177e4SLinus Torvalds * mm_struct of the VMA to prevent it from going away. Should be used for 12531da177e4SLinus Torvalds * all allocations for pages that will be mapped into 12541da177e4SLinus Torvalds * user space. Returns NULL when no page can be allocated. 12551da177e4SLinus Torvalds * 12561da177e4SLinus Torvalds * Should be called with the mmap_sem of the vma held. 12571da177e4SLinus Torvalds */ 12581da177e4SLinus Torvalds struct page * 1259dd0fc66fSAl Viro alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr) 12601da177e4SLinus Torvalds { 12616e21c8f1SChristoph Lameter struct mempolicy *pol = get_vma_policy(current, vma, addr); 12621da177e4SLinus Torvalds 1263cf2a473cSPaul Jackson cpuset_update_task_memory_state(); 12641da177e4SLinus Torvalds 12651da177e4SLinus Torvalds if (unlikely(pol->policy == MPOL_INTERLEAVE)) { 12661da177e4SLinus Torvalds unsigned nid; 12675da7ca86SChristoph Lameter 12685da7ca86SChristoph Lameter nid = interleave_nid(pol, vma, addr, PAGE_SHIFT); 12691da177e4SLinus Torvalds return alloc_page_interleave(gfp, 0, nid); 12701da177e4SLinus Torvalds } 12711da177e4SLinus Torvalds return __alloc_pages(gfp, 0, zonelist_policy(gfp, pol)); 12721da177e4SLinus Torvalds } 12731da177e4SLinus Torvalds 12741da177e4SLinus Torvalds /** 12751da177e4SLinus Torvalds * alloc_pages_current - Allocate pages. 12761da177e4SLinus Torvalds * 12771da177e4SLinus Torvalds * @gfp: 12781da177e4SLinus Torvalds * %GFP_USER user allocation, 12791da177e4SLinus Torvalds * %GFP_KERNEL kernel allocation, 12801da177e4SLinus Torvalds * %GFP_HIGHMEM highmem allocation, 12811da177e4SLinus Torvalds * %GFP_FS don't call back into a file system. 12821da177e4SLinus Torvalds * %GFP_ATOMIC don't sleep. 12831da177e4SLinus Torvalds * @order: Power of two of allocation size in pages. 0 is a single page.
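 *	(Illustrative aside, not in the original source: order is an
 *	exponent, so order 3 requests 1 << 3 == 8 contiguous pages,
 *	i.e. 32KB with 4KB pages.)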
12841da177e4SLinus Torvalds * 12851da177e4SLinus Torvalds * Allocate a page from the kernel page pool, applying the current 12861da177e4SLinus Torvalds * process NUMA policy when not in interrupt context. 12871da177e4SLinus Torvalds * Returns NULL when no page can be allocated. 12881da177e4SLinus Torvalds * 1289cf2a473cSPaul Jackson * Don't call cpuset_update_task_memory_state() unless 12901da177e4SLinus Torvalds * 1) it's ok to take cpuset_sem (can WAIT), and 12911da177e4SLinus Torvalds * 2) allocating for current task (not interrupt). 12921da177e4SLinus Torvalds */ 1293dd0fc66fSAl Viro struct page *alloc_pages_current(gfp_t gfp, unsigned order) 12941da177e4SLinus Torvalds { 12951da177e4SLinus Torvalds struct mempolicy *pol = current->mempolicy; 12961da177e4SLinus Torvalds 12971da177e4SLinus Torvalds if ((gfp & __GFP_WAIT) && !in_interrupt()) 1298cf2a473cSPaul Jackson cpuset_update_task_memory_state(); 12991da177e4SLinus Torvalds if (!pol || in_interrupt() || (gfp & __GFP_THISNODE)) 13001da177e4SLinus Torvalds pol = &default_policy; 13011da177e4SLinus Torvalds if (pol->policy == MPOL_INTERLEAVE) 13021da177e4SLinus Torvalds return alloc_page_interleave(gfp, order, interleave_nodes(pol)); 13031da177e4SLinus Torvalds return __alloc_pages(gfp, order, zonelist_policy(gfp, pol)); 13041da177e4SLinus Torvalds } 13051da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_pages_current); 13061da177e4SLinus Torvalds 13074225399aSPaul Jackson /* 13084225399aSPaul Jackson * If mpol_copy() sees current->cpuset == cpuset_being_rebound, then it 13094225399aSPaul Jackson * rebinds the mempolicy it's copying by calling mpol_rebind_policy() 13104225399aSPaul Jackson * with the mems_allowed returned by cpuset_mems_allowed(). This 13114225399aSPaul Jackson * keeps mempolicies cpuset-relative after its cpuset moves. See 13124225399aSPaul Jackson * further kernel/cpuset.c update_nodemask().
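 * (Illustrative summary, not in the original source: without this
 * rebind a fork() racing with a cpuset move could copy a mempolicy
 * whose nodes still reflect the old cpuset placement; rebinding first
 * keeps the copy consistent with the new mems_allowed.)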
13134225399aSPaul Jackson */ 13144225399aSPaul Jackson void *cpuset_being_rebound; 13154225399aSPaul Jackson 13161da177e4SLinus Torvalds /* Slow path of a mempolicy copy */ 13171da177e4SLinus Torvalds struct mempolicy *__mpol_copy(struct mempolicy *old) 13181da177e4SLinus Torvalds { 13191da177e4SLinus Torvalds struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL); 13201da177e4SLinus Torvalds 13211da177e4SLinus Torvalds if (!new) 13221da177e4SLinus Torvalds return ERR_PTR(-ENOMEM); 13234225399aSPaul Jackson if (current_cpuset_is_being_rebound()) { 13244225399aSPaul Jackson nodemask_t mems = cpuset_mems_allowed(current); 13254225399aSPaul Jackson mpol_rebind_policy(old, &mems); 13264225399aSPaul Jackson } 13271da177e4SLinus Torvalds *new = *old; 13281da177e4SLinus Torvalds atomic_set(&new->refcnt, 1); 13291da177e4SLinus Torvalds if (new->policy == MPOL_BIND) { 13301da177e4SLinus Torvalds int sz = ksize(old->v.zonelist); 1331e94b1766SChristoph Lameter new->v.zonelist = kmemdup(old->v.zonelist, sz, GFP_KERNEL); 13321da177e4SLinus Torvalds if (!new->v.zonelist) { 13331da177e4SLinus Torvalds kmem_cache_free(policy_cache, new); 13341da177e4SLinus Torvalds return ERR_PTR(-ENOMEM); 13351da177e4SLinus Torvalds } 13361da177e4SLinus Torvalds } 13371da177e4SLinus Torvalds return new; 13381da177e4SLinus Torvalds } 13391da177e4SLinus Torvalds 13401da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */ 13411da177e4SLinus Torvalds int __mpol_equal(struct mempolicy *a, struct mempolicy *b) 13421da177e4SLinus Torvalds { 13431da177e4SLinus Torvalds if (!a || !b) 13441da177e4SLinus Torvalds return 0; 13451da177e4SLinus Torvalds if (a->policy != b->policy) 13461da177e4SLinus Torvalds return 0; 13471da177e4SLinus Torvalds switch (a->policy) { 13481da177e4SLinus Torvalds case MPOL_DEFAULT: 13491da177e4SLinus Torvalds return 1; 13501da177e4SLinus Torvalds case MPOL_INTERLEAVE: 1351dfcd3c0dSAndi Kleen return nodes_equal(a->v.nodes, b->v.nodes); 13521da177e4SLinus Torvalds case MPOL_PREFERRED: 13531da177e4SLinus Torvalds return a->v.preferred_node == b->v.preferred_node; 13541da177e4SLinus Torvalds case MPOL_BIND: { 13551da177e4SLinus Torvalds int i; 13561da177e4SLinus Torvalds for (i = 0; a->v.zonelist->zones[i]; i++) 13571da177e4SLinus Torvalds if (a->v.zonelist->zones[i] != b->v.zonelist->zones[i]) 13581da177e4SLinus Torvalds return 0; 13591da177e4SLinus Torvalds return b->v.zonelist->zones[i] == NULL; 13601da177e4SLinus Torvalds } 13611da177e4SLinus Torvalds default: 13621da177e4SLinus Torvalds BUG(); 13631da177e4SLinus Torvalds return 0; 13641da177e4SLinus Torvalds } 13651da177e4SLinus Torvalds } 13661da177e4SLinus Torvalds 13671da177e4SLinus Torvalds /* Slow path of a mpol destructor. */ 13681da177e4SLinus Torvalds void __mpol_free(struct mempolicy *p) 13691da177e4SLinus Torvalds { 13701da177e4SLinus Torvalds if (!atomic_dec_and_test(&p->refcnt)) 13711da177e4SLinus Torvalds return; 13721da177e4SLinus Torvalds if (p->policy == MPOL_BIND) 13731da177e4SLinus Torvalds kfree(p->v.zonelist); 13741da177e4SLinus Torvalds p->policy = MPOL_DEFAULT; 13751da177e4SLinus Torvalds kmem_cache_free(policy_cache, p); 13761da177e4SLinus Torvalds } 13771da177e4SLinus Torvalds 13781da177e4SLinus Torvalds /* 13791da177e4SLinus Torvalds * Shared memory backing store policy support. 13801da177e4SLinus Torvalds * 13811da177e4SLinus Torvalds * Remember policies even when nobody has shared memory mapped. 13821da177e4SLinus Torvalds * The policies are kept in Red-Black tree linked from the inode. 
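 * (Illustrative example, not in the original source: each sp_node
 * covers a page-offset range [start, end) of the object, so setting
 * MPOL_INTERLEAVE on pages 4-7 of a tmpfs file stores one node with
 * start == 4 and end == 8, independent of any policy on the rest.)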
13831da177e4SLinus Torvalds * They are protected by the sp->lock spinlock, which should be held 13841da177e4SLinus Torvalds * for any accesses to the tree. 13851da177e4SLinus Torvalds */ 13861da177e4SLinus Torvalds 13871da177e4SLinus Torvalds /* lookup first element intersecting start-end */ 13881da177e4SLinus Torvalds /* Caller holds sp->lock */ 13891da177e4SLinus Torvalds static struct sp_node * 13901da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end) 13911da177e4SLinus Torvalds { 13921da177e4SLinus Torvalds struct rb_node *n = sp->root.rb_node; 13931da177e4SLinus Torvalds 13941da177e4SLinus Torvalds while (n) { 13951da177e4SLinus Torvalds struct sp_node *p = rb_entry(n, struct sp_node, nd); 13961da177e4SLinus Torvalds 13971da177e4SLinus Torvalds if (start >= p->end) 13981da177e4SLinus Torvalds n = n->rb_right; 13991da177e4SLinus Torvalds else if (end <= p->start) 14001da177e4SLinus Torvalds n = n->rb_left; 14011da177e4SLinus Torvalds else 14021da177e4SLinus Torvalds break; 14031da177e4SLinus Torvalds } 14041da177e4SLinus Torvalds if (!n) 14051da177e4SLinus Torvalds return NULL; 14061da177e4SLinus Torvalds for (;;) { 14071da177e4SLinus Torvalds struct sp_node *w = NULL; 14081da177e4SLinus Torvalds struct rb_node *prev = rb_prev(n); 14091da177e4SLinus Torvalds if (!prev) 14101da177e4SLinus Torvalds break; 14111da177e4SLinus Torvalds w = rb_entry(prev, struct sp_node, nd); 14121da177e4SLinus Torvalds if (w->end <= start) 14131da177e4SLinus Torvalds break; 14141da177e4SLinus Torvalds n = prev; 14151da177e4SLinus Torvalds } 14161da177e4SLinus Torvalds return rb_entry(n, struct sp_node, nd); 14171da177e4SLinus Torvalds } 14181da177e4SLinus Torvalds 14191da177e4SLinus Torvalds /* Insert a new shared policy into the list. */ 14201da177e4SLinus Torvalds /* Caller holds sp->lock */ 14211da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new) 14221da177e4SLinus Torvalds { 14231da177e4SLinus Torvalds struct rb_node **p = &sp->root.rb_node; 14241da177e4SLinus Torvalds struct rb_node *parent = NULL; 14251da177e4SLinus Torvalds struct sp_node *nd; 14261da177e4SLinus Torvalds 14271da177e4SLinus Torvalds while (*p) { 14281da177e4SLinus Torvalds parent = *p; 14291da177e4SLinus Torvalds nd = rb_entry(parent, struct sp_node, nd); 14301da177e4SLinus Torvalds if (new->start < nd->start) 14311da177e4SLinus Torvalds p = &(*p)->rb_left; 14321da177e4SLinus Torvalds else if (new->end > nd->end) 14331da177e4SLinus Torvalds p = &(*p)->rb_right; 14341da177e4SLinus Torvalds else 14351da177e4SLinus Torvalds BUG(); 14361da177e4SLinus Torvalds } 14371da177e4SLinus Torvalds rb_link_node(&new->nd, parent, p); 14381da177e4SLinus Torvalds rb_insert_color(&new->nd, &sp->root); 1439140d5a49SPaul Mundt pr_debug("inserting %lx-%lx: %d\n", new->start, new->end, 14401da177e4SLinus Torvalds new->policy ? 
new->policy->policy : 0); 14411da177e4SLinus Torvalds } 14421da177e4SLinus Torvalds 14431da177e4SLinus Torvalds /* Find shared policy intersecting idx */ 14441da177e4SLinus Torvalds struct mempolicy * 14451da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx) 14461da177e4SLinus Torvalds { 14471da177e4SLinus Torvalds struct mempolicy *pol = NULL; 14481da177e4SLinus Torvalds struct sp_node *sn; 14491da177e4SLinus Torvalds 14501da177e4SLinus Torvalds if (!sp->root.rb_node) 14511da177e4SLinus Torvalds return NULL; 14521da177e4SLinus Torvalds spin_lock(&sp->lock); 14531da177e4SLinus Torvalds sn = sp_lookup(sp, idx, idx+1); 14541da177e4SLinus Torvalds if (sn) { 14551da177e4SLinus Torvalds mpol_get(sn->policy); 14561da177e4SLinus Torvalds pol = sn->policy; 14571da177e4SLinus Torvalds } 14581da177e4SLinus Torvalds spin_unlock(&sp->lock); 14591da177e4SLinus Torvalds return pol; 14601da177e4SLinus Torvalds } 14611da177e4SLinus Torvalds 14621da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n) 14631da177e4SLinus Torvalds { 1464140d5a49SPaul Mundt pr_debug("deleting %lx-%lx\n", n->start, n->end); 14651da177e4SLinus Torvalds rb_erase(&n->nd, &sp->root); 14661da177e4SLinus Torvalds mpol_free(n->policy); 14671da177e4SLinus Torvalds kmem_cache_free(sn_cache, n); 14681da177e4SLinus Torvalds } 14691da177e4SLinus Torvalds 14701da177e4SLinus Torvalds struct sp_node * 14711da177e4SLinus Torvalds sp_alloc(unsigned long start, unsigned long end, struct mempolicy *pol) 14721da177e4SLinus Torvalds { 14731da177e4SLinus Torvalds struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL); 14741da177e4SLinus Torvalds 14751da177e4SLinus Torvalds if (!n) 14761da177e4SLinus Torvalds return NULL; 14771da177e4SLinus Torvalds n->start = start; 14781da177e4SLinus Torvalds n->end = end; 14791da177e4SLinus Torvalds mpol_get(pol); 14801da177e4SLinus Torvalds n->policy = pol; 14811da177e4SLinus Torvalds return n; 14821da177e4SLinus Torvalds } 14831da177e4SLinus Torvalds 14841da177e4SLinus Torvalds /* Replace a policy range. */ 14851da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start, 14861da177e4SLinus Torvalds unsigned long end, struct sp_node *new) 14871da177e4SLinus Torvalds { 14881da177e4SLinus Torvalds struct sp_node *n, *new2 = NULL; 14891da177e4SLinus Torvalds 14901da177e4SLinus Torvalds restart: 14911da177e4SLinus Torvalds spin_lock(&sp->lock); 14921da177e4SLinus Torvalds n = sp_lookup(sp, start, end); 14931da177e4SLinus Torvalds /* Take care of old policies in the same range. */ 14941da177e4SLinus Torvalds while (n && n->start < end) { 14951da177e4SLinus Torvalds struct rb_node *next = rb_next(&n->nd); 14961da177e4SLinus Torvalds if (n->start >= start) { 14971da177e4SLinus Torvalds if (n->end <= end) 14981da177e4SLinus Torvalds sp_delete(sp, n); 14991da177e4SLinus Torvalds else 15001da177e4SLinus Torvalds n->start = end; 15011da177e4SLinus Torvalds } else { 15021da177e4SLinus Torvalds /* Old policy spanning whole new range.
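   Illustrative example (not in the original source): replacing the
   range [3,5) inside an old node [0,10) trims the old node to [0,3)
   (n->end = start below) and inserts the pre-allocated copy new2 for
   the remaining tail [5,10).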
*/ 15031da177e4SLinus Torvalds if (n->end > end) { 15041da177e4SLinus Torvalds if (!new2) { 15051da177e4SLinus Torvalds spin_unlock(&sp->lock); 15061da177e4SLinus Torvalds new2 = sp_alloc(end, n->end, n->policy); 15071da177e4SLinus Torvalds if (!new2) 15081da177e4SLinus Torvalds return -ENOMEM; 15091da177e4SLinus Torvalds goto restart; 15101da177e4SLinus Torvalds } 15111da177e4SLinus Torvalds n->end = start; 15121da177e4SLinus Torvalds sp_insert(sp, new2); 15131da177e4SLinus Torvalds new2 = NULL; 15141da177e4SLinus Torvalds break; 15151da177e4SLinus Torvalds } else 15161da177e4SLinus Torvalds n->end = start; 15171da177e4SLinus Torvalds } 15181da177e4SLinus Torvalds if (!next) 15191da177e4SLinus Torvalds break; 15201da177e4SLinus Torvalds n = rb_entry(next, struct sp_node, nd); 15211da177e4SLinus Torvalds } 15221da177e4SLinus Torvalds if (new) 15231da177e4SLinus Torvalds sp_insert(sp, new); 15241da177e4SLinus Torvalds spin_unlock(&sp->lock); 15251da177e4SLinus Torvalds if (new2) { 15261da177e4SLinus Torvalds mpol_free(new2->policy); 15271da177e4SLinus Torvalds kmem_cache_free(sn_cache, new2); 15281da177e4SLinus Torvalds } 15291da177e4SLinus Torvalds return 0; 15301da177e4SLinus Torvalds } 15311da177e4SLinus Torvalds 15327339ff83SRobin Holt void mpol_shared_policy_init(struct shared_policy *info, int policy, 15337339ff83SRobin Holt nodemask_t *policy_nodes) 15347339ff83SRobin Holt { 15357339ff83SRobin Holt info->root = RB_ROOT; 15367339ff83SRobin Holt spin_lock_init(&info->lock); 15377339ff83SRobin Holt 15387339ff83SRobin Holt if (policy != MPOL_DEFAULT) { 15397339ff83SRobin Holt struct mempolicy *newpol; 15407339ff83SRobin Holt 15417339ff83SRobin Holt /* Falls back to MPOL_DEFAULT on any error */ 15427339ff83SRobin Holt newpol = mpol_new(policy, policy_nodes); 15437339ff83SRobin Holt if (!IS_ERR(newpol)) { 15447339ff83SRobin Holt /* Create pseudo-vma that contains just the policy */ 15457339ff83SRobin Holt struct vm_area_struct pvma; 15467339ff83SRobin Holt 15477339ff83SRobin Holt memset(&pvma, 0, sizeof(struct vm_area_struct)); 15487339ff83SRobin Holt /* Policy covers entire file */ 15497339ff83SRobin Holt pvma.vm_end = TASK_SIZE; 15507339ff83SRobin Holt mpol_set_shared_policy(info, &pvma, newpol); 15517339ff83SRobin Holt mpol_free(newpol); 15527339ff83SRobin Holt } 15537339ff83SRobin Holt } 15547339ff83SRobin Holt } 15557339ff83SRobin Holt 15561da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info, 15571da177e4SLinus Torvalds struct vm_area_struct *vma, struct mempolicy *npol) 15581da177e4SLinus Torvalds { 15591da177e4SLinus Torvalds int err; 15601da177e4SLinus Torvalds struct sp_node *new = NULL; 15611da177e4SLinus Torvalds unsigned long sz = vma_pages(vma); 15621da177e4SLinus Torvalds 1563140d5a49SPaul Mundt pr_debug("set_shared_policy %lx sz %lu %d %lx\n", 15641da177e4SLinus Torvalds vma->vm_pgoff, 15651da177e4SLinus Torvalds sz, npol? npol->policy : -1, 1566dfcd3c0dSAndi Kleen npol ? 
nodes_addr(npol->v.nodes)[0] : -1); 15671da177e4SLinus Torvalds 15681da177e4SLinus Torvalds if (npol) { 15691da177e4SLinus Torvalds new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol); 15701da177e4SLinus Torvalds if (!new) 15711da177e4SLinus Torvalds return -ENOMEM; 15721da177e4SLinus Torvalds } 15731da177e4SLinus Torvalds err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new); 15741da177e4SLinus Torvalds if (err && new) 15751da177e4SLinus Torvalds kmem_cache_free(sn_cache, new); 15761da177e4SLinus Torvalds return err; 15771da177e4SLinus Torvalds } 15781da177e4SLinus Torvalds 15791da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */ 15801da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p) 15811da177e4SLinus Torvalds { 15821da177e4SLinus Torvalds struct sp_node *n; 15831da177e4SLinus Torvalds struct rb_node *next; 15841da177e4SLinus Torvalds 15851da177e4SLinus Torvalds if (!p->root.rb_node) 15861da177e4SLinus Torvalds return; 15871da177e4SLinus Torvalds spin_lock(&p->lock); 15881da177e4SLinus Torvalds next = rb_first(&p->root); 15891da177e4SLinus Torvalds while (next) { 15901da177e4SLinus Torvalds n = rb_entry(next, struct sp_node, nd); 15911da177e4SLinus Torvalds next = rb_next(&n->nd); 159290c5029eSAndi Kleen rb_erase(&n->nd, &p->root); 15931da177e4SLinus Torvalds mpol_free(n->policy); 15941da177e4SLinus Torvalds kmem_cache_free(sn_cache, n); 15951da177e4SLinus Torvalds } 15961da177e4SLinus Torvalds spin_unlock(&p->lock); 15971da177e4SLinus Torvalds } 15981da177e4SLinus Torvalds 15991da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */ 16001da177e4SLinus Torvalds void __init numa_policy_init(void) 16011da177e4SLinus Torvalds { 1602b71636e2SPaul Mundt nodemask_t interleave_nodes; 1603b71636e2SPaul Mundt unsigned long largest = 0; 1604b71636e2SPaul Mundt int nid, prefer = 0; 1605b71636e2SPaul Mundt 16061da177e4SLinus Torvalds policy_cache = kmem_cache_create("numa_policy", 16071da177e4SLinus Torvalds sizeof(struct mempolicy), 160820c2df83SPaul Mundt 0, SLAB_PANIC, NULL); 16091da177e4SLinus Torvalds 16101da177e4SLinus Torvalds sn_cache = kmem_cache_create("shared_policy_node", 16111da177e4SLinus Torvalds sizeof(struct sp_node), 161220c2df83SPaul Mundt 0, SLAB_PANIC, NULL); 16131da177e4SLinus Torvalds 1614b71636e2SPaul Mundt /* 1615b71636e2SPaul Mundt * Set interleaving policy for system init. Interleaving is only 1616b71636e2SPaul Mundt * enabled across suitably sized nodes (default is >= 16MB), or 1617b71636e2SPaul Mundt * fall back to the largest node if they're all smaller. 1618b71636e2SPaul Mundt */ 1619b71636e2SPaul Mundt nodes_clear(interleave_nodes); 1620b71636e2SPaul Mundt for_each_online_node(nid) { 1621b71636e2SPaul Mundt unsigned long total_pages = node_present_pages(nid); 16221da177e4SLinus Torvalds 1623b71636e2SPaul Mundt /* Preserve the largest node */ 1624b71636e2SPaul Mundt if (largest < total_pages) { 1625b71636e2SPaul Mundt largest = total_pages; 1626b71636e2SPaul Mundt prefer = nid; 1627b71636e2SPaul Mundt } 1628b71636e2SPaul Mundt 1629b71636e2SPaul Mundt /* Interleave this node? 
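	   (Illustrative arithmetic, not in the original source:
	   total_pages << PAGE_SHIFT converts pages to bytes, so with
	   4KB pages a node needs at least 4096 present pages to pass
	   the 16MB (16 << 20) threshold below.)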
*/ 1630b71636e2SPaul Mundt if ((total_pages << PAGE_SHIFT) >= (16 << 20)) 1631b71636e2SPaul Mundt node_set(nid, interleave_nodes); 1632b71636e2SPaul Mundt } 1633b71636e2SPaul Mundt 1634b71636e2SPaul Mundt /* All too small, use the largest */ 1635b71636e2SPaul Mundt if (unlikely(nodes_empty(interleave_nodes))) 1636b71636e2SPaul Mundt node_set(prefer, interleave_nodes); 1637b71636e2SPaul Mundt 1638b71636e2SPaul Mundt if (do_set_mempolicy(MPOL_INTERLEAVE, &interleave_nodes)) 16391da177e4SLinus Torvalds printk("numa_policy_init: interleaving failed\n"); 16401da177e4SLinus Torvalds } 16411da177e4SLinus Torvalds 16428bccd85fSChristoph Lameter /* Reset policy of current process to default */ 16431da177e4SLinus Torvalds void numa_default_policy(void) 16441da177e4SLinus Torvalds { 16458bccd85fSChristoph Lameter do_set_mempolicy(MPOL_DEFAULT, NULL); 16461da177e4SLinus Torvalds } 164768860ec1SPaul Jackson 164868860ec1SPaul Jackson /* Migrate a policy to a different set of nodes */ 164974cb2155SPaul Jackson void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask) 165068860ec1SPaul Jackson { 165174cb2155SPaul Jackson nodemask_t *mpolmask; 165268860ec1SPaul Jackson nodemask_t tmp; 165368860ec1SPaul Jackson 165468860ec1SPaul Jackson if (!pol) 165568860ec1SPaul Jackson return; 165674cb2155SPaul Jackson mpolmask = &pol->cpuset_mems_allowed; 165774cb2155SPaul Jackson if (nodes_equal(*mpolmask, *newmask)) 165874cb2155SPaul Jackson return; 165968860ec1SPaul Jackson 166068860ec1SPaul Jackson switch (pol->policy) { 166168860ec1SPaul Jackson case MPOL_DEFAULT: 166268860ec1SPaul Jackson break; 166368860ec1SPaul Jackson case MPOL_INTERLEAVE: 166474cb2155SPaul Jackson nodes_remap(tmp, pol->v.nodes, *mpolmask, *newmask); 166568860ec1SPaul Jackson pol->v.nodes = tmp; 166674cb2155SPaul Jackson *mpolmask = *newmask; 166774cb2155SPaul Jackson current->il_next = node_remap(current->il_next, 166874cb2155SPaul Jackson *mpolmask, *newmask); 166968860ec1SPaul Jackson break; 167068860ec1SPaul Jackson case MPOL_PREFERRED: 167168860ec1SPaul Jackson pol->v.preferred_node = node_remap(pol->v.preferred_node, 167274cb2155SPaul Jackson *mpolmask, *newmask); 167374cb2155SPaul Jackson *mpolmask = *newmask; 167468860ec1SPaul Jackson break; 167568860ec1SPaul Jackson case MPOL_BIND: { 167668860ec1SPaul Jackson nodemask_t nodes; 167768860ec1SPaul Jackson struct zone **z; 167868860ec1SPaul Jackson struct zonelist *zonelist; 167968860ec1SPaul Jackson 168068860ec1SPaul Jackson nodes_clear(nodes); 168168860ec1SPaul Jackson for (z = pol->v.zonelist->zones; *z; z++) 168289fa3024SChristoph Lameter node_set(zone_to_nid(*z), nodes); 168374cb2155SPaul Jackson nodes_remap(tmp, nodes, *mpolmask, *newmask); 168468860ec1SPaul Jackson nodes = tmp; 168568860ec1SPaul Jackson 168668860ec1SPaul Jackson zonelist = bind_zonelist(&nodes); 168768860ec1SPaul Jackson 168868860ec1SPaul Jackson /* If no mem, then zonelist is NULL and we keep old zonelist. 168968860ec1SPaul Jackson * If that old zonelist has no remaining mems_allowed nodes, 169068860ec1SPaul Jackson * then zonelist_policy() will "FALL THROUGH" to MPOL_DEFAULT. 
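	 * (Illustrative example, not in the original source: nodes_remap()
	 * preserves relative bit positions, so a bind mask {1,3} moved from
	 * old mems_allowed {0-3} to new mems_allowed {4-7} becomes {5,7}.)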
169168860ec1SPaul Jackson */ 169268860ec1SPaul Jackson 16938af5e2ebSKAMEZAWA Hiroyuki if (!IS_ERR(zonelist)) { 169468860ec1SPaul Jackson /* Good - got mem - substitute new zonelist */ 169568860ec1SPaul Jackson kfree(pol->v.zonelist); 169668860ec1SPaul Jackson pol->v.zonelist = zonelist; 169768860ec1SPaul Jackson } 169874cb2155SPaul Jackson *mpolmask = *newmask; 169968860ec1SPaul Jackson break; 170068860ec1SPaul Jackson } 170168860ec1SPaul Jackson default: 170268860ec1SPaul Jackson BUG(); 170368860ec1SPaul Jackson break; 170468860ec1SPaul Jackson } 170568860ec1SPaul Jackson } 170668860ec1SPaul Jackson 170768860ec1SPaul Jackson /* 170874cb2155SPaul Jackson * Wrapper for mpol_rebind_policy() that just requires task 170974cb2155SPaul Jackson * pointer, and updates task mempolicy. 171068860ec1SPaul Jackson */ 171174cb2155SPaul Jackson 171274cb2155SPaul Jackson void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new) 171368860ec1SPaul Jackson { 171474cb2155SPaul Jackson mpol_rebind_policy(tsk->mempolicy, new); 171568860ec1SPaul Jackson } 17161a75a6c8SChristoph Lameter 17171a75a6c8SChristoph Lameter /* 17184225399aSPaul Jackson * Rebind each vma in mm to new nodemask. 17194225399aSPaul Jackson * 17204225399aSPaul Jackson * Call holding a reference to mm. Takes mm->mmap_sem during call. 17214225399aSPaul Jackson */ 17224225399aSPaul Jackson 17234225399aSPaul Jackson void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new) 17244225399aSPaul Jackson { 17254225399aSPaul Jackson struct vm_area_struct *vma; 17264225399aSPaul Jackson 17274225399aSPaul Jackson down_write(&mm->mmap_sem); 17284225399aSPaul Jackson for (vma = mm->mmap; vma; vma = vma->vm_next) 17294225399aSPaul Jackson mpol_rebind_policy(vma->vm_policy, new); 17304225399aSPaul Jackson up_write(&mm->mmap_sem); 17314225399aSPaul Jackson } 17324225399aSPaul Jackson 17334225399aSPaul Jackson /* 17341a75a6c8SChristoph Lameter * Display pages allocated per node and memory policy via /proc. 17351a75a6c8SChristoph Lameter */ 17361a75a6c8SChristoph Lameter 173715ad7cdcSHelge Deller static const char * const policy_types[] = 173815ad7cdcSHelge Deller { "default", "prefer", "bind", "interleave" }; 17391a75a6c8SChristoph Lameter 17401a75a6c8SChristoph Lameter /* 17411a75a6c8SChristoph Lameter * Convert a mempolicy into a string. 17421a75a6c8SChristoph Lameter * Returns the number of characters in buffer (if positive) 17431a75a6c8SChristoph Lameter * or an error (negative) 17441a75a6c8SChristoph Lameter */ 17451a75a6c8SChristoph Lameter static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol) 17461a75a6c8SChristoph Lameter { 17471a75a6c8SChristoph Lameter char *p = buffer; 17481a75a6c8SChristoph Lameter int l; 17491a75a6c8SChristoph Lameter nodemask_t nodes; 17501a75a6c8SChristoph Lameter int mode = pol ? 
pol->policy : MPOL_DEFAULT; 17511a75a6c8SChristoph Lameter 17521a75a6c8SChristoph Lameter switch (mode) { 17531a75a6c8SChristoph Lameter case MPOL_DEFAULT: 17541a75a6c8SChristoph Lameter nodes_clear(nodes); 17551a75a6c8SChristoph Lameter break; 17561a75a6c8SChristoph Lameter 17571a75a6c8SChristoph Lameter case MPOL_PREFERRED: 17581a75a6c8SChristoph Lameter nodes_clear(nodes); 17591a75a6c8SChristoph Lameter node_set(pol->v.preferred_node, nodes); 17601a75a6c8SChristoph Lameter break; 17611a75a6c8SChristoph Lameter 17621a75a6c8SChristoph Lameter case MPOL_BIND: 17631a75a6c8SChristoph Lameter get_zonemask(pol, &nodes); 17641a75a6c8SChristoph Lameter break; 17651a75a6c8SChristoph Lameter 17661a75a6c8SChristoph Lameter case MPOL_INTERLEAVE: 17671a75a6c8SChristoph Lameter nodes = pol->v.nodes; 17681a75a6c8SChristoph Lameter break; 17691a75a6c8SChristoph Lameter 17701a75a6c8SChristoph Lameter default: 17711a75a6c8SChristoph Lameter BUG(); 17721a75a6c8SChristoph Lameter return -EFAULT; 17731a75a6c8SChristoph Lameter } 17741a75a6c8SChristoph Lameter 17751a75a6c8SChristoph Lameter l = strlen(policy_types[mode]); 17761a75a6c8SChristoph Lameter if (buffer + maxlen < p + l + 1) 17771a75a6c8SChristoph Lameter return -ENOSPC; 17781a75a6c8SChristoph Lameter 17791a75a6c8SChristoph Lameter strcpy(p, policy_types[mode]); 17801a75a6c8SChristoph Lameter p += l; 17811a75a6c8SChristoph Lameter 17821a75a6c8SChristoph Lameter if (!nodes_empty(nodes)) { 17831a75a6c8SChristoph Lameter if (buffer + maxlen < p + 2) 17841a75a6c8SChristoph Lameter return -ENOSPC; 17851a75a6c8SChristoph Lameter *p++ = '='; 17861a75a6c8SChristoph Lameter p += nodelist_scnprintf(p, buffer + maxlen - p, nodes); 17871a75a6c8SChristoph Lameter } 17881a75a6c8SChristoph Lameter return p - buffer; 17891a75a6c8SChristoph Lameter } 17901a75a6c8SChristoph Lameter 17911a75a6c8SChristoph Lameter struct numa_maps { 17921a75a6c8SChristoph Lameter unsigned long pages; 17931a75a6c8SChristoph Lameter unsigned long anon; 1794397874dfSChristoph Lameter unsigned long active; 1795397874dfSChristoph Lameter unsigned long writeback; 17961a75a6c8SChristoph Lameter unsigned long mapcount_max; 1797397874dfSChristoph Lameter unsigned long dirty; 1798397874dfSChristoph Lameter unsigned long swapcache; 17991a75a6c8SChristoph Lameter unsigned long node[MAX_NUMNODES]; 18001a75a6c8SChristoph Lameter }; 18011a75a6c8SChristoph Lameter 1802397874dfSChristoph Lameter static void gather_stats(struct page *page, void *private, int pte_dirty) 18031a75a6c8SChristoph Lameter { 18041a75a6c8SChristoph Lameter struct numa_maps *md = private; 18051a75a6c8SChristoph Lameter int count = page_mapcount(page); 18061a75a6c8SChristoph Lameter 18071a75a6c8SChristoph Lameter md->pages++; 1808397874dfSChristoph Lameter if (pte_dirty || PageDirty(page)) 1809397874dfSChristoph Lameter md->dirty++; 1810397874dfSChristoph Lameter 1811397874dfSChristoph Lameter if (PageSwapCache(page)) 1812397874dfSChristoph Lameter md->swapcache++; 1813397874dfSChristoph Lameter 1814397874dfSChristoph Lameter if (PageActive(page)) 1815397874dfSChristoph Lameter md->active++; 1816397874dfSChristoph Lameter 1817397874dfSChristoph Lameter if (PageWriteback(page)) 1818397874dfSChristoph Lameter md->writeback++; 18191a75a6c8SChristoph Lameter 18201a75a6c8SChristoph Lameter if (PageAnon(page)) 18211a75a6c8SChristoph Lameter md->anon++; 18221a75a6c8SChristoph Lameter 1823397874dfSChristoph Lameter if (count > md->mapcount_max) 1824397874dfSChristoph Lameter md->mapcount_max = count; 1825397874dfSChristoph Lameter 
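	/*
	 * Illustrative note (not in the original source): this per-node
	 * histogram is what show_numa_map() below prints as "N<nid>=<pages>".
	 */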
18261a75a6c8SChristoph Lameter md->node[page_to_nid(page)]++; 18271a75a6c8SChristoph Lameter } 18281a75a6c8SChristoph Lameter 18297f709ed0SAndrew Morton #ifdef CONFIG_HUGETLB_PAGE 1830397874dfSChristoph Lameter static void check_huge_range(struct vm_area_struct *vma, 1831397874dfSChristoph Lameter unsigned long start, unsigned long end, 1832397874dfSChristoph Lameter struct numa_maps *md) 1833397874dfSChristoph Lameter { 1834397874dfSChristoph Lameter unsigned long addr; 1835397874dfSChristoph Lameter struct page *page; 1836397874dfSChristoph Lameter 1837397874dfSChristoph Lameter for (addr = start; addr < end; addr += HPAGE_SIZE) { 1838397874dfSChristoph Lameter pte_t *ptep = huge_pte_offset(vma->vm_mm, addr & HPAGE_MASK); 1839397874dfSChristoph Lameter pte_t pte; 1840397874dfSChristoph Lameter 1841397874dfSChristoph Lameter if (!ptep) 1842397874dfSChristoph Lameter continue; 1843397874dfSChristoph Lameter 1844397874dfSChristoph Lameter pte = *ptep; 1845397874dfSChristoph Lameter if (pte_none(pte)) 1846397874dfSChristoph Lameter continue; 1847397874dfSChristoph Lameter 1848397874dfSChristoph Lameter page = pte_page(pte); 1849397874dfSChristoph Lameter if (!page) 1850397874dfSChristoph Lameter continue; 1851397874dfSChristoph Lameter 1852397874dfSChristoph Lameter gather_stats(page, md, pte_dirty(*ptep)); 1853397874dfSChristoph Lameter } 1854397874dfSChristoph Lameter } 18557f709ed0SAndrew Morton #else 18567f709ed0SAndrew Morton static inline void check_huge_range(struct vm_area_struct *vma, 18577f709ed0SAndrew Morton unsigned long start, unsigned long end, 18587f709ed0SAndrew Morton struct numa_maps *md) 18597f709ed0SAndrew Morton { 18607f709ed0SAndrew Morton } 18617f709ed0SAndrew Morton #endif 1862397874dfSChristoph Lameter 18631a75a6c8SChristoph Lameter int show_numa_map(struct seq_file *m, void *v) 18641a75a6c8SChristoph Lameter { 186599f89551SEric W. Biederman struct proc_maps_private *priv = m->private; 18661a75a6c8SChristoph Lameter struct vm_area_struct *vma = v; 18671a75a6c8SChristoph Lameter struct numa_maps *md; 1868397874dfSChristoph Lameter struct file *file = vma->vm_file; 1869397874dfSChristoph Lameter struct mm_struct *mm = vma->vm_mm; 18701a75a6c8SChristoph Lameter int n; 18711a75a6c8SChristoph Lameter char buffer[50]; 18721a75a6c8SChristoph Lameter 1873397874dfSChristoph Lameter if (!mm) 18741a75a6c8SChristoph Lameter return 0; 18751a75a6c8SChristoph Lameter 18761a75a6c8SChristoph Lameter md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL); 18771a75a6c8SChristoph Lameter if (!md) 18781a75a6c8SChristoph Lameter return 0; 18791a75a6c8SChristoph Lameter 18801a75a6c8SChristoph Lameter mpol_to_str(buffer, sizeof(buffer), 188199f89551SEric W. 
Biederman get_vma_policy(priv->task, vma, vma->vm_start)); 18821a75a6c8SChristoph Lameter 1883397874dfSChristoph Lameter seq_printf(m, "%08lx %s", vma->vm_start, buffer); 1884397874dfSChristoph Lameter 1885397874dfSChristoph Lameter if (file) { 1886397874dfSChristoph Lameter seq_printf(m, " file="); 1887e9536ae7SJosef Sipek seq_path(m, file->f_path.mnt, file->f_path.dentry, "\n\t= "); 1888397874dfSChristoph Lameter } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) { 1889397874dfSChristoph Lameter seq_printf(m, " heap"); 1890397874dfSChristoph Lameter } else if (vma->vm_start <= mm->start_stack && 1891397874dfSChristoph Lameter vma->vm_end >= mm->start_stack) { 1892397874dfSChristoph Lameter seq_printf(m, " stack"); 1893397874dfSChristoph Lameter } 1894397874dfSChristoph Lameter 1895397874dfSChristoph Lameter if (is_vm_hugetlb_page(vma)) { 1896397874dfSChristoph Lameter check_huge_range(vma, vma->vm_start, vma->vm_end, md); 1897397874dfSChristoph Lameter seq_printf(m, " huge"); 1898397874dfSChristoph Lameter } else { 1899397874dfSChristoph Lameter check_pgd_range(vma, vma->vm_start, vma->vm_end, 1900397874dfSChristoph Lameter &node_online_map, MPOL_MF_STATS, md); 1901397874dfSChristoph Lameter } 1902397874dfSChristoph Lameter 1903397874dfSChristoph Lameter if (!md->pages) 1904397874dfSChristoph Lameter goto out; 19051a75a6c8SChristoph Lameter 19061a75a6c8SChristoph Lameter if (md->anon) 19071a75a6c8SChristoph Lameter seq_printf(m," anon=%lu",md->anon); 19081a75a6c8SChristoph Lameter 1909397874dfSChristoph Lameter if (md->dirty) 1910397874dfSChristoph Lameter seq_printf(m," dirty=%lu",md->dirty); 1911397874dfSChristoph Lameter 1912397874dfSChristoph Lameter if (md->pages != md->anon && md->pages != md->dirty) 1913397874dfSChristoph Lameter seq_printf(m, " mapped=%lu", md->pages); 1914397874dfSChristoph Lameter 1915397874dfSChristoph Lameter if (md->mapcount_max > 1) 1916397874dfSChristoph Lameter seq_printf(m, " mapmax=%lu", md->mapcount_max); 1917397874dfSChristoph Lameter 1918397874dfSChristoph Lameter if (md->swapcache) 1919397874dfSChristoph Lameter seq_printf(m," swapcache=%lu", md->swapcache); 1920397874dfSChristoph Lameter 1921397874dfSChristoph Lameter if (md->active < md->pages && !is_vm_hugetlb_page(vma)) 1922397874dfSChristoph Lameter seq_printf(m," active=%lu", md->active); 1923397874dfSChristoph Lameter 1924397874dfSChristoph Lameter if (md->writeback) 1925397874dfSChristoph Lameter seq_printf(m," writeback=%lu", md->writeback); 1926397874dfSChristoph Lameter 19271a75a6c8SChristoph Lameter for_each_online_node(n) 19281a75a6c8SChristoph Lameter if (md->node[n]) 19291a75a6c8SChristoph Lameter seq_printf(m, " N%d=%lu", n, md->node[n]); 1930397874dfSChristoph Lameter out: 19311a75a6c8SChristoph Lameter seq_putc(m, '\n'); 19321a75a6c8SChristoph Lameter kfree(md); 19331a75a6c8SChristoph Lameter 19341a75a6c8SChristoph Lameter if (m->count < m->size) 193599f89551SEric W. Biederman m->version = (vma != priv->tail_vma) ? vma->vm_start : 0; 19361a75a6c8SChristoph Lameter return 0; 19371a75a6c8SChristoph Lameter } 19381a75a6c8SChristoph Lameter 1939
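/*
 * Illustrative output (not in the original source; the values are made
 * up): a /proc/<pid>/numa_maps line emitted by show_numa_map() above
 * looks like
 *
 *	2aaaaac000 interleave=0-3 anon=16 dirty=16 mapmax=2 N0=4 N1=4 N2=4 N3=4
 *
 * i.e. the vma start address, the mpol_to_str() policy string, the
 * gathered counters, and one N<nid>=<pages> pair per node with pages.
 */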