/*
 * Generic hugetlb support.
 * (C) Nadia Yvette Chambers, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/page-isolation.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>

#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/node.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
unsigned long hugepages_treat_as_movable;

int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];

__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static unsigned long __initdata default_hstate_size;

/*
 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
 * free_huge_pages, and surplus_huge_pages.
 */
DEFINE_SPINLOCK(hugetlb_lock);

static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
{
	bool free = (spool->count == 0) && (spool->used_hpages == 0);

	spin_unlock(&spool->lock);

	/* If no pages are used, and no other handles to the subpool
	 * remain, free the subpool */
	if (free)
		kfree(spool);
}

struct hugepage_subpool *hugepage_new_subpool(long nr_blocks)
{
	struct hugepage_subpool *spool;

	spool = kmalloc(sizeof(*spool), GFP_KERNEL);
	if (!spool)
		return NULL;

	spin_lock_init(&spool->lock);
	spool->count = 1;
	spool->max_hpages = nr_blocks;
	spool->used_hpages = 0;

	return spool;
}

void hugepage_put_subpool(struct hugepage_subpool *spool)
{
	spin_lock(&spool->lock);
	BUG_ON(!spool->count);
	spool->count--;
	unlock_or_release_subpool(spool);
}

static int hugepage_subpool_get_pages(struct hugepage_subpool *spool,
				      long delta)
{
	int ret = 0;

	if (!spool)
		return 0;

	spin_lock(&spool->lock);
	if ((spool->used_hpages + delta) <= spool->max_hpages) {
		spool->used_hpages += delta;
	} else {
		ret = -ENOMEM;
	}
	spin_unlock(&spool->lock);

	return ret;
}

static void hugepage_subpool_put_pages(struct hugepage_subpool *spool,
				       long delta)
{
	if (!spool)
		return;

	spin_lock(&spool->lock);
	spool->used_hpages -= delta;
	/* If hugetlbfs_put_super couldn't free spool due to
	 * an outstanding quota reference, free it now. */
	unlock_or_release_subpool(spool);
}

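/*
 * Illustrative usage sketch, not part of the original file: callers
 * are expected to bracket a huge page allocation with the subpool
 * charge, undoing the charge on failure, roughly:
 *
 *	if (hugepage_subpool_get_pages(spool, 1))
 *		return ERR_PTR(-ENOSPC);
 *	page = ...allocate the huge page...;
 *	if (!page)
 *		hugepage_subpool_put_pages(spool, 1);
 *
 * alloc_huge_page() below follows exactly this pattern.
 */
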
static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{
	return HUGETLBFS_SB(inode->i_sb)->spool;
}

static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{
	return subpool_inode(file_inode(vma->vm_file));
}

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 *                    across the pages in a mapping.
 *
 * The region data structures are protected by a combination of the mmap_sem
 * and the hugetlb_instantiation_mutex.  To access or modify a region the caller
 * must either hold the mmap_sem for write, or the mmap_sem for read and
 * the hugetlb_instantiation_mutex:
 *
 *	down_write(&mm->mmap_sem);
 * or
 *	down_read(&mm->mmap_sem);
 *	mutex_lock(&hugetlb_instantiation_mutex);
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
};

static long region_add(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg, *trg;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;

	/* Check for and consume any regions we now overlap with. */
	nrg = rg;
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			break;

		/* If this area reaches higher, then extend our area to
		 * include it completely.  If this is not the first area
		 * which we intend to reuse, free it. */
		if (rg->to > t)
			t = rg->to;
		if (rg != nrg) {
			list_del(&rg->link);
			kfree(rg);
		}
	}
	nrg->from = f;
	nrg->to = t;
	return 0;
}

static long region_chg(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg;
	long chg = 0;

	/* Locate the region we are before or in. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* If we are below the current region then a new region is required.
	 * Subtle, allocate a new region at the position but make it zero
	 * size such that we can guarantee to record the reservation. */
	if (&rg->link == head || t < rg->from) {
		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
		if (!nrg)
			return -ENOMEM;
		nrg->from = f;
		nrg->to = f;
		INIT_LIST_HEAD(&nrg->link);
		list_add(&nrg->link, rg->link.prev);

		return t - f;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;
	chg = t - f;

	/* Check for and consume any regions we now overlap with. */
	list_for_each_entry(rg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			return chg;

		/* We overlap with this area, if it extends further than
		 * us then we must extend ourselves.  Account for its
		 * existing reservation. */
		if (rg->to > t) {
			chg += rg->to - t;
			t = rg->to;
		}
		chg -= rg->to - rg->from;
	}
	return chg;
}

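/*
 * Worked example (illustrative, not from the original source): with a
 * single existing region [0, 3), region_chg(head, 2, 5) rounds its
 * left edge down to 0, subtracts the 3 already-reserved pages from the
 * charge, and returns 2 (pages 3 and 4 still need reserving). The
 * matching region_add(head, 2, 5) then merges everything into one
 * region [0, 5).
 */
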
static long region_truncate(struct list_head *head, long end)
{
	struct file_region *rg, *trg;
	long chg = 0;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (end <= rg->to)
			break;
	if (&rg->link == head)
		return 0;

	/* If we are in the middle of a region then adjust it. */
	if (end > rg->from) {
		chg = rg->to - end;
		rg->to = end;
		rg = list_entry(rg->link.next, typeof(*rg), link);
	}

	/* Drop any remaining regions. */
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		chg += rg->to - rg->from;
		list_del(&rg->link);
		kfree(rg);
	}
	return chg;
}

static long region_count(struct list_head *head, long f, long t)
{
	struct file_region *rg;
	long chg = 0;

	/* Locate each segment we overlap with, and count that overlap. */
	list_for_each_entry(rg, head, link) {
		long seg_from;
		long seg_to;

		if (rg->to <= f)
			continue;
		if (rg->from >= t)
			break;

		seg_from = max(rg->from, f);
		seg_to = min(rg->to, t);

		chg += seg_to - seg_from;
	}

	return chg;
}

/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	return ((address - vma->vm_start) >> huge_page_shift(h)) +
			(vma->vm_pgoff >> huge_page_order(h));
}

pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address)
{
	return vma_hugecache_offset(hstate_vma(vma), vma, address);
}

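/*
 * Worked example (illustrative): with 4KB base pages, a 2MB hstate
 * has huge_page_shift() == 21 and huge_page_order() == 9. A fault at
 * vma->vm_start + 4MB in a VMA with vm_pgoff == 512 thus maps to
 * ((4MB) >> 21) + (512 >> 9) == 2 + 1 == 3, the fourth huge page of
 * the backing file.
 */
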
/*
 * Return the size of the pages allocated when backing a VMA. In the majority
 * of cases this will be the same size as used by the page table entries.
 */
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	struct hstate *hstate;

	if (!is_vm_hugetlb_page(vma))
		return PAGE_SIZE;

	hstate = hstate_vma(vma);

	return 1UL << huge_page_shift(hstate);
}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);

/*
 * Return the page size being used by the MMU to back a VMA. In the majority
 * of cases, the page size used by the kernel matches the MMU size. On
 * architectures where it differs, an architecture-specific version of this
 * function is required.
 */
#ifndef vma_mmu_pagesize
unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return vma_kernel_pagesize(vma);
}
#endif

/*
 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 * is guaranteed to have their future faults succeed.
 *
 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held. It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping.  A shared mapping has a region map associated
 * with the underlying file; this region map represents the backing file
 * pages which have ever had a reservation assigned, and it persists even
 * after the page is instantiated.  A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it; this region map represents those offsets which have consumed
 * reservation, i.e. where pages have been instantiated.
 */
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
	return (unsigned long)vma->vm_private_data;
}

static void set_vma_private_data(struct vm_area_struct *vma,
							unsigned long value)
{
	vma->vm_private_data = (void *)value;
}

struct resv_map {
	struct kref refs;
	struct list_head regions;
};

static struct resv_map *resv_map_alloc(void)
{
	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
	if (!resv_map)
		return NULL;

	kref_init(&resv_map->refs);
	INIT_LIST_HEAD(&resv_map->regions);

	return resv_map;
}

static void resv_map_release(struct kref *ref)
{
	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);

	/* Clear out any active regions before we release the map. */
	region_truncate(&resv_map->regions, 0);
	kfree(resv_map);
}

static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	if (!(vma->vm_flags & VM_MAYSHARE))
		return (struct resv_map *)(get_vma_private_data(vma) &
							~HPAGE_RESV_MASK);
	return NULL;
}

static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);

	set_vma_private_data(vma, (get_vma_private_data(vma) &
				HPAGE_RESV_MASK) | (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);

	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));

	return (get_vma_private_data(vma) & flag) != 0;
}

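/*
 * Worked example (illustrative): a kmalloc()ed resv_map is at least
 * word aligned, so the two low bits of its address are clear. With a
 * map at 0x...5600 and HPAGE_RESV_OWNER set, vm_private_data holds
 * 0x...5601; vma_resv_map() masks off HPAGE_RESV_MASK to recover the
 * pointer, while is_vma_resv_set() tests the flag bits directly.
 */
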
/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	if (!(vma->vm_flags & VM_MAYSHARE))
		vma->vm_private_data = (void *)0;
}

/* Returns true if the VMA has associated reserve pages */
static int vma_has_reserves(struct vm_area_struct *vma, long chg)
{
	if (vma->vm_flags & VM_NORESERVE) {
		/*
		 * This address is already reserved by another process
		 * (chg == 0), so we should decrement the reserved count.
		 * Without decrementing, the reserve count remains after
		 * releasing the inode, because this allocated page will go
		 * into the page cache and is regarded as coming from the
		 * reserved pool in the releasing step.  Currently, we don't
		 * have any other solution to deal with this situation
		 * properly, so add a work-around here.
		 */
		if (vma->vm_flags & VM_MAYSHARE && chg == 0)
			return 1;
		else
			return 0;
	}

	/* Shared mappings always use reserves */
	if (vma->vm_flags & VM_MAYSHARE)
		return 1;

	/*
	 * Only the process that called mmap() has reserves for
	 * private mappings.
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
		return 1;

	return 0;
}

static void copy_gigantic_page(struct page *dst, struct page *src)
{
	int i;
	struct hstate *h = page_hstate(src);
	struct page *dst_base = dst;
	struct page *src_base = src;

	for (i = 0; i < pages_per_huge_page(h); ) {
		cond_resched();
		copy_highpage(dst, src);

		i++;
		dst = mem_map_next(dst, dst_base, i);
		src = mem_map_next(src, src_base, i);
	}
}

void copy_huge_page(struct page *dst, struct page *src)
{
	int i;
	struct hstate *h = page_hstate(src);

	if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
		copy_gigantic_page(dst, src);
		return;
	}

	might_sleep();
	for (i = 0; i < pages_per_huge_page(h); i++) {
		cond_resched();
		copy_highpage(dst + i, src + i);
	}
}

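/*
 * Illustrative note (assuming 4KB base pages and the default
 * MAX_ORDER of 11, i.e. MAX_ORDER_NR_PAGES == 1024): a 2MB huge page
 * spans 512 base pages and takes the simple loop above, while a 1GB
 * gigantic page spans 262144 base pages and goes through
 * copy_gigantic_page(), whose mem_map_next() stepping copes with a
 * potentially discontiguous mem_map.
 */
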
static void enqueue_huge_page(struct hstate *h, struct page *page)
{
	int nid = page_to_nid(page);
	list_move(&page->lru, &h->hugepage_freelists[nid]);
	h->free_huge_pages++;
	h->free_huge_pages_node[nid]++;
}

static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
{
	struct page *page;

	list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
		if (!is_migrate_isolate_page(page))
			break;
	/*
	 * if 'non-isolated free hugepage' not found on the list,
	 * the allocation fails.
	 */
	if (&h->hugepage_freelists[nid] == &page->lru)
		return NULL;
	list_move(&page->lru, &h->hugepage_activelist);
	set_page_refcounted(page);
	h->free_huge_pages--;
	h->free_huge_pages_node[nid]--;
	return page;
}

/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	if (hugepages_treat_as_movable || hugepage_migration_support(h))
		return GFP_HIGHUSER_MOVABLE;
	else
		return GFP_HIGHUSER;
}

static struct page *dequeue_huge_page_vma(struct hstate *h,
				struct vm_area_struct *vma,
				unsigned long address, int avoid_reserve,
				long chg)
{
	struct page *page = NULL;
	struct mempolicy *mpol;
	nodemask_t *nodemask;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;
	unsigned int cpuset_mems_cookie;

	/*
	 * A child process with MAP_PRIVATE mappings created by its parent
	 * has no page reserves. This check ensures that reservations are
	 * not "stolen". The child may still get SIGKILLed.
	 */
	if (!vma_has_reserves(vma, chg) &&
			h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

	/* If reserves cannot be used, ensure enough pages are in the pool */
	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

retry_cpuset:
	cpuset_mems_cookie = get_mems_allowed();
	zonelist = huge_zonelist(vma, address,
					htlb_alloc_mask(h), &mpol, &nodemask);

	for_each_zone_zonelist_nodemask(zone, z, zonelist,
						MAX_NR_ZONES - 1, nodemask) {
		if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask(h))) {
			page = dequeue_huge_page_node(h, zone_to_nid(zone));
			if (page) {
				if (avoid_reserve)
					break;
				if (!vma_has_reserves(vma, chg))
					break;

				SetPagePrivate(page);
				h->resv_huge_pages--;
				break;
			}
		}
	}

	mpol_cond_put(mpol);
	if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
		goto retry_cpuset;
	return page;

err:
	return NULL;
}

static void update_and_free_page(struct hstate *h, struct page *page)
{
	int i;

	VM_BUG_ON(h->order >= MAX_ORDER);

	h->nr_huge_pages--;
	h->nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < pages_per_huge_page(h); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
				1 << PG_referenced | 1 << PG_dirty |
				1 << PG_active | 1 << PG_reserved |
				1 << PG_private | 1 << PG_writeback);
	}
	VM_BUG_ON(hugetlb_cgroup_from_page(page));
	set_compound_page_dtor(page, NULL);
	set_page_refcounted(page);
	arch_release_hugepage(page);
	__free_pages(page, huge_page_order(h));
}

struct hstate *size_to_hstate(unsigned long size)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (huge_page_size(h) == size)
			return h;
	}
	return NULL;
}

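/*
 * Illustrative usage, not from the original file: callers translate a
 * byte size into the matching pool, e.g.
 *
 *	struct hstate *h = size_to_hstate(2UL * 1024 * 1024);
 *
 * which returns the 2MB hstate if one is configured, or NULL.
 */
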
static void free_huge_page(struct page *page)
{
	/*
	 * Can't pass hstate in here because it is called from the
	 * compound page destructor.
	 */
	struct hstate *h = page_hstate(page);
	int nid = page_to_nid(page);
	struct hugepage_subpool *spool =
		(struct hugepage_subpool *)page_private(page);
	bool restore_reserve;

	set_page_private(page, 0);
	page->mapping = NULL;
	BUG_ON(page_count(page));
	BUG_ON(page_mapcount(page));
	restore_reserve = PagePrivate(page);
	ClearPagePrivate(page);

	spin_lock(&hugetlb_lock);
	hugetlb_cgroup_uncharge_page(hstate_index(h),
				     pages_per_huge_page(h), page);
	if (restore_reserve)
		h->resv_huge_pages++;

	if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
		/* remove the page from active list */
		list_del(&page->lru);
		update_and_free_page(h, page);
		h->surplus_huge_pages--;
		h->surplus_huge_pages_node[nid]--;
	} else {
		arch_clear_hugepage_flags(page);
		enqueue_huge_page(h, page);
	}
	spin_unlock(&hugetlb_lock);
	hugepage_subpool_put_pages(spool, 1);
}

static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
{
	INIT_LIST_HEAD(&page->lru);
	set_compound_page_dtor(page, free_huge_page);
	spin_lock(&hugetlb_lock);
	set_hugetlb_cgroup(page, NULL);
	h->nr_huge_pages++;
	h->nr_huge_pages_node[nid]++;
	spin_unlock(&hugetlb_lock);
	put_page(page); /* free it into the hugepage allocator */
}

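/*
 * Illustrative note: the set_compound_page_dtor(page, free_huge_page)
 * call above is what routes the final put_page() on a huge page to
 * free_huge_page(), so the put_page() at the end of
 * prep_new_huge_page() "frees" the fresh page straight into the
 * hugepage pool rather than back to the buddy allocator.
 */
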
static void prep_compound_gigantic_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p = page + 1;

	/* we rely on prep_new_huge_page to set the destructor */
	set_compound_order(page, order);
	__SetPageHead(page);
	__ClearPageReserved(page);
	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
		__SetPageTail(p);
		/*
		 * For gigantic hugepages allocated through bootmem at
		 * boot, it's safer to be consistent with the not-gigantic
		 * hugepages and clear the PG_reserved bit from all tail pages
		 * too.  Otherwise drivers using get_user_pages() to access
		 * tail pages may get the reference counting wrong if they see
		 * PG_reserved set on a tail page (despite the head page not
		 * having PG_reserved set).  Enforcing this consistency between
		 * head and tail pages allows drivers to optimize away a check
		 * on the head page when they need to know if put_page() is
		 * needed after get_user_pages().
		 */
		__ClearPageReserved(p);
		set_page_count(p, 0);
		p->first_page = page;
	}
}

/*
 * PageHuge() only returns true for hugetlbfs pages, but not for normal or
 * transparent huge pages.  See the PageTransHuge() documentation for more
 * details.
 */
int PageHuge(struct page *page)
{
	compound_page_dtor *dtor;

	if (!PageCompound(page))
		return 0;

	page = compound_head(page);
	dtor = get_compound_page_dtor(page);

	return dtor == free_huge_page;
}
EXPORT_SYMBOL_GPL(PageHuge);

pgoff_t __basepage_index(struct page *page)
{
	struct page *page_head = compound_head(page);
	pgoff_t index = page_index(page_head);
	unsigned long compound_idx;

	if (!PageHuge(page_head))
		return page_index(page);

	if (compound_order(page_head) >= MAX_ORDER)
		compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
	else
		compound_idx = page - page_head;

	return (index << compound_order(page_head)) + compound_idx;
}

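/*
 * Worked example (illustrative): for a base page located 3 pages into
 * a 2MB huge page (order 9) whose head sits at huge-page index 5 of
 * its mapping, __basepage_index() returns (5 << 9) + 3 == 2563 in
 * base-page units.
 */
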
static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
{
	struct page *page;

	if (h->order >= MAX_ORDER)
		return NULL;

	page = alloc_pages_exact_node(nid,
		htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
						__GFP_REPEAT|__GFP_NOWARN,
		huge_page_order(h));
	if (page) {
		if (arch_prepare_hugepage(page)) {
			__free_pages(page, huge_page_order(h));
			return NULL;
		}
		prep_new_huge_page(h, page, nid);
	}

	return page;
}

/*
 * common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 * be outside of *nodes_allowed.  Ensure that we use an allowed
 * node for alloc or free.
 */
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	nid = next_node(nid, *nodes_allowed);
	if (nid == MAX_NUMNODES)
		nid = first_node(*nodes_allowed);
	VM_BUG_ON(nid >= MAX_NUMNODES);

	return nid;
}

static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	if (!node_isset(nid, *nodes_allowed))
		nid = next_node_allowed(nid, nodes_allowed);
	return nid;
}

/*
 * returns the previously saved node ["this node"] from which to
 * allocate a persistent huge page for the pool and advance the
 * next node from which to allocate, handling wrap at end of node
 * mask.
 */
static int hstate_next_node_to_alloc(struct hstate *h,
					nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);

	return nid;
}

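/*
 * Worked example (illustrative): with *nodes_allowed == {0, 2} and
 * h->next_nid_to_alloc == 2, hstate_next_node_to_alloc() returns 2
 * and advances the cursor to node 0, since next_node() runs off the
 * end of the mask and next_node_allowed() wraps to first_node().
 */
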
/*
 * helper for free_pool_huge_page() - return the previously saved
 * node ["this node"] from which to free a huge page.  Advance the
 * next node id whether or not we find a free huge page to free so
 * that the next attempt to free addresses the next node.
 */
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);

	return nid;
}

#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_alloc(hs, mask)) || 1);	\
		nr_nodes--)

#define for_each_node_mask_to_free(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
		nr_nodes--)

static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
{
	struct page *page;
	int nr_nodes, node;
	int ret = 0;

	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
		page = alloc_fresh_huge_page_node(h, node);
		if (page) {
			ret = 1;
			break;
		}
	}

	if (ret)
		count_vm_event(HTLB_BUDDY_PGALLOC);
	else
		count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);

	return ret;
}

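/*
 * Illustrative sketch, not part of the original file: the iterator
 * macros visit each allowed node at most once, starting at the
 * per-hstate round-robin cursor, as alloc_fresh_huge_page() above
 * does:
 *
 *	int nr_nodes, node;
 *
 *	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
 *		if (try_one_node(node))	(hypothetical per-node attempt)
 *			break;
 *	}
 */
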
/*
 * Free huge page from pool from next node to free.
 * Attempt to keep persistent huge pages more or less
 * balanced over allowed nodes.
 * Called with hugetlb_lock locked.
 */
static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
						 bool acct_surplus)
{
	int nr_nodes, node;
	int ret = 0;

	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
		/*
		 * If we're returning unused surplus pages, only examine
		 * nodes with surplus pages.
		 */
		if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
		    !list_empty(&h->hugepage_freelists[node])) {
			struct page *page =
				list_entry(h->hugepage_freelists[node].next,
					  struct page, lru);
			list_del(&page->lru);
			h->free_huge_pages--;
			h->free_huge_pages_node[node]--;
			if (acct_surplus) {
				h->surplus_huge_pages--;
				h->surplus_huge_pages_node[node]--;
			}
			update_and_free_page(h, page);
			ret = 1;
			break;
		}
	}

	return ret;
}

/*
 * Dissolve a given free hugepage into free buddy pages. This function does
 * nothing for in-use (including surplus) hugepages.
 */
static void dissolve_free_huge_page(struct page *page)
{
	spin_lock(&hugetlb_lock);
	if (PageHuge(page) && !page_count(page)) {
		struct hstate *h = page_hstate(page);
		int nid = page_to_nid(page);
		list_del(&page->lru);
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
		update_and_free_page(h, page);
	}
	spin_unlock(&hugetlb_lock);
}

/*
 * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
 * make specified memory blocks removable from the system.
 * Note that start_pfn should be aligned with the (minimum) hugepage size.
 */
void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned int order = 8 * sizeof(void *);
	unsigned long pfn;
	struct hstate *h;

	/* Set scan step to minimum hugepage size */
	for_each_hstate(h)
		if (order > huge_page_order(h))
			order = huge_page_order(h);
	VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << order));
	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order)
		dissolve_free_huge_page(pfn_to_page(pfn));
}

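/*
 * Worked example (illustrative): with a single 2MB hstate (order 9),
 * the loop above steps 512 pfns at a time and calls
 * dissolve_free_huge_page() once per potential huge page head in
 * [start_pfn, end_pfn). The "8 * sizeof(void *)" initializer (64 on
 * 64-bit) is just an upper bound that the first hstate immediately
 * lowers.
 */
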
static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
{
	struct page *page;
	unsigned int r_nid;

	if (h->order >= MAX_ORDER)
		return NULL;

	/*
	 * Assume we will successfully allocate the surplus page to
	 * prevent racing processes from causing the surplus to exceed
	 * overcommit
	 *
	 * This however introduces a different race, where a process B
	 * tries to grow the static hugepage pool while alloc_pages() is
	 * called by process A. B will only examine the per-node
	 * counters in determining if surplus huge pages can be
	 * converted to normal huge pages in adjust_pool_surplus(). A
	 * won't be able to increment the per-node counter, until the
	 * lock is dropped by B, but B doesn't drop hugetlb_lock until
	 * no more huge pages can be converted from surplus to normal
	 * state (and doesn't try to convert again). Thus, we have a
	 * case where a surplus huge page exists, the pool is grown, and
	 * the surplus huge page still exists after, even though it
	 * should just have been converted to a normal huge page. This
	 * does not leak memory, though, as the hugepage will be freed
	 * once it is out of use. It also does not allow the counters to
	 * go out of whack in adjust_pool_surplus() as we don't modify
	 * the node values until we've gotten the hugepage and only the
	 * per-node value is checked there.
	 */
	spin_lock(&hugetlb_lock);
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
		spin_unlock(&hugetlb_lock);
		return NULL;
	} else {
		h->nr_huge_pages++;
		h->surplus_huge_pages++;
	}
	spin_unlock(&hugetlb_lock);

	if (nid == NUMA_NO_NODE)
		page = alloc_pages(htlb_alloc_mask(h)|__GFP_COMP|
				   __GFP_REPEAT|__GFP_NOWARN,
				   huge_page_order(h));
	else
		page = alloc_pages_exact_node(nid,
			htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
			__GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));

	if (page && arch_prepare_hugepage(page)) {
		__free_pages(page, huge_page_order(h));
		page = NULL;
	}

	spin_lock(&hugetlb_lock);
	if (page) {
		INIT_LIST_HEAD(&page->lru);
		r_nid = page_to_nid(page);
		set_compound_page_dtor(page, free_huge_page);
		set_hugetlb_cgroup(page, NULL);
		/*
		 * We incremented the global counters already
		 */
		h->nr_huge_pages_node[r_nid]++;
		h->surplus_huge_pages_node[r_nid]++;
		__count_vm_event(HTLB_BUDDY_PGALLOC);
	} else {
		h->nr_huge_pages--;
		h->surplus_huge_pages--;
		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
	}
	spin_unlock(&hugetlb_lock);

	return page;
}

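/*
 * Worked example (illustrative): with nr_overcommit_huge_pages == 4
 * and surplus_huge_pages == 4, alloc_buddy_huge_page() bails out
 * before touching the buddy allocator; with surplus_huge_pages == 3
 * it optimistically bumps nr_huge_pages and surplus_huge_pages, and
 * only rolls them back if the page allocation fails.
 */
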
/*
 * This allocation function is useful in the context where vma is irrelevant.
 * E.g. soft-offlining uses this function because it only cares about the
 * physical address of the error page.
 */
struct page *alloc_huge_page_node(struct hstate *h, int nid)
{
	struct page *page = NULL;

	spin_lock(&hugetlb_lock);
	if (h->free_huge_pages - h->resv_huge_pages > 0)
		page = dequeue_huge_page_node(h, nid);
	spin_unlock(&hugetlb_lock);

	if (!page)
		page = alloc_buddy_huge_page(h, nid);

	return page;
}

Commit the entire reservation here to prevent another 1102ac09b3a1SAdam Litke * process from stealing the pages as they are added to the pool but 1103ac09b3a1SAdam Litke * before they are reserved. 1104e4e574b7SAdam Litke */ 1105e4e574b7SAdam Litke needed += allocated; 1106a5516438SAndi Kleen h->resv_huge_pages += delta; 1107e4e574b7SAdam Litke ret = 0; 1108a9869b83SNaoya Horiguchi 110919fc3f0aSAdam Litke /* Free the needed pages to the hugetlb pool */ 111019fc3f0aSAdam Litke list_for_each_entry_safe(page, tmp, &surplus_list, lru) { 111119fc3f0aSAdam Litke if ((--needed) < 0) 111219fc3f0aSAdam Litke break; 1113a9869b83SNaoya Horiguchi /* 1114a9869b83SNaoya Horiguchi * This page is now managed by the hugetlb allocator and has 1115a9869b83SNaoya Horiguchi * no users -- drop the buddy allocator's reference. 1116a9869b83SNaoya Horiguchi */ 1117a9869b83SNaoya Horiguchi put_page_testzero(page); 1118a9869b83SNaoya Horiguchi VM_BUG_ON(page_count(page)); 1119a5516438SAndi Kleen enqueue_huge_page(h, page); 112019fc3f0aSAdam Litke } 112128073b02SHillf Danton free: 1122b0365c8dSHillf Danton spin_unlock(&hugetlb_lock); 112319fc3f0aSAdam Litke 112419fc3f0aSAdam Litke /* Free unnecessary surplus pages to the buddy allocator */ 1125c0d934baSJoonsoo Kim list_for_each_entry_safe(page, tmp, &surplus_list, lru) 1126a9869b83SNaoya Horiguchi put_page(page); 112719fc3f0aSAdam Litke spin_lock(&hugetlb_lock); 1128e4e574b7SAdam Litke 1129e4e574b7SAdam Litke return ret; 1130e4e574b7SAdam Litke } 1131e4e574b7SAdam Litke 1132e4e574b7SAdam Litke /* 1133e4e574b7SAdam Litke * When releasing a hugetlb pool reservation, any surplus pages that were 1134e4e574b7SAdam Litke * allocated to satisfy the reservation must be explicitly freed if they were 1135e4e574b7SAdam Litke * never used. 1136685f3457SLee Schermerhorn * Called with hugetlb_lock held. 1137e4e574b7SAdam Litke */ 1138a5516438SAndi Kleen static void return_unused_surplus_pages(struct hstate *h, 1139a5516438SAndi Kleen unsigned long unused_resv_pages) 1140e4e574b7SAdam Litke { 1141e4e574b7SAdam Litke unsigned long nr_pages; 1142e4e574b7SAdam Litke 1143ac09b3a1SAdam Litke /* Uncommit the reservation */ 1144a5516438SAndi Kleen h->resv_huge_pages -= unused_resv_pages; 1145ac09b3a1SAdam Litke 1146aa888a74SAndi Kleen /* Cannot return gigantic pages currently */ 1147aa888a74SAndi Kleen if (h->order >= MAX_ORDER) 1148aa888a74SAndi Kleen return; 1149aa888a74SAndi Kleen 1150a5516438SAndi Kleen nr_pages = min(unused_resv_pages, h->surplus_huge_pages); 1151e4e574b7SAdam Litke 1152685f3457SLee Schermerhorn /* 1153685f3457SLee Schermerhorn * We want to release as many surplus pages as possible, spread 11549b5e5d0fSLee Schermerhorn * evenly across all nodes with memory. Iterate across these nodes 11559b5e5d0fSLee Schermerhorn * until we can no longer free unreserved surplus pages. This occurs 11569b5e5d0fSLee Schermerhorn * when the nodes with surplus pages have no free pages. 11579b5e5d0fSLee Schermerhorn * free_pool_huge_page() will balance the the freed pages across the 11589b5e5d0fSLee Schermerhorn * on-line nodes with memory and will handle the hstate accounting. 
1159685f3457SLee Schermerhorn */ 1160685f3457SLee Schermerhorn while (nr_pages--) { 11618cebfcd0SLai Jiangshan if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1)) 1162685f3457SLee Schermerhorn break; 1163e4e574b7SAdam Litke } 1164e4e574b7SAdam Litke } 1165e4e574b7SAdam Litke 1166c37f9fb1SAndy Whitcroft /* 1167c37f9fb1SAndy Whitcroft * Determine if the huge page at addr within the vma has an associated 1168c37f9fb1SAndy Whitcroft * reservation. Where it does not we will need to logically increase 116990481622SDavid Gibson * reservation and actually increase subpool usage before an allocation 117090481622SDavid Gibson * can occur. Where any new reservation would be required the 117190481622SDavid Gibson * reservation change is prepared, but not committed. Once the page 117290481622SDavid Gibson * has been allocated from the subpool and instantiated the change should 117390481622SDavid Gibson * be committed via vma_commit_reservation. No action is required on 117490481622SDavid Gibson * failure. 1175c37f9fb1SAndy Whitcroft */ 1176e2f17d94SRoel Kluin static long vma_needs_reservation(struct hstate *h, 1177a5516438SAndi Kleen struct vm_area_struct *vma, unsigned long addr) 1178c37f9fb1SAndy Whitcroft { 1179c37f9fb1SAndy Whitcroft struct address_space *mapping = vma->vm_file->f_mapping; 1180c37f9fb1SAndy Whitcroft struct inode *inode = mapping->host; 1181c37f9fb1SAndy Whitcroft 1182f83a275dSMel Gorman if (vma->vm_flags & VM_MAYSHARE) { 1183a5516438SAndi Kleen pgoff_t idx = vma_hugecache_offset(h, vma, addr); 1184c37f9fb1SAndy Whitcroft return region_chg(&inode->i_mapping->private_list, 1185c37f9fb1SAndy Whitcroft idx, idx + 1); 1186c37f9fb1SAndy Whitcroft 118784afd99bSAndy Whitcroft } else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { 1188c37f9fb1SAndy Whitcroft return 1; 1189c37f9fb1SAndy Whitcroft 119084afd99bSAndy Whitcroft } else { 1191e2f17d94SRoel Kluin long err; 1192a5516438SAndi Kleen pgoff_t idx = vma_hugecache_offset(h, vma, addr); 1193f522c3acSJoonsoo Kim struct resv_map *resv = vma_resv_map(vma); 119484afd99bSAndy Whitcroft 1195f522c3acSJoonsoo Kim err = region_chg(&resv->regions, idx, idx + 1); 119684afd99bSAndy Whitcroft if (err < 0) 119784afd99bSAndy Whitcroft return err; 1198c37f9fb1SAndy Whitcroft return 0; 1199c37f9fb1SAndy Whitcroft } 120084afd99bSAndy Whitcroft } 1201a5516438SAndi Kleen static void vma_commit_reservation(struct hstate *h, 1202a5516438SAndi Kleen struct vm_area_struct *vma, unsigned long addr) 1203c37f9fb1SAndy Whitcroft { 1204c37f9fb1SAndy Whitcroft struct address_space *mapping = vma->vm_file->f_mapping; 1205c37f9fb1SAndy Whitcroft struct inode *inode = mapping->host; 1206c37f9fb1SAndy Whitcroft 1207f83a275dSMel Gorman if (vma->vm_flags & VM_MAYSHARE) { 1208a5516438SAndi Kleen pgoff_t idx = vma_hugecache_offset(h, vma, addr); 1209c37f9fb1SAndy Whitcroft region_add(&inode->i_mapping->private_list, idx, idx + 1); 121084afd99bSAndy Whitcroft 121184afd99bSAndy Whitcroft } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { 1212a5516438SAndi Kleen pgoff_t idx = vma_hugecache_offset(h, vma, addr); 1213f522c3acSJoonsoo Kim struct resv_map *resv = vma_resv_map(vma); 121484afd99bSAndy Whitcroft 121584afd99bSAndy Whitcroft /* Mark this page used in the map. 
*/ 1216f522c3acSJoonsoo Kim region_add(&resv->regions, idx, idx + 1); 1217c37f9fb1SAndy Whitcroft } 1218c37f9fb1SAndy Whitcroft } 1219c37f9fb1SAndy Whitcroft 1220348ea204SAdam Litke static struct page *alloc_huge_page(struct vm_area_struct *vma, 122104f2cbe3SMel Gorman unsigned long addr, int avoid_reserve) 1222348ea204SAdam Litke { 122390481622SDavid Gibson struct hugepage_subpool *spool = subpool_vma(vma); 1224a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 1225348ea204SAdam Litke struct page *page; 1226e2f17d94SRoel Kluin long chg; 12276d76dcf4SAneesh Kumar K.V int ret, idx; 12286d76dcf4SAneesh Kumar K.V struct hugetlb_cgroup *h_cg; 12292fc39cecSAdam Litke 12306d76dcf4SAneesh Kumar K.V idx = hstate_index(h); 1231a1e78772SMel Gorman /* 123290481622SDavid Gibson * Processes that did not create the mapping will have no 123390481622SDavid Gibson * reserves and will not have accounted against subpool 123490481622SDavid Gibson * limit. Check that the subpool limit can be made before 123590481622SDavid Gibson * satisfying the allocation. MAP_NORESERVE mappings may also 123690481622SDavid Gibson * need pages and subpool limit allocated if no reserve 123790481622SDavid Gibson * mapping overlaps. 1238a1e78772SMel Gorman */ 1239a5516438SAndi Kleen chg = vma_needs_reservation(h, vma, addr); 1240c37f9fb1SAndy Whitcroft if (chg < 0) 124176dcee75SAneesh Kumar K.V return ERR_PTR(-ENOMEM); 12428bb3f12eSJoonsoo Kim if (chg || avoid_reserve) 12438bb3f12eSJoonsoo Kim if (hugepage_subpool_get_pages(spool, 1)) 124476dcee75SAneesh Kumar K.V return ERR_PTR(-ENOSPC); 124590d8b7e6SAdam Litke 12466d76dcf4SAneesh Kumar K.V ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg); 12476d76dcf4SAneesh Kumar K.V if (ret) { 12488bb3f12eSJoonsoo Kim if (chg || avoid_reserve) 12498bb3f12eSJoonsoo Kim hugepage_subpool_put_pages(spool, 1); 12506d76dcf4SAneesh Kumar K.V return ERR_PTR(-ENOSPC); 12516d76dcf4SAneesh Kumar K.V } 1252a1e78772SMel Gorman spin_lock(&hugetlb_lock); 1253af0ed73eSJoonsoo Kim page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, chg); 125481a6fcaeSJoonsoo Kim if (!page) { 125594ae8ba7SAneesh Kumar K.V spin_unlock(&hugetlb_lock); 1256bf50bab2SNaoya Horiguchi page = alloc_buddy_huge_page(h, NUMA_NO_NODE); 1257a1e78772SMel Gorman if (!page) { 12586d76dcf4SAneesh Kumar K.V hugetlb_cgroup_uncharge_cgroup(idx, 12596d76dcf4SAneesh Kumar K.V pages_per_huge_page(h), 12606d76dcf4SAneesh Kumar K.V h_cg); 12618bb3f12eSJoonsoo Kim if (chg || avoid_reserve) 12628bb3f12eSJoonsoo Kim hugepage_subpool_put_pages(spool, 1); 126376dcee75SAneesh Kumar K.V return ERR_PTR(-ENOSPC); 1264a1e78772SMel Gorman } 126579dbb236SAneesh Kumar K.V spin_lock(&hugetlb_lock); 126679dbb236SAneesh Kumar K.V list_move(&page->lru, &h->hugepage_activelist); 126781a6fcaeSJoonsoo Kim /* Fall through */ 1268a1e78772SMel Gorman } 126981a6fcaeSJoonsoo Kim hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page); 127081a6fcaeSJoonsoo Kim spin_unlock(&hugetlb_lock); 1271a1e78772SMel Gorman 127290481622SDavid Gibson set_page_private(page, (unsigned long)spool); 1273a1e78772SMel Gorman 1274a5516438SAndi Kleen vma_commit_reservation(h, vma, addr); 12757893d1d5SAdam Litke return page; 1276b45b5bd6SDavid Gibson } 1277b45b5bd6SDavid Gibson 127874060e4dSNaoya Horiguchi /* 127974060e4dSNaoya Horiguchi * alloc_huge_page()'s wrapper which simply returns the page if allocation 128074060e4dSNaoya Horiguchi * succeeds, otherwise NULL.
This function is called from new_vma_page(), 128174060e4dSNaoya Horiguchi * where no ERR_VALUE is expected to be returned. 128274060e4dSNaoya Horiguchi */ 128374060e4dSNaoya Horiguchi struct page *alloc_huge_page_noerr(struct vm_area_struct *vma, 128474060e4dSNaoya Horiguchi unsigned long addr, int avoid_reserve) 128574060e4dSNaoya Horiguchi { 128674060e4dSNaoya Horiguchi struct page *page = alloc_huge_page(vma, addr, avoid_reserve); 128774060e4dSNaoya Horiguchi if (IS_ERR(page)) 128874060e4dSNaoya Horiguchi page = NULL; 128974060e4dSNaoya Horiguchi return page; 129074060e4dSNaoya Horiguchi } 129174060e4dSNaoya Horiguchi 129291f47662SCyrill Gorcunov int __weak alloc_bootmem_huge_page(struct hstate *h) 1293aa888a74SAndi Kleen { 1294aa888a74SAndi Kleen struct huge_bootmem_page *m; 1295b2261026SJoonsoo Kim int nr_nodes, node; 1296aa888a74SAndi Kleen 1297b2261026SJoonsoo Kim for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) { 1298aa888a74SAndi Kleen void *addr; 1299aa888a74SAndi Kleen 1300b2261026SJoonsoo Kim addr = __alloc_bootmem_node_nopanic(NODE_DATA(node), 1301aa888a74SAndi Kleen huge_page_size(h), huge_page_size(h), 0); 1302aa888a74SAndi Kleen 1303aa888a74SAndi Kleen if (addr) { 1304aa888a74SAndi Kleen /* 1305aa888a74SAndi Kleen * Use the beginning of the huge page to store the 1306aa888a74SAndi Kleen * huge_bootmem_page struct (until gather_bootmem 1307aa888a74SAndi Kleen * puts them into the mem_map). 1308aa888a74SAndi Kleen */ 1309aa888a74SAndi Kleen m = addr; 1310aa888a74SAndi Kleen goto found; 1311aa888a74SAndi Kleen } 1312aa888a74SAndi Kleen } 1313aa888a74SAndi Kleen return 0; 1314aa888a74SAndi Kleen 1315aa888a74SAndi Kleen found: 1316aa888a74SAndi Kleen BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1)); 1317aa888a74SAndi Kleen /* Put them into a private list first because mem_map is not up yet */ 1318aa888a74SAndi Kleen list_add(&m->list, &huge_boot_pages); 1319aa888a74SAndi Kleen m->hstate = h; 1320aa888a74SAndi Kleen return 1; 1321aa888a74SAndi Kleen } 1322aa888a74SAndi Kleen 132318229df5SAndy Whitcroft static void prep_compound_huge_page(struct page *page, int order) 132418229df5SAndy Whitcroft { 132518229df5SAndy Whitcroft if (unlikely(order > (MAX_ORDER - 1))) 132618229df5SAndy Whitcroft prep_compound_gigantic_page(page, order); 132718229df5SAndy Whitcroft else 132818229df5SAndy Whitcroft prep_compound_page(page, order); 132918229df5SAndy Whitcroft } 133018229df5SAndy Whitcroft 1331aa888a74SAndi Kleen /* Put bootmem huge pages into the standard lists after mem_map is up */ 1332aa888a74SAndi Kleen static void __init gather_bootmem_prealloc(void) 1333aa888a74SAndi Kleen { 1334aa888a74SAndi Kleen struct huge_bootmem_page *m; 1335aa888a74SAndi Kleen 1336aa888a74SAndi Kleen list_for_each_entry(m, &huge_boot_pages, list) { 1337aa888a74SAndi Kleen struct hstate *h = m->hstate; 1338ee8f248dSBecky Bruce struct page *page; 1339ee8f248dSBecky Bruce 1340ee8f248dSBecky Bruce #ifdef CONFIG_HIGHMEM 1341ee8f248dSBecky Bruce page = pfn_to_page(m->phys >> PAGE_SHIFT); 1342ee8f248dSBecky Bruce free_bootmem_late((unsigned long)m, 1343ee8f248dSBecky Bruce sizeof(struct huge_bootmem_page)); 1344ee8f248dSBecky Bruce #else 1345ee8f248dSBecky Bruce page = virt_to_page(m); 1346ee8f248dSBecky Bruce #endif 1347aa888a74SAndi Kleen WARN_ON(page_count(page) != 1); 134818229df5SAndy Whitcroft prep_compound_huge_page(page, h->order); 1349ef5a22beSAndrea Arcangeli WARN_ON(PageReserved(page)); 1350aa888a74SAndi Kleen prep_new_huge_page(h, page, page_to_nid(page)); 
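/*
 * Illustrative note: at this point the huge_bootmem_page struct that
 * was stored at the beginning of the page (see the comment in
 * alloc_bootmem_huge_page() above) has served its purpose;
 * prep_new_huge_page() has handed the page over to the hugetlb
 * allocator.
 */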
1351b0320c7bSRafael Aquini /* 1352b0320c7bSRafael Aquini * If we had gigantic hugepages allocated at boot time, we need 1353b0320c7bSRafael Aquini * to restore the 'stolen' pages to totalram_pages in order to 1354b0320c7bSRafael Aquini * fix confusing memory reports from free(1) and other 1355b0320c7bSRafael Aquini * side-effects, like CommitLimit going negative. 1356b0320c7bSRafael Aquini */ 1357b0320c7bSRafael Aquini if (h->order > (MAX_ORDER - 1)) 13583dcc0571SJiang Liu adjust_managed_page_count(page, 1 << h->order); 1359aa888a74SAndi Kleen } 1360aa888a74SAndi Kleen } 1361aa888a74SAndi Kleen 13628faa8b07SAndi Kleen static void __init hugetlb_hstate_alloc_pages(struct hstate *h) 13631da177e4SLinus Torvalds { 13641da177e4SLinus Torvalds unsigned long i; 13651da177e4SLinus Torvalds 1366e5ff2159SAndi Kleen for (i = 0; i < h->max_huge_pages; ++i) { 1367aa888a74SAndi Kleen if (h->order >= MAX_ORDER) { 1368aa888a74SAndi Kleen if (!alloc_bootmem_huge_page(h)) 1369aa888a74SAndi Kleen break; 13709b5e5d0fSLee Schermerhorn } else if (!alloc_fresh_huge_page(h, 13718cebfcd0SLai Jiangshan &node_states[N_MEMORY])) 13721da177e4SLinus Torvalds break; 13731da177e4SLinus Torvalds } 13748faa8b07SAndi Kleen h->max_huge_pages = i; 1375e5ff2159SAndi Kleen } 1376e5ff2159SAndi Kleen 1377e5ff2159SAndi Kleen static void __init hugetlb_init_hstates(void) 1378e5ff2159SAndi Kleen { 1379e5ff2159SAndi Kleen struct hstate *h; 1380e5ff2159SAndi Kleen 1381e5ff2159SAndi Kleen for_each_hstate(h) { 13828faa8b07SAndi Kleen /* oversize hugepages were init'ed in early boot */ 13838faa8b07SAndi Kleen if (h->order < MAX_ORDER) 13848faa8b07SAndi Kleen hugetlb_hstate_alloc_pages(h); 1385e5ff2159SAndi Kleen } 1386e5ff2159SAndi Kleen } 1387e5ff2159SAndi Kleen 13884abd32dbSAndi Kleen static char * __init memfmt(char *buf, unsigned long n) 13894abd32dbSAndi Kleen { 13904abd32dbSAndi Kleen if (n >= (1UL << 30)) 13914abd32dbSAndi Kleen sprintf(buf, "%lu GB", n >> 30); 13924abd32dbSAndi Kleen else if (n >= (1UL << 20)) 13934abd32dbSAndi Kleen sprintf(buf, "%lu MB", n >> 20); 13944abd32dbSAndi Kleen else 13954abd32dbSAndi Kleen sprintf(buf, "%lu KB", n >> 10); 13964abd32dbSAndi Kleen return buf; 13974abd32dbSAndi Kleen } 13984abd32dbSAndi Kleen 1399e5ff2159SAndi Kleen static void __init report_hugepages(void) 1400e5ff2159SAndi Kleen { 1401e5ff2159SAndi Kleen struct hstate *h; 1402e5ff2159SAndi Kleen 1403e5ff2159SAndi Kleen for_each_hstate(h) { 14044abd32dbSAndi Kleen char buf[32]; 1405ffb22af5SAndrew Morton pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n", 14064abd32dbSAndi Kleen memfmt(buf, huge_page_size(h)), 14074abd32dbSAndi Kleen h->free_huge_pages); 1408e5ff2159SAndi Kleen } 1409e5ff2159SAndi Kleen } 1410e5ff2159SAndi Kleen 14111da177e4SLinus Torvalds #ifdef CONFIG_HIGHMEM 14126ae11b27SLee Schermerhorn static void try_to_free_low(struct hstate *h, unsigned long count, 14136ae11b27SLee Schermerhorn nodemask_t *nodes_allowed) 14141da177e4SLinus Torvalds { 14154415cc8dSChristoph Lameter int i; 14164415cc8dSChristoph Lameter 1417aa888a74SAndi Kleen if (h->order >= MAX_ORDER) 1418aa888a74SAndi Kleen return; 1419aa888a74SAndi Kleen 14206ae11b27SLee Schermerhorn for_each_node_mask(i, *nodes_allowed) { 14211da177e4SLinus Torvalds struct page *page, *next; 1422a5516438SAndi Kleen struct list_head *freel = &h->hugepage_freelists[i]; 1423a5516438SAndi Kleen list_for_each_entry_safe(page, next, freel, lru) { 1424a5516438SAndi Kleen if (count >= h->nr_huge_pages) 14256b0c880dSAdam Litke return; 14261da177e4SLinus Torvalds if
(PageHighMem(page)) 14271da177e4SLinus Torvalds continue; 14281da177e4SLinus Torvalds list_del(&page->lru); 1429e5ff2159SAndi Kleen update_and_free_page(h, page); 1430a5516438SAndi Kleen h->free_huge_pages--; 1431a5516438SAndi Kleen h->free_huge_pages_node[page_to_nid(page)]--; 14321da177e4SLinus Torvalds } 14331da177e4SLinus Torvalds } 14341da177e4SLinus Torvalds } 14351da177e4SLinus Torvalds #else 14366ae11b27SLee Schermerhorn static inline void try_to_free_low(struct hstate *h, unsigned long count, 14376ae11b27SLee Schermerhorn nodemask_t *nodes_allowed) 14381da177e4SLinus Torvalds { 14391da177e4SLinus Torvalds } 14401da177e4SLinus Torvalds #endif 14411da177e4SLinus Torvalds 144220a0307cSWu Fengguang /* 144320a0307cSWu Fengguang * Increment or decrement surplus_huge_pages. Keep node-specific counters 144420a0307cSWu Fengguang * balanced by operating on them in a round-robin fashion. 144520a0307cSWu Fengguang * Returns 1 if an adjustment was made. 144620a0307cSWu Fengguang */ 14476ae11b27SLee Schermerhorn static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed, 14486ae11b27SLee Schermerhorn int delta) 144920a0307cSWu Fengguang { 1450b2261026SJoonsoo Kim int nr_nodes, node; 145120a0307cSWu Fengguang 145220a0307cSWu Fengguang VM_BUG_ON(delta != -1 && delta != 1); 145320a0307cSWu Fengguang 1454e8c5c824SLee Schermerhorn if (delta < 0) { 1455b2261026SJoonsoo Kim for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) { 1456b2261026SJoonsoo Kim if (h->surplus_huge_pages_node[node]) 1457b2261026SJoonsoo Kim goto found; 1458b2261026SJoonsoo Kim } 1459b2261026SJoonsoo Kim } else { 1460b2261026SJoonsoo Kim for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { 1461b2261026SJoonsoo Kim if (h->surplus_huge_pages_node[node] < 1462b2261026SJoonsoo Kim h->nr_huge_pages_node[node]) 1463b2261026SJoonsoo Kim goto found; 1464e8c5c824SLee Schermerhorn } 14659a76db09SLee Schermerhorn } 1466b2261026SJoonsoo Kim return 0; 146720a0307cSWu Fengguang 1468b2261026SJoonsoo Kim found: 146920a0307cSWu Fengguang h->surplus_huge_pages += delta; 1470b2261026SJoonsoo Kim h->surplus_huge_pages_node[node] += delta; 1471b2261026SJoonsoo Kim return 1; 147220a0307cSWu Fengguang } 147320a0307cSWu Fengguang 1474a5516438SAndi Kleen #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages) 14756ae11b27SLee Schermerhorn static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count, 14766ae11b27SLee Schermerhorn nodemask_t *nodes_allowed) 14771da177e4SLinus Torvalds { 14787893d1d5SAdam Litke unsigned long min_count, ret; 14791da177e4SLinus Torvalds 1480aa888a74SAndi Kleen if (h->order >= MAX_ORDER) 1481aa888a74SAndi Kleen return h->max_huge_pages; 1482aa888a74SAndi Kleen 14837893d1d5SAdam Litke /* 14847893d1d5SAdam Litke * Increase the pool size 14857893d1d5SAdam Litke * First take pages out of surplus state. Then make up the 14867893d1d5SAdam Litke * remaining difference by allocating fresh huge pages. 1487d1c3fb1fSNishanth Aravamudan * 1488d1c3fb1fSNishanth Aravamudan * We might race with alloc_buddy_huge_page() here and be unable 1489d1c3fb1fSNishanth Aravamudan * to convert a surplus huge page to a normal huge page. That is 1490d1c3fb1fSNishanth Aravamudan * not critical, though, it just means the overall size of the 1491d1c3fb1fSNishanth Aravamudan * pool might be one hugepage larger than it needs to be, but 1492d1c3fb1fSNishanth Aravamudan * within all the constraints specified by the sysctls. 
14937893d1d5SAdam Litke */ 14941da177e4SLinus Torvalds spin_lock(&hugetlb_lock); 1495a5516438SAndi Kleen while (h->surplus_huge_pages && count > persistent_huge_pages(h)) { 14966ae11b27SLee Schermerhorn if (!adjust_pool_surplus(h, nodes_allowed, -1)) 14977893d1d5SAdam Litke break; 14987893d1d5SAdam Litke } 14997893d1d5SAdam Litke 1500a5516438SAndi Kleen while (count > persistent_huge_pages(h)) { 15017893d1d5SAdam Litke /* 15027893d1d5SAdam Litke * If this allocation races such that we no longer need the 15037893d1d5SAdam Litke * page, free_huge_page will handle it by freeing the page 15047893d1d5SAdam Litke * and reducing the surplus. 15057893d1d5SAdam Litke */ 15067893d1d5SAdam Litke spin_unlock(&hugetlb_lock); 15076ae11b27SLee Schermerhorn ret = alloc_fresh_huge_page(h, nodes_allowed); 15087893d1d5SAdam Litke spin_lock(&hugetlb_lock); 15097893d1d5SAdam Litke if (!ret) 15107893d1d5SAdam Litke goto out; 15117893d1d5SAdam Litke 1512536240f2SMel Gorman /* Bail for signals. Probably ctrl-c from user */ 1513536240f2SMel Gorman if (signal_pending(current)) 1514536240f2SMel Gorman goto out; 15157893d1d5SAdam Litke } 15167893d1d5SAdam Litke 15177893d1d5SAdam Litke /* 15187893d1d5SAdam Litke * Decrease the pool size 15197893d1d5SAdam Litke * First return free pages to the buddy allocator (being careful 15207893d1d5SAdam Litke * to keep enough around to satisfy reservations). Then place 15217893d1d5SAdam Litke * pages into surplus state as needed so the pool will shrink 15227893d1d5SAdam Litke * to the desired size as pages become free. 1523d1c3fb1fSNishanth Aravamudan * 1524d1c3fb1fSNishanth Aravamudan * By placing pages into the surplus state independent of the 1525d1c3fb1fSNishanth Aravamudan * overcommit value, we are allowing the surplus pool size to 1526d1c3fb1fSNishanth Aravamudan * exceed overcommit. There are few sane options here. Since 1527d1c3fb1fSNishanth Aravamudan * alloc_buddy_huge_page() is checking the global counter, 1528d1c3fb1fSNishanth Aravamudan * though, we'll note that we're not allowed to exceed surplus 1529d1c3fb1fSNishanth Aravamudan * and won't grow the pool anywhere else. Not until one of the 1530d1c3fb1fSNishanth Aravamudan * sysctls are changed, or the surplus pages go out of use. 
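 *
 * A worked example (illustrative): with nr_huge_pages == 10, of which
 * 6 are free and 2 reserved, shrinking to count == 0 gives min_count =
 * 2 + 10 - 6 = 6, so at most four pages are returned to the buddy
 * allocator immediately; the remainder are pushed into surplus state
 * and freed later as their users release them.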
15317893d1d5SAdam Litke */ 1532a5516438SAndi Kleen min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages; 15336b0c880dSAdam Litke min_count = max(count, min_count); 15346ae11b27SLee Schermerhorn try_to_free_low(h, min_count, nodes_allowed); 1535a5516438SAndi Kleen while (min_count < persistent_huge_pages(h)) { 15366ae11b27SLee Schermerhorn if (!free_pool_huge_page(h, nodes_allowed, 0)) 15371da177e4SLinus Torvalds break; 15381da177e4SLinus Torvalds } 1539a5516438SAndi Kleen while (count < persistent_huge_pages(h)) { 15406ae11b27SLee Schermerhorn if (!adjust_pool_surplus(h, nodes_allowed, 1)) 15417893d1d5SAdam Litke break; 15427893d1d5SAdam Litke } 15437893d1d5SAdam Litke out: 1544a5516438SAndi Kleen ret = persistent_huge_pages(h); 15451da177e4SLinus Torvalds spin_unlock(&hugetlb_lock); 15467893d1d5SAdam Litke return ret; 15471da177e4SLinus Torvalds } 15481da177e4SLinus Torvalds 1549a3437870SNishanth Aravamudan #define HSTATE_ATTR_RO(_name) \ 1550a3437870SNishanth Aravamudan static struct kobj_attribute _name##_attr = __ATTR_RO(_name) 1551a3437870SNishanth Aravamudan 1552a3437870SNishanth Aravamudan #define HSTATE_ATTR(_name) \ 1553a3437870SNishanth Aravamudan static struct kobj_attribute _name##_attr = \ 1554a3437870SNishanth Aravamudan __ATTR(_name, 0644, _name##_show, _name##_store) 1555a3437870SNishanth Aravamudan 1556a3437870SNishanth Aravamudan static struct kobject *hugepages_kobj; 1557a3437870SNishanth Aravamudan static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; 1558a3437870SNishanth Aravamudan 15599a305230SLee Schermerhorn static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp); 15609a305230SLee Schermerhorn 15619a305230SLee Schermerhorn static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp) 1562a3437870SNishanth Aravamudan { 1563a3437870SNishanth Aravamudan int i; 15649a305230SLee Schermerhorn 1565a3437870SNishanth Aravamudan for (i = 0; i < HUGE_MAX_HSTATE; i++) 15669a305230SLee Schermerhorn if (hstate_kobjs[i] == kobj) { 15679a305230SLee Schermerhorn if (nidp) 15689a305230SLee Schermerhorn *nidp = NUMA_NO_NODE; 1569a3437870SNishanth Aravamudan return &hstates[i]; 15709a305230SLee Schermerhorn } 15719a305230SLee Schermerhorn 15729a305230SLee Schermerhorn return kobj_to_node_hstate(kobj, nidp); 1573a3437870SNishanth Aravamudan } 1574a3437870SNishanth Aravamudan 157506808b08SLee Schermerhorn static ssize_t nr_hugepages_show_common(struct kobject *kobj, 1576a3437870SNishanth Aravamudan struct kobj_attribute *attr, char *buf) 1577a3437870SNishanth Aravamudan { 15789a305230SLee Schermerhorn struct hstate *h; 15799a305230SLee Schermerhorn unsigned long nr_huge_pages; 15809a305230SLee Schermerhorn int nid; 15819a305230SLee Schermerhorn 15829a305230SLee Schermerhorn h = kobj_to_hstate(kobj, &nid); 15839a305230SLee Schermerhorn if (nid == NUMA_NO_NODE) 15849a305230SLee Schermerhorn nr_huge_pages = h->nr_huge_pages; 15859a305230SLee Schermerhorn else 15869a305230SLee Schermerhorn nr_huge_pages = h->nr_huge_pages_node[nid]; 15879a305230SLee Schermerhorn 15889a305230SLee Schermerhorn return sprintf(buf, "%lu\n", nr_huge_pages); 1589a3437870SNishanth Aravamudan } 1590adbe8726SEric B Munson 159106808b08SLee Schermerhorn static ssize_t nr_hugepages_store_common(bool obey_mempolicy, 159206808b08SLee Schermerhorn struct kobject *kobj, struct kobj_attribute *attr, 159306808b08SLee Schermerhorn const char *buf, size_t len) 1594a3437870SNishanth Aravamudan { 1595a3437870SNishanth Aravamudan int err; 15969a305230SLee Schermerhorn int nid; 
159706808b08SLee Schermerhorn unsigned long count; 15989a305230SLee Schermerhorn struct hstate *h; 1599bad44b5bSDavid Rientjes NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY); 1600a3437870SNishanth Aravamudan 16013dbb95f7SJingoo Han err = kstrtoul(buf, 10, &count); 160273ae31e5SEric B Munson if (err) 1603adbe8726SEric B Munson goto out; 1604a3437870SNishanth Aravamudan 16059a305230SLee Schermerhorn h = kobj_to_hstate(kobj, &nid); 1606adbe8726SEric B Munson if (h->order >= MAX_ORDER) { 1607adbe8726SEric B Munson err = -EINVAL; 1608adbe8726SEric B Munson goto out; 1609adbe8726SEric B Munson } 1610adbe8726SEric B Munson 16119a305230SLee Schermerhorn if (nid == NUMA_NO_NODE) { 16129a305230SLee Schermerhorn /* 16139a305230SLee Schermerhorn * global hstate attribute 16149a305230SLee Schermerhorn */ 16159a305230SLee Schermerhorn if (!(obey_mempolicy && 16169a305230SLee Schermerhorn init_nodemask_of_mempolicy(nodes_allowed))) { 161706808b08SLee Schermerhorn NODEMASK_FREE(nodes_allowed); 16188cebfcd0SLai Jiangshan nodes_allowed = &node_states[N_MEMORY]; 161906808b08SLee Schermerhorn } 16209a305230SLee Schermerhorn } else if (nodes_allowed) { 16219a305230SLee Schermerhorn /* 16229a305230SLee Schermerhorn * per node hstate attribute: adjust count to global, 16239a305230SLee Schermerhorn * but restrict alloc/free to the specified node. 16249a305230SLee Schermerhorn */ 16259a305230SLee Schermerhorn count += h->nr_huge_pages - h->nr_huge_pages_node[nid]; 16269a305230SLee Schermerhorn init_nodemask_of_node(nodes_allowed, nid); 16279a305230SLee Schermerhorn } else 16288cebfcd0SLai Jiangshan nodes_allowed = &node_states[N_MEMORY]; 16299a305230SLee Schermerhorn 163006808b08SLee Schermerhorn h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed); 1631a3437870SNishanth Aravamudan 16328cebfcd0SLai Jiangshan if (nodes_allowed != &node_states[N_MEMORY]) 163306808b08SLee Schermerhorn NODEMASK_FREE(nodes_allowed); 163406808b08SLee Schermerhorn 163506808b08SLee Schermerhorn return len; 1636adbe8726SEric B Munson out: 1637adbe8726SEric B Munson NODEMASK_FREE(nodes_allowed); 1638adbe8726SEric B Munson return err; 163906808b08SLee Schermerhorn } 164006808b08SLee Schermerhorn 164106808b08SLee Schermerhorn static ssize_t nr_hugepages_show(struct kobject *kobj, 164206808b08SLee Schermerhorn struct kobj_attribute *attr, char *buf) 164306808b08SLee Schermerhorn { 164406808b08SLee Schermerhorn return nr_hugepages_show_common(kobj, attr, buf); 164506808b08SLee Schermerhorn } 164606808b08SLee Schermerhorn 164706808b08SLee Schermerhorn static ssize_t nr_hugepages_store(struct kobject *kobj, 164806808b08SLee Schermerhorn struct kobj_attribute *attr, const char *buf, size_t len) 164906808b08SLee Schermerhorn { 165006808b08SLee Schermerhorn return nr_hugepages_store_common(false, kobj, attr, buf, len); 1651a3437870SNishanth Aravamudan } 1652a3437870SNishanth Aravamudan HSTATE_ATTR(nr_hugepages); 1653a3437870SNishanth Aravamudan 165406808b08SLee Schermerhorn #ifdef CONFIG_NUMA 165506808b08SLee Schermerhorn 165606808b08SLee Schermerhorn /* 165706808b08SLee Schermerhorn * hstate attribute for optionally mempolicy-based constraint on persistent 165806808b08SLee Schermerhorn * huge page alloc/free. 
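 *
 * For example (illustrative): a task started with "numactl --membind=0"
 * that writes 16 to nr_hugepages_mempolicy grows the pool on node 0
 * only, whereas a write to plain nr_hugepages is spread across all
 * nodes with memory.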
165906808b08SLee Schermerhorn */ 166006808b08SLee Schermerhorn static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj, 166106808b08SLee Schermerhorn struct kobj_attribute *attr, char *buf) 166206808b08SLee Schermerhorn { 166306808b08SLee Schermerhorn return nr_hugepages_show_common(kobj, attr, buf); 166406808b08SLee Schermerhorn } 166506808b08SLee Schermerhorn 166606808b08SLee Schermerhorn static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj, 166706808b08SLee Schermerhorn struct kobj_attribute *attr, const char *buf, size_t len) 166806808b08SLee Schermerhorn { 166906808b08SLee Schermerhorn return nr_hugepages_store_common(true, kobj, attr, buf, len); 167006808b08SLee Schermerhorn } 167106808b08SLee Schermerhorn HSTATE_ATTR(nr_hugepages_mempolicy); 167206808b08SLee Schermerhorn #endif 167306808b08SLee Schermerhorn 167406808b08SLee Schermerhorn 1675a3437870SNishanth Aravamudan static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj, 1676a3437870SNishanth Aravamudan struct kobj_attribute *attr, char *buf) 1677a3437870SNishanth Aravamudan { 16789a305230SLee Schermerhorn struct hstate *h = kobj_to_hstate(kobj, NULL); 1679a3437870SNishanth Aravamudan return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages); 1680a3437870SNishanth Aravamudan } 1681adbe8726SEric B Munson 1682a3437870SNishanth Aravamudan static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj, 1683a3437870SNishanth Aravamudan struct kobj_attribute *attr, const char *buf, size_t count) 1684a3437870SNishanth Aravamudan { 1685a3437870SNishanth Aravamudan int err; 1686a3437870SNishanth Aravamudan unsigned long input; 16879a305230SLee Schermerhorn struct hstate *h = kobj_to_hstate(kobj, NULL); 1688a3437870SNishanth Aravamudan 1689adbe8726SEric B Munson if (h->order >= MAX_ORDER) 1690adbe8726SEric B Munson return -EINVAL; 1691adbe8726SEric B Munson 16923dbb95f7SJingoo Han err = kstrtoul(buf, 10, &input); 1693a3437870SNishanth Aravamudan if (err) 169473ae31e5SEric B Munson return err; 1695a3437870SNishanth Aravamudan 1696a3437870SNishanth Aravamudan spin_lock(&hugetlb_lock); 1697a3437870SNishanth Aravamudan h->nr_overcommit_huge_pages = input; 1698a3437870SNishanth Aravamudan spin_unlock(&hugetlb_lock); 1699a3437870SNishanth Aravamudan 1700a3437870SNishanth Aravamudan return count; 1701a3437870SNishanth Aravamudan } 1702a3437870SNishanth Aravamudan HSTATE_ATTR(nr_overcommit_hugepages); 1703a3437870SNishanth Aravamudan 1704a3437870SNishanth Aravamudan static ssize_t free_hugepages_show(struct kobject *kobj, 1705a3437870SNishanth Aravamudan struct kobj_attribute *attr, char *buf) 1706a3437870SNishanth Aravamudan { 17079a305230SLee Schermerhorn struct hstate *h; 17089a305230SLee Schermerhorn unsigned long free_huge_pages; 17099a305230SLee Schermerhorn int nid; 17109a305230SLee Schermerhorn 17119a305230SLee Schermerhorn h = kobj_to_hstate(kobj, &nid); 17129a305230SLee Schermerhorn if (nid == NUMA_NO_NODE) 17139a305230SLee Schermerhorn free_huge_pages = h->free_huge_pages; 17149a305230SLee Schermerhorn else 17159a305230SLee Schermerhorn free_huge_pages = h->free_huge_pages_node[nid]; 17169a305230SLee Schermerhorn 17179a305230SLee Schermerhorn return sprintf(buf, "%lu\n", free_huge_pages); 1718a3437870SNishanth Aravamudan } 1719a3437870SNishanth Aravamudan HSTATE_ATTR_RO(free_hugepages); 1720a3437870SNishanth Aravamudan 1721a3437870SNishanth Aravamudan static ssize_t resv_hugepages_show(struct kobject *kobj, 1722a3437870SNishanth Aravamudan struct kobj_attribute *attr, char *buf) 1723a3437870SNishanth 
Aravamudan { 17249a305230SLee Schermerhorn struct hstate *h = kobj_to_hstate(kobj, NULL); 1725a3437870SNishanth Aravamudan return sprintf(buf, "%lu\n", h->resv_huge_pages); 1726a3437870SNishanth Aravamudan } 1727a3437870SNishanth Aravamudan HSTATE_ATTR_RO(resv_hugepages); 1728a3437870SNishanth Aravamudan 1729a3437870SNishanth Aravamudan static ssize_t surplus_hugepages_show(struct kobject *kobj, 1730a3437870SNishanth Aravamudan struct kobj_attribute *attr, char *buf) 1731a3437870SNishanth Aravamudan { 17329a305230SLee Schermerhorn struct hstate *h; 17339a305230SLee Schermerhorn unsigned long surplus_huge_pages; 17349a305230SLee Schermerhorn int nid; 17359a305230SLee Schermerhorn 17369a305230SLee Schermerhorn h = kobj_to_hstate(kobj, &nid); 17379a305230SLee Schermerhorn if (nid == NUMA_NO_NODE) 17389a305230SLee Schermerhorn surplus_huge_pages = h->surplus_huge_pages; 17399a305230SLee Schermerhorn else 17409a305230SLee Schermerhorn surplus_huge_pages = h->surplus_huge_pages_node[nid]; 17419a305230SLee Schermerhorn 17429a305230SLee Schermerhorn return sprintf(buf, "%lu\n", surplus_huge_pages); 1743a3437870SNishanth Aravamudan } 1744a3437870SNishanth Aravamudan HSTATE_ATTR_RO(surplus_hugepages); 1745a3437870SNishanth Aravamudan 1746a3437870SNishanth Aravamudan static struct attribute *hstate_attrs[] = { 1747a3437870SNishanth Aravamudan &nr_hugepages_attr.attr, 1748a3437870SNishanth Aravamudan &nr_overcommit_hugepages_attr.attr, 1749a3437870SNishanth Aravamudan &free_hugepages_attr.attr, 1750a3437870SNishanth Aravamudan &resv_hugepages_attr.attr, 1751a3437870SNishanth Aravamudan &surplus_hugepages_attr.attr, 175206808b08SLee Schermerhorn #ifdef CONFIG_NUMA 175306808b08SLee Schermerhorn &nr_hugepages_mempolicy_attr.attr, 175406808b08SLee Schermerhorn #endif 1755a3437870SNishanth Aravamudan NULL, 1756a3437870SNishanth Aravamudan }; 1757a3437870SNishanth Aravamudan 1758a3437870SNishanth Aravamudan static struct attribute_group hstate_attr_group = { 1759a3437870SNishanth Aravamudan .attrs = hstate_attrs, 1760a3437870SNishanth Aravamudan }; 1761a3437870SNishanth Aravamudan 1762094e9539SJeff Mahoney static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent, 17639a305230SLee Schermerhorn struct kobject **hstate_kobjs, 17649a305230SLee Schermerhorn struct attribute_group *hstate_attr_group) 1765a3437870SNishanth Aravamudan { 1766a3437870SNishanth Aravamudan int retval; 1767972dc4deSAneesh Kumar K.V int hi = hstate_index(h); 1768a3437870SNishanth Aravamudan 17699a305230SLee Schermerhorn hstate_kobjs[hi] = kobject_create_and_add(h->name, parent); 17709a305230SLee Schermerhorn if (!hstate_kobjs[hi]) 1771a3437870SNishanth Aravamudan return -ENOMEM; 1772a3437870SNishanth Aravamudan 17739a305230SLee Schermerhorn retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group); 1774a3437870SNishanth Aravamudan if (retval) 17759a305230SLee Schermerhorn kobject_put(hstate_kobjs[hi]); 1776a3437870SNishanth Aravamudan 1777a3437870SNishanth Aravamudan return retval; 1778a3437870SNishanth Aravamudan } 1779a3437870SNishanth Aravamudan 1780a3437870SNishanth Aravamudan static void __init hugetlb_sysfs_init(void) 1781a3437870SNishanth Aravamudan { 1782a3437870SNishanth Aravamudan struct hstate *h; 1783a3437870SNishanth Aravamudan int err; 1784a3437870SNishanth Aravamudan 1785a3437870SNishanth Aravamudan hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj); 1786a3437870SNishanth Aravamudan if (!hugepages_kobj) 1787a3437870SNishanth Aravamudan return; 1788a3437870SNishanth Aravamudan 
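/*
 * Illustrative sketch of the resulting layout, assuming a 2 MB hstate:
 * the loop below creates /sys/kernel/mm/hugepages/hugepages-2048kB/
 * holding nr_hugepages, nr_overcommit_hugepages, free_hugepages,
 * resv_hugepages and surplus_hugepages, so the pool can be resized with
 *
 *	echo 128 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 */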
1789a3437870SNishanth Aravamudan for_each_hstate(h) { 17909a305230SLee Schermerhorn err = hugetlb_sysfs_add_hstate(h, hugepages_kobj, 17919a305230SLee Schermerhorn hstate_kobjs, &hstate_attr_group); 1792a3437870SNishanth Aravamudan if (err) 1793ffb22af5SAndrew Morton pr_err("Hugetlb: Unable to add hstate %s", h->name); 1794a3437870SNishanth Aravamudan } 1795a3437870SNishanth Aravamudan } 1796a3437870SNishanth Aravamudan 17979a305230SLee Schermerhorn #ifdef CONFIG_NUMA 17989a305230SLee Schermerhorn 17999a305230SLee Schermerhorn /* 18009a305230SLee Schermerhorn * node_hstate/s - associate per node hstate attributes, via their kobjects, 180110fbcf4cSKay Sievers * with node devices in node_devices[] using a parallel array. The array 180210fbcf4cSKay Sievers * index of a node device or _hstate == node id. 180310fbcf4cSKay Sievers * This is here to avoid any static dependency of the node device driver, in 18049a305230SLee Schermerhorn * the base kernel, on the hugetlb module. 18059a305230SLee Schermerhorn */ 18069a305230SLee Schermerhorn struct node_hstate { 18079a305230SLee Schermerhorn struct kobject *hugepages_kobj; 18089a305230SLee Schermerhorn struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; 18099a305230SLee Schermerhorn }; 18109a305230SLee Schermerhorn struct node_hstate node_hstates[MAX_NUMNODES]; 18119a305230SLee Schermerhorn 18129a305230SLee Schermerhorn /* 181310fbcf4cSKay Sievers * A subset of global hstate attributes for node devices 18149a305230SLee Schermerhorn */ 18159a305230SLee Schermerhorn static struct attribute *per_node_hstate_attrs[] = { 18169a305230SLee Schermerhorn &nr_hugepages_attr.attr, 18179a305230SLee Schermerhorn &free_hugepages_attr.attr, 18189a305230SLee Schermerhorn &surplus_hugepages_attr.attr, 18199a305230SLee Schermerhorn NULL, 18209a305230SLee Schermerhorn }; 18219a305230SLee Schermerhorn 18229a305230SLee Schermerhorn static struct attribute_group per_node_hstate_attr_group = { 18239a305230SLee Schermerhorn .attrs = per_node_hstate_attrs, 18249a305230SLee Schermerhorn }; 18259a305230SLee Schermerhorn 18269a305230SLee Schermerhorn /* 182710fbcf4cSKay Sievers * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj. 18289a305230SLee Schermerhorn * Returns node id via non-NULL nidp. 18299a305230SLee Schermerhorn */ 18309a305230SLee Schermerhorn static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp) 18319a305230SLee Schermerhorn { 18329a305230SLee Schermerhorn int nid; 18339a305230SLee Schermerhorn 18349a305230SLee Schermerhorn for (nid = 0; nid < nr_node_ids; nid++) { 18359a305230SLee Schermerhorn struct node_hstate *nhs = &node_hstates[nid]; 18369a305230SLee Schermerhorn int i; 18379a305230SLee Schermerhorn for (i = 0; i < HUGE_MAX_HSTATE; i++) 18389a305230SLee Schermerhorn if (nhs->hstate_kobjs[i] == kobj) { 18399a305230SLee Schermerhorn if (nidp) 18409a305230SLee Schermerhorn *nidp = nid; 18419a305230SLee Schermerhorn return &hstates[i]; 18429a305230SLee Schermerhorn } 18439a305230SLee Schermerhorn } 18449a305230SLee Schermerhorn 18459a305230SLee Schermerhorn BUG(); 18469a305230SLee Schermerhorn return NULL; 18479a305230SLee Schermerhorn } 18489a305230SLee Schermerhorn 18499a305230SLee Schermerhorn /* 185010fbcf4cSKay Sievers * Unregister hstate attributes from a single node device. 18519a305230SLee Schermerhorn * No-op if no hstate attributes attached. 
18529a305230SLee Schermerhorn */ 18533cd8b44fSClaudiu Ghioc static void hugetlb_unregister_node(struct node *node) 18549a305230SLee Schermerhorn { 18559a305230SLee Schermerhorn struct hstate *h; 185610fbcf4cSKay Sievers struct node_hstate *nhs = &node_hstates[node->dev.id]; 18579a305230SLee Schermerhorn 18589a305230SLee Schermerhorn if (!nhs->hugepages_kobj) 18599b5e5d0fSLee Schermerhorn return; /* no hstate attributes */ 18609a305230SLee Schermerhorn 1861972dc4deSAneesh Kumar K.V for_each_hstate(h) { 1862972dc4deSAneesh Kumar K.V int idx = hstate_index(h); 1863972dc4deSAneesh Kumar K.V if (nhs->hstate_kobjs[idx]) { 1864972dc4deSAneesh Kumar K.V kobject_put(nhs->hstate_kobjs[idx]); 1865972dc4deSAneesh Kumar K.V nhs->hstate_kobjs[idx] = NULL; 1866972dc4deSAneesh Kumar K.V } 18679a305230SLee Schermerhorn } 18689a305230SLee Schermerhorn 18699a305230SLee Schermerhorn kobject_put(nhs->hugepages_kobj); 18709a305230SLee Schermerhorn nhs->hugepages_kobj = NULL; 18719a305230SLee Schermerhorn } 18729a305230SLee Schermerhorn 18739a305230SLee Schermerhorn /* 187410fbcf4cSKay Sievers * hugetlb module exit: unregister hstate attributes from node devices 18759a305230SLee Schermerhorn * that have them. 18769a305230SLee Schermerhorn */ 18779a305230SLee Schermerhorn static void hugetlb_unregister_all_nodes(void) 18789a305230SLee Schermerhorn { 18799a305230SLee Schermerhorn int nid; 18809a305230SLee Schermerhorn 18819a305230SLee Schermerhorn /* 188210fbcf4cSKay Sievers * disable node device registrations. 18839a305230SLee Schermerhorn */ 18849a305230SLee Schermerhorn register_hugetlbfs_with_node(NULL, NULL); 18859a305230SLee Schermerhorn 18869a305230SLee Schermerhorn /* 18879a305230SLee Schermerhorn * remove hstate attributes from any nodes that have them. 18889a305230SLee Schermerhorn */ 18899a305230SLee Schermerhorn for (nid = 0; nid < nr_node_ids; nid++) 18908732794bSWen Congyang hugetlb_unregister_node(node_devices[nid]); 18919a305230SLee Schermerhorn } 18929a305230SLee Schermerhorn 18939a305230SLee Schermerhorn /* 189410fbcf4cSKay Sievers * Register hstate attributes for a single node device. 18959a305230SLee Schermerhorn * No-op if attributes already registered. 
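 *
 * The files land under the node device directory, e.g. (illustrative
 * path) /sys/devices/system/node/node0/hugepages/hugepages-2048kB/,
 * carrying the per-node subset of the global attributes listed above.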
18969a305230SLee Schermerhorn */ 18973cd8b44fSClaudiu Ghioc static void hugetlb_register_node(struct node *node) 18989a305230SLee Schermerhorn { 18999a305230SLee Schermerhorn struct hstate *h; 190010fbcf4cSKay Sievers struct node_hstate *nhs = &node_hstates[node->dev.id]; 19019a305230SLee Schermerhorn int err; 19029a305230SLee Schermerhorn 19039a305230SLee Schermerhorn if (nhs->hugepages_kobj) 19049a305230SLee Schermerhorn return; /* already allocated */ 19059a305230SLee Schermerhorn 19069a305230SLee Schermerhorn nhs->hugepages_kobj = kobject_create_and_add("hugepages", 190710fbcf4cSKay Sievers &node->dev.kobj); 19089a305230SLee Schermerhorn if (!nhs->hugepages_kobj) 19099a305230SLee Schermerhorn return; 19109a305230SLee Schermerhorn 19119a305230SLee Schermerhorn for_each_hstate(h) { 19129a305230SLee Schermerhorn err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj, 19139a305230SLee Schermerhorn nhs->hstate_kobjs, 19149a305230SLee Schermerhorn &per_node_hstate_attr_group); 19159a305230SLee Schermerhorn if (err) { 1916ffb22af5SAndrew Morton pr_err("Hugetlb: Unable to add hstate %s for node %d\n", 191710fbcf4cSKay Sievers h->name, node->dev.id); 19189a305230SLee Schermerhorn hugetlb_unregister_node(node); 19199a305230SLee Schermerhorn break; 19209a305230SLee Schermerhorn } 19219a305230SLee Schermerhorn } 19229a305230SLee Schermerhorn } 19239a305230SLee Schermerhorn 19249a305230SLee Schermerhorn /* 19259b5e5d0fSLee Schermerhorn * hugetlb init time: register hstate attributes for all registered node 192610fbcf4cSKay Sievers * devices of nodes that have memory. All on-line nodes should have 192710fbcf4cSKay Sievers * registered their associated device by this time. 19289a305230SLee Schermerhorn */ 19299a305230SLee Schermerhorn static void hugetlb_register_all_nodes(void) 19309a305230SLee Schermerhorn { 19319a305230SLee Schermerhorn int nid; 19329a305230SLee Schermerhorn 19338cebfcd0SLai Jiangshan for_each_node_state(nid, N_MEMORY) { 19348732794bSWen Congyang struct node *node = node_devices[nid]; 193510fbcf4cSKay Sievers if (node->dev.id == nid) 19369a305230SLee Schermerhorn hugetlb_register_node(node); 19379a305230SLee Schermerhorn } 19389a305230SLee Schermerhorn 19399a305230SLee Schermerhorn /* 194010fbcf4cSKay Sievers * Let the node device driver know we're here so it can 19419a305230SLee Schermerhorn * [un]register hstate attributes on node hotplug. 
19429a305230SLee Schermerhorn */ 19439a305230SLee Schermerhorn register_hugetlbfs_with_node(hugetlb_register_node, 19449a305230SLee Schermerhorn hugetlb_unregister_node); 19459a305230SLee Schermerhorn } 19469a305230SLee Schermerhorn #else /* !CONFIG_NUMA */ 19479a305230SLee Schermerhorn 19489a305230SLee Schermerhorn static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp) 19499a305230SLee Schermerhorn { 19509a305230SLee Schermerhorn BUG(); 19519a305230SLee Schermerhorn if (nidp) 19529a305230SLee Schermerhorn *nidp = -1; 19539a305230SLee Schermerhorn return NULL; 19549a305230SLee Schermerhorn } 19559a305230SLee Schermerhorn 19569a305230SLee Schermerhorn static void hugetlb_unregister_all_nodes(void) { } 19579a305230SLee Schermerhorn 19589a305230SLee Schermerhorn static void hugetlb_register_all_nodes(void) { } 19599a305230SLee Schermerhorn 19609a305230SLee Schermerhorn #endif 19619a305230SLee Schermerhorn 1962a3437870SNishanth Aravamudan static void __exit hugetlb_exit(void) 1963a3437870SNishanth Aravamudan { 1964a3437870SNishanth Aravamudan struct hstate *h; 1965a3437870SNishanth Aravamudan 19669a305230SLee Schermerhorn hugetlb_unregister_all_nodes(); 19679a305230SLee Schermerhorn 1968a3437870SNishanth Aravamudan for_each_hstate(h) { 1969972dc4deSAneesh Kumar K.V kobject_put(hstate_kobjs[hstate_index(h)]); 1970a3437870SNishanth Aravamudan } 1971a3437870SNishanth Aravamudan 1972a3437870SNishanth Aravamudan kobject_put(hugepages_kobj); 1973a3437870SNishanth Aravamudan } 1974a3437870SNishanth Aravamudan module_exit(hugetlb_exit); 1975a3437870SNishanth Aravamudan 1976a3437870SNishanth Aravamudan static int __init hugetlb_init(void) 1977a3437870SNishanth Aravamudan { 19780ef89d25SBenjamin Herrenschmidt /* Some platforms decide whether they support huge pages at boot 19790ef89d25SBenjamin Herrenschmidt * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when 19800ef89d25SBenjamin Herrenschmidt * there is no such support. 19810ef89d25SBenjamin Herrenschmidt */ 19820ef89d25SBenjamin Herrenschmidt if (HPAGE_SHIFT == 0) 19830ef89d25SBenjamin Herrenschmidt return 0; 1984a3437870SNishanth Aravamudan 1985e11bfbfcSNick Piggin if (!size_to_hstate(default_hstate_size)) { 1986e11bfbfcSNick Piggin default_hstate_size = HPAGE_SIZE; 1987e11bfbfcSNick Piggin if (!size_to_hstate(default_hstate_size)) 1988a3437870SNishanth Aravamudan hugetlb_add_hstate(HUGETLB_PAGE_ORDER); 1989a3437870SNishanth Aravamudan } 1990972dc4deSAneesh Kumar K.V default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size)); 1991e11bfbfcSNick Piggin if (default_hstate_max_huge_pages) 1992e11bfbfcSNick Piggin default_hstate.max_huge_pages = default_hstate_max_huge_pages; 1993a3437870SNishanth Aravamudan 1994a3437870SNishanth Aravamudan hugetlb_init_hstates(); 1995aa888a74SAndi Kleen gather_bootmem_prealloc(); 1996a3437870SNishanth Aravamudan report_hugepages(); 1997a3437870SNishanth Aravamudan 1998a3437870SNishanth Aravamudan hugetlb_sysfs_init(); 19999a305230SLee Schermerhorn hugetlb_register_all_nodes(); 20007179e7bfSJianguo Wu hugetlb_cgroup_file_init(); 20019a305230SLee Schermerhorn 2002a3437870SNishanth Aravamudan return 0; 2003a3437870SNishanth Aravamudan } 2004a3437870SNishanth Aravamudan module_init(hugetlb_init); 2005a3437870SNishanth Aravamudan 2006a3437870SNishanth Aravamudan /* Should be called on processing a hugepagesz=...
option */ 2007a3437870SNishanth Aravamudan void __init hugetlb_add_hstate(unsigned order) 2008a3437870SNishanth Aravamudan { 2009a3437870SNishanth Aravamudan struct hstate *h; 20108faa8b07SAndi Kleen unsigned long i; 20118faa8b07SAndi Kleen 2012a3437870SNishanth Aravamudan if (size_to_hstate(PAGE_SIZE << order)) { 2013ffb22af5SAndrew Morton pr_warning("hugepagesz= specified twice, ignoring\n"); 2014a3437870SNishanth Aravamudan return; 2015a3437870SNishanth Aravamudan } 201647d38344SAneesh Kumar K.V BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE); 2017a3437870SNishanth Aravamudan BUG_ON(order == 0); 201847d38344SAneesh Kumar K.V h = &hstates[hugetlb_max_hstate++]; 2019a3437870SNishanth Aravamudan h->order = order; 2020a3437870SNishanth Aravamudan h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1); 20218faa8b07SAndi Kleen h->nr_huge_pages = 0; 20228faa8b07SAndi Kleen h->free_huge_pages = 0; 20238faa8b07SAndi Kleen for (i = 0; i < MAX_NUMNODES; ++i) 20248faa8b07SAndi Kleen INIT_LIST_HEAD(&h->hugepage_freelists[i]); 20250edaecfaSAneesh Kumar K.V INIT_LIST_HEAD(&h->hugepage_activelist); 20268cebfcd0SLai Jiangshan h->next_nid_to_alloc = first_node(node_states[N_MEMORY]); 20278cebfcd0SLai Jiangshan h->next_nid_to_free = first_node(node_states[N_MEMORY]); 2028a3437870SNishanth Aravamudan snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB", 2029a3437870SNishanth Aravamudan huge_page_size(h)/1024); 20308faa8b07SAndi Kleen 2031a3437870SNishanth Aravamudan parsed_hstate = h; 2032a3437870SNishanth Aravamudan } 2033a3437870SNishanth Aravamudan 2034e11bfbfcSNick Piggin static int __init hugetlb_nrpages_setup(char *s) 2035a3437870SNishanth Aravamudan { 2036a3437870SNishanth Aravamudan unsigned long *mhp; 20378faa8b07SAndi Kleen static unsigned long *last_mhp; 2038a3437870SNishanth Aravamudan 2039a3437870SNishanth Aravamudan /* 204047d38344SAneesh Kumar K.V * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet, 2041a3437870SNishanth Aravamudan * so this hugepages= parameter goes to the "default hstate". 2042a3437870SNishanth Aravamudan */ 204347d38344SAneesh Kumar K.V if (!hugetlb_max_hstate) 2044a3437870SNishanth Aravamudan mhp = &default_hstate_max_huge_pages; 2045a3437870SNishanth Aravamudan else 2046a3437870SNishanth Aravamudan mhp = &parsed_hstate->max_huge_pages; 2047a3437870SNishanth Aravamudan 20488faa8b07SAndi Kleen if (mhp == last_mhp) { 2049ffb22af5SAndrew Morton pr_warning("hugepages= specified twice without " 20508faa8b07SAndi Kleen "interleaving hugepagesz=, ignoring\n"); 20518faa8b07SAndi Kleen return 1; 20528faa8b07SAndi Kleen } 20538faa8b07SAndi Kleen 2054a3437870SNishanth Aravamudan if (sscanf(s, "%lu", mhp) <= 0) 2055a3437870SNishanth Aravamudan *mhp = 0; 2056a3437870SNishanth Aravamudan 20578faa8b07SAndi Kleen /* 20588faa8b07SAndi Kleen * Global state is always initialized later in hugetlb_init. 20598faa8b07SAndi Kleen * But we need to allocate >= MAX_ORDER hstates here early to still 20608faa8b07SAndi Kleen * use the bootmem allocator. 
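 *
 * For example (illustrative): booting with "hugepagesz=1G hugepages=4"
 * reaches this point with a gigantic parsed_hstate, so its four pages
 * are carved out of bootmem here instead of coming from the buddy
 * allocator in hugetlb_init().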
20618faa8b07SAndi Kleen */ 206247d38344SAneesh Kumar K.V if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER) 20638faa8b07SAndi Kleen hugetlb_hstate_alloc_pages(parsed_hstate); 20648faa8b07SAndi Kleen 20658faa8b07SAndi Kleen last_mhp = mhp; 20668faa8b07SAndi Kleen 2067a3437870SNishanth Aravamudan return 1; 2068a3437870SNishanth Aravamudan } 2069e11bfbfcSNick Piggin __setup("hugepages=", hugetlb_nrpages_setup); 2070e11bfbfcSNick Piggin 2071e11bfbfcSNick Piggin static int __init hugetlb_default_setup(char *s) 2072e11bfbfcSNick Piggin { 2073e11bfbfcSNick Piggin default_hstate_size = memparse(s, &s); 2074e11bfbfcSNick Piggin return 1; 2075e11bfbfcSNick Piggin } 2076e11bfbfcSNick Piggin __setup("default_hugepagesz=", hugetlb_default_setup); 2077a3437870SNishanth Aravamudan 20788a213460SNishanth Aravamudan static unsigned int cpuset_mems_nr(unsigned int *array) 20798a213460SNishanth Aravamudan { 20808a213460SNishanth Aravamudan int node; 20818a213460SNishanth Aravamudan unsigned int nr = 0; 20828a213460SNishanth Aravamudan 20838a213460SNishanth Aravamudan for_each_node_mask(node, cpuset_current_mems_allowed) 20848a213460SNishanth Aravamudan nr += array[node]; 20858a213460SNishanth Aravamudan 20868a213460SNishanth Aravamudan return nr; 20878a213460SNishanth Aravamudan } 20888a213460SNishanth Aravamudan 20898a213460SNishanth Aravamudan #ifdef CONFIG_SYSCTL 209006808b08SLee Schermerhorn static int hugetlb_sysctl_handler_common(bool obey_mempolicy, 209106808b08SLee Schermerhorn struct ctl_table *table, int write, 209206808b08SLee Schermerhorn void __user *buffer, size_t *length, loff_t *ppos) 20931da177e4SLinus Torvalds { 2094e5ff2159SAndi Kleen struct hstate *h = &default_hstate; 2095e5ff2159SAndi Kleen unsigned long tmp; 209608d4a246SMichal Hocko int ret; 2097e5ff2159SAndi Kleen 2098e5ff2159SAndi Kleen tmp = h->max_huge_pages; 2099e5ff2159SAndi Kleen 2100adbe8726SEric B Munson if (write && h->order >= MAX_ORDER) 2101adbe8726SEric B Munson return -EINVAL; 2102adbe8726SEric B Munson 2103e5ff2159SAndi Kleen table->data = &tmp; 2104e5ff2159SAndi Kleen table->maxlen = sizeof(unsigned long); 210508d4a246SMichal Hocko ret = proc_doulongvec_minmax(table, write, buffer, length, ppos); 210608d4a246SMichal Hocko if (ret) 210708d4a246SMichal Hocko goto out; 2108e5ff2159SAndi Kleen 210906808b08SLee Schermerhorn if (write) { 2110bad44b5bSDavid Rientjes NODEMASK_ALLOC(nodemask_t, nodes_allowed, 2111bad44b5bSDavid Rientjes GFP_KERNEL | __GFP_NORETRY); 211206808b08SLee Schermerhorn if (!(obey_mempolicy && 211306808b08SLee Schermerhorn init_nodemask_of_mempolicy(nodes_allowed))) { 211406808b08SLee Schermerhorn NODEMASK_FREE(nodes_allowed); 21158cebfcd0SLai Jiangshan nodes_allowed = &node_states[N_MEMORY]; 211606808b08SLee Schermerhorn } 211706808b08SLee Schermerhorn h->max_huge_pages = set_max_huge_pages(h, tmp, nodes_allowed); 211806808b08SLee Schermerhorn 21198cebfcd0SLai Jiangshan if (nodes_allowed != &node_states[N_MEMORY]) 212006808b08SLee Schermerhorn NODEMASK_FREE(nodes_allowed); 212106808b08SLee Schermerhorn } 212208d4a246SMichal Hocko out: 212308d4a246SMichal Hocko return ret; 21241da177e4SLinus Torvalds } 2125396faf03SMel Gorman 212606808b08SLee Schermerhorn int hugetlb_sysctl_handler(struct ctl_table *table, int write, 212706808b08SLee Schermerhorn void __user *buffer, size_t *length, loff_t *ppos) 212806808b08SLee Schermerhorn { 212906808b08SLee Schermerhorn 213006808b08SLee Schermerhorn return hugetlb_sysctl_handler_common(false, table, write, 213106808b08SLee Schermerhorn buffer, length, 
ppos); 213206808b08SLee Schermerhorn } 213306808b08SLee Schermerhorn 213406808b08SLee Schermerhorn #ifdef CONFIG_NUMA 213506808b08SLee Schermerhorn int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write, 213606808b08SLee Schermerhorn void __user *buffer, size_t *length, loff_t *ppos) 213706808b08SLee Schermerhorn { 213806808b08SLee Schermerhorn return hugetlb_sysctl_handler_common(true, table, write, 213906808b08SLee Schermerhorn buffer, length, ppos); 214006808b08SLee Schermerhorn } 214106808b08SLee Schermerhorn #endif /* CONFIG_NUMA */ 214206808b08SLee Schermerhorn 2143a3d0c6aaSNishanth Aravamudan int hugetlb_overcommit_handler(struct ctl_table *table, int write, 21448d65af78SAlexey Dobriyan void __user *buffer, 2145a3d0c6aaSNishanth Aravamudan size_t *length, loff_t *ppos) 2146a3d0c6aaSNishanth Aravamudan { 2147a5516438SAndi Kleen struct hstate *h = &default_hstate; 2148e5ff2159SAndi Kleen unsigned long tmp; 214908d4a246SMichal Hocko int ret; 2150e5ff2159SAndi Kleen 2151e5ff2159SAndi Kleen tmp = h->nr_overcommit_huge_pages; 2152e5ff2159SAndi Kleen 2153adbe8726SEric B Munson if (write && h->order >= MAX_ORDER) 2154adbe8726SEric B Munson return -EINVAL; 2155adbe8726SEric B Munson 2156e5ff2159SAndi Kleen table->data = &tmp; 2157e5ff2159SAndi Kleen table->maxlen = sizeof(unsigned long); 215808d4a246SMichal Hocko ret = proc_doulongvec_minmax(table, write, buffer, length, ppos); 215908d4a246SMichal Hocko if (ret) 216008d4a246SMichal Hocko goto out; 2161e5ff2159SAndi Kleen 2162e5ff2159SAndi Kleen if (write) { 2163064d9efeSNishanth Aravamudan spin_lock(&hugetlb_lock); 2164e5ff2159SAndi Kleen h->nr_overcommit_huge_pages = tmp; 2165a3d0c6aaSNishanth Aravamudan spin_unlock(&hugetlb_lock); 2166e5ff2159SAndi Kleen } 216708d4a246SMichal Hocko out: 216808d4a246SMichal Hocko return ret; 2169a3d0c6aaSNishanth Aravamudan } 2170a3d0c6aaSNishanth Aravamudan 21711da177e4SLinus Torvalds #endif /* CONFIG_SYSCTL */ 21721da177e4SLinus Torvalds 2173e1759c21SAlexey Dobriyan void hugetlb_report_meminfo(struct seq_file *m) 21741da177e4SLinus Torvalds { 2175a5516438SAndi Kleen struct hstate *h = &default_hstate; 2176e1759c21SAlexey Dobriyan seq_printf(m, 21771da177e4SLinus Torvalds "HugePages_Total: %5lu\n" 21781da177e4SLinus Torvalds "HugePages_Free: %5lu\n" 2179b45b5bd6SDavid Gibson "HugePages_Rsvd: %5lu\n" 21807893d1d5SAdam Litke "HugePages_Surp: %5lu\n" 21814f98a2feSRik van Riel "Hugepagesize: %8lu kB\n", 2182a5516438SAndi Kleen h->nr_huge_pages, 2183a5516438SAndi Kleen h->free_huge_pages, 2184a5516438SAndi Kleen h->resv_huge_pages, 2185a5516438SAndi Kleen h->surplus_huge_pages, 2186a5516438SAndi Kleen 1UL << (huge_page_order(h) + PAGE_SHIFT - 10)); 21871da177e4SLinus Torvalds } 21881da177e4SLinus Torvalds 21891da177e4SLinus Torvalds int hugetlb_report_node_meminfo(int nid, char *buf) 21901da177e4SLinus Torvalds { 2191a5516438SAndi Kleen struct hstate *h = &default_hstate; 21921da177e4SLinus Torvalds return sprintf(buf, 21931da177e4SLinus Torvalds "Node %d HugePages_Total: %5u\n" 2194a1de0919SNishanth Aravamudan "Node %d HugePages_Free: %5u\n" 2195a1de0919SNishanth Aravamudan "Node %d HugePages_Surp: %5u\n", 2196a5516438SAndi Kleen nid, h->nr_huge_pages_node[nid], 2197a5516438SAndi Kleen nid, h->free_huge_pages_node[nid], 2198a5516438SAndi Kleen nid, h->surplus_huge_pages_node[nid]); 21991da177e4SLinus Torvalds } 22001da177e4SLinus Torvalds 2201949f7ec5SDavid Rientjes void hugetlb_show_meminfo(void) 2202949f7ec5SDavid Rientjes { 2203949f7ec5SDavid Rientjes struct hstate *h; 2204949f7ec5SDavid 
Rientjes int nid; 2205949f7ec5SDavid Rientjes 2206949f7ec5SDavid Rientjes for_each_node_state(nid, N_MEMORY) 2207949f7ec5SDavid Rientjes for_each_hstate(h) 2208949f7ec5SDavid Rientjes pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n", 2209949f7ec5SDavid Rientjes nid, 2210949f7ec5SDavid Rientjes h->nr_huge_pages_node[nid], 2211949f7ec5SDavid Rientjes h->free_huge_pages_node[nid], 2212949f7ec5SDavid Rientjes h->surplus_huge_pages_node[nid], 2213949f7ec5SDavid Rientjes 1UL << (huge_page_order(h) + PAGE_SHIFT - 10)); 2214949f7ec5SDavid Rientjes } 2215949f7ec5SDavid Rientjes 22161da177e4SLinus Torvalds /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */ 22171da177e4SLinus Torvalds unsigned long hugetlb_total_pages(void) 22181da177e4SLinus Torvalds { 2219d0028588SWanpeng Li struct hstate *h; 2220d0028588SWanpeng Li unsigned long nr_total_pages = 0; 2221d0028588SWanpeng Li 2222d0028588SWanpeng Li for_each_hstate(h) 2223d0028588SWanpeng Li nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h); 2224d0028588SWanpeng Li return nr_total_pages; 22251da177e4SLinus Torvalds } 22261da177e4SLinus Torvalds 2227a5516438SAndi Kleen static int hugetlb_acct_memory(struct hstate *h, long delta) 2228fc1b8a73SMel Gorman { 2229fc1b8a73SMel Gorman int ret = -ENOMEM; 2230fc1b8a73SMel Gorman 2231fc1b8a73SMel Gorman spin_lock(&hugetlb_lock); 2232fc1b8a73SMel Gorman /* 2233fc1b8a73SMel Gorman * When cpuset is configured, it breaks the strict hugetlb page 2234fc1b8a73SMel Gorman * reservation as the accounting is done on a global variable. Such 2235fc1b8a73SMel Gorman * reservation is completely rubbish in the presence of cpuset because 2236fc1b8a73SMel Gorman * the reservation is not checked against page availability for the 2237fc1b8a73SMel Gorman * current cpuset. The application can still potentially be OOM'ed by the 2238fc1b8a73SMel Gorman * kernel for lack of free htlb pages in the cpuset that the task is in. 2239fc1b8a73SMel Gorman * Attempting to enforce strict accounting with cpuset is almost 2240fc1b8a73SMel Gorman * impossible (or too ugly) because cpusets are so fluid that a 2241fc1b8a73SMel Gorman * task or memory node can be dynamically moved between cpusets. 2242fc1b8a73SMel Gorman * 2243fc1b8a73SMel Gorman * The change of semantics for shared hugetlb mapping with cpuset is 2244fc1b8a73SMel Gorman * undesirable. However, in order to preserve some of the semantics, 2245fc1b8a73SMel Gorman * we fall back to check against current free page availability as 2246fc1b8a73SMel Gorman * a best attempt and hopefully to minimize the impact of changing 2247fc1b8a73SMel Gorman * semantics that cpuset has.
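 *
 * For example (illustrative): if the task's cpuset spans only node 1
 * while every free huge page sits on node 0, cpuset_mems_nr() below
 * sees no usable pages and the reservation is refused even though the
 * pool as a whole could have satisfied it.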
2248fc1b8a73SMel Gorman */ 2249fc1b8a73SMel Gorman if (delta > 0) { 2250a5516438SAndi Kleen if (gather_surplus_pages(h, delta) < 0) 2251fc1b8a73SMel Gorman goto out; 2252fc1b8a73SMel Gorman 2253a5516438SAndi Kleen if (delta > cpuset_mems_nr(h->free_huge_pages_node)) { 2254a5516438SAndi Kleen return_unused_surplus_pages(h, delta); 2255fc1b8a73SMel Gorman goto out; 2256fc1b8a73SMel Gorman } 2257fc1b8a73SMel Gorman } 2258fc1b8a73SMel Gorman 2259fc1b8a73SMel Gorman ret = 0; 2260fc1b8a73SMel Gorman if (delta < 0) 2261a5516438SAndi Kleen return_unused_surplus_pages(h, (unsigned long) -delta); 2262fc1b8a73SMel Gorman 2263fc1b8a73SMel Gorman out: 2264fc1b8a73SMel Gorman spin_unlock(&hugetlb_lock); 2265fc1b8a73SMel Gorman return ret; 2266fc1b8a73SMel Gorman } 2267fc1b8a73SMel Gorman 226884afd99bSAndy Whitcroft static void hugetlb_vm_op_open(struct vm_area_struct *vma) 226984afd99bSAndy Whitcroft { 2270f522c3acSJoonsoo Kim struct resv_map *resv = vma_resv_map(vma); 227184afd99bSAndy Whitcroft 227284afd99bSAndy Whitcroft /* 227384afd99bSAndy Whitcroft * This new VMA should share its siblings reservation map if present. 227484afd99bSAndy Whitcroft * The VMA will only ever have a valid reservation map pointer where 227584afd99bSAndy Whitcroft * it is being copied for another still existing VMA. As that VMA 227625985edcSLucas De Marchi * has a reference to the reservation map it cannot disappear until 227784afd99bSAndy Whitcroft * after this open call completes. It is therefore safe to take a 227884afd99bSAndy Whitcroft * new reference here without additional locking. 227984afd99bSAndy Whitcroft */ 2280f522c3acSJoonsoo Kim if (resv) 2281f522c3acSJoonsoo Kim kref_get(&resv->refs); 228284afd99bSAndy Whitcroft } 228384afd99bSAndy Whitcroft 2284c50ac050SDave Hansen static void resv_map_put(struct vm_area_struct *vma) 2285c50ac050SDave Hansen { 2286f522c3acSJoonsoo Kim struct resv_map *resv = vma_resv_map(vma); 2287c50ac050SDave Hansen 2288f522c3acSJoonsoo Kim if (!resv) 2289c50ac050SDave Hansen return; 2290f522c3acSJoonsoo Kim kref_put(&resv->refs, resv_map_release); 2291c50ac050SDave Hansen } 2292c50ac050SDave Hansen 2293a1e78772SMel Gorman static void hugetlb_vm_op_close(struct vm_area_struct *vma) 2294a1e78772SMel Gorman { 2295a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 2296f522c3acSJoonsoo Kim struct resv_map *resv = vma_resv_map(vma); 229790481622SDavid Gibson struct hugepage_subpool *spool = subpool_vma(vma); 229884afd99bSAndy Whitcroft unsigned long reserve; 229984afd99bSAndy Whitcroft unsigned long start; 230084afd99bSAndy Whitcroft unsigned long end; 230184afd99bSAndy Whitcroft 2302f522c3acSJoonsoo Kim if (resv) { 2303a5516438SAndi Kleen start = vma_hugecache_offset(h, vma, vma->vm_start); 2304a5516438SAndi Kleen end = vma_hugecache_offset(h, vma, vma->vm_end); 230584afd99bSAndy Whitcroft 230684afd99bSAndy Whitcroft reserve = (end - start) - 2307f522c3acSJoonsoo Kim region_count(&resv->regions, start, end); 230884afd99bSAndy Whitcroft 2309c50ac050SDave Hansen resv_map_put(vma); 231084afd99bSAndy Whitcroft 23117251ff78SAdam Litke if (reserve) { 2312a5516438SAndi Kleen hugetlb_acct_memory(h, -reserve); 231390481622SDavid Gibson hugepage_subpool_put_pages(spool, reserve); 23147251ff78SAdam Litke } 2315a1e78772SMel Gorman } 231684afd99bSAndy Whitcroft } 2317a1e78772SMel Gorman 23181da177e4SLinus Torvalds /* 23191da177e4SLinus Torvalds * We cannot handle pagefaults against hugetlb pages at all. 
They cause 23201da177e4SLinus Torvalds * handle_mm_fault() to try to instantiate regular-sized pages in the 23211da177e4SLinus Torvalds * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get 23221da177e4SLinus Torvalds * this far. 23231da177e4SLinus Torvalds */ 2324d0217ac0SNick Piggin static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 23251da177e4SLinus Torvalds { 23261da177e4SLinus Torvalds BUG(); 2327d0217ac0SNick Piggin return 0; 23281da177e4SLinus Torvalds } 23291da177e4SLinus Torvalds 2330f0f37e2fSAlexey Dobriyan const struct vm_operations_struct hugetlb_vm_ops = { 2331d0217ac0SNick Piggin .fault = hugetlb_vm_op_fault, 233284afd99bSAndy Whitcroft .open = hugetlb_vm_op_open, 2333a1e78772SMel Gorman .close = hugetlb_vm_op_close, 23341da177e4SLinus Torvalds }; 23351da177e4SLinus Torvalds 23361e8f889bSDavid Gibson static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page, 23371e8f889bSDavid Gibson int writable) 233863551ae0SDavid Gibson { 233963551ae0SDavid Gibson pte_t entry; 234063551ae0SDavid Gibson 23411e8f889bSDavid Gibson if (writable) { 2342106c992aSGerald Schaefer entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page, 2343106c992aSGerald Schaefer vma->vm_page_prot))); 234463551ae0SDavid Gibson } else { 2345106c992aSGerald Schaefer entry = huge_pte_wrprotect(mk_huge_pte(page, 2346106c992aSGerald Schaefer vma->vm_page_prot)); 234763551ae0SDavid Gibson } 234863551ae0SDavid Gibson entry = pte_mkyoung(entry); 234963551ae0SDavid Gibson entry = pte_mkhuge(entry); 2350d9ed9faaSChris Metcalf entry = arch_make_huge_pte(entry, vma, page, writable); 235163551ae0SDavid Gibson 235263551ae0SDavid Gibson return entry; 235363551ae0SDavid Gibson } 235463551ae0SDavid Gibson 23551e8f889bSDavid Gibson static void set_huge_ptep_writable(struct vm_area_struct *vma, 23561e8f889bSDavid Gibson unsigned long address, pte_t *ptep) 23571e8f889bSDavid Gibson { 23581e8f889bSDavid Gibson pte_t entry; 23591e8f889bSDavid Gibson 2360106c992aSGerald Schaefer entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep))); 236132f84528SChris Forbes if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) 23624b3073e1SRussell King update_mmu_cache(vma, address, ptep); 23631e8f889bSDavid Gibson } 23641e8f889bSDavid Gibson 23651e8f889bSDavid Gibson 236663551ae0SDavid Gibson int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, 236763551ae0SDavid Gibson struct vm_area_struct *vma) 236863551ae0SDavid Gibson { 236963551ae0SDavid Gibson pte_t *src_pte, *dst_pte, entry; 237063551ae0SDavid Gibson struct page *ptepage; 23711c59827dSHugh Dickins unsigned long addr; 23721e8f889bSDavid Gibson int cow; 2373a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 2374a5516438SAndi Kleen unsigned long sz = huge_page_size(h); 23751e8f889bSDavid Gibson 23761e8f889bSDavid Gibson cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; 237763551ae0SDavid Gibson 2378a5516438SAndi Kleen for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) { 2379cb900f41SKirill A.
Shutemov spinlock_t *src_ptl, *dst_ptl; 2380c74df32cSHugh Dickins src_pte = huge_pte_offset(src, addr); 2381c74df32cSHugh Dickins if (!src_pte) 2382c74df32cSHugh Dickins continue; 2383a5516438SAndi Kleen dst_pte = huge_pte_alloc(dst, addr, sz); 238463551ae0SDavid Gibson if (!dst_pte) 238563551ae0SDavid Gibson goto nomem; 2386c5c99429SLarry Woodman 2387c5c99429SLarry Woodman /* If the pagetables are shared don't copy or take references */ 2388c5c99429SLarry Woodman if (dst_pte == src_pte) 2389c5c99429SLarry Woodman continue; 2390c5c99429SLarry Woodman 2391cb900f41SKirill A. Shutemov dst_ptl = huge_pte_lock(h, dst, dst_pte); 2392cb900f41SKirill A. Shutemov src_ptl = huge_pte_lockptr(h, src, src_pte); 2393cb900f41SKirill A. Shutemov spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 23947f2e9525SGerald Schaefer if (!huge_pte_none(huge_ptep_get(src_pte))) { 23951e8f889bSDavid Gibson if (cow) 23967f2e9525SGerald Schaefer huge_ptep_set_wrprotect(src, addr, src_pte); 23977f2e9525SGerald Schaefer entry = huge_ptep_get(src_pte); 239863551ae0SDavid Gibson ptepage = pte_page(entry); 239963551ae0SDavid Gibson get_page(ptepage); 24000fe6e20bSNaoya Horiguchi page_dup_rmap(ptepage); 240163551ae0SDavid Gibson set_huge_pte_at(dst, addr, dst_pte, entry); 24021c59827dSHugh Dickins } 2403cb900f41SKirill A. Shutemov spin_unlock(src_ptl); 2404cb900f41SKirill A. Shutemov spin_unlock(dst_ptl); 240563551ae0SDavid Gibson } 240663551ae0SDavid Gibson return 0; 240763551ae0SDavid Gibson 240863551ae0SDavid Gibson nomem: 240963551ae0SDavid Gibson return -ENOMEM; 241063551ae0SDavid Gibson } 241163551ae0SDavid Gibson 2412290408d4SNaoya Horiguchi static int is_hugetlb_entry_migration(pte_t pte) 2413290408d4SNaoya Horiguchi { 2414290408d4SNaoya Horiguchi swp_entry_t swp; 2415290408d4SNaoya Horiguchi 2416290408d4SNaoya Horiguchi if (huge_pte_none(pte) || pte_present(pte)) 2417290408d4SNaoya Horiguchi return 0; 2418290408d4SNaoya Horiguchi swp = pte_to_swp_entry(pte); 241932f84528SChris Forbes if (non_swap_entry(swp) && is_migration_entry(swp)) 2420290408d4SNaoya Horiguchi return 1; 242132f84528SChris Forbes else 2422290408d4SNaoya Horiguchi return 0; 2423290408d4SNaoya Horiguchi } 2424290408d4SNaoya Horiguchi 2425fd6a03edSNaoya Horiguchi static int is_hugetlb_entry_hwpoisoned(pte_t pte) 2426fd6a03edSNaoya Horiguchi { 2427fd6a03edSNaoya Horiguchi swp_entry_t swp; 2428fd6a03edSNaoya Horiguchi 2429fd6a03edSNaoya Horiguchi if (huge_pte_none(pte) || pte_present(pte)) 2430fd6a03edSNaoya Horiguchi return 0; 2431fd6a03edSNaoya Horiguchi swp = pte_to_swp_entry(pte); 243232f84528SChris Forbes if (non_swap_entry(swp) && is_hwpoison_entry(swp)) 2433fd6a03edSNaoya Horiguchi return 1; 243432f84528SChris Forbes else 2435fd6a03edSNaoya Horiguchi return 0; 2436fd6a03edSNaoya Horiguchi } 2437fd6a03edSNaoya Horiguchi 243824669e58SAneesh Kumar K.V void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, 243924669e58SAneesh Kumar K.V unsigned long start, unsigned long end, 244024669e58SAneesh Kumar K.V struct page *ref_page) 244163551ae0SDavid Gibson { 244224669e58SAneesh Kumar K.V int force_flush = 0; 244363551ae0SDavid Gibson struct mm_struct *mm = vma->vm_mm; 244463551ae0SDavid Gibson unsigned long address; 2445c7546f8fSDavid Gibson pte_t *ptep; 244663551ae0SDavid Gibson pte_t pte; 2447cb900f41SKirill A. 
Shutemov spinlock_t *ptl; 244863551ae0SDavid Gibson struct page *page; 2449a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 2450a5516438SAndi Kleen unsigned long sz = huge_page_size(h); 24512ec74c3eSSagi Grimberg const unsigned long mmun_start = start; /* For mmu_notifiers */ 24522ec74c3eSSagi Grimberg const unsigned long mmun_end = end; /* For mmu_notifiers */ 2453a5516438SAndi Kleen 245463551ae0SDavid Gibson WARN_ON(!is_vm_hugetlb_page(vma)); 2455a5516438SAndi Kleen BUG_ON(start & ~huge_page_mask(h)); 2456a5516438SAndi Kleen BUG_ON(end & ~huge_page_mask(h)); 245763551ae0SDavid Gibson 245824669e58SAneesh Kumar K.V tlb_start_vma(tlb, vma); 24592ec74c3eSSagi Grimberg mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 246024669e58SAneesh Kumar K.V again: 2461a5516438SAndi Kleen for (address = start; address < end; address += sz) { 2462c7546f8fSDavid Gibson ptep = huge_pte_offset(mm, address); 2463c7546f8fSDavid Gibson if (!ptep) 2464c7546f8fSDavid Gibson continue; 2465c7546f8fSDavid Gibson 2466cb900f41SKirill A. Shutemov ptl = huge_pte_lock(h, mm, ptep); 246739dde65cSChen, Kenneth W if (huge_pmd_unshare(mm, &address, ptep)) 2468cb900f41SKirill A. Shutemov goto unlock; 246939dde65cSChen, Kenneth W 24706629326bSHillf Danton pte = huge_ptep_get(ptep); 24716629326bSHillf Danton if (huge_pte_none(pte)) 2472cb900f41SKirill A. Shutemov goto unlock; 24736629326bSHillf Danton 24746629326bSHillf Danton /* 24756629326bSHillf Danton * HWPoisoned hugepage is already unmapped and dropped reference 24766629326bSHillf Danton */ 24778c4894c6SNaoya Horiguchi if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) { 2478106c992aSGerald Schaefer huge_pte_clear(mm, address, ptep); 2479cb900f41SKirill A. Shutemov goto unlock; 24808c4894c6SNaoya Horiguchi } 24816629326bSHillf Danton 24826629326bSHillf Danton page = pte_page(pte); 248304f2cbe3SMel Gorman /* 248404f2cbe3SMel Gorman * If a reference page is supplied, it is because a specific 248504f2cbe3SMel Gorman * page is being unmapped, not a range. Ensure the page we 248604f2cbe3SMel Gorman * are about to unmap is the actual page of interest. 248704f2cbe3SMel Gorman */ 248804f2cbe3SMel Gorman if (ref_page) { 248904f2cbe3SMel Gorman if (page != ref_page) 2490cb900f41SKirill A. Shutemov goto unlock; 249104f2cbe3SMel Gorman 249204f2cbe3SMel Gorman /* 249304f2cbe3SMel Gorman * Mark the VMA as having unmapped its page so that 249404f2cbe3SMel Gorman * future faults in this VMA will fail rather than 249504f2cbe3SMel Gorman * looking like data was lost 249604f2cbe3SMel Gorman */ 249704f2cbe3SMel Gorman set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED); 249804f2cbe3SMel Gorman } 249904f2cbe3SMel Gorman 2500c7546f8fSDavid Gibson pte = huge_ptep_get_and_clear(mm, address, ptep); 250124669e58SAneesh Kumar K.V tlb_remove_tlb_entry(tlb, ptep, address); 2502106c992aSGerald Schaefer if (huge_pte_dirty(pte)) 25036649a386SKen Chen set_page_dirty(page); 25049e81130bSHillf Danton 250524669e58SAneesh Kumar K.V page_remove_rmap(page); 250624669e58SAneesh Kumar K.V force_flush = !__tlb_remove_page(tlb, page); 2507cb900f41SKirill A. Shutemov if (force_flush) { 2508cb900f41SKirill A. Shutemov spin_unlock(ptl); 25099e81130bSHillf Danton break; 251063551ae0SDavid Gibson } 2511cb900f41SKirill A. Shutemov /* Bail out after unmapping reference page if supplied */ 2512cb900f41SKirill A. Shutemov if (ref_page) { 2513cb900f41SKirill A. Shutemov spin_unlock(ptl); 2514cb900f41SKirill A. Shutemov break; 2515cb900f41SKirill A. Shutemov } 2516cb900f41SKirill A. 
Shutemov unlock: 2517cb900f41SKirill A. Shutemov spin_unlock(ptl); 2518cb900f41SKirill A. Shutemov } 251924669e58SAneesh Kumar K.V /* 252024669e58SAneesh Kumar K.V * mmu_gather ran out of room to batch pages, so we break out of 252124669e58SAneesh Kumar K.V * the PTE lock to avoid doing the potentially expensive TLB invalidate 252224669e58SAneesh Kumar K.V * and page-free while holding it. 252324669e58SAneesh Kumar K.V */ 252424669e58SAneesh Kumar K.V if (force_flush) { 252524669e58SAneesh Kumar K.V force_flush = 0; 252624669e58SAneesh Kumar K.V tlb_flush_mmu(tlb); 252724669e58SAneesh Kumar K.V if (address < end && !ref_page) 252824669e58SAneesh Kumar K.V goto again; 2529fe1668aeSChen, Kenneth W } 25302ec74c3eSSagi Grimberg mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 253124669e58SAneesh Kumar K.V tlb_end_vma(tlb, vma); 25321da177e4SLinus Torvalds } 253363551ae0SDavid Gibson 2534d833352aSMel Gorman void __unmap_hugepage_range_final(struct mmu_gather *tlb, 2535d833352aSMel Gorman struct vm_area_struct *vma, unsigned long start, 2536d833352aSMel Gorman unsigned long end, struct page *ref_page) 2537d833352aSMel Gorman { 2538d833352aSMel Gorman __unmap_hugepage_range(tlb, vma, start, end, ref_page); 2539d833352aSMel Gorman 2540d833352aSMel Gorman /* 2541d833352aSMel Gorman * Clear this flag so that x86's huge_pmd_share page_table_shareable 2542d833352aSMel Gorman * test will fail on a vma being torn down, and not grab a page table 2543d833352aSMel Gorman * on its way out. We're lucky that the flag has such an appropriate 2544d833352aSMel Gorman * name, and can in fact be safely cleared here. We could clear it 2545d833352aSMel Gorman * before the __unmap_hugepage_range above, but all that's necessary 2546d833352aSMel Gorman * is to clear it before releasing the i_mmap_mutex. This works 2547d833352aSMel Gorman * because in the context this is called, the VMA is about to be 2548d833352aSMel Gorman * destroyed and the i_mmap_mutex is held. 2549d833352aSMel Gorman */ 2550d833352aSMel Gorman vma->vm_flags &= ~VM_MAYSHARE; 2551d833352aSMel Gorman } 2552d833352aSMel Gorman 2553502717f4SChen, Kenneth W void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, 255404f2cbe3SMel Gorman unsigned long end, struct page *ref_page) 2555502717f4SChen, Kenneth W { 255624669e58SAneesh Kumar K.V struct mm_struct *mm; 255724669e58SAneesh Kumar K.V struct mmu_gather tlb; 255824669e58SAneesh Kumar K.V 255924669e58SAneesh Kumar K.V mm = vma->vm_mm; 256024669e58SAneesh Kumar K.V 25612b047252SLinus Torvalds tlb_gather_mmu(&tlb, mm, start, end); 256224669e58SAneesh Kumar K.V __unmap_hugepage_range(&tlb, vma, start, end, ref_page); 256324669e58SAneesh Kumar K.V tlb_finish_mmu(&tlb, start, end); 2564502717f4SChen, Kenneth W } 2565502717f4SChen, Kenneth W 256604f2cbe3SMel Gorman /* 256704f2cbe3SMel Gorman * This is called when the original mapper is failing to COW a MAP_PRIVATE 256804f2cbe3SMel Gorman * mapping for which it owns the reserve page. The intention is to unmap the page 256904f2cbe3SMel Gorman * from other VMAs and let the children be SIGKILLed if they are faulting the 257004f2cbe3SMel Gorman * same region.
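 *
 * Worked example (illustrative addresses, assuming 2MB huge pages):
 * for a mapping with vm_start == 0x2aaaaac00000 and vm_pgoff == 0,
 * a fault at address == 0x2aaaaae00000 gives
 *
 *	pgoff = (0x2aaaaae00000 - 0x2aaaaac00000) >> PAGE_SHIFT = 512
 *
 * i.e. the PAGE_SIZE-granular index of the second huge page, which
 * is the key used for the interval tree walk below.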
257104f2cbe3SMel Gorman */ 25722a4b3dedSHarvey Harrison static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, 25732a4b3dedSHarvey Harrison struct page *page, unsigned long address) 257404f2cbe3SMel Gorman { 25757526674dSAdam Litke struct hstate *h = hstate_vma(vma); 257604f2cbe3SMel Gorman struct vm_area_struct *iter_vma; 257704f2cbe3SMel Gorman struct address_space *mapping; 257804f2cbe3SMel Gorman pgoff_t pgoff; 257904f2cbe3SMel Gorman 258004f2cbe3SMel Gorman /* 258104f2cbe3SMel Gorman * vm_pgoff is in PAGE_SIZE units, hence the different calculation 258204f2cbe3SMel Gorman * from page cache lookup which is in HPAGE_SIZE units. 258304f2cbe3SMel Gorman */ 25847526674dSAdam Litke address = address & huge_page_mask(h); 258536e4f20aSMichal Hocko pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + 258636e4f20aSMichal Hocko vma->vm_pgoff; 2587496ad9aaSAl Viro mapping = file_inode(vma->vm_file)->i_mapping; 258804f2cbe3SMel Gorman 25894eb2b1dcSMel Gorman /* 25904eb2b1dcSMel Gorman * Take the mapping lock for the duration of the table walk. As 25914eb2b1dcSMel Gorman * this mapping should be shared between all the VMAs, 25924eb2b1dcSMel Gorman * __unmap_hugepage_range() is called as the lock is already held 25934eb2b1dcSMel Gorman */ 25943d48ae45SPeter Zijlstra mutex_lock(&mapping->i_mmap_mutex); 25956b2dbba8SMichel Lespinasse vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) { 259604f2cbe3SMel Gorman /* Do not unmap the current VMA */ 259704f2cbe3SMel Gorman if (iter_vma == vma) 259804f2cbe3SMel Gorman continue; 259904f2cbe3SMel Gorman 260004f2cbe3SMel Gorman /* 260104f2cbe3SMel Gorman * Unmap the page from other VMAs without their own reserves. 260204f2cbe3SMel Gorman * They get marked to be SIGKILLed if they fault in these 260304f2cbe3SMel Gorman * areas. This is because a future no-page fault on this VMA 260404f2cbe3SMel Gorman * could insert a zeroed page instead of the data existing 260504f2cbe3SMel Gorman * from the time of fork. This would look like data corruption 260604f2cbe3SMel Gorman */ 260704f2cbe3SMel Gorman if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER)) 260824669e58SAneesh Kumar K.V unmap_hugepage_range(iter_vma, address, 260924669e58SAneesh Kumar K.V address + huge_page_size(h), page); 261004f2cbe3SMel Gorman } 26113d48ae45SPeter Zijlstra mutex_unlock(&mapping->i_mmap_mutex); 261204f2cbe3SMel Gorman 261304f2cbe3SMel Gorman return 1; 261404f2cbe3SMel Gorman } 261504f2cbe3SMel Gorman 26160fe6e20bSNaoya Horiguchi /* 26170fe6e20bSNaoya Horiguchi * Hugetlb_cow() should be called with page lock of the original hugepage held. 2618ef009b25SMichal Hocko * Called with hugetlb_instantiation_mutex held and pte_page locked so we 2619ef009b25SMichal Hocko * cannot race with other handlers or page migration. 2620ef009b25SMichal Hocko * Keep the pte_same checks anyway to make transition from the mutex easier. 26210fe6e20bSNaoya Horiguchi */ 26221e8f889bSDavid Gibson static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma, 262304f2cbe3SMel Gorman unsigned long address, pte_t *ptep, pte_t pte, 2624cb900f41SKirill A. 
Shutemov struct page *pagecache_page, spinlock_t *ptl) 26251e8f889bSDavid Gibson { 2626a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 26271e8f889bSDavid Gibson struct page *old_page, *new_page; 262804f2cbe3SMel Gorman int outside_reserve = 0; 26292ec74c3eSSagi Grimberg unsigned long mmun_start; /* For mmu_notifiers */ 26302ec74c3eSSagi Grimberg unsigned long mmun_end; /* For mmu_notifiers */ 26311e8f889bSDavid Gibson 26321e8f889bSDavid Gibson old_page = pte_page(pte); 26331e8f889bSDavid Gibson 263404f2cbe3SMel Gorman retry_avoidcopy: 26351e8f889bSDavid Gibson /* If no-one else is actually using this page, avoid the copy 26361e8f889bSDavid Gibson * and just make the page writable */ 263737a2140dSJoonsoo Kim if (page_mapcount(old_page) == 1 && PageAnon(old_page)) { 26380fe6e20bSNaoya Horiguchi page_move_anon_rmap(old_page, vma, address); 26391e8f889bSDavid Gibson set_huge_ptep_writable(vma, address, ptep); 264083c54070SNick Piggin return 0; 26411e8f889bSDavid Gibson } 26421e8f889bSDavid Gibson 264304f2cbe3SMel Gorman /* 264404f2cbe3SMel Gorman * If the process that created a MAP_PRIVATE mapping is about to 264504f2cbe3SMel Gorman * perform a COW due to a shared page count, attempt to satisfy 264604f2cbe3SMel Gorman * the allocation without using the existing reserves. The pagecache 264704f2cbe3SMel Gorman * page is used to determine if the reserve at this address was 264804f2cbe3SMel Gorman * consumed or not. If reserves were used, a partial faulted mapping 264904f2cbe3SMel Gorman * at the time of fork() could consume its reserves on COW instead 265004f2cbe3SMel Gorman * of the full address range. 265104f2cbe3SMel Gorman */ 26525944d011SJoonsoo Kim if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && 265304f2cbe3SMel Gorman old_page != pagecache_page) 265404f2cbe3SMel Gorman outside_reserve = 1; 265504f2cbe3SMel Gorman 26561e8f889bSDavid Gibson page_cache_get(old_page); 2657b76c8cfbSLarry Woodman 2658cb900f41SKirill A. Shutemov /* Drop page table lock as buddy allocator may be called */ 2659cb900f41SKirill A. Shutemov spin_unlock(ptl); 266004f2cbe3SMel Gorman new_page = alloc_huge_page(vma, address, outside_reserve); 26611e8f889bSDavid Gibson 26622fc39cecSAdam Litke if (IS_ERR(new_page)) { 266376dcee75SAneesh Kumar K.V long err = PTR_ERR(new_page); 26641e8f889bSDavid Gibson page_cache_release(old_page); 266504f2cbe3SMel Gorman 266604f2cbe3SMel Gorman /* 266704f2cbe3SMel Gorman * If a process owning a MAP_PRIVATE mapping fails to COW, 266804f2cbe3SMel Gorman * it is due to references held by a child and an insufficient 266904f2cbe3SMel Gorman * huge page pool. To guarantee the original mappers 267004f2cbe3SMel Gorman * reliability, unmap the page from child processes. The child 267104f2cbe3SMel Gorman * may get SIGKILLed if it later faults. 267204f2cbe3SMel Gorman */ 267304f2cbe3SMel Gorman if (outside_reserve) { 267404f2cbe3SMel Gorman BUG_ON(huge_pte_none(pte)); 267504f2cbe3SMel Gorman if (unmap_ref_private(mm, vma, old_page, address)) { 267604f2cbe3SMel Gorman BUG_ON(huge_pte_none(pte)); 2677cb900f41SKirill A. Shutemov spin_lock(ptl); 2678a734bcc8SHillf Danton ptep = huge_pte_offset(mm, address & huge_page_mask(h)); 2679a734bcc8SHillf Danton if (likely(pte_same(huge_ptep_get(ptep), pte))) 268004f2cbe3SMel Gorman goto retry_avoidcopy; 2681a734bcc8SHillf Danton /* 2682cb900f41SKirill A. Shutemov * race occurs while re-acquiring page table 2683cb900f41SKirill A. Shutemov * lock, and our job is done. 
2684a734bcc8SHillf Danton */ 2685a734bcc8SHillf Danton return 0; 268604f2cbe3SMel Gorman } 268704f2cbe3SMel Gorman WARN_ON_ONCE(1); 268804f2cbe3SMel Gorman } 268904f2cbe3SMel Gorman 2690b76c8cfbSLarry Woodman /* Caller expects lock to be held */ 2691cb900f41SKirill A. Shutemov spin_lock(ptl); 269276dcee75SAneesh Kumar K.V if (err == -ENOMEM) 269376dcee75SAneesh Kumar K.V return VM_FAULT_OOM; 269476dcee75SAneesh Kumar K.V else 269576dcee75SAneesh Kumar K.V return VM_FAULT_SIGBUS; 26961e8f889bSDavid Gibson } 26971e8f889bSDavid Gibson 26980fe6e20bSNaoya Horiguchi /* 26990fe6e20bSNaoya Horiguchi * When the original hugepage is a shared one, it does not have 27000fe6e20bSNaoya Horiguchi * an anon_vma prepared. 27010fe6e20bSNaoya Horiguchi */ 270244e2aa93SDean Nelson if (unlikely(anon_vma_prepare(vma))) { 2703ea4039a3SHillf Danton page_cache_release(new_page); 2704ea4039a3SHillf Danton page_cache_release(old_page); 270544e2aa93SDean Nelson /* Caller expects lock to be held */ 2706cb900f41SKirill A. Shutemov spin_lock(ptl); 27070fe6e20bSNaoya Horiguchi return VM_FAULT_OOM; 270844e2aa93SDean Nelson } 27090fe6e20bSNaoya Horiguchi 271047ad8475SAndrea Arcangeli copy_user_huge_page(new_page, old_page, address, vma, 271147ad8475SAndrea Arcangeli pages_per_huge_page(h)); 27120ed361deSNick Piggin __SetPageUptodate(new_page); 27131e8f889bSDavid Gibson 27142ec74c3eSSagi Grimberg mmun_start = address & huge_page_mask(h); 27152ec74c3eSSagi Grimberg mmun_end = mmun_start + huge_page_size(h); 27162ec74c3eSSagi Grimberg mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 2717b76c8cfbSLarry Woodman /* 2718cb900f41SKirill A. Shutemov * Retake the page table lock to check for racing updates 2719b76c8cfbSLarry Woodman * before the page tables are altered 2720b76c8cfbSLarry Woodman */ 2721cb900f41SKirill A. Shutemov spin_lock(ptl); 2722a5516438SAndi Kleen ptep = huge_pte_offset(mm, address & huge_page_mask(h)); 27237f2e9525SGerald Schaefer if (likely(pte_same(huge_ptep_get(ptep), pte))) { 272407443a85SJoonsoo Kim ClearPagePrivate(new_page); 272507443a85SJoonsoo Kim 27261e8f889bSDavid Gibson /* Break COW */ 27278fe627ecSGerald Schaefer huge_ptep_clear_flush(vma, address, ptep); 27281e8f889bSDavid Gibson set_huge_pte_at(mm, address, ptep, 27291e8f889bSDavid Gibson make_huge_pte(vma, new_page, 1)); 27300fe6e20bSNaoya Horiguchi page_remove_rmap(old_page); 2731cd67f0d2SNaoya Horiguchi hugepage_add_new_anon_rmap(new_page, vma, address); 27321e8f889bSDavid Gibson /* Make the old page be freed below */ 27331e8f889bSDavid Gibson new_page = old_page; 27341e8f889bSDavid Gibson } 2735cb900f41SKirill A. Shutemov spin_unlock(ptl); 27362ec74c3eSSagi Grimberg mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 27371e8f889bSDavid Gibson page_cache_release(new_page); 27381e8f889bSDavid Gibson page_cache_release(old_page); 27398312034fSJoonsoo Kim 27408312034fSJoonsoo Kim /* Caller expects lock to be held */ 2741cb900f41SKirill A.
Shutemov spin_lock(ptl); 274283c54070SNick Piggin return 0; 27431e8f889bSDavid Gibson } 27441e8f889bSDavid Gibson 274504f2cbe3SMel Gorman /* Return the pagecache page at a given address within a VMA */ 2746a5516438SAndi Kleen static struct page *hugetlbfs_pagecache_page(struct hstate *h, 2747a5516438SAndi Kleen struct vm_area_struct *vma, unsigned long address) 274804f2cbe3SMel Gorman { 274904f2cbe3SMel Gorman struct address_space *mapping; 2750e7c4b0bfSAndy Whitcroft pgoff_t idx; 275104f2cbe3SMel Gorman 275204f2cbe3SMel Gorman mapping = vma->vm_file->f_mapping; 2753a5516438SAndi Kleen idx = vma_hugecache_offset(h, vma, address); 275404f2cbe3SMel Gorman 275504f2cbe3SMel Gorman return find_lock_page(mapping, idx); 275604f2cbe3SMel Gorman } 275704f2cbe3SMel Gorman 27583ae77f43SHugh Dickins /* 27593ae77f43SHugh Dickins * Return whether there is a pagecache page to back given address within VMA. 27603ae77f43SHugh Dickins * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page. 27613ae77f43SHugh Dickins */ 27623ae77f43SHugh Dickins static bool hugetlbfs_pagecache_present(struct hstate *h, 27632a15efc9SHugh Dickins struct vm_area_struct *vma, unsigned long address) 27642a15efc9SHugh Dickins { 27652a15efc9SHugh Dickins struct address_space *mapping; 27662a15efc9SHugh Dickins pgoff_t idx; 27672a15efc9SHugh Dickins struct page *page; 27682a15efc9SHugh Dickins 27692a15efc9SHugh Dickins mapping = vma->vm_file->f_mapping; 27702a15efc9SHugh Dickins idx = vma_hugecache_offset(h, vma, address); 27712a15efc9SHugh Dickins 27722a15efc9SHugh Dickins page = find_get_page(mapping, idx); 27732a15efc9SHugh Dickins if (page) 27742a15efc9SHugh Dickins put_page(page); 27752a15efc9SHugh Dickins return page != NULL; 27762a15efc9SHugh Dickins } 27772a15efc9SHugh Dickins 2778a1ed3ddaSRobert P. J. Day static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, 2779788c7df4SHugh Dickins unsigned long address, pte_t *ptep, unsigned int flags) 2780ac9b9c66SHugh Dickins { 2781a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 2782ac9b9c66SHugh Dickins int ret = VM_FAULT_SIGBUS; 2783409eb8c2SHillf Danton int anon_rmap = 0; 2784e7c4b0bfSAndy Whitcroft pgoff_t idx; 27854c887265SAdam Litke unsigned long size; 27864c887265SAdam Litke struct page *page; 27874c887265SAdam Litke struct address_space *mapping; 27881e8f889bSDavid Gibson pte_t new_pte; 2789cb900f41SKirill A. Shutemov spinlock_t *ptl; 27904c887265SAdam Litke 279104f2cbe3SMel Gorman /* 279204f2cbe3SMel Gorman * Currently, we are forced to kill the process in the event the 279304f2cbe3SMel Gorman * original mapper has unmapped pages from the child due to a failed 279425985edcSLucas De Marchi * COW. Warn that such a situation has occurred as it may not be obvious 279504f2cbe3SMel Gorman */ 279604f2cbe3SMel Gorman if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) { 2797ffb22af5SAndrew Morton pr_warning("PID %d killed due to inadequate hugepage pool\n", 279804f2cbe3SMel Gorman current->pid); 279904f2cbe3SMel Gorman return ret; 280004f2cbe3SMel Gorman } 280104f2cbe3SMel Gorman 28024c887265SAdam Litke mapping = vma->vm_file->f_mapping; 2803a5516438SAndi Kleen idx = vma_hugecache_offset(h, vma, address); 28044c887265SAdam Litke 28054c887265SAdam Litke /* 28064c887265SAdam Litke * Use page lock to guard against racing truncation 28074c887265SAdam Litke * before we get page_table_lock. 
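 *
 * Schematically (a sketch of the race being guarded against):
 *
 *	faulting task			truncating task
 *	-------------			---------------
 *	page = find_lock_page()
 *					i_size is shrunk
 *					blocks on the page lock
 *	re-check i_size (below)
 *	idx >= size -> backout		// the fault loses cleanly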
28084c887265SAdam Litke */ 28096bda666aSChristoph Lameter retry: 28106bda666aSChristoph Lameter page = find_lock_page(mapping, idx); 28116bda666aSChristoph Lameter if (!page) { 2812a5516438SAndi Kleen size = i_size_read(mapping->host) >> huge_page_shift(h); 2813ebed4bfcSHugh Dickins if (idx >= size) 2814ebed4bfcSHugh Dickins goto out; 281504f2cbe3SMel Gorman page = alloc_huge_page(vma, address, 0); 28162fc39cecSAdam Litke if (IS_ERR(page)) { 281776dcee75SAneesh Kumar K.V ret = PTR_ERR(page); 281876dcee75SAneesh Kumar K.V if (ret == -ENOMEM) 281976dcee75SAneesh Kumar K.V ret = VM_FAULT_OOM; 282076dcee75SAneesh Kumar K.V else 282176dcee75SAneesh Kumar K.V ret = VM_FAULT_SIGBUS; 28226bda666aSChristoph Lameter goto out; 28236bda666aSChristoph Lameter } 282447ad8475SAndrea Arcangeli clear_huge_page(page, address, pages_per_huge_page(h)); 28250ed361deSNick Piggin __SetPageUptodate(page); 2826ac9b9c66SHugh Dickins 2827f83a275dSMel Gorman if (vma->vm_flags & VM_MAYSHARE) { 28286bda666aSChristoph Lameter int err; 282945c682a6SKen Chen struct inode *inode = mapping->host; 28306bda666aSChristoph Lameter 28316bda666aSChristoph Lameter err = add_to_page_cache(page, mapping, idx, GFP_KERNEL); 28326bda666aSChristoph Lameter if (err) { 28336bda666aSChristoph Lameter put_page(page); 28346bda666aSChristoph Lameter if (err == -EEXIST) 28356bda666aSChristoph Lameter goto retry; 28366bda666aSChristoph Lameter goto out; 28376bda666aSChristoph Lameter } 283807443a85SJoonsoo Kim ClearPagePrivate(page); 283945c682a6SKen Chen 284045c682a6SKen Chen spin_lock(&inode->i_lock); 2841a5516438SAndi Kleen inode->i_blocks += blocks_per_huge_page(h); 284245c682a6SKen Chen spin_unlock(&inode->i_lock); 284323be7468SMel Gorman } else { 28446bda666aSChristoph Lameter lock_page(page); 28450fe6e20bSNaoya Horiguchi if (unlikely(anon_vma_prepare(vma))) { 28460fe6e20bSNaoya Horiguchi ret = VM_FAULT_OOM; 28470fe6e20bSNaoya Horiguchi goto backout_unlocked; 284823be7468SMel Gorman } 2849409eb8c2SHillf Danton anon_rmap = 1; 28500fe6e20bSNaoya Horiguchi } 28510fe6e20bSNaoya Horiguchi } else { 285257303d80SAndy Whitcroft /* 2853998b4382SNaoya Horiguchi * If a memory error occurs between mmap() and fault, some processes 2854998b4382SNaoya Horiguchi * won't have a hwpoisoned swap entry for the errored virtual address. 2855998b4382SNaoya Horiguchi * So we need to block the hugepage fault with a PG_hwpoison bit check. 2856fd6a03edSNaoya Horiguchi */ 2857fd6a03edSNaoya Horiguchi if (unlikely(PageHWPoison(page))) { 2858aa50d3a7SAndi Kleen ret = VM_FAULT_HWPOISON | 2859972dc4deSAneesh Kumar K.V VM_FAULT_SET_HINDEX(hstate_index(h)); 2860fd6a03edSNaoya Horiguchi goto backout_unlocked; 28616bda666aSChristoph Lameter } 2862998b4382SNaoya Horiguchi } 28631e8f889bSDavid Gibson 286457303d80SAndy Whitcroft /* 286557303d80SAndy Whitcroft * If we are going to COW a private mapping later, we examine the 286657303d80SAndy Whitcroft * pending reservations for this page now. This will ensure that 286757303d80SAndy Whitcroft * any allocations necessary to record that reservation occur outside 286857303d80SAndy Whitcroft * the spinlock. 286957303d80SAndy Whitcroft */ 2870788c7df4SHugh Dickins if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) 28712b26736cSAndy Whitcroft if (vma_needs_reservation(h, vma, address) < 0) { 28722b26736cSAndy Whitcroft ret = VM_FAULT_OOM; 28732b26736cSAndy Whitcroft goto backout_unlocked; 28742b26736cSAndy Whitcroft } 287557303d80SAndy Whitcroft 2876cb900f41SKirill A. Shutemov ptl = huge_pte_lockptr(h, mm, ptep); 2877cb900f41SKirill A.
Shutemov spin_lock(ptl); 2878a5516438SAndi Kleen size = i_size_read(mapping->host) >> huge_page_shift(h); 28794c887265SAdam Litke if (idx >= size) 28804c887265SAdam Litke goto backout; 28814c887265SAdam Litke 288283c54070SNick Piggin ret = 0; 28837f2e9525SGerald Schaefer if (!huge_pte_none(huge_ptep_get(ptep))) 28844c887265SAdam Litke goto backout; 28854c887265SAdam Litke 288607443a85SJoonsoo Kim if (anon_rmap) { 288707443a85SJoonsoo Kim ClearPagePrivate(page); 2888409eb8c2SHillf Danton hugepage_add_new_anon_rmap(page, vma, address); 288907443a85SJoonsoo Kim } 2890409eb8c2SHillf Danton else 2891409eb8c2SHillf Danton page_dup_rmap(page); 28921e8f889bSDavid Gibson new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE) 28931e8f889bSDavid Gibson && (vma->vm_flags & VM_SHARED))); 28941e8f889bSDavid Gibson set_huge_pte_at(mm, address, ptep, new_pte); 28951e8f889bSDavid Gibson 2896788c7df4SHugh Dickins if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { 28971e8f889bSDavid Gibson /* Optimization, do the COW without a second fault */ 2898cb900f41SKirill A. Shutemov ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl); 28991e8f889bSDavid Gibson } 29001e8f889bSDavid Gibson 2901cb900f41SKirill A. Shutemov spin_unlock(ptl); 29024c887265SAdam Litke unlock_page(page); 29034c887265SAdam Litke out: 2904ac9b9c66SHugh Dickins return ret; 29054c887265SAdam Litke 29064c887265SAdam Litke backout: 2907cb900f41SKirill A. Shutemov spin_unlock(ptl); 29082b26736cSAndy Whitcroft backout_unlocked: 29094c887265SAdam Litke unlock_page(page); 29104c887265SAdam Litke put_page(page); 29114c887265SAdam Litke goto out; 2912ac9b9c66SHugh Dickins } 2913ac9b9c66SHugh Dickins 291486e5216fSAdam Litke int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, 2915788c7df4SHugh Dickins unsigned long address, unsigned int flags) 291686e5216fSAdam Litke { 291786e5216fSAdam Litke pte_t *ptep; 291886e5216fSAdam Litke pte_t entry; 2919cb900f41SKirill A. Shutemov spinlock_t *ptl; 29201e8f889bSDavid Gibson int ret; 29210fe6e20bSNaoya Horiguchi struct page *page = NULL; 292257303d80SAndy Whitcroft struct page *pagecache_page = NULL; 29233935baa9SDavid Gibson static DEFINE_MUTEX(hugetlb_instantiation_mutex); 2924a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 292586e5216fSAdam Litke 29261e16a539SKAMEZAWA Hiroyuki address &= huge_page_mask(h); 29271e16a539SKAMEZAWA Hiroyuki 2928fd6a03edSNaoya Horiguchi ptep = huge_pte_offset(mm, address); 2929fd6a03edSNaoya Horiguchi if (ptep) { 2930fd6a03edSNaoya Horiguchi entry = huge_ptep_get(ptep); 2931290408d4SNaoya Horiguchi if (unlikely(is_hugetlb_entry_migration(entry))) { 2932cb900f41SKirill A. Shutemov migration_entry_wait_huge(vma, mm, ptep); 2933290408d4SNaoya Horiguchi return 0; 2934290408d4SNaoya Horiguchi } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) 2935aa50d3a7SAndi Kleen return VM_FAULT_HWPOISON_LARGE | 2936972dc4deSAneesh Kumar K.V VM_FAULT_SET_HINDEX(hstate_index(h)); 2937fd6a03edSNaoya Horiguchi } 2938fd6a03edSNaoya Horiguchi 2939a5516438SAndi Kleen ptep = huge_pte_alloc(mm, address, huge_page_size(h)); 294086e5216fSAdam Litke if (!ptep) 294186e5216fSAdam Litke return VM_FAULT_OOM; 294286e5216fSAdam Litke 29433935baa9SDavid Gibson /* 29443935baa9SDavid Gibson * Serialize hugepage allocation and instantiation, so that we don't 29453935baa9SDavid Gibson * get spurious allocation failures if two CPUs race to instantiate 29463935baa9SDavid Gibson * the same page in the page cache. 
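 *
 * For instance (a sketch; "fd" is a hypothetical hugetlbfs file):
 *
 *	char *p = mmap(NULL, 2UL << 20, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *	// thread A: p[0] = 1;	faults, allocates the huge page
 *	// thread B: p[1] = 1;	serialized behind A, reuses A's page
 *
 * Without the mutex both threads could allocate, and with a single
 * page left in the pool one of them would fail spuriously.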
29473935baa9SDavid Gibson */ 29483935baa9SDavid Gibson mutex_lock(&hugetlb_instantiation_mutex); 29497f2e9525SGerald Schaefer entry = huge_ptep_get(ptep); 29507f2e9525SGerald Schaefer if (huge_pte_none(entry)) { 2951788c7df4SHugh Dickins ret = hugetlb_no_page(mm, vma, address, ptep, flags); 2952b4d1d99fSDavid Gibson goto out_mutex; 29533935baa9SDavid Gibson } 295486e5216fSAdam Litke 295583c54070SNick Piggin ret = 0; 29561e8f889bSDavid Gibson 295757303d80SAndy Whitcroft /* 295857303d80SAndy Whitcroft * If we are going to COW the mapping later, we examine the pending 295957303d80SAndy Whitcroft * reservations for this page now. This will ensure that any 296057303d80SAndy Whitcroft * allocations necessary to record that reservation occur outside the 296157303d80SAndy Whitcroft * spinlock. For private mappings, we also lookup the pagecache 296257303d80SAndy Whitcroft * page now as it is used to determine if a reservation has been 296357303d80SAndy Whitcroft * consumed. 296457303d80SAndy Whitcroft */ 2965106c992aSGerald Schaefer if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) { 29662b26736cSAndy Whitcroft if (vma_needs_reservation(h, vma, address) < 0) { 29672b26736cSAndy Whitcroft ret = VM_FAULT_OOM; 2968b4d1d99fSDavid Gibson goto out_mutex; 29692b26736cSAndy Whitcroft } 297057303d80SAndy Whitcroft 2971f83a275dSMel Gorman if (!(vma->vm_flags & VM_MAYSHARE)) 297257303d80SAndy Whitcroft pagecache_page = hugetlbfs_pagecache_page(h, 297357303d80SAndy Whitcroft vma, address); 297457303d80SAndy Whitcroft } 297557303d80SAndy Whitcroft 297656c9cfb1SNaoya Horiguchi /* 297756c9cfb1SNaoya Horiguchi * hugetlb_cow() requires page locks of pte_page(entry) and 297856c9cfb1SNaoya Horiguchi * pagecache_page, so here we need take the former one 297956c9cfb1SNaoya Horiguchi * when page != pagecache_page or !pagecache_page. 298056c9cfb1SNaoya Horiguchi * Note that locking order is always pagecache_page -> page, 298156c9cfb1SNaoya Horiguchi * so no worry about deadlock. 298256c9cfb1SNaoya Horiguchi */ 29830fe6e20bSNaoya Horiguchi page = pte_page(entry); 298466aebce7SChris Metcalf get_page(page); 298556c9cfb1SNaoya Horiguchi if (page != pagecache_page) 29860fe6e20bSNaoya Horiguchi lock_page(page); 29870fe6e20bSNaoya Horiguchi 2988cb900f41SKirill A. Shutemov ptl = huge_pte_lockptr(h, mm, ptep); 2989cb900f41SKirill A. Shutemov spin_lock(ptl); 29901e8f889bSDavid Gibson /* Check for a racing update before calling hugetlb_cow */ 2991b4d1d99fSDavid Gibson if (unlikely(!pte_same(entry, huge_ptep_get(ptep)))) 2992cb900f41SKirill A. Shutemov goto out_ptl; 2993b4d1d99fSDavid Gibson 2994b4d1d99fSDavid Gibson 2995788c7df4SHugh Dickins if (flags & FAULT_FLAG_WRITE) { 2996106c992aSGerald Schaefer if (!huge_pte_write(entry)) { 299757303d80SAndy Whitcroft ret = hugetlb_cow(mm, vma, address, ptep, entry, 2998cb900f41SKirill A. Shutemov pagecache_page, ptl); 2999cb900f41SKirill A. Shutemov goto out_ptl; 3000b4d1d99fSDavid Gibson } 3001106c992aSGerald Schaefer entry = huge_pte_mkdirty(entry); 3002b4d1d99fSDavid Gibson } 3003b4d1d99fSDavid Gibson entry = pte_mkyoung(entry); 3004788c7df4SHugh Dickins if (huge_ptep_set_access_flags(vma, address, ptep, entry, 3005788c7df4SHugh Dickins flags & FAULT_FLAG_WRITE)) 30064b3073e1SRussell King update_mmu_cache(vma, address, ptep); 3007b4d1d99fSDavid Gibson 3008cb900f41SKirill A. Shutemov out_ptl: 3009cb900f41SKirill A. 
Shutemov spin_unlock(ptl); 301057303d80SAndy Whitcroft 301157303d80SAndy Whitcroft if (pagecache_page) { 301257303d80SAndy Whitcroft unlock_page(pagecache_page); 301357303d80SAndy Whitcroft put_page(pagecache_page); 301457303d80SAndy Whitcroft } 30151f64d69cSDean Nelson if (page != pagecache_page) 301656c9cfb1SNaoya Horiguchi unlock_page(page); 301766aebce7SChris Metcalf put_page(page); 301857303d80SAndy Whitcroft 3019b4d1d99fSDavid Gibson out_mutex: 30203935baa9SDavid Gibson mutex_unlock(&hugetlb_instantiation_mutex); 30211e8f889bSDavid Gibson 30221e8f889bSDavid Gibson return ret; 302386e5216fSAdam Litke } 302486e5216fSAdam Litke 302528a35716SMichel Lespinasse long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, 302663551ae0SDavid Gibson struct page **pages, struct vm_area_struct **vmas, 302728a35716SMichel Lespinasse unsigned long *position, unsigned long *nr_pages, 302828a35716SMichel Lespinasse long i, unsigned int flags) 302963551ae0SDavid Gibson { 3030d5d4b0aaSChen, Kenneth W unsigned long pfn_offset; 3031d5d4b0aaSChen, Kenneth W unsigned long vaddr = *position; 303228a35716SMichel Lespinasse unsigned long remainder = *nr_pages; 3033a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 303463551ae0SDavid Gibson 303563551ae0SDavid Gibson while (vaddr < vma->vm_end && remainder) { 303663551ae0SDavid Gibson pte_t *pte; 3037cb900f41SKirill A. Shutemov spinlock_t *ptl = NULL; 30382a15efc9SHugh Dickins int absent; 303963551ae0SDavid Gibson struct page *page; 304063551ae0SDavid Gibson 30414c887265SAdam Litke /* 30424c887265SAdam Litke * Some archs (sparc64, sh*) have multiple pte_ts to 30432a15efc9SHugh Dickins * each hugepage. We have to make sure we get the 30444c887265SAdam Litke * first, for the page indexing below to work. 3045cb900f41SKirill A. Shutemov * 3046cb900f41SKirill A. Shutemov * Note that page table lock is not held when pte is null. 30474c887265SAdam Litke */ 3048a5516438SAndi Kleen pte = huge_pte_offset(mm, vaddr & huge_page_mask(h)); 3049cb900f41SKirill A. Shutemov if (pte) 3050cb900f41SKirill A. Shutemov ptl = huge_pte_lock(h, mm, pte); 30512a15efc9SHugh Dickins absent = !pte || huge_pte_none(huge_ptep_get(pte)); 305263551ae0SDavid Gibson 30532a15efc9SHugh Dickins /* 30542a15efc9SHugh Dickins * When coredumping, it suits get_dump_page if we just return 30553ae77f43SHugh Dickins * an error where there's an empty slot with no huge pagecache 30563ae77f43SHugh Dickins * to back it. This way, we avoid allocating a hugepage, and 30573ae77f43SHugh Dickins * the sparse dumpfile avoids allocating disk blocks, but its 30583ae77f43SHugh Dickins * huge holes still show up with zeroes where they need to be. 30592a15efc9SHugh Dickins */ 30603ae77f43SHugh Dickins if (absent && (flags & FOLL_DUMP) && 30613ae77f43SHugh Dickins !hugetlbfs_pagecache_present(h, vma, vaddr)) { 3062cb900f41SKirill A. Shutemov if (pte) 3063cb900f41SKirill A. Shutemov spin_unlock(ptl); 30642a15efc9SHugh Dickins remainder = 0; 30652a15efc9SHugh Dickins break; 30662a15efc9SHugh Dickins } 30672a15efc9SHugh Dickins 30689cc3a5bdSNaoya Horiguchi /* 30699cc3a5bdSNaoya Horiguchi * We need to call hugetlb_fault for both hugepages under migration 30709cc3a5bdSNaoya Horiguchi * (in which case hugetlb_fault waits for the migration), and 30719cc3a5bdSNaoya Horiguchi * hwpoisoned hugepages (in which case we need to prevent the 30729cc3a5bdSNaoya Horiguchi * caller from accessing them.)
In order to do this, we use 30739cc3a5bdSNaoya Horiguchi * here is_swap_pte instead of is_hugetlb_entry_migration and 30749cc3a5bdSNaoya Horiguchi * is_hugetlb_entry_hwpoisoned. This is because it simply covers 30759cc3a5bdSNaoya Horiguchi * both cases, and because we can't follow correct pages 30769cc3a5bdSNaoya Horiguchi * directly from any kind of swap entries. 30779cc3a5bdSNaoya Horiguchi */ 30789cc3a5bdSNaoya Horiguchi if (absent || is_swap_pte(huge_ptep_get(pte)) || 3079106c992aSGerald Schaefer ((flags & FOLL_WRITE) && 3080106c992aSGerald Schaefer !huge_pte_write(huge_ptep_get(pte)))) { 30814c887265SAdam Litke int ret; 30824c887265SAdam Litke 3083cb900f41SKirill A. Shutemov if (pte) 3084cb900f41SKirill A. Shutemov spin_unlock(ptl); 30852a15efc9SHugh Dickins ret = hugetlb_fault(mm, vma, vaddr, 30862a15efc9SHugh Dickins (flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0); 3087a89182c7SAdam Litke if (!(ret & VM_FAULT_ERROR)) 30884c887265SAdam Litke continue; 30894c887265SAdam Litke 30901c59827dSHugh Dickins remainder = 0; 30911c59827dSHugh Dickins break; 30921c59827dSHugh Dickins } 309363551ae0SDavid Gibson 3094a5516438SAndi Kleen pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT; 30957f2e9525SGerald Schaefer page = pte_page(huge_ptep_get(pte)); 3096d5d4b0aaSChen, Kenneth W same_page: 3097d6692183SChen, Kenneth W if (pages) { 309869d177c2SAndy Whitcroft pages[i] = mem_map_offset(page, pfn_offset); 30994b2e38adSKOSAKI Motohiro get_page(pages[i]); 3100d6692183SChen, Kenneth W } 310163551ae0SDavid Gibson 310263551ae0SDavid Gibson if (vmas) 310363551ae0SDavid Gibson vmas[i] = vma; 310463551ae0SDavid Gibson 310563551ae0SDavid Gibson vaddr += PAGE_SIZE; 3106d5d4b0aaSChen, Kenneth W ++pfn_offset; 310763551ae0SDavid Gibson --remainder; 310863551ae0SDavid Gibson ++i; 3109d5d4b0aaSChen, Kenneth W if (vaddr < vma->vm_end && remainder && 3110a5516438SAndi Kleen pfn_offset < pages_per_huge_page(h)) { 3111d5d4b0aaSChen, Kenneth W /* 3112d5d4b0aaSChen, Kenneth W * We use pfn_offset to avoid touching the pageframes 3113d5d4b0aaSChen, Kenneth W * of this compound page. 3114d5d4b0aaSChen, Kenneth W */ 3115d5d4b0aaSChen, Kenneth W goto same_page; 3116d5d4b0aaSChen, Kenneth W } 3117cb900f41SKirill A. Shutemov spin_unlock(ptl); 311863551ae0SDavid Gibson } 311928a35716SMichel Lespinasse *nr_pages = remainder; 312063551ae0SDavid Gibson *position = vaddr; 312163551ae0SDavid Gibson 31222a15efc9SHugh Dickins return i ? i : -EFAULT; 312363551ae0SDavid Gibson } 31248f860591SZhang, Yanmin 31257da4d641SPeter Zijlstra unsigned long hugetlb_change_protection(struct vm_area_struct *vma, 31268f860591SZhang, Yanmin unsigned long address, unsigned long end, pgprot_t newprot) 31278f860591SZhang, Yanmin { 31288f860591SZhang, Yanmin struct mm_struct *mm = vma->vm_mm; 31298f860591SZhang, Yanmin unsigned long start = address; 31308f860591SZhang, Yanmin pte_t *ptep; 31318f860591SZhang, Yanmin pte_t pte; 3132a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 31337da4d641SPeter Zijlstra unsigned long pages = 0; 31348f860591SZhang, Yanmin 31358f860591SZhang, Yanmin BUG_ON(address >= end); 31368f860591SZhang, Yanmin flush_cache_range(vma, address, end); 31378f860591SZhang, Yanmin 31383d48ae45SPeter Zijlstra mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex); 3139a5516438SAndi Kleen for (; address < end; address += huge_page_size(h)) { 3140cb900f41SKirill A. 
Shutemov spinlock_t *ptl; 31418f860591SZhang, Yanmin ptep = huge_pte_offset(mm, address); 31428f860591SZhang, Yanmin if (!ptep) 31438f860591SZhang, Yanmin continue; 3144cb900f41SKirill A. Shutemov ptl = huge_pte_lock(h, mm, ptep); 31457da4d641SPeter Zijlstra if (huge_pmd_unshare(mm, &address, ptep)) { 31467da4d641SPeter Zijlstra pages++; 3147cb900f41SKirill A. Shutemov spin_unlock(ptl); 314839dde65cSChen, Kenneth W continue; 31497da4d641SPeter Zijlstra } 31507f2e9525SGerald Schaefer if (!huge_pte_none(huge_ptep_get(ptep))) { 31518f860591SZhang, Yanmin pte = huge_ptep_get_and_clear(mm, address, ptep); 3152106c992aSGerald Schaefer pte = pte_mkhuge(huge_pte_modify(pte, newprot)); 3153be7517d6STony Lu pte = arch_make_huge_pte(pte, vma, NULL, 0); 31548f860591SZhang, Yanmin set_huge_pte_at(mm, address, ptep, pte); 31557da4d641SPeter Zijlstra pages++; 31568f860591SZhang, Yanmin } 3157cb900f41SKirill A. Shutemov spin_unlock(ptl); 31588f860591SZhang, Yanmin } 3159d833352aSMel Gorman /* 3160d833352aSMel Gorman * Must flush TLB before releasing i_mmap_mutex: x86's huge_pmd_unshare 3161d833352aSMel Gorman * may have cleared our pud entry and done put_page on the page table: 3162d833352aSMel Gorman * once we release i_mmap_mutex, another task can do the final put_page 3163d833352aSMel Gorman * and that page table be reused and filled with junk. 3164d833352aSMel Gorman */ 31658f860591SZhang, Yanmin flush_tlb_range(vma, start, end); 3166d833352aSMel Gorman mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex); 31677da4d641SPeter Zijlstra 31687da4d641SPeter Zijlstra return pages << h->order; 31698f860591SZhang, Yanmin } 31708f860591SZhang, Yanmin 3171a1e78772SMel Gorman int hugetlb_reserve_pages(struct inode *inode, 3172a1e78772SMel Gorman long from, long to, 31735a6fe125SMel Gorman struct vm_area_struct *vma, 3174ca16d140SKOSAKI Motohiro vm_flags_t vm_flags) 3175e4e574b7SAdam Litke { 317617c9d12eSMel Gorman long ret, chg; 3177a5516438SAndi Kleen struct hstate *h = hstate_inode(inode); 317890481622SDavid Gibson struct hugepage_subpool *spool = subpool_inode(inode); 3179e4e574b7SAdam Litke 3180a1e78772SMel Gorman /* 318117c9d12eSMel Gorman * Only apply hugepage reservation if asked. At fault time, an 318217c9d12eSMel Gorman * attempt will be made for VM_NORESERVE to allocate a page 318390481622SDavid Gibson * without using reserves 318417c9d12eSMel Gorman */ 3185ca16d140SKOSAKI Motohiro if (vm_flags & VM_NORESERVE) 318617c9d12eSMel Gorman return 0; 318717c9d12eSMel Gorman 318817c9d12eSMel Gorman /* 3189a1e78772SMel Gorman * Shared mappings base their reservation on the number of pages that 3190a1e78772SMel Gorman * are already allocated on behalf of the file. Private mappings need 3191a1e78772SMel Gorman * to reserve the full area even if read-only as mprotect() may be 3192a1e78772SMel Gorman * called to make the mapping read-write. 
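 * Worked example (illustrative numbers): if a shared mapping of the
 * file already has the region [0, 2) reserved and a second mapping
 * covers [0, 4), region_chg() returns 2 since only the two tail
 * pages need new reservations; a private mapping of the same range
 * is charged the full to - from = 4 pages regardless.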
Assume !vma is a shm mapping 3193a1e78772SMel Gorman */ 3194f83a275dSMel Gorman if (!vma || vma->vm_flags & VM_MAYSHARE) 3195e4e574b7SAdam Litke chg = region_chg(&inode->i_mapping->private_list, from, to); 31965a6fe125SMel Gorman else { 31975a6fe125SMel Gorman struct resv_map *resv_map = resv_map_alloc(); 31985a6fe125SMel Gorman if (!resv_map) 31995a6fe125SMel Gorman return -ENOMEM; 32005a6fe125SMel Gorman 320117c9d12eSMel Gorman chg = to - from; 320217c9d12eSMel Gorman 32035a6fe125SMel Gorman set_vma_resv_map(vma, resv_map); 32045a6fe125SMel Gorman set_vma_resv_flags(vma, HPAGE_RESV_OWNER); 32055a6fe125SMel Gorman } 32065a6fe125SMel Gorman 3207c50ac050SDave Hansen if (chg < 0) { 3208c50ac050SDave Hansen ret = chg; 3209c50ac050SDave Hansen goto out_err; 3210c50ac050SDave Hansen } 321117c9d12eSMel Gorman 321290481622SDavid Gibson /* There must be enough pages in the subpool for the mapping */ 3213c50ac050SDave Hansen if (hugepage_subpool_get_pages(spool, chg)) { 3214c50ac050SDave Hansen ret = -ENOSPC; 3215c50ac050SDave Hansen goto out_err; 3216c50ac050SDave Hansen } 321717c9d12eSMel Gorman 321817c9d12eSMel Gorman /* 321917c9d12eSMel Gorman * Check enough hugepages are available for the reservation. 322090481622SDavid Gibson * Hand the pages back to the subpool if there are not 322117c9d12eSMel Gorman */ 322217c9d12eSMel Gorman ret = hugetlb_acct_memory(h, chg); 322317c9d12eSMel Gorman if (ret < 0) { 322490481622SDavid Gibson hugepage_subpool_put_pages(spool, chg); 3225c50ac050SDave Hansen goto out_err; 322617c9d12eSMel Gorman } 322717c9d12eSMel Gorman 322817c9d12eSMel Gorman /* 322917c9d12eSMel Gorman * Account for the reservations made. Shared mappings record regions 323017c9d12eSMel Gorman * that have reservations as they are shared by multiple VMAs. 323117c9d12eSMel Gorman * When the last VMA disappears, the region map says how much 323217c9d12eSMel Gorman * the reservation was and the page cache tells how much of 323317c9d12eSMel Gorman * the reservation was consumed. Private mappings are per-VMA and 323417c9d12eSMel Gorman * only the consumed reservations are tracked. When the VMA 323517c9d12eSMel Gorman * disappears, the original reservation is the VMA size and the 323617c9d12eSMel Gorman * consumed reservations are stored in the map. 
Hence, nothing 323717c9d12eSMel Gorman * else has to be done for private mappings here 323817c9d12eSMel Gorman */ 3239f83a275dSMel Gorman if (!vma || vma->vm_flags & VM_MAYSHARE) 324017c9d12eSMel Gorman region_add(&inode->i_mapping->private_list, from, to); 3241a43a8c39SChen, Kenneth W return 0; 3242c50ac050SDave Hansen out_err: 32434523e145SDave Hansen if (vma) 3244c50ac050SDave Hansen resv_map_put(vma); 3245c50ac050SDave Hansen return ret; 3246a43a8c39SChen, Kenneth W } 3247a43a8c39SChen, Kenneth W 3248a43a8c39SChen, Kenneth W void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed) 3249a43a8c39SChen, Kenneth W { 3250a5516438SAndi Kleen struct hstate *h = hstate_inode(inode); 3251a43a8c39SChen, Kenneth W long chg = region_truncate(&inode->i_mapping->private_list, offset); 325290481622SDavid Gibson struct hugepage_subpool *spool = subpool_inode(inode); 325345c682a6SKen Chen 325445c682a6SKen Chen spin_lock(&inode->i_lock); 3255e4c6f8beSEric Sandeen inode->i_blocks -= (blocks_per_huge_page(h) * freed); 325645c682a6SKen Chen spin_unlock(&inode->i_lock); 325745c682a6SKen Chen 325890481622SDavid Gibson hugepage_subpool_put_pages(spool, (chg - freed)); 3259a5516438SAndi Kleen hugetlb_acct_memory(h, -(chg - freed)); 3260a43a8c39SChen, Kenneth W } 326193f70f90SNaoya Horiguchi 32623212b535SSteve Capper #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE 32633212b535SSteve Capper static unsigned long page_table_shareable(struct vm_area_struct *svma, 32643212b535SSteve Capper struct vm_area_struct *vma, 32653212b535SSteve Capper unsigned long addr, pgoff_t idx) 32663212b535SSteve Capper { 32673212b535SSteve Capper unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) + 32683212b535SSteve Capper svma->vm_start; 32693212b535SSteve Capper unsigned long sbase = saddr & PUD_MASK; 32703212b535SSteve Capper unsigned long s_end = sbase + PUD_SIZE; 32713212b535SSteve Capper 32723212b535SSteve Capper /* Allow segments to share if only one is marked locked */ 32733212b535SSteve Capper unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED; 32743212b535SSteve Capper unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED; 32753212b535SSteve Capper 32763212b535SSteve Capper /* 32773212b535SSteve Capper * match the virtual addresses, permission and the alignment of the 32783212b535SSteve Capper * page table page. 32793212b535SSteve Capper */ 32803212b535SSteve Capper if (pmd_index(addr) != pmd_index(saddr) || 32813212b535SSteve Capper vm_flags != svm_flags || 32823212b535SSteve Capper sbase < svma->vm_start || svma->vm_end < s_end) 32833212b535SSteve Capper return 0; 32843212b535SSteve Capper 32853212b535SSteve Capper return saddr; 32863212b535SSteve Capper } 32873212b535SSteve Capper 32883212b535SSteve Capper static int vma_shareable(struct vm_area_struct *vma, unsigned long addr) 32893212b535SSteve Capper { 32903212b535SSteve Capper unsigned long base = addr & PUD_MASK; 32913212b535SSteve Capper unsigned long end = base + PUD_SIZE; 32923212b535SSteve Capper 32933212b535SSteve Capper /* 32943212b535SSteve Capper * check on proper vm_flags and page table alignment 32953212b535SSteve Capper */ 32963212b535SSteve Capper if (vma->vm_flags & VM_MAYSHARE && 32973212b535SSteve Capper vma->vm_start <= base && end <= vma->vm_end) 32983212b535SSteve Capper return 1; 32993212b535SSteve Capper return 0; 33003212b535SSteve Capper } 33013212b535SSteve Capper 33023212b535SSteve Capper /* 33033212b535SSteve Capper * Search for a shareable pmd page for hugetlb. 
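 * For example (x86_64 figures): two tasks that MAP_SHARED the same
 * hugetlbfs file at PUD_SIZE (1GB) aligned addresses, for at least
 * 1GB and with identical protections, can share one PMD page: a
 * single 4K page of 512 PMD entries then maps the same 512 2MB huge
 * pages for both tasks.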
In any case calls pmd_alloc() 33043212b535SSteve Capper * and returns the corresponding pte. While this is not necessary for the 33053212b535SSteve Capper * !shared pmd case because we can allocate the pmd later as well, it makes the 33063212b535SSteve Capper * code much cleaner. pmd allocation is essential for the shared case because 33073212b535SSteve Capper * pud has to be populated inside the same i_mmap_mutex section - otherwise 33083212b535SSteve Capper * racing tasks could either miss the sharing (see huge_pte_offset) or select a 33093212b535SSteve Capper * bad pmd for sharing. 33103212b535SSteve Capper */ 33113212b535SSteve Capper pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) 33123212b535SSteve Capper { 33133212b535SSteve Capper struct vm_area_struct *vma = find_vma(mm, addr); 33143212b535SSteve Capper struct address_space *mapping = vma->vm_file->f_mapping; 33153212b535SSteve Capper pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + 33163212b535SSteve Capper vma->vm_pgoff; 33173212b535SSteve Capper struct vm_area_struct *svma; 33183212b535SSteve Capper unsigned long saddr; 33193212b535SSteve Capper pte_t *spte = NULL; 33203212b535SSteve Capper pte_t *pte; 3321cb900f41SKirill A. Shutemov spinlock_t *ptl; 33223212b535SSteve Capper 33233212b535SSteve Capper if (!vma_shareable(vma, addr)) 33243212b535SSteve Capper return (pte_t *)pmd_alloc(mm, pud, addr); 33253212b535SSteve Capper 33263212b535SSteve Capper mutex_lock(&mapping->i_mmap_mutex); 33273212b535SSteve Capper vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) { 33283212b535SSteve Capper if (svma == vma) 33293212b535SSteve Capper continue; 33303212b535SSteve Capper 33313212b535SSteve Capper saddr = page_table_shareable(svma, vma, addr, idx); 33323212b535SSteve Capper if (saddr) { 33333212b535SSteve Capper spte = huge_pte_offset(svma->vm_mm, saddr); 33343212b535SSteve Capper if (spte) { 33353212b535SSteve Capper get_page(virt_to_page(spte)); 33363212b535SSteve Capper break; 33373212b535SSteve Capper } 33383212b535SSteve Capper } 33393212b535SSteve Capper } 33403212b535SSteve Capper 33413212b535SSteve Capper if (!spte) 33423212b535SSteve Capper goto out; 33433212b535SSteve Capper 3344cb900f41SKirill A. Shutemov ptl = huge_pte_lockptr(hstate_vma(vma), mm, spte); 3345cb900f41SKirill A. Shutemov spin_lock(ptl); 33463212b535SSteve Capper if (pud_none(*pud)) 33473212b535SSteve Capper pud_populate(mm, pud, 33483212b535SSteve Capper (pmd_t *)((unsigned long)spte & PAGE_MASK)); 33493212b535SSteve Capper else 33503212b535SSteve Capper put_page(virt_to_page(spte)); 3351cb900f41SKirill A. Shutemov spin_unlock(ptl); 33523212b535SSteve Capper out: 33533212b535SSteve Capper pte = (pte_t *)pmd_alloc(mm, pud, addr); 33543212b535SSteve Capper mutex_unlock(&mapping->i_mmap_mutex); 33553212b535SSteve Capper return pte; 33563212b535SSteve Capper } 33573212b535SSteve Capper 33583212b535SSteve Capper /* 33593212b535SSteve Capper * unmap huge page backed by shared pte. 33603212b535SSteve Capper * 33613212b535SSteve Capper * Hugetlb pte page is ref counted at the time of mapping. If pte is shared 33623212b535SSteve Capper * indicated by page_count > 1, unmap is achieved by clearing pud and 33633212b535SSteve Capper * decrementing the ref count. If count == 1, the pte page is not shared. 33643212b535SSteve Capper * 3365cb900f41SKirill A. Shutemov * called with page table lock held. 
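 *
 * A typical caller pattern (sketch mirroring the unmap and mprotect
 * loops earlier in this file):
 *
 *	ptl = huge_pte_lock(h, mm, ptep);
 *	if (huge_pmd_unshare(mm, &address, ptep)) {
 *		spin_unlock(ptl);
 *		continue;	// pud was cleared, nothing to do here
 *	}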
/*
 * Unmap a huge page backed by a shared pte.
 *
 * The hugetlb pte page is refcounted at the time of mapping. If the pte
 * is shared, as indicated by page_count > 1, unmap is achieved by
 * clearing the pud and decrementing the refcount. If count == 1, the pte
 * page is not shared.
 *
 * Called with the page table lock held.
 *
 * returns: 1 successfully unmapped a shared pte page
 *	    0 the underlying pte page is not shared, or it is the last user
 */
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	pgd_t *pgd = pgd_offset(mm, *addr);
	pud_t *pud = pud_offset(pgd, *addr);

	BUG_ON(page_count(virt_to_page(ptep)) == 0);
	if (page_count(virt_to_page(ptep)) == 1)
		return 0;

	pud_clear(pud);
	put_page(virt_to_page(ptep));
	*addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
	return 1;
}
#define want_pmd_share()	(1)
#else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
{
	return NULL;
}
#define want_pmd_share()	(0)
#endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */

#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		if (sz == PUD_SIZE) {
			pte = (pte_t *)pud;
		} else {
			BUG_ON(sz != PMD_SIZE);
			if (want_pmd_share() && pud_none(*pud))
				pte = huge_pmd_share(mm, addr, pud);
			else
				pte = (pte_t *)pmd_alloc(mm, pud, addr);
		}
	}
	BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));

	return pte;
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd_present(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (pud_present(*pud)) {
			if (pud_huge(*pud))
				return (pte_t *)pud;
			pmd = pmd_offset(pud, addr);
		}
	}
	return (pte_t *)pmd;
}
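/*
 * Editor's sketch (not in the original file): a typical fault-path
 * caller rounds the address down to the huge page boundary and lets
 * huge_pte_alloc() decide between reusing a shared pmd and allocating a
 * fresh one. The helper name is hypothetical.
 */
static pte_t * __maybe_unused example_lookup_huge_pte(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long address)
{
	struct hstate *h = hstate_vma(vma);

	/* hugetlb_fault() performs the same rounding before allocating */
	address &= huge_page_mask(h);
	return huge_pte_alloc(mm, address, huge_page_size(h));
}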
struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	struct page *page;

	page = pte_page(*(pte_t *)pmd);
	if (page)
		page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
	return page;
}

struct page *
follow_huge_pud(struct mm_struct *mm, unsigned long address,
		pud_t *pud, int write)
{
	struct page *page;

	page = pte_page(*(pte_t *)pud);
	if (page)
		page += ((address & ~PUD_MASK) >> PAGE_SHIFT);
	return page;
}

#else /* !CONFIG_ARCH_WANT_GENERAL_HUGETLB */

/* Can be overridden by architectures */
__attribute__((weak)) struct page *
follow_huge_pud(struct mm_struct *mm, unsigned long address,
		pud_t *pud, int write)
{
	BUG();
	return NULL;
}

#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */

#ifdef CONFIG_MEMORY_FAILURE

/* Should be called with hugetlb_lock held */
static int is_hugepage_on_freelist(struct page *hpage)
{
	struct page *page;
	struct page *tmp;
	struct hstate *h = page_hstate(hpage);
	int nid = page_to_nid(hpage);

	list_for_each_entry_safe(page, tmp, &h->hugepage_freelists[nid], lru)
		if (page == hpage)
			return 1;
	return 0;
}

/*
 * This function is called from the memory failure code.
 * Assume the caller holds the page lock of the head page.
 */
int dequeue_hwpoisoned_huge_page(struct page *hpage)
{
	struct hstate *h = page_hstate(hpage);
	int nid = page_to_nid(hpage);
	int ret = -EBUSY;

	spin_lock(&hugetlb_lock);
	if (is_hugepage_on_freelist(hpage)) {
		/*
		 * A hwpoisoned hugepage isn't linked to the activelist or
		 * freelist, but a dangling hpage->lru can trigger list-debug
		 * warnings (this happens when we call unpoison_memory() on
		 * it), so let it point to itself with list_del_init().
		 */
		list_del_init(&hpage->lru);
		set_page_refcounted(hpage);
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
		ret = 0;
	}
	spin_unlock(&hugetlb_lock);
	return ret;
}
#endif

bool isolate_huge_page(struct page *page, struct list_head *list)
{
	VM_BUG_ON(!PageHead(page));
	if (!get_page_unless_zero(page))
		return false;
	spin_lock(&hugetlb_lock);
	list_move_tail(&page->lru, list);
	spin_unlock(&hugetlb_lock);
	return true;
}

void putback_active_hugepage(struct page *page)
{
	VM_BUG_ON(!PageHead(page));
	spin_lock(&hugetlb_lock);
	list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
	spin_unlock(&hugetlb_lock);
	put_page(page);
}

bool is_hugepage_active(struct page *page)
{
	VM_BUG_ON(!PageHuge(page));
	/*
	 * This function can be called for a tail page because the caller,
	 * scan_movable_pages, scans through a given pfn-range which typically
	 * covers one memory block. In systems using gigantic hugepages (1GB
	 * on x86_64), a hugepage is larger than a memory block, and we don't
	 * support migrating such large hugepages for now, so return false
	 * when called for tail pages.
	 */
	if (PageTail(page))
		return false;
	/*
	 * The refcount of a hwpoisoned hugepage is 1, but such pages are not
	 * active, so we should return false for them as well.
	 */
	if (unlikely(PageHWPoison(page)))
		return false;
	return page_count(page) > 0;
}
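/*
 * Editor's sketch (not in the original file): how a migration caller is
 * expected to pair the helpers above. On success the isolated page ends
 * up on the caller's pagelist, ready for migrate_pages(); if migration
 * then fails, putback_active_hugepage() returns the page to its hstate's
 * active list and drops the reference taken by isolate_huge_page(). The
 * helper name is hypothetical.
 */
static int __maybe_unused example_isolate_hugepage_for_migration(
		struct page *hpage, struct list_head *pagelist)
{
	/* skip tail, hwpoisoned and unused pages */
	if (!is_hugepage_active(hpage))
		return -EBUSY;
	if (!isolate_huge_page(hpage, pagelist))
		return -EBUSY;
	/* caller: migrate_pages(pagelist, ...), else putback_active_hugepage() */
	return 0;
}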