/*
 * Generic hugetlb support.
 * (C) Nadia Yvette Chambers, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>

#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/node.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;

int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];

__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static unsigned long __initdata default_hstate_size;

/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
DEFINE_SPINLOCK(hugetlb_lock);

static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
{
	bool free = (spool->count == 0) && (spool->used_hpages == 0);

	spin_unlock(&spool->lock);

	/* If no pages are used, and no other handles to the subpool
	 * remain, free the subpool. */
	if (free)
		kfree(spool);
}

struct hugepage_subpool *hugepage_new_subpool(long nr_blocks)
{
	struct hugepage_subpool *spool;

	spool = kmalloc(sizeof(*spool), GFP_KERNEL);
	if (!spool)
		return NULL;

	spin_lock_init(&spool->lock);
	spool->count = 1;
	spool->max_hpages = nr_blocks;
	spool->used_hpages = 0;

	return spool;
}

void hugepage_put_subpool(struct hugepage_subpool *spool)
{
	spin_lock(&spool->lock);
	BUG_ON(!spool->count);
	spool->count--;
	unlock_or_release_subpool(spool);
}

static int hugepage_subpool_get_pages(struct hugepage_subpool *spool,
				      long delta)
{
	int ret = 0;

	if (!spool)
		return 0;

	spin_lock(&spool->lock);
	if ((spool->used_hpages + delta) <= spool->max_hpages) {
		spool->used_hpages += delta;
	} else {
		ret = -ENOMEM;
	}
	spin_unlock(&spool->lock);

	return ret;
}

static void hugepage_subpool_put_pages(struct hugepage_subpool *spool,
				       long delta)
{
	if (!spool)
		return;

	spin_lock(&spool->lock);
	spool->used_hpages -= delta;
	/* If hugetlbfs_put_super couldn't free spool due to
	 * an outstanding quota reference, free it now. */
	unlock_or_release_subpool(spool);
}

static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{
	return HUGETLBFS_SB(inode->i_sb)->spool;
}

static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{
	return subpool_inode(file_inode(vma->vm_file));
}

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 * across the pages in a mapping.
 *
 * The region data structures are protected by a combination of the mmap_sem
 * and the hugetlb_instantiation_mutex.  To access or modify a region the caller
 * must either hold the mmap_sem for write, or the mmap_sem for read and
 * the hugetlb_instantiation_mutex:
 *
 *	down_write(&mm->mmap_sem);
 * or
 *	down_read(&mm->mmap_sem);
 *	mutex_lock(&hugetlb_instantiation_mutex);
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
};

static long region_add(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg, *trg;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;

	/* Check for and consume any regions we now overlap with. */
	nrg = rg;
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			break;

		/* If this area reaches higher then extend our area to
		 * include it completely.  If this is not the first area
		 * which we intend to reuse, free it. */
		if (rg->to > t)
			t = rg->to;
		if (rg != nrg) {
			list_del(&rg->link);
			kfree(rg);
		}
	}
	nrg->from = f;
	nrg->to = t;
	return 0;
}

static long region_chg(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg;
	long chg = 0;

	/* Locate the region we are before or in. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* If we are below the current region then a new region is required.
	 * Subtle, allocate a new region at the position but make it zero
	 * size such that we can guarantee to record the reservation. */
	if (&rg->link == head || t < rg->from) {
		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
		if (!nrg)
			return -ENOMEM;
		nrg->from = f;
		nrg->to = f;
		INIT_LIST_HEAD(&nrg->link);
		list_add(&nrg->link, rg->link.prev);

		return t - f;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;
	chg = t - f;

	/* Check for and consume any regions we now overlap with. */
	list_for_each_entry(rg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			return chg;

		/* We overlap with this area, if it extends further than
		 * us then we must extend ourselves.  Account for its
		 * existing reservation. */
		if (rg->to > t) {
			chg += rg->to - t;
			t = rg->to;
		}
		chg -= rg->to - rg->from;
	}
	return chg;
}

static long region_truncate(struct list_head *head, long end)
{
	struct file_region *rg, *trg;
	long chg = 0;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (end <= rg->to)
			break;
	if (&rg->link == head)
		return 0;

	/* If we are in the middle of a region then adjust it. */
	if (end > rg->from) {
		chg = rg->to - end;
		rg->to = end;
		rg = list_entry(rg->link.next, typeof(*rg), link);
	}

	/* Drop any remaining regions. */
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		chg += rg->to - rg->from;
		list_del(&rg->link);
		kfree(rg);
	}
	return chg;
}

static long region_count(struct list_head *head, long f, long t)
{
	struct file_region *rg;
	long chg = 0;

	/* Locate each segment we overlap with, and count that overlap. */
	list_for_each_entry(rg, head, link) {
		long seg_from;
		long seg_to;

		if (rg->to <= f)
			continue;
		if (rg->from >= t)
			break;

		seg_from = max(rg->from, f);
		seg_to = min(rg->to, t);

		chg += seg_to - seg_from;
	}

	return chg;
}

/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	return ((address - vma->vm_start) >> huge_page_shift(h)) +
			(vma->vm_pgoff >> huge_page_order(h));
}

pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address)
{
	return vma_hugecache_offset(hstate_vma(vma), vma, address);
}

/*
 * Return the size of the pages allocated when backing a VMA. In the majority
 * of cases this will be the same size as used by the page table entries.
 */
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	struct hstate *hstate;

	if (!is_vm_hugetlb_page(vma))
		return PAGE_SIZE;

	hstate = hstate_vma(vma);

	return 1UL << huge_page_shift(hstate);
}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);

/*
 * Return the page size being used by the MMU to back a VMA. In the majority
 * of cases, the page size used by the kernel matches the MMU size. On
 * architectures where it differs, an architecture-specific version of this
 * function is required.
 */
#ifndef vma_mmu_pagesize
unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return vma_kernel_pagesize(vma);
}
#endif

/*
 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 * is guaranteed to have their future faults succeed.
 *
 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held. It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping.  A shared mapping has a region map associated
 * with the underlying file; this region map represents the backing file
 * pages which have ever had a reservation assigned, and it persists even
 * after the page is instantiated.  A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it; this region map represents those offsets which have consumed
 * reservation, ie. where pages have been instantiated.
 */
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
	return (unsigned long)vma->vm_private_data;
}

static void set_vma_private_data(struct vm_area_struct *vma,
							unsigned long value)
{
	vma->vm_private_data = (void *)value;
}

struct resv_map {
	struct kref refs;
	struct list_head regions;
};

static struct resv_map *resv_map_alloc(void)
{
	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
	if (!resv_map)
		return NULL;

	kref_init(&resv_map->refs);
	INIT_LIST_HEAD(&resv_map->regions);

	return resv_map;
}

static void resv_map_release(struct kref *ref)
{
	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);

	/* Clear out any active regions before we release the map. */
	region_truncate(&resv_map->regions, 0);
	kfree(resv_map);
}

static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	if (!(vma->vm_flags & VM_MAYSHARE))
		return (struct resv_map *)(get_vma_private_data(vma) &
							~HPAGE_RESV_MASK);
	return NULL;
}

static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);

	set_vma_private_data(vma, (get_vma_private_data(vma) &
				HPAGE_RESV_MASK) | (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);

	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));

	return (get_vma_private_data(vma) & flag) != 0;
}

/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	if (!(vma->vm_flags & VM_MAYSHARE))
		vma->vm_private_data = (void *)0;
}

/* Returns true if the VMA has associated reserve pages */
static int vma_has_reserves(struct vm_area_struct *vma, long chg)
{
	if (vma->vm_flags & VM_NORESERVE) {
		/*
		 * This address is already reserved by another process
		 * (chg == 0), so we should decrement the reserved count.
		 * Without decrementing, the reserve count remains after
		 * releasing the inode, because the allocated page will go
		 * into the page cache and is regarded as coming from the
		 * reserved pool in the releasing step.  Currently, we don't
		 * have any other solution to deal with this situation
		 * properly, so add a work-around here.
		 */
		if (vma->vm_flags & VM_MAYSHARE && chg == 0)
			return 1;
		else
			return 0;
	}

	/* Shared mappings always use reserves */
	if (vma->vm_flags & VM_MAYSHARE)
		return 1;

	/*
	 * Only the process that called mmap() has reserves for
	 * private mappings.
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
		return 1;

	return 0;
}

static void copy_gigantic_page(struct page *dst, struct page *src)
{
	int i;
	struct hstate *h = page_hstate(src);
	struct page *dst_base = dst;
	struct page *src_base = src;

	for (i = 0; i < pages_per_huge_page(h); ) {
		cond_resched();
		copy_highpage(dst, src);

		i++;
		dst = mem_map_next(dst, dst_base, i);
		src = mem_map_next(src, src_base, i);
	}
}

void copy_huge_page(struct page *dst, struct page *src)
{
	int i;
	struct hstate *h = page_hstate(src);

	if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
		copy_gigantic_page(dst, src);
		return;
	}

	might_sleep();
	for (i = 0; i < pages_per_huge_page(h); i++) {
		cond_resched();
		copy_highpage(dst + i, src + i);
	}
}

static void enqueue_huge_page(struct hstate *h, struct page *page)
{
	int nid = page_to_nid(page);
	list_move(&page->lru, &h->hugepage_freelists[nid]);
	h->free_huge_pages++;
	h->free_huge_pages_node[nid]++;
}

static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
{
	struct page *page;

	if (list_empty(&h->hugepage_freelists[nid]))
		return NULL;
	page = list_entry(h->hugepage_freelists[nid].next, struct page, lru);
	list_move(&page->lru, &h->hugepage_activelist);
	set_page_refcounted(page);
	h->free_huge_pages--;
	h->free_huge_pages_node[nid]--;
	return page;
}
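
/*
 * Pick a free huge page for a fault in 'vma': walk the zonelist allowed by
 * the task's mempolicy and cpuset, take a page from the first suitable node,
 * and decrement the reserve count when the VMA holds a reservation and
 * 'avoid_reserve' is not set.
 */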
static struct page *dequeue_huge_page_vma(struct hstate *h,
				struct vm_area_struct *vma,
				unsigned long address, int avoid_reserve,
				long chg)
{
	struct page *page = NULL;
	struct mempolicy *mpol;
	nodemask_t *nodemask;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;
	unsigned int cpuset_mems_cookie;

	/*
	 * A child process with MAP_PRIVATE mappings created by its parent
	 * has no page reserves. This check ensures that reservations are
	 * not "stolen". The child may still get SIGKILLed.
	 */
	if (!vma_has_reserves(vma, chg) &&
			h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

	/* If reserves cannot be used, ensure enough pages are in the pool */
	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

retry_cpuset:
	cpuset_mems_cookie = get_mems_allowed();
	zonelist = huge_zonelist(vma, address,
					htlb_alloc_mask, &mpol, &nodemask);

	for_each_zone_zonelist_nodemask(zone, z, zonelist,
						MAX_NR_ZONES - 1, nodemask) {
		if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask)) {
			page = dequeue_huge_page_node(h, zone_to_nid(zone));
			if (page) {
				if (avoid_reserve)
					break;
				if (!vma_has_reserves(vma, chg))
					break;

				h->resv_huge_pages--;
				break;
			}
		}
	}

	mpol_cond_put(mpol);
	if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
		goto retry_cpuset;
	return page;

err:
	return NULL;
}

static void update_and_free_page(struct hstate *h, struct page *page)
{
	int i;

	VM_BUG_ON(h->order >= MAX_ORDER);

	h->nr_huge_pages--;
	h->nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < pages_per_huge_page(h); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
				1 << PG_referenced | 1 << PG_dirty |
				1 << PG_active | 1 << PG_reserved |
				1 << PG_private | 1 << PG_writeback);
	}
	VM_BUG_ON(hugetlb_cgroup_from_page(page));
	set_compound_page_dtor(page, NULL);
	set_page_refcounted(page);
	arch_release_hugepage(page);
	__free_pages(page, huge_page_order(h));
}

struct hstate *size_to_hstate(unsigned long size)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (huge_page_size(h) == size)
			return h;
	}
	return NULL;
}

static void free_huge_page(struct page *page)
{
	/*
	 * Can't pass hstate in here because it is called from the
	 * compound page destructor.
	 */
	struct hstate *h = page_hstate(page);
	int nid = page_to_nid(page);
	struct hugepage_subpool *spool =
		(struct hugepage_subpool *)page_private(page);

	set_page_private(page, 0);
	page->mapping = NULL;
	BUG_ON(page_count(page));
	BUG_ON(page_mapcount(page));

	spin_lock(&hugetlb_lock);
	hugetlb_cgroup_uncharge_page(hstate_index(h),
				     pages_per_huge_page(h), page);
	if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
		/* remove the page from active list */
		list_del(&page->lru);
		update_and_free_page(h, page);
		h->surplus_huge_pages--;
		h->surplus_huge_pages_node[nid]--;
	} else {
		arch_clear_hugepage_flags(page);
		enqueue_huge_page(h, page);
	}
	spin_unlock(&hugetlb_lock);
	hugepage_subpool_put_pages(spool, 1);
}

static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
{
	INIT_LIST_HEAD(&page->lru);
	set_compound_page_dtor(page, free_huge_page);
	spin_lock(&hugetlb_lock);
	set_hugetlb_cgroup(page, NULL);
	h->nr_huge_pages++;
	h->nr_huge_pages_node[nid]++;
	spin_unlock(&hugetlb_lock);
	put_page(page); /* free it into the hugepage allocator */
}

static void prep_compound_gigantic_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p = page + 1;

	/* we rely on prep_new_huge_page to set the destructor */
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
		__SetPageTail(p);
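		/* tail pages carry no reference of their own; the head page
		 * holds the compound page's reference count */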
		set_page_count(p, 0);
		p->first_page = page;
	}
}

/*
 * PageHuge() only returns true for hugetlbfs pages, but not for normal or
 * transparent huge pages.  See the PageTransHuge() documentation for more
 * details.
 */
int PageHuge(struct page *page)
{
	compound_page_dtor *dtor;

	if (!PageCompound(page))
		return 0;

	page = compound_head(page);
	dtor = get_compound_page_dtor(page);

	return dtor == free_huge_page;
}
EXPORT_SYMBOL_GPL(PageHuge);

pgoff_t __basepage_index(struct page *page)
{
	struct page *page_head = compound_head(page);
	pgoff_t index = page_index(page_head);
	unsigned long compound_idx;

	if (!PageHuge(page_head))
		return page_index(page);

	if (compound_order(page_head) >= MAX_ORDER)
		compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
	else
		compound_idx = page - page_head;

	return (index << compound_order(page_head)) + compound_idx;
}

static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
{
	struct page *page;

	if (h->order >= MAX_ORDER)
		return NULL;

	page = alloc_pages_exact_node(nid,
		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
						__GFP_REPEAT|__GFP_NOWARN,
		huge_page_order(h));
	if (page) {
		if (arch_prepare_hugepage(page)) {
			__free_pages(page, huge_page_order(h));
			return NULL;
		}
		prep_new_huge_page(h, page, nid);
	}

	return page;
}

/*
 * common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 * be outside of *nodes_allowed.  Ensure that we use an allowed
 * node for alloc or free.
 */
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	nid = next_node(nid, *nodes_allowed);
	if (nid == MAX_NUMNODES)
		nid = first_node(*nodes_allowed);
	VM_BUG_ON(nid >= MAX_NUMNODES);

	return nid;
}

static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	if (!node_isset(nid, *nodes_allowed))
		nid = next_node_allowed(nid, nodes_allowed);
	return nid;
}

/*
 * returns the previously saved node ["this node"] from which to
 * allocate a persistent huge page for the pool and advance the
 * next node from which to allocate, handling wrap at end of node
 * mask.
 */
static int hstate_next_node_to_alloc(struct hstate *h,
					nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);

	return nid;
}

/*
 * helper for free_pool_huge_page() - return the previously saved
 * node ["this node"] from which to free a huge page.  Advance the
 * next node id whether or not we find a free huge page to free so
 * that the next attempt to free addresses the next node.
 */
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);

	return nid;
}

#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_alloc(hs, mask)) || 1);	\
		nr_nodes--)

#define for_each_node_mask_to_free(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
		nr_nodes--)

static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
{
	struct page *page;
	int nr_nodes, node;
	int ret = 0;

	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
		page = alloc_fresh_huge_page_node(h, node);
		if (page) {
			ret = 1;
			break;
		}
	}

	if (ret)
		count_vm_event(HTLB_BUDDY_PGALLOC);
	else
		count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);

	return ret;
}

/*
 * Free huge page from pool from next node to free.
 * Attempt to keep persistent huge pages more or less
 * balanced over allowed nodes.
 * Called with hugetlb_lock locked.
 */
static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
							 bool acct_surplus)
{
	int nr_nodes, node;
	int ret = 0;

	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
		/*
		 * If we're returning unused surplus pages, only examine
		 * nodes with surplus pages.
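		 * Otherwise any node with free huge pages will do.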
		 */
		if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
		    !list_empty(&h->hugepage_freelists[node])) {
			struct page *page =
				list_entry(h->hugepage_freelists[node].next,
					  struct page, lru);
			list_del(&page->lru);
			h->free_huge_pages--;
			h->free_huge_pages_node[node]--;
			if (acct_surplus) {
				h->surplus_huge_pages--;
				h->surplus_huge_pages_node[node]--;
			}
			update_and_free_page(h, page);
			ret = 1;
			break;
		}
	}

	return ret;
}

static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
{
	struct page *page;
	unsigned int r_nid;

	if (h->order >= MAX_ORDER)
		return NULL;

	/*
	 * Assume we will successfully allocate the surplus page to
	 * prevent racing processes from causing the surplus to exceed
	 * overcommit
	 *
	 * This however introduces a different race, where a process B
	 * tries to grow the static hugepage pool while alloc_pages() is
	 * called by process A. B will only examine the per-node
	 * counters in determining if surplus huge pages can be
	 * converted to normal huge pages in adjust_pool_surplus(). A
	 * won't be able to increment the per-node counter, until the
	 * lock is dropped by B, but B doesn't drop hugetlb_lock until
	 * no more huge pages can be converted from surplus to normal
	 * state (and doesn't try to convert again). Thus, we have a
	 * case where a surplus huge page exists, the pool is grown, and
	 * the surplus huge page still exists after, even though it
	 * should just have been converted to a normal huge page. This
	 * does not leak memory, though, as the hugepage will be freed
	 * once it is out of use. It also does not allow the counters to
	 * go out of whack in adjust_pool_surplus() as we don't modify
	 * the node values until we've gotten the hugepage and only the
	 * per-node value is checked there.
	 */
	spin_lock(&hugetlb_lock);
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
		spin_unlock(&hugetlb_lock);
		return NULL;
	} else {
		h->nr_huge_pages++;
		h->surplus_huge_pages++;
	}
	spin_unlock(&hugetlb_lock);

	if (nid == NUMA_NO_NODE)
		page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
				   __GFP_REPEAT|__GFP_NOWARN,
				   huge_page_order(h));
	else
		page = alloc_pages_exact_node(nid,
			htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
			__GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));

	if (page && arch_prepare_hugepage(page)) {
		__free_pages(page, huge_page_order(h));
		page = NULL;
	}

	spin_lock(&hugetlb_lock);
	if (page) {
		INIT_LIST_HEAD(&page->lru);
		r_nid = page_to_nid(page);
		set_compound_page_dtor(page, free_huge_page);
		set_hugetlb_cgroup(page, NULL);
		/*
		 * We incremented the global counters already
		 */
		h->nr_huge_pages_node[r_nid]++;
		h->surplus_huge_pages_node[r_nid]++;
		__count_vm_event(HTLB_BUDDY_PGALLOC);
	} else {
		h->nr_huge_pages--;
		h->surplus_huge_pages--;
		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
	}
	spin_unlock(&hugetlb_lock);

	return page;
}

/*
 * This allocation function is useful in the context where vma is irrelevant.
 * E.g. soft-offlining uses this function because it only cares about the
 * physical address of the error page.
 */
struct page *alloc_huge_page_node(struct hstate *h, int nid)
{
	struct page *page = NULL;

	spin_lock(&hugetlb_lock);
	if (h->free_huge_pages - h->resv_huge_pages > 0)
		page = dequeue_huge_page_node(h, nid);
	spin_unlock(&hugetlb_lock);

	if (!page)
		page = alloc_buddy_huge_page(h, nid);

	return page;
}

/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.
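 * Called with hugetlb_lock held; the lock is dropped and reacquired around
 * the buddy allocations.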
 */
static int gather_surplus_pages(struct hstate *h, int delta)
{
	struct list_head surplus_list;
	struct page *page, *tmp;
	int ret, i;
	int needed, allocated;
	bool alloc_ok = true;

	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
	if (needed <= 0) {
		h->resv_huge_pages += delta;
		return 0;
	}

	allocated = 0;
	INIT_LIST_HEAD(&surplus_list);

	ret = -ENOMEM;
retry:
	spin_unlock(&hugetlb_lock);
	for (i = 0; i < needed; i++) {
		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
		if (!page) {
			alloc_ok = false;
			break;
		}
		list_add(&page->lru, &surplus_list);
	}
	allocated += i;

	/*
	 * After retaking hugetlb_lock, we need to recalculate 'needed'
	 * because either resv_huge_pages or free_huge_pages may have changed.
	 */
	spin_lock(&hugetlb_lock);
	needed = (h->resv_huge_pages + delta) -
			(h->free_huge_pages + allocated);
	if (needed > 0) {
		if (alloc_ok)
			goto retry;
		/*
		 * We were not able to allocate enough pages to
		 * satisfy the entire reservation so we free what
		 * we've allocated so far.
		 */
		goto free;
	}
	/*
	 * The surplus_list now contains _at_least_ the number of extra pages
	 * needed to accommodate the reservation.  Add the appropriate number
	 * of pages to the hugetlb pool and free the extras back to the buddy
	 * allocator.  Commit the entire reservation here to prevent another
	 * process from stealing the pages as they are added to the pool but
	 * before they are reserved.
	 */
	needed += allocated;
	h->resv_huge_pages += delta;
	ret = 0;

	/* Free the needed pages to the hugetlb pool */
	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
		if ((--needed) < 0)
			break;
		/*
		 * This page is now managed by the hugetlb allocator and has
		 * no users -- drop the buddy allocator's reference.
		 */
		put_page_testzero(page);
		VM_BUG_ON(page_count(page));
		enqueue_huge_page(h, page);
	}
free:
	spin_unlock(&hugetlb_lock);

	/* Free unnecessary surplus pages to the buddy allocator */
	list_for_each_entry_safe(page, tmp, &surplus_list, lru)
		put_page(page);
	spin_lock(&hugetlb_lock);

	return ret;
}

/*
 * When releasing a hugetlb pool reservation, any surplus pages that were
 * allocated to satisfy the reservation must be explicitly freed if they were
 * never used.
 * Called with hugetlb_lock held.
 */
static void return_unused_surplus_pages(struct hstate *h,
					unsigned long unused_resv_pages)
{
	unsigned long nr_pages;

	/* Uncommit the reservation */
	h->resv_huge_pages -= unused_resv_pages;

	/* Cannot return gigantic pages currently */
	if (h->order >= MAX_ORDER)
		return;

	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);

	/*
	 * We want to release as many surplus pages as possible, spread
	 * evenly across all nodes with memory. Iterate across these nodes
	 * until we can no longer free unreserved surplus pages. This occurs
	 * when the nodes with surplus pages have no free pages.
	 * free_pool_huge_page() will balance the freed pages across the
	 * on-line nodes with memory and will handle the hstate accounting.
	 */
	while (nr_pages--) {
		if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
			break;
	}
}

/*
 * Determine if the huge page at addr within the vma has an associated
 * reservation.  Where it does not we will need to logically increase
 * reservation and actually increase subpool usage before an allocation
 * can occur.  Where any new reservation would be required the
 * reservation change is prepared, but not committed.  Once the page
 * has been allocated from the subpool and instantiated the change should
 * be committed via vma_commit_reservation.  No action is required on
 * failure.
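 * The return value, when positive, is the number of huge pages that must be
 * charged to the subpool before the allocation can proceed.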
 */
static long vma_needs_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;

	if (vma->vm_flags & VM_MAYSHARE) {
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		return region_chg(&inode->i_mapping->private_list,
							idx, idx + 1);

	} else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		return 1;

	} else {
		long err;
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		struct resv_map *resv = vma_resv_map(vma);

		err = region_chg(&resv->regions, idx, idx + 1);
		if (err < 0)
			return err;
		return 0;
	}
}
static void vma_commit_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;

	if (vma->vm_flags & VM_MAYSHARE) {
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		region_add(&inode->i_mapping->private_list, idx, idx + 1);

	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		struct resv_map *resv = vma_resv_map(vma);

		/* Mark this page used in the map. */
		region_add(&resv->regions, idx, idx + 1);
	}
}

static struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr, int avoid_reserve)
{
	struct hugepage_subpool *spool = subpool_vma(vma);
	struct hstate *h = hstate_vma(vma);
	struct page *page;
	long chg;
	int ret, idx;
	struct hugetlb_cgroup *h_cg;

	idx = hstate_index(h);
	/*
	 * Processes that did not create the mapping will have no
	 * reserves and will not have accounted against subpool
	 * limit. Check that the subpool limit can be made before
	 * satisfying the allocation.  MAP_NORESERVE mappings may also
	 * need pages and subpool limit allocated if no reserve
	 * mapping overlaps.
1163a1e78772SMel Gorman */ 1164a5516438SAndi Kleen chg = vma_needs_reservation(h, vma, addr); 1165c37f9fb1SAndy Whitcroft if (chg < 0) 116676dcee75SAneesh Kumar K.V return ERR_PTR(-ENOMEM); 1167c37f9fb1SAndy Whitcroft if (chg) 116890481622SDavid Gibson if (hugepage_subpool_get_pages(spool, chg)) 116976dcee75SAneesh Kumar K.V return ERR_PTR(-ENOSPC); 117090d8b7e6SAdam Litke 11716d76dcf4SAneesh Kumar K.V ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg); 11726d76dcf4SAneesh Kumar K.V if (ret) { 11736d76dcf4SAneesh Kumar K.V hugepage_subpool_put_pages(spool, chg); 11746d76dcf4SAneesh Kumar K.V return ERR_PTR(-ENOSPC); 11756d76dcf4SAneesh Kumar K.V } 1176a1e78772SMel Gorman spin_lock(&hugetlb_lock); 1177af0ed73eSJoonsoo Kim page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, chg); 117881a6fcaeSJoonsoo Kim if (!page) { 117994ae8ba7SAneesh Kumar K.V spin_unlock(&hugetlb_lock); 1180bf50bab2SNaoya Horiguchi page = alloc_buddy_huge_page(h, NUMA_NO_NODE); 1181a1e78772SMel Gorman if (!page) { 11826d76dcf4SAneesh Kumar K.V hugetlb_cgroup_uncharge_cgroup(idx, 11836d76dcf4SAneesh Kumar K.V pages_per_huge_page(h), 11846d76dcf4SAneesh Kumar K.V h_cg); 118590481622SDavid Gibson hugepage_subpool_put_pages(spool, chg); 118676dcee75SAneesh Kumar K.V return ERR_PTR(-ENOSPC); 1187a1e78772SMel Gorman } 118879dbb236SAneesh Kumar K.V spin_lock(&hugetlb_lock); 118979dbb236SAneesh Kumar K.V list_move(&page->lru, &h->hugepage_activelist); 119081a6fcaeSJoonsoo Kim /* Fall through */ 1191a1e78772SMel Gorman } 119281a6fcaeSJoonsoo Kim hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page); 119381a6fcaeSJoonsoo Kim spin_unlock(&hugetlb_lock); 1194a1e78772SMel Gorman 119590481622SDavid Gibson set_page_private(page, (unsigned long)spool); 1196a1e78772SMel Gorman 1197a5516438SAndi Kleen vma_commit_reservation(h, vma, addr); 11987893d1d5SAdam Litke return page; 1199b45b5bd6SDavid Gibson } 1200b45b5bd6SDavid Gibson 120191f47662SCyrill Gorcunov int __weak alloc_bootmem_huge_page(struct hstate *h) 1202aa888a74SAndi Kleen { 1203aa888a74SAndi Kleen struct huge_bootmem_page *m; 1204b2261026SJoonsoo Kim int nr_nodes, node; 1205aa888a74SAndi Kleen 1206b2261026SJoonsoo Kim for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) { 1207aa888a74SAndi Kleen void *addr; 1208aa888a74SAndi Kleen 1209b2261026SJoonsoo Kim addr = __alloc_bootmem_node_nopanic(NODE_DATA(node), 1210aa888a74SAndi Kleen huge_page_size(h), huge_page_size(h), 0); 1211aa888a74SAndi Kleen 1212aa888a74SAndi Kleen if (addr) { 1213aa888a74SAndi Kleen /* 1214aa888a74SAndi Kleen * Use the beginning of the huge page to store the 1215aa888a74SAndi Kleen * huge_bootmem_page struct (until gather_bootmem 1216aa888a74SAndi Kleen * puts them into the mem_map). 
1217aa888a74SAndi Kleen */ 1218aa888a74SAndi Kleen m = addr; 1219aa888a74SAndi Kleen goto found; 1220aa888a74SAndi Kleen } 1221aa888a74SAndi Kleen } 1222aa888a74SAndi Kleen return 0; 1223aa888a74SAndi Kleen 1224aa888a74SAndi Kleen found: 1225aa888a74SAndi Kleen BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1)); 1226aa888a74SAndi Kleen /* Put them into a private list first because mem_map is not up yet */ 1227aa888a74SAndi Kleen list_add(&m->list, &huge_boot_pages); 1228aa888a74SAndi Kleen m->hstate = h; 1229aa888a74SAndi Kleen return 1; 1230aa888a74SAndi Kleen } 1231aa888a74SAndi Kleen 123218229df5SAndy Whitcroft static void prep_compound_huge_page(struct page *page, int order) 123318229df5SAndy Whitcroft { 123418229df5SAndy Whitcroft if (unlikely(order > (MAX_ORDER - 1))) 123518229df5SAndy Whitcroft prep_compound_gigantic_page(page, order); 123618229df5SAndy Whitcroft else 123718229df5SAndy Whitcroft prep_compound_page(page, order); 123818229df5SAndy Whitcroft } 123918229df5SAndy Whitcroft 1240aa888a74SAndi Kleen /* Put bootmem huge pages into the standard lists after mem_map is up */ 1241aa888a74SAndi Kleen static void __init gather_bootmem_prealloc(void) 1242aa888a74SAndi Kleen { 1243aa888a74SAndi Kleen struct huge_bootmem_page *m; 1244aa888a74SAndi Kleen 1245aa888a74SAndi Kleen list_for_each_entry(m, &huge_boot_pages, list) { 1246aa888a74SAndi Kleen struct hstate *h = m->hstate; 1247ee8f248dSBecky Bruce struct page *page; 1248ee8f248dSBecky Bruce 1249ee8f248dSBecky Bruce #ifdef CONFIG_HIGHMEM 1250ee8f248dSBecky Bruce page = pfn_to_page(m->phys >> PAGE_SHIFT); 1251ee8f248dSBecky Bruce free_bootmem_late((unsigned long)m, 1252ee8f248dSBecky Bruce sizeof(struct huge_bootmem_page)); 1253ee8f248dSBecky Bruce #else 1254ee8f248dSBecky Bruce page = virt_to_page(m); 1255ee8f248dSBecky Bruce #endif 1256aa888a74SAndi Kleen __ClearPageReserved(page); 1257aa888a74SAndi Kleen WARN_ON(page_count(page) != 1); 125818229df5SAndy Whitcroft prep_compound_huge_page(page, h->order); 1259aa888a74SAndi Kleen prep_new_huge_page(h, page, page_to_nid(page)); 1260b0320c7bSRafael Aquini /* 1261b0320c7bSRafael Aquini * If we had gigantic hugepages allocated at boot time, we need 1262b0320c7bSRafael Aquini * to restore the 'stolen' pages to totalram_pages in order to 1263b0320c7bSRafael Aquini * fix confusing memory reports from free(1) and other 1264b0320c7bSRafael Aquini * side-effects, like CommitLimit going negative.
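 *
 * [Editor's note: not part of the original source] Worked example, assuming
 * 4 KB base pages: a 1 GB gigantic page has order 18, so the call below
 * hands 1 << 18 = 262144 base pages back to totalram_pages, matching what
 * the boot-time allocation had taken out of the managed count.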
1265b0320c7bSRafael Aquini */ 1266b0320c7bSRafael Aquini if (h->order > (MAX_ORDER - 1)) 12673dcc0571SJiang Liu adjust_managed_page_count(page, 1 << h->order); 1268aa888a74SAndi Kleen } 1269aa888a74SAndi Kleen } 1270aa888a74SAndi Kleen 12718faa8b07SAndi Kleen static void __init hugetlb_hstate_alloc_pages(struct hstate *h) 12721da177e4SLinus Torvalds { 12731da177e4SLinus Torvalds unsigned long i; 12741da177e4SLinus Torvalds 1275e5ff2159SAndi Kleen for (i = 0; i < h->max_huge_pages; ++i) { 1276aa888a74SAndi Kleen if (h->order >= MAX_ORDER) { 1277aa888a74SAndi Kleen if (!alloc_bootmem_huge_page(h)) 1278aa888a74SAndi Kleen break; 12799b5e5d0fSLee Schermerhorn } else if (!alloc_fresh_huge_page(h, 12808cebfcd0SLai Jiangshan &node_states[N_MEMORY])) 12811da177e4SLinus Torvalds break; 12821da177e4SLinus Torvalds } 12838faa8b07SAndi Kleen h->max_huge_pages = i; 1284e5ff2159SAndi Kleen } 1285e5ff2159SAndi Kleen 1286e5ff2159SAndi Kleen static void __init hugetlb_init_hstates(void) 1287e5ff2159SAndi Kleen { 1288e5ff2159SAndi Kleen struct hstate *h; 1289e5ff2159SAndi Kleen 1290e5ff2159SAndi Kleen for_each_hstate(h) { 12918faa8b07SAndi Kleen /* oversize hugepages were init'ed in early boot */ 12928faa8b07SAndi Kleen if (h->order < MAX_ORDER) 12938faa8b07SAndi Kleen hugetlb_hstate_alloc_pages(h); 1294e5ff2159SAndi Kleen } 1295e5ff2159SAndi Kleen } 1296e5ff2159SAndi Kleen 12974abd32dbSAndi Kleen static char * __init memfmt(char *buf, unsigned long n) 12984abd32dbSAndi Kleen { 12994abd32dbSAndi Kleen if (n >= (1UL << 30)) 13004abd32dbSAndi Kleen sprintf(buf, "%lu GB", n >> 30); 13014abd32dbSAndi Kleen else if (n >= (1UL << 20)) 13024abd32dbSAndi Kleen sprintf(buf, "%lu MB", n >> 20); 13034abd32dbSAndi Kleen else 13044abd32dbSAndi Kleen sprintf(buf, "%lu KB", n >> 10); 13054abd32dbSAndi Kleen return buf; 13064abd32dbSAndi Kleen } 13074abd32dbSAndi Kleen 1308e5ff2159SAndi Kleen static void __init report_hugepages(void) 1309e5ff2159SAndi Kleen { 1310e5ff2159SAndi Kleen struct hstate *h; 1311e5ff2159SAndi Kleen 1312e5ff2159SAndi Kleen for_each_hstate(h) { 13134abd32dbSAndi Kleen char buf[32]; 1314ffb22af5SAndrew Morton pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n", 13154abd32dbSAndi Kleen memfmt(buf, huge_page_size(h)), 13164abd32dbSAndi Kleen h->free_huge_pages); 1317e5ff2159SAndi Kleen } 1318e5ff2159SAndi Kleen } 1319e5ff2159SAndi Kleen 13201da177e4SLinus Torvalds #ifdef CONFIG_HIGHMEM 13216ae11b27SLee Schermerhorn static void try_to_free_low(struct hstate *h, unsigned long count, 13226ae11b27SLee Schermerhorn nodemask_t *nodes_allowed) 13231da177e4SLinus Torvalds { 13244415cc8dSChristoph Lameter int i; 13254415cc8dSChristoph Lameter 1326aa888a74SAndi Kleen if (h->order >= MAX_ORDER) 1327aa888a74SAndi Kleen return; 1328aa888a74SAndi Kleen 13296ae11b27SLee Schermerhorn for_each_node_mask(i, *nodes_allowed) { 13301da177e4SLinus Torvalds struct page *page, *next; 1331a5516438SAndi Kleen struct list_head *freel = &h->hugepage_freelists[i]; 1332a5516438SAndi Kleen list_for_each_entry_safe(page, next, freel, lru) { 1333a5516438SAndi Kleen if (count >= h->nr_huge_pages) 13346b0c880dSAdam Litke return; 13351da177e4SLinus Torvalds if (PageHighMem(page)) 13361da177e4SLinus Torvalds continue; 13371da177e4SLinus Torvalds list_del(&page->lru); 1338e5ff2159SAndi Kleen update_and_free_page(h, page); 1339a5516438SAndi Kleen h->free_huge_pages--; 1340a5516438SAndi Kleen h->free_huge_pages_node[page_to_nid(page)]--; 13411da177e4SLinus Torvalds } 13421da177e4SLinus Torvalds } 13431da177e4SLinus Torvalds 
} 13441da177e4SLinus Torvalds #else 13456ae11b27SLee Schermerhorn static inline void try_to_free_low(struct hstate *h, unsigned long count, 13466ae11b27SLee Schermerhorn nodemask_t *nodes_allowed) 13471da177e4SLinus Torvalds { 13481da177e4SLinus Torvalds } 13491da177e4SLinus Torvalds #endif 13501da177e4SLinus Torvalds 135120a0307cSWu Fengguang /* 135220a0307cSWu Fengguang * Increment or decrement surplus_huge_pages. Keep node-specific counters 135320a0307cSWu Fengguang * balanced by operating on them in a round-robin fashion. 135420a0307cSWu Fengguang * Returns 1 if an adjustment was made. 135520a0307cSWu Fengguang */ 13566ae11b27SLee Schermerhorn static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed, 13576ae11b27SLee Schermerhorn int delta) 135820a0307cSWu Fengguang { 1359b2261026SJoonsoo Kim int nr_nodes, node; 136020a0307cSWu Fengguang 136120a0307cSWu Fengguang VM_BUG_ON(delta != -1 && delta != 1); 136220a0307cSWu Fengguang 1363e8c5c824SLee Schermerhorn if (delta < 0) { 1364b2261026SJoonsoo Kim for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) { 1365b2261026SJoonsoo Kim if (h->surplus_huge_pages_node[node]) 1366b2261026SJoonsoo Kim goto found; 1367b2261026SJoonsoo Kim } 1368b2261026SJoonsoo Kim } else { 1369b2261026SJoonsoo Kim for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { 1370b2261026SJoonsoo Kim if (h->surplus_huge_pages_node[node] < 1371b2261026SJoonsoo Kim h->nr_huge_pages_node[node]) 1372b2261026SJoonsoo Kim goto found; 1373e8c5c824SLee Schermerhorn } 13749a76db09SLee Schermerhorn } 1375b2261026SJoonsoo Kim return 0; 137620a0307cSWu Fengguang 1377b2261026SJoonsoo Kim found: 137820a0307cSWu Fengguang h->surplus_huge_pages += delta; 1379b2261026SJoonsoo Kim h->surplus_huge_pages_node[node] += delta; 1380b2261026SJoonsoo Kim return 1; 138120a0307cSWu Fengguang } 138220a0307cSWu Fengguang 1383a5516438SAndi Kleen #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages) 13846ae11b27SLee Schermerhorn static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count, 13856ae11b27SLee Schermerhorn nodemask_t *nodes_allowed) 13861da177e4SLinus Torvalds { 13877893d1d5SAdam Litke unsigned long min_count, ret; 13881da177e4SLinus Torvalds 1389aa888a74SAndi Kleen if (h->order >= MAX_ORDER) 1390aa888a74SAndi Kleen return h->max_huge_pages; 1391aa888a74SAndi Kleen 13927893d1d5SAdam Litke /* 13937893d1d5SAdam Litke * Increase the pool size 13947893d1d5SAdam Litke * First take pages out of surplus state. Then make up the 13957893d1d5SAdam Litke * remaining difference by allocating fresh huge pages. 1396d1c3fb1fSNishanth Aravamudan * 1397d1c3fb1fSNishanth Aravamudan * We might race with alloc_buddy_huge_page() here and be unable 1398d1c3fb1fSNishanth Aravamudan * to convert a surplus huge page to a normal huge page. That is 1399d1c3fb1fSNishanth Aravamudan * not critical, though, it just means the overall size of the 1400d1c3fb1fSNishanth Aravamudan * pool might be one hugepage larger than it needs to be, but 1401d1c3fb1fSNishanth Aravamudan * within all the constraints specified by the sysctls. 
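 *
 * [Editor's note: not part of the original source] Worked example of the
 * first loop below: with nr_huge_pages = 30 and surplus_huge_pages = 5,
 * persistent_huge_pages(h) is 25; a request for count = 28 converts three
 * surplus pages into persistent ones (surplus drops to 2) and the second
 * loop then has nothing left to allocate.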
14027893d1d5SAdam Litke */ 14031da177e4SLinus Torvalds spin_lock(&hugetlb_lock); 1404a5516438SAndi Kleen while (h->surplus_huge_pages && count > persistent_huge_pages(h)) { 14056ae11b27SLee Schermerhorn if (!adjust_pool_surplus(h, nodes_allowed, -1)) 14067893d1d5SAdam Litke break; 14077893d1d5SAdam Litke } 14087893d1d5SAdam Litke 1409a5516438SAndi Kleen while (count > persistent_huge_pages(h)) { 14107893d1d5SAdam Litke /* 14117893d1d5SAdam Litke * If this allocation races such that we no longer need the 14127893d1d5SAdam Litke * page, free_huge_page will handle it by freeing the page 14137893d1d5SAdam Litke * and reducing the surplus. 14147893d1d5SAdam Litke */ 14157893d1d5SAdam Litke spin_unlock(&hugetlb_lock); 14166ae11b27SLee Schermerhorn ret = alloc_fresh_huge_page(h, nodes_allowed); 14177893d1d5SAdam Litke spin_lock(&hugetlb_lock); 14187893d1d5SAdam Litke if (!ret) 14197893d1d5SAdam Litke goto out; 14207893d1d5SAdam Litke 1421536240f2SMel Gorman /* Bail for signals. Probably ctrl-c from user */ 1422536240f2SMel Gorman if (signal_pending(current)) 1423536240f2SMel Gorman goto out; 14247893d1d5SAdam Litke } 14257893d1d5SAdam Litke 14267893d1d5SAdam Litke /* 14277893d1d5SAdam Litke * Decrease the pool size 14287893d1d5SAdam Litke * First return free pages to the buddy allocator (being careful 14297893d1d5SAdam Litke * to keep enough around to satisfy reservations). Then place 14307893d1d5SAdam Litke * pages into surplus state as needed so the pool will shrink 14317893d1d5SAdam Litke * to the desired size as pages become free. 1432d1c3fb1fSNishanth Aravamudan * 1433d1c3fb1fSNishanth Aravamudan * By placing pages into the surplus state independent of the 1434d1c3fb1fSNishanth Aravamudan * overcommit value, we are allowing the surplus pool size to 1435d1c3fb1fSNishanth Aravamudan * exceed overcommit. There are few sane options here. Since 1436d1c3fb1fSNishanth Aravamudan * alloc_buddy_huge_page() is checking the global counter, 1437d1c3fb1fSNishanth Aravamudan * though, we'll note that we're not allowed to exceed surplus 1438d1c3fb1fSNishanth Aravamudan * and won't grow the pool anywhere else. Not until one of the 1439d1c3fb1fSNishanth Aravamudan * sysctls are changed, or the surplus pages go out of use. 
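 *
 * [Editor's note: not part of the original source] Worked example of the
 * min_count computation below: with resv_huge_pages = 10, nr_huge_pages = 50
 * and free_huge_pages = 30, min_count = 10 + 50 - 30 = 30, so even a request
 * for count = 20 only shrinks the pool to 30 pages: the 20 pages currently
 * in use plus the 10 reserved ones must keep backing pages available.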
14407893d1d5SAdam Litke */ 1441a5516438SAndi Kleen min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages; 14426b0c880dSAdam Litke min_count = max(count, min_count); 14436ae11b27SLee Schermerhorn try_to_free_low(h, min_count, nodes_allowed); 1444a5516438SAndi Kleen while (min_count < persistent_huge_pages(h)) { 14456ae11b27SLee Schermerhorn if (!free_pool_huge_page(h, nodes_allowed, 0)) 14461da177e4SLinus Torvalds break; 14471da177e4SLinus Torvalds } 1448a5516438SAndi Kleen while (count < persistent_huge_pages(h)) { 14496ae11b27SLee Schermerhorn if (!adjust_pool_surplus(h, nodes_allowed, 1)) 14507893d1d5SAdam Litke break; 14517893d1d5SAdam Litke } 14527893d1d5SAdam Litke out: 1453a5516438SAndi Kleen ret = persistent_huge_pages(h); 14541da177e4SLinus Torvalds spin_unlock(&hugetlb_lock); 14557893d1d5SAdam Litke return ret; 14561da177e4SLinus Torvalds } 14571da177e4SLinus Torvalds 1458a3437870SNishanth Aravamudan #define HSTATE_ATTR_RO(_name) \ 1459a3437870SNishanth Aravamudan static struct kobj_attribute _name##_attr = __ATTR_RO(_name) 1460a3437870SNishanth Aravamudan 1461a3437870SNishanth Aravamudan #define HSTATE_ATTR(_name) \ 1462a3437870SNishanth Aravamudan static struct kobj_attribute _name##_attr = \ 1463a3437870SNishanth Aravamudan __ATTR(_name, 0644, _name##_show, _name##_store) 1464a3437870SNishanth Aravamudan 1465a3437870SNishanth Aravamudan static struct kobject *hugepages_kobj; 1466a3437870SNishanth Aravamudan static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; 1467a3437870SNishanth Aravamudan 14689a305230SLee Schermerhorn static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp); 14699a305230SLee Schermerhorn 14709a305230SLee Schermerhorn static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp) 1471a3437870SNishanth Aravamudan { 1472a3437870SNishanth Aravamudan int i; 14739a305230SLee Schermerhorn 1474a3437870SNishanth Aravamudan for (i = 0; i < HUGE_MAX_HSTATE; i++) 14759a305230SLee Schermerhorn if (hstate_kobjs[i] == kobj) { 14769a305230SLee Schermerhorn if (nidp) 14779a305230SLee Schermerhorn *nidp = NUMA_NO_NODE; 1478a3437870SNishanth Aravamudan return &hstates[i]; 14799a305230SLee Schermerhorn } 14809a305230SLee Schermerhorn 14819a305230SLee Schermerhorn return kobj_to_node_hstate(kobj, nidp); 1482a3437870SNishanth Aravamudan } 1483a3437870SNishanth Aravamudan 148406808b08SLee Schermerhorn static ssize_t nr_hugepages_show_common(struct kobject *kobj, 1485a3437870SNishanth Aravamudan struct kobj_attribute *attr, char *buf) 1486a3437870SNishanth Aravamudan { 14879a305230SLee Schermerhorn struct hstate *h; 14889a305230SLee Schermerhorn unsigned long nr_huge_pages; 14899a305230SLee Schermerhorn int nid; 14909a305230SLee Schermerhorn 14919a305230SLee Schermerhorn h = kobj_to_hstate(kobj, &nid); 14929a305230SLee Schermerhorn if (nid == NUMA_NO_NODE) 14939a305230SLee Schermerhorn nr_huge_pages = h->nr_huge_pages; 14949a305230SLee Schermerhorn else 14959a305230SLee Schermerhorn nr_huge_pages = h->nr_huge_pages_node[nid]; 14969a305230SLee Schermerhorn 14979a305230SLee Schermerhorn return sprintf(buf, "%lu\n", nr_huge_pages); 1498a3437870SNishanth Aravamudan } 1499adbe8726SEric B Munson 150006808b08SLee Schermerhorn static ssize_t nr_hugepages_store_common(bool obey_mempolicy, 150106808b08SLee Schermerhorn struct kobject *kobj, struct kobj_attribute *attr, 150206808b08SLee Schermerhorn const char *buf, size_t len) 1503a3437870SNishanth Aravamudan { 1504a3437870SNishanth Aravamudan int err; 15059a305230SLee Schermerhorn int nid; 
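/*
 * [Editor's note: not part of the original source] NODEMASK_ALLOC is used
 * below rather than an on-stack nodemask_t because the mask can be large on
 * configurations with many possible nodes; on small configurations it
 * degenerates to a stack variable. If the allocation fails, the code below
 * simply falls back to node_states[N_MEMORY].
 */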
150606808b08SLee Schermerhorn unsigned long count; 15079a305230SLee Schermerhorn struct hstate *h; 1508bad44b5bSDavid Rientjes NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY); 1509a3437870SNishanth Aravamudan 15103dbb95f7SJingoo Han err = kstrtoul(buf, 10, &count); 151173ae31e5SEric B Munson if (err) 1512adbe8726SEric B Munson goto out; 1513a3437870SNishanth Aravamudan 15149a305230SLee Schermerhorn h = kobj_to_hstate(kobj, &nid); 1515adbe8726SEric B Munson if (h->order >= MAX_ORDER) { 1516adbe8726SEric B Munson err = -EINVAL; 1517adbe8726SEric B Munson goto out; 1518adbe8726SEric B Munson } 1519adbe8726SEric B Munson 15209a305230SLee Schermerhorn if (nid == NUMA_NO_NODE) { 15219a305230SLee Schermerhorn /* 15229a305230SLee Schermerhorn * global hstate attribute 15239a305230SLee Schermerhorn */ 15249a305230SLee Schermerhorn if (!(obey_mempolicy && 15259a305230SLee Schermerhorn init_nodemask_of_mempolicy(nodes_allowed))) { 152606808b08SLee Schermerhorn NODEMASK_FREE(nodes_allowed); 15278cebfcd0SLai Jiangshan nodes_allowed = &node_states[N_MEMORY]; 152806808b08SLee Schermerhorn } 15299a305230SLee Schermerhorn } else if (nodes_allowed) { 15309a305230SLee Schermerhorn /* 15319a305230SLee Schermerhorn * per node hstate attribute: adjust count to global, 15329a305230SLee Schermerhorn * but restrict alloc/free to the specified node. 15339a305230SLee Schermerhorn */ 15349a305230SLee Schermerhorn count += h->nr_huge_pages - h->nr_huge_pages_node[nid]; 15359a305230SLee Schermerhorn init_nodemask_of_node(nodes_allowed, nid); 15369a305230SLee Schermerhorn } else 15378cebfcd0SLai Jiangshan nodes_allowed = &node_states[N_MEMORY]; 15389a305230SLee Schermerhorn 153906808b08SLee Schermerhorn h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed); 1540a3437870SNishanth Aravamudan 15418cebfcd0SLai Jiangshan if (nodes_allowed != &node_states[N_MEMORY]) 154206808b08SLee Schermerhorn NODEMASK_FREE(nodes_allowed); 154306808b08SLee Schermerhorn 154406808b08SLee Schermerhorn return len; 1545adbe8726SEric B Munson out: 1546adbe8726SEric B Munson NODEMASK_FREE(nodes_allowed); 1547adbe8726SEric B Munson return err; 154806808b08SLee Schermerhorn } 154906808b08SLee Schermerhorn 155006808b08SLee Schermerhorn static ssize_t nr_hugepages_show(struct kobject *kobj, 155106808b08SLee Schermerhorn struct kobj_attribute *attr, char *buf) 155206808b08SLee Schermerhorn { 155306808b08SLee Schermerhorn return nr_hugepages_show_common(kobj, attr, buf); 155406808b08SLee Schermerhorn } 155506808b08SLee Schermerhorn 155606808b08SLee Schermerhorn static ssize_t nr_hugepages_store(struct kobject *kobj, 155706808b08SLee Schermerhorn struct kobj_attribute *attr, const char *buf, size_t len) 155806808b08SLee Schermerhorn { 155906808b08SLee Schermerhorn return nr_hugepages_store_common(false, kobj, attr, buf, len); 1560a3437870SNishanth Aravamudan } 1561a3437870SNishanth Aravamudan HSTATE_ATTR(nr_hugepages); 1562a3437870SNishanth Aravamudan 156306808b08SLee Schermerhorn #ifdef CONFIG_NUMA 156406808b08SLee Schermerhorn 156506808b08SLee Schermerhorn /* 156606808b08SLee Schermerhorn * hstate attribute for optionally mempolicy-based constraint on persistent 156706808b08SLee Schermerhorn * huge page alloc/free. 
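 *
 * [Editor's note: not part of the original source] Hedged usage sketch,
 * assuming a 2 MB hstate: the attribute appears as
 * /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages_mempolicy, and a
 * write such as
 *
 *	numactl --membind=1 sh -c \
 *		'echo 64 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages_mempolicy'
 *
 * adjusts the pool using only the nodes allowed by the writing task's
 * memory policy, whereas plain nr_hugepages ignores that policy.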
156806808b08SLee Schermerhorn */ 156906808b08SLee Schermerhorn static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj, 157006808b08SLee Schermerhorn struct kobj_attribute *attr, char *buf) 157106808b08SLee Schermerhorn { 157206808b08SLee Schermerhorn return nr_hugepages_show_common(kobj, attr, buf); 157306808b08SLee Schermerhorn } 157406808b08SLee Schermerhorn 157506808b08SLee Schermerhorn static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj, 157606808b08SLee Schermerhorn struct kobj_attribute *attr, const char *buf, size_t len) 157706808b08SLee Schermerhorn { 157806808b08SLee Schermerhorn return nr_hugepages_store_common(true, kobj, attr, buf, len); 157906808b08SLee Schermerhorn } 158006808b08SLee Schermerhorn HSTATE_ATTR(nr_hugepages_mempolicy); 158106808b08SLee Schermerhorn #endif 158206808b08SLee Schermerhorn 158306808b08SLee Schermerhorn 1584a3437870SNishanth Aravamudan static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj, 1585a3437870SNishanth Aravamudan struct kobj_attribute *attr, char *buf) 1586a3437870SNishanth Aravamudan { 15879a305230SLee Schermerhorn struct hstate *h = kobj_to_hstate(kobj, NULL); 1588a3437870SNishanth Aravamudan return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages); 1589a3437870SNishanth Aravamudan } 1590adbe8726SEric B Munson 1591a3437870SNishanth Aravamudan static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj, 1592a3437870SNishanth Aravamudan struct kobj_attribute *attr, const char *buf, size_t count) 1593a3437870SNishanth Aravamudan { 1594a3437870SNishanth Aravamudan int err; 1595a3437870SNishanth Aravamudan unsigned long input; 15969a305230SLee Schermerhorn struct hstate *h = kobj_to_hstate(kobj, NULL); 1597a3437870SNishanth Aravamudan 1598adbe8726SEric B Munson if (h->order >= MAX_ORDER) 1599adbe8726SEric B Munson return -EINVAL; 1600adbe8726SEric B Munson 16013dbb95f7SJingoo Han err = kstrtoul(buf, 10, &input); 1602a3437870SNishanth Aravamudan if (err) 160373ae31e5SEric B Munson return err; 1604a3437870SNishanth Aravamudan 1605a3437870SNishanth Aravamudan spin_lock(&hugetlb_lock); 1606a3437870SNishanth Aravamudan h->nr_overcommit_huge_pages = input; 1607a3437870SNishanth Aravamudan spin_unlock(&hugetlb_lock); 1608a3437870SNishanth Aravamudan 1609a3437870SNishanth Aravamudan return count; 1610a3437870SNishanth Aravamudan } 1611a3437870SNishanth Aravamudan HSTATE_ATTR(nr_overcommit_hugepages); 1612a3437870SNishanth Aravamudan 1613a3437870SNishanth Aravamudan static ssize_t free_hugepages_show(struct kobject *kobj, 1614a3437870SNishanth Aravamudan struct kobj_attribute *attr, char *buf) 1615a3437870SNishanth Aravamudan { 16169a305230SLee Schermerhorn struct hstate *h; 16179a305230SLee Schermerhorn unsigned long free_huge_pages; 16189a305230SLee Schermerhorn int nid; 16199a305230SLee Schermerhorn 16209a305230SLee Schermerhorn h = kobj_to_hstate(kobj, &nid); 16219a305230SLee Schermerhorn if (nid == NUMA_NO_NODE) 16229a305230SLee Schermerhorn free_huge_pages = h->free_huge_pages; 16239a305230SLee Schermerhorn else 16249a305230SLee Schermerhorn free_huge_pages = h->free_huge_pages_node[nid]; 16259a305230SLee Schermerhorn 16269a305230SLee Schermerhorn return sprintf(buf, "%lu\n", free_huge_pages); 1627a3437870SNishanth Aravamudan } 1628a3437870SNishanth Aravamudan HSTATE_ATTR_RO(free_hugepages); 1629a3437870SNishanth Aravamudan 1630a3437870SNishanth Aravamudan static ssize_t resv_hugepages_show(struct kobject *kobj, 1631a3437870SNishanth Aravamudan struct kobj_attribute *attr, char *buf) 1632a3437870SNishanth 
Aravamudan { 16339a305230SLee Schermerhorn struct hstate *h = kobj_to_hstate(kobj, NULL); 1634a3437870SNishanth Aravamudan return sprintf(buf, "%lu\n", h->resv_huge_pages); 1635a3437870SNishanth Aravamudan } 1636a3437870SNishanth Aravamudan HSTATE_ATTR_RO(resv_hugepages); 1637a3437870SNishanth Aravamudan 1638a3437870SNishanth Aravamudan static ssize_t surplus_hugepages_show(struct kobject *kobj, 1639a3437870SNishanth Aravamudan struct kobj_attribute *attr, char *buf) 1640a3437870SNishanth Aravamudan { 16419a305230SLee Schermerhorn struct hstate *h; 16429a305230SLee Schermerhorn unsigned long surplus_huge_pages; 16439a305230SLee Schermerhorn int nid; 16449a305230SLee Schermerhorn 16459a305230SLee Schermerhorn h = kobj_to_hstate(kobj, &nid); 16469a305230SLee Schermerhorn if (nid == NUMA_NO_NODE) 16479a305230SLee Schermerhorn surplus_huge_pages = h->surplus_huge_pages; 16489a305230SLee Schermerhorn else 16499a305230SLee Schermerhorn surplus_huge_pages = h->surplus_huge_pages_node[nid]; 16509a305230SLee Schermerhorn 16519a305230SLee Schermerhorn return sprintf(buf, "%lu\n", surplus_huge_pages); 1652a3437870SNishanth Aravamudan } 1653a3437870SNishanth Aravamudan HSTATE_ATTR_RO(surplus_hugepages); 1654a3437870SNishanth Aravamudan 1655a3437870SNishanth Aravamudan static struct attribute *hstate_attrs[] = { 1656a3437870SNishanth Aravamudan &nr_hugepages_attr.attr, 1657a3437870SNishanth Aravamudan &nr_overcommit_hugepages_attr.attr, 1658a3437870SNishanth Aravamudan &free_hugepages_attr.attr, 1659a3437870SNishanth Aravamudan &resv_hugepages_attr.attr, 1660a3437870SNishanth Aravamudan &surplus_hugepages_attr.attr, 166106808b08SLee Schermerhorn #ifdef CONFIG_NUMA 166206808b08SLee Schermerhorn &nr_hugepages_mempolicy_attr.attr, 166306808b08SLee Schermerhorn #endif 1664a3437870SNishanth Aravamudan NULL, 1665a3437870SNishanth Aravamudan }; 1666a3437870SNishanth Aravamudan 1667a3437870SNishanth Aravamudan static struct attribute_group hstate_attr_group = { 1668a3437870SNishanth Aravamudan .attrs = hstate_attrs, 1669a3437870SNishanth Aravamudan }; 1670a3437870SNishanth Aravamudan 1671094e9539SJeff Mahoney static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent, 16729a305230SLee Schermerhorn struct kobject **hstate_kobjs, 16739a305230SLee Schermerhorn struct attribute_group *hstate_attr_group) 1674a3437870SNishanth Aravamudan { 1675a3437870SNishanth Aravamudan int retval; 1676972dc4deSAneesh Kumar K.V int hi = hstate_index(h); 1677a3437870SNishanth Aravamudan 16789a305230SLee Schermerhorn hstate_kobjs[hi] = kobject_create_and_add(h->name, parent); 16799a305230SLee Schermerhorn if (!hstate_kobjs[hi]) 1680a3437870SNishanth Aravamudan return -ENOMEM; 1681a3437870SNishanth Aravamudan 16829a305230SLee Schermerhorn retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group); 1683a3437870SNishanth Aravamudan if (retval) 16849a305230SLee Schermerhorn kobject_put(hstate_kobjs[hi]); 1685a3437870SNishanth Aravamudan 1686a3437870SNishanth Aravamudan return retval; 1687a3437870SNishanth Aravamudan } 1688a3437870SNishanth Aravamudan 1689a3437870SNishanth Aravamudan static void __init hugetlb_sysfs_init(void) 1690a3437870SNishanth Aravamudan { 1691a3437870SNishanth Aravamudan struct hstate *h; 1692a3437870SNishanth Aravamudan int err; 1693a3437870SNishanth Aravamudan 1694a3437870SNishanth Aravamudan hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj); 1695a3437870SNishanth Aravamudan if (!hugepages_kobj) 1696a3437870SNishanth Aravamudan return; 1697a3437870SNishanth Aravamudan 
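/*
 * [Editor's note: not part of the original source] The loop below gives
 * every hstate a directory such as
 * /sys/kernel/mm/hugepages/hugepages-2048kB/ populated from hstate_attrs[]
 * above: nr_hugepages, nr_overcommit_hugepages, free_hugepages,
 * resv_hugepages and surplus_hugepages (plus nr_hugepages_mempolicy on
 * CONFIG_NUMA builds).
 */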
1698a3437870SNishanth Aravamudan for_each_hstate(h) { 16999a305230SLee Schermerhorn err = hugetlb_sysfs_add_hstate(h, hugepages_kobj, 17009a305230SLee Schermerhorn hstate_kobjs, &hstate_attr_group); 1701a3437870SNishanth Aravamudan if (err) 1702ffb22af5SAndrew Morton pr_err("Hugetlb: Unable to add hstate %s", h->name); 1703a3437870SNishanth Aravamudan } 1704a3437870SNishanth Aravamudan } 1705a3437870SNishanth Aravamudan 17069a305230SLee Schermerhorn #ifdef CONFIG_NUMA 17079a305230SLee Schermerhorn 17089a305230SLee Schermerhorn /* 17099a305230SLee Schermerhorn * node_hstate/s - associate per node hstate attributes, via their kobjects, 171010fbcf4cSKay Sievers * with node devices in node_devices[] using a parallel array. The array 171110fbcf4cSKay Sievers * index of a node device or _hstate == node id. 171210fbcf4cSKay Sievers * This is here to avoid any static dependency of the node device driver, in 17139a305230SLee Schermerhorn * the base kernel, on the hugetlb module. 17149a305230SLee Schermerhorn */ 17159a305230SLee Schermerhorn struct node_hstate { 17169a305230SLee Schermerhorn struct kobject *hugepages_kobj; 17179a305230SLee Schermerhorn struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; 17189a305230SLee Schermerhorn }; 17199a305230SLee Schermerhorn struct node_hstate node_hstates[MAX_NUMNODES]; 17209a305230SLee Schermerhorn 17219a305230SLee Schermerhorn /* 172210fbcf4cSKay Sievers * A subset of global hstate attributes for node devices 17239a305230SLee Schermerhorn */ 17249a305230SLee Schermerhorn static struct attribute *per_node_hstate_attrs[] = { 17259a305230SLee Schermerhorn &nr_hugepages_attr.attr, 17269a305230SLee Schermerhorn &free_hugepages_attr.attr, 17279a305230SLee Schermerhorn &surplus_hugepages_attr.attr, 17289a305230SLee Schermerhorn NULL, 17299a305230SLee Schermerhorn }; 17309a305230SLee Schermerhorn 17319a305230SLee Schermerhorn static struct attribute_group per_node_hstate_attr_group = { 17329a305230SLee Schermerhorn .attrs = per_node_hstate_attrs, 17339a305230SLee Schermerhorn }; 17349a305230SLee Schermerhorn 17359a305230SLee Schermerhorn /* 173610fbcf4cSKay Sievers * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj. 17379a305230SLee Schermerhorn * Returns node id via non-NULL nidp. 17389a305230SLee Schermerhorn */ 17399a305230SLee Schermerhorn static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp) 17409a305230SLee Schermerhorn { 17419a305230SLee Schermerhorn int nid; 17429a305230SLee Schermerhorn 17439a305230SLee Schermerhorn for (nid = 0; nid < nr_node_ids; nid++) { 17449a305230SLee Schermerhorn struct node_hstate *nhs = &node_hstates[nid]; 17459a305230SLee Schermerhorn int i; 17469a305230SLee Schermerhorn for (i = 0; i < HUGE_MAX_HSTATE; i++) 17479a305230SLee Schermerhorn if (nhs->hstate_kobjs[i] == kobj) { 17489a305230SLee Schermerhorn if (nidp) 17499a305230SLee Schermerhorn *nidp = nid; 17509a305230SLee Schermerhorn return &hstates[i]; 17519a305230SLee Schermerhorn } 17529a305230SLee Schermerhorn } 17539a305230SLee Schermerhorn 17549a305230SLee Schermerhorn BUG(); 17559a305230SLee Schermerhorn return NULL; 17569a305230SLee Schermerhorn } 17579a305230SLee Schermerhorn 17589a305230SLee Schermerhorn /* 175910fbcf4cSKay Sievers * Unregister hstate attributes from a single node device. 17609a305230SLee Schermerhorn * No-op if no hstate attributes attached. 
17619a305230SLee Schermerhorn */ 17623cd8b44fSClaudiu Ghioc static void hugetlb_unregister_node(struct node *node) 17639a305230SLee Schermerhorn { 17649a305230SLee Schermerhorn struct hstate *h; 176510fbcf4cSKay Sievers struct node_hstate *nhs = &node_hstates[node->dev.id]; 17669a305230SLee Schermerhorn 17679a305230SLee Schermerhorn if (!nhs->hugepages_kobj) 17689b5e5d0fSLee Schermerhorn return; /* no hstate attributes */ 17699a305230SLee Schermerhorn 1770972dc4deSAneesh Kumar K.V for_each_hstate(h) { 1771972dc4deSAneesh Kumar K.V int idx = hstate_index(h); 1772972dc4deSAneesh Kumar K.V if (nhs->hstate_kobjs[idx]) { 1773972dc4deSAneesh Kumar K.V kobject_put(nhs->hstate_kobjs[idx]); 1774972dc4deSAneesh Kumar K.V nhs->hstate_kobjs[idx] = NULL; 1775972dc4deSAneesh Kumar K.V } 17769a305230SLee Schermerhorn } 17779a305230SLee Schermerhorn 17789a305230SLee Schermerhorn kobject_put(nhs->hugepages_kobj); 17799a305230SLee Schermerhorn nhs->hugepages_kobj = NULL; 17809a305230SLee Schermerhorn } 17819a305230SLee Schermerhorn 17829a305230SLee Schermerhorn /* 178310fbcf4cSKay Sievers * hugetlb module exit: unregister hstate attributes from node devices 17849a305230SLee Schermerhorn * that have them. 17859a305230SLee Schermerhorn */ 17869a305230SLee Schermerhorn static void hugetlb_unregister_all_nodes(void) 17879a305230SLee Schermerhorn { 17889a305230SLee Schermerhorn int nid; 17899a305230SLee Schermerhorn 17909a305230SLee Schermerhorn /* 179110fbcf4cSKay Sievers * disable node device registrations. 17929a305230SLee Schermerhorn */ 17939a305230SLee Schermerhorn register_hugetlbfs_with_node(NULL, NULL); 17949a305230SLee Schermerhorn 17959a305230SLee Schermerhorn /* 17969a305230SLee Schermerhorn * remove hstate attributes from any nodes that have them. 17979a305230SLee Schermerhorn */ 17989a305230SLee Schermerhorn for (nid = 0; nid < nr_node_ids; nid++) 17998732794bSWen Congyang hugetlb_unregister_node(node_devices[nid]); 18009a305230SLee Schermerhorn } 18019a305230SLee Schermerhorn 18029a305230SLee Schermerhorn /* 180310fbcf4cSKay Sievers * Register hstate attributes for a single node device. 18049a305230SLee Schermerhorn * No-op if attributes already registered. 
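 *
 * [Editor's note: not part of the original source] These per-node
 * directories surface as e.g.
 * /sys/devices/system/node/node0/hugepages/hugepages-2048kB/ and carry only
 * the reduced per_node_hstate_attrs[] set (nr_hugepages, free_hugepages,
 * surplus_hugepages); the overcommit and reserve counters stay global.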
18059a305230SLee Schermerhorn */ 18063cd8b44fSClaudiu Ghioc static void hugetlb_register_node(struct node *node) 18079a305230SLee Schermerhorn { 18089a305230SLee Schermerhorn struct hstate *h; 180910fbcf4cSKay Sievers struct node_hstate *nhs = &node_hstates[node->dev.id]; 18109a305230SLee Schermerhorn int err; 18119a305230SLee Schermerhorn 18129a305230SLee Schermerhorn if (nhs->hugepages_kobj) 18139a305230SLee Schermerhorn return; /* already allocated */ 18149a305230SLee Schermerhorn 18159a305230SLee Schermerhorn nhs->hugepages_kobj = kobject_create_and_add("hugepages", 181610fbcf4cSKay Sievers &node->dev.kobj); 18179a305230SLee Schermerhorn if (!nhs->hugepages_kobj) 18189a305230SLee Schermerhorn return; 18199a305230SLee Schermerhorn 18209a305230SLee Schermerhorn for_each_hstate(h) { 18219a305230SLee Schermerhorn err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj, 18229a305230SLee Schermerhorn nhs->hstate_kobjs, 18239a305230SLee Schermerhorn &per_node_hstate_attr_group); 18249a305230SLee Schermerhorn if (err) { 1825ffb22af5SAndrew Morton pr_err("Hugetlb: Unable to add hstate %s for node %d\n", 182610fbcf4cSKay Sievers h->name, node->dev.id); 18279a305230SLee Schermerhorn hugetlb_unregister_node(node); 18289a305230SLee Schermerhorn break; 18299a305230SLee Schermerhorn } 18309a305230SLee Schermerhorn } 18319a305230SLee Schermerhorn } 18329a305230SLee Schermerhorn 18339a305230SLee Schermerhorn /* 18349b5e5d0fSLee Schermerhorn * hugetlb init time: register hstate attributes for all registered node 183510fbcf4cSKay Sievers * devices of nodes that have memory. All on-line nodes should have 183610fbcf4cSKay Sievers * registered their associated device by this time. 18379a305230SLee Schermerhorn */ 18389a305230SLee Schermerhorn static void hugetlb_register_all_nodes(void) 18399a305230SLee Schermerhorn { 18409a305230SLee Schermerhorn int nid; 18419a305230SLee Schermerhorn 18428cebfcd0SLai Jiangshan for_each_node_state(nid, N_MEMORY) { 18438732794bSWen Congyang struct node *node = node_devices[nid]; 184410fbcf4cSKay Sievers if (node->dev.id == nid) 18459a305230SLee Schermerhorn hugetlb_register_node(node); 18469a305230SLee Schermerhorn } 18479a305230SLee Schermerhorn 18489a305230SLee Schermerhorn /* 184910fbcf4cSKay Sievers * Let the node device driver know we're here so it can 18509a305230SLee Schermerhorn * [un]register hstate attributes on node hotplug. 
18519a305230SLee Schermerhorn */ 18529a305230SLee Schermerhorn register_hugetlbfs_with_node(hugetlb_register_node, 18539a305230SLee Schermerhorn hugetlb_unregister_node); 18549a305230SLee Schermerhorn } 18559a305230SLee Schermerhorn #else /* !CONFIG_NUMA */ 18569a305230SLee Schermerhorn 18579a305230SLee Schermerhorn static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp) 18589a305230SLee Schermerhorn { 18599a305230SLee Schermerhorn BUG(); 18609a305230SLee Schermerhorn if (nidp) 18619a305230SLee Schermerhorn *nidp = -1; 18629a305230SLee Schermerhorn return NULL; 18639a305230SLee Schermerhorn } 18649a305230SLee Schermerhorn 18659a305230SLee Schermerhorn static void hugetlb_unregister_all_nodes(void) { } 18669a305230SLee Schermerhorn 18679a305230SLee Schermerhorn static void hugetlb_register_all_nodes(void) { } 18689a305230SLee Schermerhorn 18699a305230SLee Schermerhorn #endif 18709a305230SLee Schermerhorn 1871a3437870SNishanth Aravamudan static void __exit hugetlb_exit(void) 1872a3437870SNishanth Aravamudan { 1873a3437870SNishanth Aravamudan struct hstate *h; 1874a3437870SNishanth Aravamudan 18759a305230SLee Schermerhorn hugetlb_unregister_all_nodes(); 18769a305230SLee Schermerhorn 1877a3437870SNishanth Aravamudan for_each_hstate(h) { 1878972dc4deSAneesh Kumar K.V kobject_put(hstate_kobjs[hstate_index(h)]); 1879a3437870SNishanth Aravamudan } 1880a3437870SNishanth Aravamudan 1881a3437870SNishanth Aravamudan kobject_put(hugepages_kobj); 1882a3437870SNishanth Aravamudan } 1883a3437870SNishanth Aravamudan module_exit(hugetlb_exit); 1884a3437870SNishanth Aravamudan 1885a3437870SNishanth Aravamudan static int __init hugetlb_init(void) 1886a3437870SNishanth Aravamudan { 18870ef89d25SBenjamin Herrenschmidt /* Some platforms decide whether they support huge pages at boot 18880ef89d25SBenjamin Herrenschmidt * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when 18890ef89d25SBenjamin Herrenschmidt * there is no such support. 18900ef89d25SBenjamin Herrenschmidt */ 18910ef89d25SBenjamin Herrenschmidt if (HPAGE_SHIFT == 0) 18920ef89d25SBenjamin Herrenschmidt return 0; 1893a3437870SNishanth Aravamudan 1894e11bfbfcSNick Piggin if (!size_to_hstate(default_hstate_size)) { 1895e11bfbfcSNick Piggin default_hstate_size = HPAGE_SIZE; 1896e11bfbfcSNick Piggin if (!size_to_hstate(default_hstate_size)) 1897a3437870SNishanth Aravamudan hugetlb_add_hstate(HUGETLB_PAGE_ORDER); 1898a3437870SNishanth Aravamudan } 1899972dc4deSAneesh Kumar K.V default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size)); 1900e11bfbfcSNick Piggin if (default_hstate_max_huge_pages) 1901e11bfbfcSNick Piggin default_hstate.max_huge_pages = default_hstate_max_huge_pages; 1902a3437870SNishanth Aravamudan 1903a3437870SNishanth Aravamudan hugetlb_init_hstates(); 1904aa888a74SAndi Kleen gather_bootmem_prealloc(); 1905a3437870SNishanth Aravamudan report_hugepages(); 1906a3437870SNishanth Aravamudan 1907a3437870SNishanth Aravamudan hugetlb_sysfs_init(); 19089a305230SLee Schermerhorn hugetlb_register_all_nodes(); 19097179e7bfSJianguo Wu hugetlb_cgroup_file_init(); 19109a305230SLee Schermerhorn 1911a3437870SNishanth Aravamudan return 0; 1912a3437870SNishanth Aravamudan } 1913a3437870SNishanth Aravamudan module_init(hugetlb_init); 1914a3437870SNishanth Aravamudan 1915a3437870SNishanth Aravamudan /* Should be called on processing a hugepagesz=...
option */ 1916a3437870SNishanth Aravamudan void __init hugetlb_add_hstate(unsigned order) 1917a3437870SNishanth Aravamudan { 1918a3437870SNishanth Aravamudan struct hstate *h; 19198faa8b07SAndi Kleen unsigned long i; 19208faa8b07SAndi Kleen 1921a3437870SNishanth Aravamudan if (size_to_hstate(PAGE_SIZE << order)) { 1922ffb22af5SAndrew Morton pr_warning("hugepagesz= specified twice, ignoring\n"); 1923a3437870SNishanth Aravamudan return; 1924a3437870SNishanth Aravamudan } 192547d38344SAneesh Kumar K.V BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE); 1926a3437870SNishanth Aravamudan BUG_ON(order == 0); 192747d38344SAneesh Kumar K.V h = &hstates[hugetlb_max_hstate++]; 1928a3437870SNishanth Aravamudan h->order = order; 1929a3437870SNishanth Aravamudan h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1); 19308faa8b07SAndi Kleen h->nr_huge_pages = 0; 19318faa8b07SAndi Kleen h->free_huge_pages = 0; 19328faa8b07SAndi Kleen for (i = 0; i < MAX_NUMNODES; ++i) 19338faa8b07SAndi Kleen INIT_LIST_HEAD(&h->hugepage_freelists[i]); 19340edaecfaSAneesh Kumar K.V INIT_LIST_HEAD(&h->hugepage_activelist); 19358cebfcd0SLai Jiangshan h->next_nid_to_alloc = first_node(node_states[N_MEMORY]); 19368cebfcd0SLai Jiangshan h->next_nid_to_free = first_node(node_states[N_MEMORY]); 1937a3437870SNishanth Aravamudan snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB", 1938a3437870SNishanth Aravamudan huge_page_size(h)/1024); 19398faa8b07SAndi Kleen 1940a3437870SNishanth Aravamudan parsed_hstate = h; 1941a3437870SNishanth Aravamudan } 1942a3437870SNishanth Aravamudan 1943e11bfbfcSNick Piggin static int __init hugetlb_nrpages_setup(char *s) 1944a3437870SNishanth Aravamudan { 1945a3437870SNishanth Aravamudan unsigned long *mhp; 19468faa8b07SAndi Kleen static unsigned long *last_mhp; 1947a3437870SNishanth Aravamudan 1948a3437870SNishanth Aravamudan /* 194947d38344SAneesh Kumar K.V * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet, 1950a3437870SNishanth Aravamudan * so this hugepages= parameter goes to the "default hstate". 1951a3437870SNishanth Aravamudan */ 195247d38344SAneesh Kumar K.V if (!hugetlb_max_hstate) 1953a3437870SNishanth Aravamudan mhp = &default_hstate_max_huge_pages; 1954a3437870SNishanth Aravamudan else 1955a3437870SNishanth Aravamudan mhp = &parsed_hstate->max_huge_pages; 1956a3437870SNishanth Aravamudan 19578faa8b07SAndi Kleen if (mhp == last_mhp) { 1958ffb22af5SAndrew Morton pr_warning("hugepages= specified twice without " 19598faa8b07SAndi Kleen "interleaving hugepagesz=, ignoring\n"); 19608faa8b07SAndi Kleen return 1; 19618faa8b07SAndi Kleen } 19628faa8b07SAndi Kleen 1963a3437870SNishanth Aravamudan if (sscanf(s, "%lu", mhp) <= 0) 1964a3437870SNishanth Aravamudan *mhp = 0; 1965a3437870SNishanth Aravamudan 19668faa8b07SAndi Kleen /* 19678faa8b07SAndi Kleen * Global state is always initialized later in hugetlb_init. 19688faa8b07SAndi Kleen * But we need to allocate >= MAX_ORDER hstates here early to still 19698faa8b07SAndi Kleen * use the bootmem allocator. 
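 *
 * [Editor's note: not part of the original source] Hedged example of the
 * command-line ordering this parser relies on, assuming x86_64 page sizes:
 *
 *	default_hugepagesz=1G hugepagesz=1G hugepages=2 hugepagesz=2M hugepages=512
 *
 * Each hugepages= applies to the hstate named by the preceding hugepagesz=;
 * the order-18 (1 GB) request is also what takes the early-allocation path
 * below, since it exceeds MAX_ORDER and must come from bootmem.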
19708faa8b07SAndi Kleen */ 197147d38344SAneesh Kumar K.V if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER) 19728faa8b07SAndi Kleen hugetlb_hstate_alloc_pages(parsed_hstate); 19738faa8b07SAndi Kleen 19748faa8b07SAndi Kleen last_mhp = mhp; 19758faa8b07SAndi Kleen 1976a3437870SNishanth Aravamudan return 1; 1977a3437870SNishanth Aravamudan } 1978e11bfbfcSNick Piggin __setup("hugepages=", hugetlb_nrpages_setup); 1979e11bfbfcSNick Piggin 1980e11bfbfcSNick Piggin static int __init hugetlb_default_setup(char *s) 1981e11bfbfcSNick Piggin { 1982e11bfbfcSNick Piggin default_hstate_size = memparse(s, &s); 1983e11bfbfcSNick Piggin return 1; 1984e11bfbfcSNick Piggin } 1985e11bfbfcSNick Piggin __setup("default_hugepagesz=", hugetlb_default_setup); 1986a3437870SNishanth Aravamudan 19878a213460SNishanth Aravamudan static unsigned int cpuset_mems_nr(unsigned int *array) 19888a213460SNishanth Aravamudan { 19898a213460SNishanth Aravamudan int node; 19908a213460SNishanth Aravamudan unsigned int nr = 0; 19918a213460SNishanth Aravamudan 19928a213460SNishanth Aravamudan for_each_node_mask(node, cpuset_current_mems_allowed) 19938a213460SNishanth Aravamudan nr += array[node]; 19948a213460SNishanth Aravamudan 19958a213460SNishanth Aravamudan return nr; 19968a213460SNishanth Aravamudan } 19978a213460SNishanth Aravamudan 19988a213460SNishanth Aravamudan #ifdef CONFIG_SYSCTL 199906808b08SLee Schermerhorn static int hugetlb_sysctl_handler_common(bool obey_mempolicy, 200006808b08SLee Schermerhorn struct ctl_table *table, int write, 200106808b08SLee Schermerhorn void __user *buffer, size_t *length, loff_t *ppos) 20021da177e4SLinus Torvalds { 2003e5ff2159SAndi Kleen struct hstate *h = &default_hstate; 2004e5ff2159SAndi Kleen unsigned long tmp; 200508d4a246SMichal Hocko int ret; 2006e5ff2159SAndi Kleen 2007e5ff2159SAndi Kleen tmp = h->max_huge_pages; 2008e5ff2159SAndi Kleen 2009adbe8726SEric B Munson if (write && h->order >= MAX_ORDER) 2010adbe8726SEric B Munson return -EINVAL; 2011adbe8726SEric B Munson 2012e5ff2159SAndi Kleen table->data = &tmp; 2013e5ff2159SAndi Kleen table->maxlen = sizeof(unsigned long); 201408d4a246SMichal Hocko ret = proc_doulongvec_minmax(table, write, buffer, length, ppos); 201508d4a246SMichal Hocko if (ret) 201608d4a246SMichal Hocko goto out; 2017e5ff2159SAndi Kleen 201806808b08SLee Schermerhorn if (write) { 2019bad44b5bSDavid Rientjes NODEMASK_ALLOC(nodemask_t, nodes_allowed, 2020bad44b5bSDavid Rientjes GFP_KERNEL | __GFP_NORETRY); 202106808b08SLee Schermerhorn if (!(obey_mempolicy && 202206808b08SLee Schermerhorn init_nodemask_of_mempolicy(nodes_allowed))) { 202306808b08SLee Schermerhorn NODEMASK_FREE(nodes_allowed); 20248cebfcd0SLai Jiangshan nodes_allowed = &node_states[N_MEMORY]; 202506808b08SLee Schermerhorn } 202606808b08SLee Schermerhorn h->max_huge_pages = set_max_huge_pages(h, tmp, nodes_allowed); 202706808b08SLee Schermerhorn 20288cebfcd0SLai Jiangshan if (nodes_allowed != &node_states[N_MEMORY]) 202906808b08SLee Schermerhorn NODEMASK_FREE(nodes_allowed); 203006808b08SLee Schermerhorn } 203108d4a246SMichal Hocko out: 203208d4a246SMichal Hocko return ret; 20331da177e4SLinus Torvalds } 2034396faf03SMel Gorman 203506808b08SLee Schermerhorn int hugetlb_sysctl_handler(struct ctl_table *table, int write, 203606808b08SLee Schermerhorn void __user *buffer, size_t *length, loff_t *ppos) 203706808b08SLee Schermerhorn { 203806808b08SLee Schermerhorn 203906808b08SLee Schermerhorn return hugetlb_sysctl_handler_common(false, table, write, 204006808b08SLee Schermerhorn buffer, length, 
ppos); 204106808b08SLee Schermerhorn } 204206808b08SLee Schermerhorn 204306808b08SLee Schermerhorn #ifdef CONFIG_NUMA 204406808b08SLee Schermerhorn int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write, 204506808b08SLee Schermerhorn void __user *buffer, size_t *length, loff_t *ppos) 204606808b08SLee Schermerhorn { 204706808b08SLee Schermerhorn return hugetlb_sysctl_handler_common(true, table, write, 204806808b08SLee Schermerhorn buffer, length, ppos); 204906808b08SLee Schermerhorn } 205006808b08SLee Schermerhorn #endif /* CONFIG_NUMA */ 205106808b08SLee Schermerhorn 2052396faf03SMel Gorman int hugetlb_treat_movable_handler(struct ctl_table *table, int write, 20538d65af78SAlexey Dobriyan void __user *buffer, 2054396faf03SMel Gorman size_t *length, loff_t *ppos) 2055396faf03SMel Gorman { 20568d65af78SAlexey Dobriyan proc_dointvec(table, write, buffer, length, ppos); 2057396faf03SMel Gorman if (hugepages_treat_as_movable) 2058396faf03SMel Gorman htlb_alloc_mask = GFP_HIGHUSER_MOVABLE; 2059396faf03SMel Gorman else 2060396faf03SMel Gorman htlb_alloc_mask = GFP_HIGHUSER; 2061396faf03SMel Gorman return 0; 2062396faf03SMel Gorman } 2063396faf03SMel Gorman 2064a3d0c6aaSNishanth Aravamudan int hugetlb_overcommit_handler(struct ctl_table *table, int write, 20658d65af78SAlexey Dobriyan void __user *buffer, 2066a3d0c6aaSNishanth Aravamudan size_t *length, loff_t *ppos) 2067a3d0c6aaSNishanth Aravamudan { 2068a5516438SAndi Kleen struct hstate *h = &default_hstate; 2069e5ff2159SAndi Kleen unsigned long tmp; 207008d4a246SMichal Hocko int ret; 2071e5ff2159SAndi Kleen 2072e5ff2159SAndi Kleen tmp = h->nr_overcommit_huge_pages; 2073e5ff2159SAndi Kleen 2074adbe8726SEric B Munson if (write && h->order >= MAX_ORDER) 2075adbe8726SEric B Munson return -EINVAL; 2076adbe8726SEric B Munson 2077e5ff2159SAndi Kleen table->data = &tmp; 2078e5ff2159SAndi Kleen table->maxlen = sizeof(unsigned long); 207908d4a246SMichal Hocko ret = proc_doulongvec_minmax(table, write, buffer, length, ppos); 208008d4a246SMichal Hocko if (ret) 208108d4a246SMichal Hocko goto out; 2082e5ff2159SAndi Kleen 2083e5ff2159SAndi Kleen if (write) { 2084064d9efeSNishanth Aravamudan spin_lock(&hugetlb_lock); 2085e5ff2159SAndi Kleen h->nr_overcommit_huge_pages = tmp; 2086a3d0c6aaSNishanth Aravamudan spin_unlock(&hugetlb_lock); 2087e5ff2159SAndi Kleen } 208808d4a246SMichal Hocko out: 208908d4a246SMichal Hocko return ret; 2090a3d0c6aaSNishanth Aravamudan } 2091a3d0c6aaSNishanth Aravamudan 20921da177e4SLinus Torvalds #endif /* CONFIG_SYSCTL */ 20931da177e4SLinus Torvalds 2094e1759c21SAlexey Dobriyan void hugetlb_report_meminfo(struct seq_file *m) 20951da177e4SLinus Torvalds { 2096a5516438SAndi Kleen struct hstate *h = &default_hstate; 2097e1759c21SAlexey Dobriyan seq_printf(m, 20981da177e4SLinus Torvalds "HugePages_Total: %5lu\n" 20991da177e4SLinus Torvalds "HugePages_Free: %5lu\n" 2100b45b5bd6SDavid Gibson "HugePages_Rsvd: %5lu\n" 21017893d1d5SAdam Litke "HugePages_Surp: %5lu\n" 21024f98a2feSRik van Riel "Hugepagesize: %8lu kB\n", 2103a5516438SAndi Kleen h->nr_huge_pages, 2104a5516438SAndi Kleen h->free_huge_pages, 2105a5516438SAndi Kleen h->resv_huge_pages, 2106a5516438SAndi Kleen h->surplus_huge_pages, 2107a5516438SAndi Kleen 1UL << (huge_page_order(h) + PAGE_SHIFT - 10)); 21081da177e4SLinus Torvalds } 21091da177e4SLinus Torvalds 21101da177e4SLinus Torvalds int hugetlb_report_node_meminfo(int nid, char *buf) 21111da177e4SLinus Torvalds { 2112a5516438SAndi Kleen struct hstate *h = &default_hstate; 21131da177e4SLinus Torvalds return 
sprintf(buf, 21141da177e4SLinus Torvalds "Node %d HugePages_Total: %5u\n" 2115a1de0919SNishanth Aravamudan "Node %d HugePages_Free: %5u\n" 2116a1de0919SNishanth Aravamudan "Node %d HugePages_Surp: %5u\n", 2117a5516438SAndi Kleen nid, h->nr_huge_pages_node[nid], 2118a5516438SAndi Kleen nid, h->free_huge_pages_node[nid], 2119a5516438SAndi Kleen nid, h->surplus_huge_pages_node[nid]); 21201da177e4SLinus Torvalds } 21211da177e4SLinus Torvalds 2122949f7ec5SDavid Rientjes void hugetlb_show_meminfo(void) 2123949f7ec5SDavid Rientjes { 2124949f7ec5SDavid Rientjes struct hstate *h; 2125949f7ec5SDavid Rientjes int nid; 2126949f7ec5SDavid Rientjes 2127949f7ec5SDavid Rientjes for_each_node_state(nid, N_MEMORY) 2128949f7ec5SDavid Rientjes for_each_hstate(h) 2129949f7ec5SDavid Rientjes pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n", 2130949f7ec5SDavid Rientjes nid, 2131949f7ec5SDavid Rientjes h->nr_huge_pages_node[nid], 2132949f7ec5SDavid Rientjes h->free_huge_pages_node[nid], 2133949f7ec5SDavid Rientjes h->surplus_huge_pages_node[nid], 2134949f7ec5SDavid Rientjes 1UL << (huge_page_order(h) + PAGE_SHIFT - 10)); 2135949f7ec5SDavid Rientjes } 2136949f7ec5SDavid Rientjes 21371da177e4SLinus Torvalds /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */ 21381da177e4SLinus Torvalds unsigned long hugetlb_total_pages(void) 21391da177e4SLinus Torvalds { 2140d0028588SWanpeng Li struct hstate *h; 2141d0028588SWanpeng Li unsigned long nr_total_pages = 0; 2142d0028588SWanpeng Li 2143d0028588SWanpeng Li for_each_hstate(h) 2144d0028588SWanpeng Li nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h); 2145d0028588SWanpeng Li return nr_total_pages; 21461da177e4SLinus Torvalds } 21471da177e4SLinus Torvalds 2148a5516438SAndi Kleen static int hugetlb_acct_memory(struct hstate *h, long delta) 2149fc1b8a73SMel Gorman { 2150fc1b8a73SMel Gorman int ret = -ENOMEM; 2151fc1b8a73SMel Gorman 2152fc1b8a73SMel Gorman spin_lock(&hugetlb_lock); 2153fc1b8a73SMel Gorman /* 2154fc1b8a73SMel Gorman * When cpuset is configured, it breaks the strict hugetlb page 2155fc1b8a73SMel Gorman * reservation as the accounting is done on a global variable. Such 2156fc1b8a73SMel Gorman * reservation is completely rubbish in the presence of cpuset because 2157fc1b8a73SMel Gorman * the reservation is not checked against page availability for the 2158fc1b8a73SMel Gorman * current cpuset. Applications can still potentially be OOM'ed by the kernel 2159fc1b8a73SMel Gorman * with lack of free htlb page in cpuset that the task is in. 2160fc1b8a73SMel Gorman * Attempt to enforce strict accounting with cpuset is almost 2161fc1b8a73SMel Gorman * impossible (or too ugly) because cpuset is too fluid in that 2162fc1b8a73SMel Gorman * task or memory node can be dynamically moved between cpusets. 2163fc1b8a73SMel Gorman * 2164fc1b8a73SMel Gorman * The change of semantics for shared hugetlb mapping with cpuset is 2165fc1b8a73SMel Gorman * undesirable. However, in order to preserve some of the semantics, 2166fc1b8a73SMel Gorman * we fall back to check against current free page availability as 2167fc1b8a73SMel Gorman * a best attempt and hopefully to minimize the impact of changing 2168fc1b8a73SMel Gorman * semantics that cpuset has.
2169fc1b8a73SMel Gorman */ 2170fc1b8a73SMel Gorman if (delta > 0) { 2171a5516438SAndi Kleen if (gather_surplus_pages(h, delta) < 0) 2172fc1b8a73SMel Gorman goto out; 2173fc1b8a73SMel Gorman 2174a5516438SAndi Kleen if (delta > cpuset_mems_nr(h->free_huge_pages_node)) { 2175a5516438SAndi Kleen return_unused_surplus_pages(h, delta); 2176fc1b8a73SMel Gorman goto out; 2177fc1b8a73SMel Gorman } 2178fc1b8a73SMel Gorman } 2179fc1b8a73SMel Gorman 2180fc1b8a73SMel Gorman ret = 0; 2181fc1b8a73SMel Gorman if (delta < 0) 2182a5516438SAndi Kleen return_unused_surplus_pages(h, (unsigned long) -delta); 2183fc1b8a73SMel Gorman 2184fc1b8a73SMel Gorman out: 2185fc1b8a73SMel Gorman spin_unlock(&hugetlb_lock); 2186fc1b8a73SMel Gorman return ret; 2187fc1b8a73SMel Gorman } 2188fc1b8a73SMel Gorman 218984afd99bSAndy Whitcroft static void hugetlb_vm_op_open(struct vm_area_struct *vma) 219084afd99bSAndy Whitcroft { 2191f522c3acSJoonsoo Kim struct resv_map *resv = vma_resv_map(vma); 219284afd99bSAndy Whitcroft 219384afd99bSAndy Whitcroft /* 219484afd99bSAndy Whitcroft * This new VMA should share its siblings reservation map if present. 219584afd99bSAndy Whitcroft * The VMA will only ever have a valid reservation map pointer where 219684afd99bSAndy Whitcroft * it is being copied for another still existing VMA. As that VMA 219725985edcSLucas De Marchi * has a reference to the reservation map it cannot disappear until 219884afd99bSAndy Whitcroft * after this open call completes. It is therefore safe to take a 219984afd99bSAndy Whitcroft * new reference here without additional locking. 220084afd99bSAndy Whitcroft */ 2201f522c3acSJoonsoo Kim if (resv) 2202f522c3acSJoonsoo Kim kref_get(&resv->refs); 220384afd99bSAndy Whitcroft } 220484afd99bSAndy Whitcroft 2205c50ac050SDave Hansen static void resv_map_put(struct vm_area_struct *vma) 2206c50ac050SDave Hansen { 2207f522c3acSJoonsoo Kim struct resv_map *resv = vma_resv_map(vma); 2208c50ac050SDave Hansen 2209f522c3acSJoonsoo Kim if (!resv) 2210c50ac050SDave Hansen return; 2211f522c3acSJoonsoo Kim kref_put(&resv->refs, resv_map_release); 2212c50ac050SDave Hansen } 2213c50ac050SDave Hansen 2214a1e78772SMel Gorman static void hugetlb_vm_op_close(struct vm_area_struct *vma) 2215a1e78772SMel Gorman { 2216a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 2217f522c3acSJoonsoo Kim struct resv_map *resv = vma_resv_map(vma); 221890481622SDavid Gibson struct hugepage_subpool *spool = subpool_vma(vma); 221984afd99bSAndy Whitcroft unsigned long reserve; 222084afd99bSAndy Whitcroft unsigned long start; 222184afd99bSAndy Whitcroft unsigned long end; 222284afd99bSAndy Whitcroft 2223f522c3acSJoonsoo Kim if (resv) { 2224a5516438SAndi Kleen start = vma_hugecache_offset(h, vma, vma->vm_start); 2225a5516438SAndi Kleen end = vma_hugecache_offset(h, vma, vma->vm_end); 222684afd99bSAndy Whitcroft 222784afd99bSAndy Whitcroft reserve = (end - start) - 2228f522c3acSJoonsoo Kim region_count(&resv->regions, start, end); 222984afd99bSAndy Whitcroft 2230c50ac050SDave Hansen resv_map_put(vma); 223184afd99bSAndy Whitcroft 22327251ff78SAdam Litke if (reserve) { 2233a5516438SAndi Kleen hugetlb_acct_memory(h, -reserve); 223490481622SDavid Gibson hugepage_subpool_put_pages(spool, reserve); 22357251ff78SAdam Litke } 2236a1e78772SMel Gorman } 223784afd99bSAndy Whitcroft } 2238a1e78772SMel Gorman 22391da177e4SLinus Torvalds /* 22401da177e4SLinus Torvalds * We cannot handle pagefaults against hugetlb pages at all. 
They cause 22411da177e4SLinus Torvalds * handle_mm_fault() to try to instantiate regular-sized pages in the 22421da177e4SLinus Torvalds * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get 22431da177e4SLinus Torvalds * this far. 22441da177e4SLinus Torvalds */ 2245d0217ac0SNick Piggin static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 22461da177e4SLinus Torvalds { 22471da177e4SLinus Torvalds BUG(); 2248d0217ac0SNick Piggin return 0; 22491da177e4SLinus Torvalds } 22501da177e4SLinus Torvalds 2251f0f37e2fSAlexey Dobriyan const struct vm_operations_struct hugetlb_vm_ops = { 2252d0217ac0SNick Piggin .fault = hugetlb_vm_op_fault, 225384afd99bSAndy Whitcroft .open = hugetlb_vm_op_open, 2254a1e78772SMel Gorman .close = hugetlb_vm_op_close, 22551da177e4SLinus Torvalds }; 22561da177e4SLinus Torvalds 22571e8f889bSDavid Gibson static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page, 22581e8f889bSDavid Gibson int writable) 225963551ae0SDavid Gibson { 226063551ae0SDavid Gibson pte_t entry; 226163551ae0SDavid Gibson 22621e8f889bSDavid Gibson if (writable) { 2263106c992aSGerald Schaefer entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page, 2264106c992aSGerald Schaefer vma->vm_page_prot))); 226563551ae0SDavid Gibson } else { 2266106c992aSGerald Schaefer entry = huge_pte_wrprotect(mk_huge_pte(page, 2267106c992aSGerald Schaefer vma->vm_page_prot)); 226863551ae0SDavid Gibson } 226963551ae0SDavid Gibson entry = pte_mkyoung(entry); 227063551ae0SDavid Gibson entry = pte_mkhuge(entry); 2271d9ed9faaSChris Metcalf entry = arch_make_huge_pte(entry, vma, page, writable); 227263551ae0SDavid Gibson 227363551ae0SDavid Gibson return entry; 227463551ae0SDavid Gibson } 227563551ae0SDavid Gibson 22761e8f889bSDavid Gibson static void set_huge_ptep_writable(struct vm_area_struct *vma, 22771e8f889bSDavid Gibson unsigned long address, pte_t *ptep) 22781e8f889bSDavid Gibson { 22791e8f889bSDavid Gibson pte_t entry; 22801e8f889bSDavid Gibson 2281106c992aSGerald Schaefer entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep))); 228232f84528SChris Forbes if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) 22834b3073e1SRussell King update_mmu_cache(vma, address, ptep); 22841e8f889bSDavid Gibson } 22851e8f889bSDavid Gibson 22861e8f889bSDavid Gibson 228763551ae0SDavid Gibson int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, 228863551ae0SDavid Gibson struct vm_area_struct *vma) 228963551ae0SDavid Gibson { 229063551ae0SDavid Gibson pte_t *src_pte, *dst_pte, entry; 229163551ae0SDavid Gibson struct page *ptepage; 22921c59827dSHugh Dickins unsigned long addr; 22931e8f889bSDavid Gibson int cow; 2294a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 2295a5516438SAndi Kleen unsigned long sz = huge_page_size(h); 22961e8f889bSDavid Gibson 22971e8f889bSDavid Gibson cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; 229863551ae0SDavid Gibson 2299a5516438SAndi Kleen for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) { 2300c74df32cSHugh Dickins src_pte = huge_pte_offset(src, addr); 2301c74df32cSHugh Dickins if (!src_pte) 2302c74df32cSHugh Dickins continue; 2303a5516438SAndi Kleen dst_pte = huge_pte_alloc(dst, addr, sz); 230463551ae0SDavid Gibson if (!dst_pte) 230563551ae0SDavid Gibson goto nomem; 2306c5c99429SLarry Woodman 2307c5c99429SLarry Woodman /* If the pagetables are shared don't copy or take references */ 2308c5c99429SLarry Woodman if (dst_pte == src_pte) 2309c5c99429SLarry Woodman continue;
2310c5c99429SLarry Woodman 2311c74df32cSHugh Dickins spin_lock(&dst->page_table_lock); 231246478758SNick Piggin spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING); 23137f2e9525SGerald Schaefer if (!huge_pte_none(huge_ptep_get(src_pte))) { 23141e8f889bSDavid Gibson if (cow) 23157f2e9525SGerald Schaefer huge_ptep_set_wrprotect(src, addr, src_pte); 23167f2e9525SGerald Schaefer entry = huge_ptep_get(src_pte); 231763551ae0SDavid Gibson ptepage = pte_page(entry); 231863551ae0SDavid Gibson get_page(ptepage); 23190fe6e20bSNaoya Horiguchi page_dup_rmap(ptepage); 232063551ae0SDavid Gibson set_huge_pte_at(dst, addr, dst_pte, entry); 23211c59827dSHugh Dickins } 23221c59827dSHugh Dickins spin_unlock(&src->page_table_lock); 2323c74df32cSHugh Dickins spin_unlock(&dst->page_table_lock); 232463551ae0SDavid Gibson } 232563551ae0SDavid Gibson return 0; 232663551ae0SDavid Gibson 232763551ae0SDavid Gibson nomem: 232863551ae0SDavid Gibson return -ENOMEM; 232963551ae0SDavid Gibson } 233063551ae0SDavid Gibson 2331290408d4SNaoya Horiguchi static int is_hugetlb_entry_migration(pte_t pte) 2332290408d4SNaoya Horiguchi { 2333290408d4SNaoya Horiguchi swp_entry_t swp; 2334290408d4SNaoya Horiguchi 2335290408d4SNaoya Horiguchi if (huge_pte_none(pte) || pte_present(pte)) 2336290408d4SNaoya Horiguchi return 0; 2337290408d4SNaoya Horiguchi swp = pte_to_swp_entry(pte); 233832f84528SChris Forbes if (non_swap_entry(swp) && is_migration_entry(swp)) 2339290408d4SNaoya Horiguchi return 1; 234032f84528SChris Forbes else 2341290408d4SNaoya Horiguchi return 0; 2342290408d4SNaoya Horiguchi } 2343290408d4SNaoya Horiguchi 2344fd6a03edSNaoya Horiguchi static int is_hugetlb_entry_hwpoisoned(pte_t pte) 2345fd6a03edSNaoya Horiguchi { 2346fd6a03edSNaoya Horiguchi swp_entry_t swp; 2347fd6a03edSNaoya Horiguchi 2348fd6a03edSNaoya Horiguchi if (huge_pte_none(pte) || pte_present(pte)) 2349fd6a03edSNaoya Horiguchi return 0; 2350fd6a03edSNaoya Horiguchi swp = pte_to_swp_entry(pte); 235132f84528SChris Forbes if (non_swap_entry(swp) && is_hwpoison_entry(swp)) 2352fd6a03edSNaoya Horiguchi return 1; 235332f84528SChris Forbes else 2354fd6a03edSNaoya Horiguchi return 0; 2355fd6a03edSNaoya Horiguchi } 2356fd6a03edSNaoya Horiguchi 235724669e58SAneesh Kumar K.V void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, 235824669e58SAneesh Kumar K.V unsigned long start, unsigned long end, 235924669e58SAneesh Kumar K.V struct page *ref_page) 236063551ae0SDavid Gibson { 236124669e58SAneesh Kumar K.V int force_flush = 0; 236263551ae0SDavid Gibson struct mm_struct *mm = vma->vm_mm; 236363551ae0SDavid Gibson unsigned long address; 2364c7546f8fSDavid Gibson pte_t *ptep; 236563551ae0SDavid Gibson pte_t pte; 236663551ae0SDavid Gibson struct page *page; 2367a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 2368a5516438SAndi Kleen unsigned long sz = huge_page_size(h); 23692ec74c3eSSagi Grimberg const unsigned long mmun_start = start; /* For mmu_notifiers */ 23702ec74c3eSSagi Grimberg const unsigned long mmun_end = end; /* For mmu_notifiers */ 2371a5516438SAndi Kleen 237263551ae0SDavid Gibson WARN_ON(!is_vm_hugetlb_page(vma)); 2373a5516438SAndi Kleen BUG_ON(start & ~huge_page_mask(h)); 2374a5516438SAndi Kleen BUG_ON(end & ~huge_page_mask(h)); 237563551ae0SDavid Gibson 237624669e58SAneesh Kumar K.V tlb_start_vma(tlb, vma); 23772ec74c3eSSagi Grimberg mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 237824669e58SAneesh Kumar K.V again: 2379508034a3SHugh Dickins spin_lock(&mm->page_table_lock); 2380a5516438SAndi Kleen for 
(address = start; address < end; address += sz) { 2381c7546f8fSDavid Gibson ptep = huge_pte_offset(mm, address); 2382c7546f8fSDavid Gibson if (!ptep) 2383c7546f8fSDavid Gibson continue; 2384c7546f8fSDavid Gibson 238539dde65cSChen, Kenneth W if (huge_pmd_unshare(mm, &address, ptep)) 238639dde65cSChen, Kenneth W continue; 238739dde65cSChen, Kenneth W 23886629326bSHillf Danton pte = huge_ptep_get(ptep); 23896629326bSHillf Danton if (huge_pte_none(pte)) 23906629326bSHillf Danton continue; 23916629326bSHillf Danton 23926629326bSHillf Danton /* 23936629326bSHillf Danton * HWPoisoned hugepage is already unmapped and dropped reference 23946629326bSHillf Danton */ 23958c4894c6SNaoya Horiguchi if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) { 2396106c992aSGerald Schaefer huge_pte_clear(mm, address, ptep); 23976629326bSHillf Danton continue; 23988c4894c6SNaoya Horiguchi } 23996629326bSHillf Danton 24006629326bSHillf Danton page = pte_page(pte); 240104f2cbe3SMel Gorman /* 240204f2cbe3SMel Gorman * If a reference page is supplied, it is because a specific 240304f2cbe3SMel Gorman * page is being unmapped, not a range. Ensure the page we 240404f2cbe3SMel Gorman * are about to unmap is the actual page of interest. 240504f2cbe3SMel Gorman */ 240604f2cbe3SMel Gorman if (ref_page) { 240704f2cbe3SMel Gorman if (page != ref_page) 240804f2cbe3SMel Gorman continue; 240904f2cbe3SMel Gorman 241004f2cbe3SMel Gorman /* 241104f2cbe3SMel Gorman * Mark the VMA as having unmapped its page so that 241204f2cbe3SMel Gorman * future faults in this VMA will fail rather than 241304f2cbe3SMel Gorman * looking like data was lost 241404f2cbe3SMel Gorman */ 241504f2cbe3SMel Gorman set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED); 241604f2cbe3SMel Gorman } 241704f2cbe3SMel Gorman 2418c7546f8fSDavid Gibson pte = huge_ptep_get_and_clear(mm, address, ptep); 241924669e58SAneesh Kumar K.V tlb_remove_tlb_entry(tlb, ptep, address); 2420106c992aSGerald Schaefer if (huge_pte_dirty(pte)) 24216649a386SKen Chen set_page_dirty(page); 24229e81130bSHillf Danton 242324669e58SAneesh Kumar K.V page_remove_rmap(page); 242424669e58SAneesh Kumar K.V force_flush = !__tlb_remove_page(tlb, page); 242524669e58SAneesh Kumar K.V if (force_flush) 242624669e58SAneesh Kumar K.V break; 24279e81130bSHillf Danton /* Bail out after unmapping reference page if supplied */ 24289e81130bSHillf Danton if (ref_page) 24299e81130bSHillf Danton break; 243063551ae0SDavid Gibson } 2431cd2934a3SAl Viro spin_unlock(&mm->page_table_lock); 243224669e58SAneesh Kumar K.V /* 243324669e58SAneesh Kumar K.V * mmu_gather ran out of room to batch pages, we break out of 243424669e58SAneesh Kumar K.V * the PTE lock to avoid doing the potential expensive TLB invalidate 243524669e58SAneesh Kumar K.V * and page-free while holding it. 
243624669e58SAneesh Kumar K.V */ 243724669e58SAneesh Kumar K.V if (force_flush) { 243824669e58SAneesh Kumar K.V force_flush = 0; 243924669e58SAneesh Kumar K.V tlb_flush_mmu(tlb); 244024669e58SAneesh Kumar K.V if (address < end && !ref_page) 244124669e58SAneesh Kumar K.V goto again; 2442fe1668aeSChen, Kenneth W } 24432ec74c3eSSagi Grimberg mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 244424669e58SAneesh Kumar K.V tlb_end_vma(tlb, vma); 24451da177e4SLinus Torvalds } 244663551ae0SDavid Gibson 2447d833352aSMel Gorman void __unmap_hugepage_range_final(struct mmu_gather *tlb, 2448d833352aSMel Gorman struct vm_area_struct *vma, unsigned long start, 2449d833352aSMel Gorman unsigned long end, struct page *ref_page) 2450d833352aSMel Gorman { 2451d833352aSMel Gorman __unmap_hugepage_range(tlb, vma, start, end, ref_page); 2452d833352aSMel Gorman 2453d833352aSMel Gorman /* 2454d833352aSMel Gorman * Clear this flag so that x86's huge_pmd_share page_table_shareable 2455d833352aSMel Gorman * test will fail on a vma being torn down, and not grab a page table 2456d833352aSMel Gorman * on its way out. We're lucky that the flag has such an appropriate 2457d833352aSMel Gorman * name, and can in fact be safely cleared here. We could clear it 2458d833352aSMel Gorman * before the __unmap_hugepage_range above, but all that's necessary 2459d833352aSMel Gorman * is to clear it before releasing the i_mmap_mutex. This works 2460d833352aSMel Gorman * because in the context this is called, the VMA is about to be 2461d833352aSMel Gorman * destroyed and the i_mmap_mutex is held. 2462d833352aSMel Gorman */ 2463d833352aSMel Gorman vma->vm_flags &= ~VM_MAYSHARE; 2464d833352aSMel Gorman } 2465d833352aSMel Gorman 2466502717f4SChen, Kenneth W void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, 246704f2cbe3SMel Gorman unsigned long end, struct page *ref_page) 2468502717f4SChen, Kenneth W { 246924669e58SAneesh Kumar K.V struct mm_struct *mm; 247024669e58SAneesh Kumar K.V struct mmu_gather tlb; 247124669e58SAneesh Kumar K.V 247224669e58SAneesh Kumar K.V mm = vma->vm_mm; 247324669e58SAneesh Kumar K.V 24742b047252SLinus Torvalds tlb_gather_mmu(&tlb, mm, start, end); 247524669e58SAneesh Kumar K.V __unmap_hugepage_range(&tlb, vma, start, end, ref_page); 247624669e58SAneesh Kumar K.V tlb_finish_mmu(&tlb, start, end); 2477502717f4SChen, Kenneth W } 2478502717f4SChen, Kenneth W 247904f2cbe3SMel Gorman /* 248004f2cbe3SMel Gorman * This is called when the original mapper is failing to COW a MAP_PRIVATE 248104f2cbe3SMel Gorman * mappping it owns the reserve page for. The intention is to unmap the page 248204f2cbe3SMel Gorman * from other VMAs and let the children be SIGKILLed if they are faulting the 248304f2cbe3SMel Gorman * same region. 248404f2cbe3SMel Gorman */ 24852a4b3dedSHarvey Harrison static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, 24862a4b3dedSHarvey Harrison struct page *page, unsigned long address) 248704f2cbe3SMel Gorman { 24887526674dSAdam Litke struct hstate *h = hstate_vma(vma); 248904f2cbe3SMel Gorman struct vm_area_struct *iter_vma; 249004f2cbe3SMel Gorman struct address_space *mapping; 249104f2cbe3SMel Gorman pgoff_t pgoff; 249204f2cbe3SMel Gorman 249304f2cbe3SMel Gorman /* 249404f2cbe3SMel Gorman * vm_pgoff is in PAGE_SIZE units, hence the different calculation 249504f2cbe3SMel Gorman * from page cache lookup which is in HPAGE_SIZE units. 
249604f2cbe3SMel Gorman */ 24977526674dSAdam Litke address = address & huge_page_mask(h); 249836e4f20aSMichal Hocko pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + 249936e4f20aSMichal Hocko vma->vm_pgoff; 2500496ad9aaSAl Viro mapping = file_inode(vma->vm_file)->i_mapping; 250104f2cbe3SMel Gorman 25024eb2b1dcSMel Gorman /* 25034eb2b1dcSMel Gorman * Take the mapping lock for the duration of the table walk. As 25044eb2b1dcSMel Gorman * this mapping should be shared between all the VMAs, 25054eb2b1dcSMel Gorman * __unmap_hugepage_range() is called as the lock is already held 25064eb2b1dcSMel Gorman */ 25073d48ae45SPeter Zijlstra mutex_lock(&mapping->i_mmap_mutex); 25086b2dbba8SMichel Lespinasse vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) { 250904f2cbe3SMel Gorman /* Do not unmap the current VMA */ 251004f2cbe3SMel Gorman if (iter_vma == vma) 251104f2cbe3SMel Gorman continue; 251204f2cbe3SMel Gorman 251304f2cbe3SMel Gorman /* 251404f2cbe3SMel Gorman * Unmap the page from other VMAs without their own reserves. 251504f2cbe3SMel Gorman * They get marked to be SIGKILLed if they fault in these 251604f2cbe3SMel Gorman * areas. This is because a future no-page fault on this VMA 251704f2cbe3SMel Gorman * could insert a zeroed page instead of the data existing 251804f2cbe3SMel Gorman * from the time of fork. This would look like data corruption 251904f2cbe3SMel Gorman */ 252004f2cbe3SMel Gorman if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER)) 252124669e58SAneesh Kumar K.V unmap_hugepage_range(iter_vma, address, 252224669e58SAneesh Kumar K.V address + huge_page_size(h), page); 252304f2cbe3SMel Gorman } 25243d48ae45SPeter Zijlstra mutex_unlock(&mapping->i_mmap_mutex); 252504f2cbe3SMel Gorman 252604f2cbe3SMel Gorman return 1; 252704f2cbe3SMel Gorman } 252804f2cbe3SMel Gorman 25290fe6e20bSNaoya Horiguchi /* 25300fe6e20bSNaoya Horiguchi * Hugetlb_cow() should be called with page lock of the original hugepage held. 2531ef009b25SMichal Hocko * Called with hugetlb_instantiation_mutex held and pte_page locked so we 2532ef009b25SMichal Hocko * cannot race with other handlers or page migration. 2533ef009b25SMichal Hocko * Keep the pte_same checks anyway to make transition from the mutex easier. 
25340fe6e20bSNaoya Horiguchi */ 25351e8f889bSDavid Gibson static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma, 253604f2cbe3SMel Gorman unsigned long address, pte_t *ptep, pte_t pte, 253704f2cbe3SMel Gorman struct page *pagecache_page) 25381e8f889bSDavid Gibson { 2539a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 25401e8f889bSDavid Gibson struct page *old_page, *new_page; 254104f2cbe3SMel Gorman int outside_reserve = 0; 25422ec74c3eSSagi Grimberg unsigned long mmun_start; /* For mmu_notifiers */ 25432ec74c3eSSagi Grimberg unsigned long mmun_end; /* For mmu_notifiers */ 25441e8f889bSDavid Gibson 25451e8f889bSDavid Gibson old_page = pte_page(pte); 25461e8f889bSDavid Gibson 254704f2cbe3SMel Gorman retry_avoidcopy: 25481e8f889bSDavid Gibson /* If no-one else is actually using this page, avoid the copy 25491e8f889bSDavid Gibson * and just make the page writable */ 255037a2140dSJoonsoo Kim if (page_mapcount(old_page) == 1 && PageAnon(old_page)) { 25510fe6e20bSNaoya Horiguchi page_move_anon_rmap(old_page, vma, address); 25521e8f889bSDavid Gibson set_huge_ptep_writable(vma, address, ptep); 255383c54070SNick Piggin return 0; 25541e8f889bSDavid Gibson } 25551e8f889bSDavid Gibson 255604f2cbe3SMel Gorman /* 255704f2cbe3SMel Gorman * If the process that created a MAP_PRIVATE mapping is about to 255804f2cbe3SMel Gorman * perform a COW due to a shared page count, attempt to satisfy 255904f2cbe3SMel Gorman * the allocation without using the existing reserves. The pagecache 256004f2cbe3SMel Gorman * page is used to determine if the reserve at this address was 256104f2cbe3SMel Gorman * consumed or not. If reserves were used, a partial faulted mapping 256204f2cbe3SMel Gorman * at the time of fork() could consume its reserves on COW instead 256304f2cbe3SMel Gorman * of the full address range. 256404f2cbe3SMel Gorman */ 2565f83a275dSMel Gorman if (!(vma->vm_flags & VM_MAYSHARE) && 256604f2cbe3SMel Gorman is_vma_resv_set(vma, HPAGE_RESV_OWNER) && 256704f2cbe3SMel Gorman old_page != pagecache_page) 256804f2cbe3SMel Gorman outside_reserve = 1; 256904f2cbe3SMel Gorman 25701e8f889bSDavid Gibson page_cache_get(old_page); 2571b76c8cfbSLarry Woodman 2572b76c8cfbSLarry Woodman /* Drop page_table_lock as buddy allocator may be called */ 2573b76c8cfbSLarry Woodman spin_unlock(&mm->page_table_lock); 257404f2cbe3SMel Gorman new_page = alloc_huge_page(vma, address, outside_reserve); 25751e8f889bSDavid Gibson 25762fc39cecSAdam Litke if (IS_ERR(new_page)) { 257776dcee75SAneesh Kumar K.V long err = PTR_ERR(new_page); 25781e8f889bSDavid Gibson page_cache_release(old_page); 257904f2cbe3SMel Gorman 258004f2cbe3SMel Gorman /* 258104f2cbe3SMel Gorman * If a process owning a MAP_PRIVATE mapping fails to COW, 258204f2cbe3SMel Gorman * it is due to references held by a child and an insufficient 258304f2cbe3SMel Gorman * huge page pool. To guarantee the original mappers 258404f2cbe3SMel Gorman * reliability, unmap the page from child processes. The child 258504f2cbe3SMel Gorman * may get SIGKILLed if it later faults. 
258604f2cbe3SMel Gorman */ 258704f2cbe3SMel Gorman if (outside_reserve) { 258804f2cbe3SMel Gorman BUG_ON(huge_pte_none(pte)); 258904f2cbe3SMel Gorman if (unmap_ref_private(mm, vma, old_page, address)) { 259004f2cbe3SMel Gorman BUG_ON(huge_pte_none(pte)); 2591b76c8cfbSLarry Woodman spin_lock(&mm->page_table_lock); 2592a734bcc8SHillf Danton ptep = huge_pte_offset(mm, address & huge_page_mask(h)); 2593a734bcc8SHillf Danton if (likely(pte_same(huge_ptep_get(ptep), pte))) 259404f2cbe3SMel Gorman goto retry_avoidcopy; 2595a734bcc8SHillf Danton /* 2596a734bcc8SHillf Danton * race occurs while re-acquiring page_table_lock, and 2597a734bcc8SHillf Danton * our job is done. 2598a734bcc8SHillf Danton */ 2599a734bcc8SHillf Danton return 0; 260004f2cbe3SMel Gorman } 260104f2cbe3SMel Gorman WARN_ON_ONCE(1); 260204f2cbe3SMel Gorman } 260304f2cbe3SMel Gorman 2604b76c8cfbSLarry Woodman /* Caller expects lock to be held */ 2605b76c8cfbSLarry Woodman spin_lock(&mm->page_table_lock); 260676dcee75SAneesh Kumar K.V if (err == -ENOMEM) 260776dcee75SAneesh Kumar K.V return VM_FAULT_OOM; 260876dcee75SAneesh Kumar K.V else 260976dcee75SAneesh Kumar K.V return VM_FAULT_SIGBUS; 26101e8f889bSDavid Gibson } 26111e8f889bSDavid Gibson 26120fe6e20bSNaoya Horiguchi /* 26130fe6e20bSNaoya Horiguchi * When the original hugepage is shared one, it does not have 26140fe6e20bSNaoya Horiguchi * anon_vma prepared. 26150fe6e20bSNaoya Horiguchi */ 261644e2aa93SDean Nelson if (unlikely(anon_vma_prepare(vma))) { 2617ea4039a3SHillf Danton page_cache_release(new_page); 2618ea4039a3SHillf Danton page_cache_release(old_page); 261944e2aa93SDean Nelson /* Caller expects lock to be held */ 262044e2aa93SDean Nelson spin_lock(&mm->page_table_lock); 26210fe6e20bSNaoya Horiguchi return VM_FAULT_OOM; 262244e2aa93SDean Nelson } 26230fe6e20bSNaoya Horiguchi 262447ad8475SAndrea Arcangeli copy_user_huge_page(new_page, old_page, address, vma, 262547ad8475SAndrea Arcangeli pages_per_huge_page(h)); 26260ed361deSNick Piggin __SetPageUptodate(new_page); 26271e8f889bSDavid Gibson 26282ec74c3eSSagi Grimberg mmun_start = address & huge_page_mask(h); 26292ec74c3eSSagi Grimberg mmun_end = mmun_start + huge_page_size(h); 26302ec74c3eSSagi Grimberg mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 2631b76c8cfbSLarry Woodman /* 2632b76c8cfbSLarry Woodman * Retake the page_table_lock to check for racing updates 2633b76c8cfbSLarry Woodman * before the page tables are altered 2634b76c8cfbSLarry Woodman */ 2635b76c8cfbSLarry Woodman spin_lock(&mm->page_table_lock); 2636a5516438SAndi Kleen ptep = huge_pte_offset(mm, address & huge_page_mask(h)); 26377f2e9525SGerald Schaefer if (likely(pte_same(huge_ptep_get(ptep), pte))) { 26381e8f889bSDavid Gibson /* Break COW */ 26398fe627ecSGerald Schaefer huge_ptep_clear_flush(vma, address, ptep); 26401e8f889bSDavid Gibson set_huge_pte_at(mm, address, ptep, 26411e8f889bSDavid Gibson make_huge_pte(vma, new_page, 1)); 26420fe6e20bSNaoya Horiguchi page_remove_rmap(old_page); 2643cd67f0d2SNaoya Horiguchi hugepage_add_new_anon_rmap(new_page, vma, address); 26441e8f889bSDavid Gibson /* Make the old page be freed below */ 26451e8f889bSDavid Gibson new_page = old_page; 26461e8f889bSDavid Gibson } 26472ec74c3eSSagi Grimberg spin_unlock(&mm->page_table_lock); 26482ec74c3eSSagi Grimberg mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 26492ec74c3eSSagi Grimberg /* Caller expects lock to be held */ 26502ec74c3eSSagi Grimberg spin_lock(&mm->page_table_lock); 26511e8f889bSDavid Gibson page_cache_release(new_page); 
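/*
 * Reference accounting: the release just above drops either the unused
 * copy (when the pte changed while page_table_lock was dropped) or,
 * after the "new_page = old_page" swap, the reference the mapping used
 * to hold on the old page; the release below drops the extra reference
 * taken on old_page before the lock was released for the allocation
 * and copy.
 */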
26521e8f889bSDavid Gibson page_cache_release(old_page); 265383c54070SNick Piggin return 0; 26541e8f889bSDavid Gibson } 26551e8f889bSDavid Gibson 265604f2cbe3SMel Gorman /* Return the pagecache page at a given address within a VMA */ 2657a5516438SAndi Kleen static struct page *hugetlbfs_pagecache_page(struct hstate *h, 2658a5516438SAndi Kleen struct vm_area_struct *vma, unsigned long address) 265904f2cbe3SMel Gorman { 266004f2cbe3SMel Gorman struct address_space *mapping; 2661e7c4b0bfSAndy Whitcroft pgoff_t idx; 266204f2cbe3SMel Gorman 266304f2cbe3SMel Gorman mapping = vma->vm_file->f_mapping; 2664a5516438SAndi Kleen idx = vma_hugecache_offset(h, vma, address); 266504f2cbe3SMel Gorman 266604f2cbe3SMel Gorman return find_lock_page(mapping, idx); 266704f2cbe3SMel Gorman } 266804f2cbe3SMel Gorman 26693ae77f43SHugh Dickins /* 26703ae77f43SHugh Dickins * Return whether there is a pagecache page to back given address within VMA. 26713ae77f43SHugh Dickins * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page. 26723ae77f43SHugh Dickins */ 26733ae77f43SHugh Dickins static bool hugetlbfs_pagecache_present(struct hstate *h, 26742a15efc9SHugh Dickins struct vm_area_struct *vma, unsigned long address) 26752a15efc9SHugh Dickins { 26762a15efc9SHugh Dickins struct address_space *mapping; 26772a15efc9SHugh Dickins pgoff_t idx; 26782a15efc9SHugh Dickins struct page *page; 26792a15efc9SHugh Dickins 26802a15efc9SHugh Dickins mapping = vma->vm_file->f_mapping; 26812a15efc9SHugh Dickins idx = vma_hugecache_offset(h, vma, address); 26822a15efc9SHugh Dickins 26832a15efc9SHugh Dickins page = find_get_page(mapping, idx); 26842a15efc9SHugh Dickins if (page) 26852a15efc9SHugh Dickins put_page(page); 26862a15efc9SHugh Dickins return page != NULL; 26872a15efc9SHugh Dickins } 26882a15efc9SHugh Dickins 2689a1ed3ddaSRobert P. J. Day static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, 2690788c7df4SHugh Dickins unsigned long address, pte_t *ptep, unsigned int flags) 2691ac9b9c66SHugh Dickins { 2692a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 2693ac9b9c66SHugh Dickins int ret = VM_FAULT_SIGBUS; 2694409eb8c2SHillf Danton int anon_rmap = 0; 2695e7c4b0bfSAndy Whitcroft pgoff_t idx; 26964c887265SAdam Litke unsigned long size; 26974c887265SAdam Litke struct page *page; 26984c887265SAdam Litke struct address_space *mapping; 26991e8f889bSDavid Gibson pte_t new_pte; 27004c887265SAdam Litke 270104f2cbe3SMel Gorman /* 270204f2cbe3SMel Gorman * Currently, we are forced to kill the process in the event the 270304f2cbe3SMel Gorman * original mapper has unmapped pages from the child due to a failed 270425985edcSLucas De Marchi * COW. Warn that such a situation has occurred as it may not be obvious 270504f2cbe3SMel Gorman */ 270604f2cbe3SMel Gorman if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) { 2707ffb22af5SAndrew Morton pr_warning("PID %d killed due to inadequate hugepage pool\n", 270804f2cbe3SMel Gorman current->pid); 270904f2cbe3SMel Gorman return ret; 271004f2cbe3SMel Gorman } 271104f2cbe3SMel Gorman 27124c887265SAdam Litke mapping = vma->vm_file->f_mapping; 2713a5516438SAndi Kleen idx = vma_hugecache_offset(h, vma, address); 27144c887265SAdam Litke 27154c887265SAdam Litke /* 27164c887265SAdam Litke * Use page lock to guard against racing truncation 27174c887265SAdam Litke * before we get page_table_lock. 
27184c887265SAdam Litke */ 27196bda666aSChristoph Lameter retry: 27206bda666aSChristoph Lameter page = find_lock_page(mapping, idx); 27216bda666aSChristoph Lameter if (!page) { 2722a5516438SAndi Kleen size = i_size_read(mapping->host) >> huge_page_shift(h); 2723ebed4bfcSHugh Dickins if (idx >= size) 2724ebed4bfcSHugh Dickins goto out; 272504f2cbe3SMel Gorman page = alloc_huge_page(vma, address, 0); 27262fc39cecSAdam Litke if (IS_ERR(page)) { 272776dcee75SAneesh Kumar K.V ret = PTR_ERR(page); 272876dcee75SAneesh Kumar K.V if (ret == -ENOMEM) 272976dcee75SAneesh Kumar K.V ret = VM_FAULT_OOM; 273076dcee75SAneesh Kumar K.V else 273176dcee75SAneesh Kumar K.V ret = VM_FAULT_SIGBUS; 27326bda666aSChristoph Lameter goto out; 27336bda666aSChristoph Lameter } 273447ad8475SAndrea Arcangeli clear_huge_page(page, address, pages_per_huge_page(h)); 27350ed361deSNick Piggin __SetPageUptodate(page); 2736ac9b9c66SHugh Dickins 2737f83a275dSMel Gorman if (vma->vm_flags & VM_MAYSHARE) { 27386bda666aSChristoph Lameter int err; 273945c682a6SKen Chen struct inode *inode = mapping->host; 27406bda666aSChristoph Lameter 27416bda666aSChristoph Lameter err = add_to_page_cache(page, mapping, idx, GFP_KERNEL); 27426bda666aSChristoph Lameter if (err) { 27436bda666aSChristoph Lameter put_page(page); 27446bda666aSChristoph Lameter if (err == -EEXIST) 27456bda666aSChristoph Lameter goto retry; 27466bda666aSChristoph Lameter goto out; 27476bda666aSChristoph Lameter } 274845c682a6SKen Chen 274945c682a6SKen Chen spin_lock(&inode->i_lock); 2750a5516438SAndi Kleen inode->i_blocks += blocks_per_huge_page(h); 275145c682a6SKen Chen spin_unlock(&inode->i_lock); 275223be7468SMel Gorman } else { 27536bda666aSChristoph Lameter lock_page(page); 27540fe6e20bSNaoya Horiguchi if (unlikely(anon_vma_prepare(vma))) { 27550fe6e20bSNaoya Horiguchi ret = VM_FAULT_OOM; 27560fe6e20bSNaoya Horiguchi goto backout_unlocked; 275723be7468SMel Gorman } 2758409eb8c2SHillf Danton anon_rmap = 1; 27590fe6e20bSNaoya Horiguchi } 27600fe6e20bSNaoya Horiguchi } else { 276157303d80SAndy Whitcroft /* 2762998b4382SNaoya Horiguchi * If memory error occurs between mmap() and fault, some process 2763998b4382SNaoya Horiguchi * don't have hwpoisoned swap entry for errored virtual address. 2764998b4382SNaoya Horiguchi * So we need to block hugepage fault by PG_hwpoison bit check. 2765fd6a03edSNaoya Horiguchi */ 2766fd6a03edSNaoya Horiguchi if (unlikely(PageHWPoison(page))) { 2767aa50d3a7SAndi Kleen ret = VM_FAULT_HWPOISON | 2768972dc4deSAneesh Kumar K.V VM_FAULT_SET_HINDEX(hstate_index(h)); 2769fd6a03edSNaoya Horiguchi goto backout_unlocked; 27706bda666aSChristoph Lameter } 2771998b4382SNaoya Horiguchi } 27721e8f889bSDavid Gibson 277357303d80SAndy Whitcroft /* 277457303d80SAndy Whitcroft * If we are going to COW a private mapping later, we examine the 277557303d80SAndy Whitcroft * pending reservations for this page now. This will ensure that 277657303d80SAndy Whitcroft * any allocations necessary to record that reservation occur outside 277757303d80SAndy Whitcroft * the spinlock. 
277857303d80SAndy Whitcroft */ 2779788c7df4SHugh Dickins if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) 27802b26736cSAndy Whitcroft if (vma_needs_reservation(h, vma, address) < 0) { 27812b26736cSAndy Whitcroft ret = VM_FAULT_OOM; 27822b26736cSAndy Whitcroft goto backout_unlocked; 27832b26736cSAndy Whitcroft } 278457303d80SAndy Whitcroft 2785ac9b9c66SHugh Dickins spin_lock(&mm->page_table_lock); 2786a5516438SAndi Kleen size = i_size_read(mapping->host) >> huge_page_shift(h); 27874c887265SAdam Litke if (idx >= size) 27884c887265SAdam Litke goto backout; 27894c887265SAdam Litke 279083c54070SNick Piggin ret = 0; 27917f2e9525SGerald Schaefer if (!huge_pte_none(huge_ptep_get(ptep))) 27924c887265SAdam Litke goto backout; 27934c887265SAdam Litke 2794409eb8c2SHillf Danton if (anon_rmap) 2795409eb8c2SHillf Danton hugepage_add_new_anon_rmap(page, vma, address); 2796409eb8c2SHillf Danton else 2797409eb8c2SHillf Danton page_dup_rmap(page); 27981e8f889bSDavid Gibson new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE) 27991e8f889bSDavid Gibson && (vma->vm_flags & VM_SHARED))); 28001e8f889bSDavid Gibson set_huge_pte_at(mm, address, ptep, new_pte); 28011e8f889bSDavid Gibson 2802788c7df4SHugh Dickins if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { 28031e8f889bSDavid Gibson /* Optimization, do the COW without a second fault */ 280404f2cbe3SMel Gorman ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page); 28051e8f889bSDavid Gibson } 28061e8f889bSDavid Gibson 2807ac9b9c66SHugh Dickins spin_unlock(&mm->page_table_lock); 28084c887265SAdam Litke unlock_page(page); 28094c887265SAdam Litke out: 2810ac9b9c66SHugh Dickins return ret; 28114c887265SAdam Litke 28124c887265SAdam Litke backout: 28134c887265SAdam Litke spin_unlock(&mm->page_table_lock); 28142b26736cSAndy Whitcroft backout_unlocked: 28154c887265SAdam Litke unlock_page(page); 28164c887265SAdam Litke put_page(page); 28174c887265SAdam Litke goto out; 2818ac9b9c66SHugh Dickins } 2819ac9b9c66SHugh Dickins 282086e5216fSAdam Litke int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, 2821788c7df4SHugh Dickins unsigned long address, unsigned int flags) 282286e5216fSAdam Litke { 282386e5216fSAdam Litke pte_t *ptep; 282486e5216fSAdam Litke pte_t entry; 28251e8f889bSDavid Gibson int ret; 28260fe6e20bSNaoya Horiguchi struct page *page = NULL; 282757303d80SAndy Whitcroft struct page *pagecache_page = NULL; 28283935baa9SDavid Gibson static DEFINE_MUTEX(hugetlb_instantiation_mutex); 2829a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 283086e5216fSAdam Litke 28311e16a539SKAMEZAWA Hiroyuki address &= huge_page_mask(h); 28321e16a539SKAMEZAWA Hiroyuki 2833fd6a03edSNaoya Horiguchi ptep = huge_pte_offset(mm, address); 2834fd6a03edSNaoya Horiguchi if (ptep) { 2835fd6a03edSNaoya Horiguchi entry = huge_ptep_get(ptep); 2836290408d4SNaoya Horiguchi if (unlikely(is_hugetlb_entry_migration(entry))) { 283730dad309SNaoya Horiguchi migration_entry_wait_huge(mm, ptep); 2838290408d4SNaoya Horiguchi return 0; 2839290408d4SNaoya Horiguchi } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) 2840aa50d3a7SAndi Kleen return VM_FAULT_HWPOISON_LARGE | 2841972dc4deSAneesh Kumar K.V VM_FAULT_SET_HINDEX(hstate_index(h)); 2842fd6a03edSNaoya Horiguchi } 2843fd6a03edSNaoya Horiguchi 2844a5516438SAndi Kleen ptep = huge_pte_alloc(mm, address, huge_page_size(h)); 284586e5216fSAdam Litke if (!ptep) 284686e5216fSAdam Litke return VM_FAULT_OOM; 284786e5216fSAdam Litke 28483935baa9SDavid Gibson /* 28493935baa9SDavid Gibson * Serialize 
hugepage allocation and instantiation, so that we don't 28503935baa9SDavid Gibson * get spurious allocation failures if two CPUs race to instantiate 28513935baa9SDavid Gibson * the same page in the page cache. 28523935baa9SDavid Gibson */ 28533935baa9SDavid Gibson mutex_lock(&hugetlb_instantiation_mutex); 28547f2e9525SGerald Schaefer entry = huge_ptep_get(ptep); 28557f2e9525SGerald Schaefer if (huge_pte_none(entry)) { 2856788c7df4SHugh Dickins ret = hugetlb_no_page(mm, vma, address, ptep, flags); 2857b4d1d99fSDavid Gibson goto out_mutex; 28583935baa9SDavid Gibson } 285986e5216fSAdam Litke 286083c54070SNick Piggin ret = 0; 28611e8f889bSDavid Gibson 286257303d80SAndy Whitcroft /* 286357303d80SAndy Whitcroft * If we are going to COW the mapping later, we examine the pending 286457303d80SAndy Whitcroft * reservations for this page now. This will ensure that any 286557303d80SAndy Whitcroft * allocations necessary to record that reservation occur outside the 286657303d80SAndy Whitcroft * spinlock. For private mappings, we also lookup the pagecache 286757303d80SAndy Whitcroft * page now as it is used to determine if a reservation has been 286857303d80SAndy Whitcroft * consumed. 286957303d80SAndy Whitcroft */ 2870106c992aSGerald Schaefer if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) { 28712b26736cSAndy Whitcroft if (vma_needs_reservation(h, vma, address) < 0) { 28722b26736cSAndy Whitcroft ret = VM_FAULT_OOM; 2873b4d1d99fSDavid Gibson goto out_mutex; 28742b26736cSAndy Whitcroft } 287557303d80SAndy Whitcroft 2876f83a275dSMel Gorman if (!(vma->vm_flags & VM_MAYSHARE)) 287757303d80SAndy Whitcroft pagecache_page = hugetlbfs_pagecache_page(h, 287857303d80SAndy Whitcroft vma, address); 287957303d80SAndy Whitcroft } 288057303d80SAndy Whitcroft 288156c9cfb1SNaoya Horiguchi /* 288256c9cfb1SNaoya Horiguchi * hugetlb_cow() requires page locks of pte_page(entry) and 288356c9cfb1SNaoya Horiguchi * pagecache_page, so here we need take the former one 288456c9cfb1SNaoya Horiguchi * when page != pagecache_page or !pagecache_page. 288556c9cfb1SNaoya Horiguchi * Note that locking order is always pagecache_page -> page, 288656c9cfb1SNaoya Horiguchi * so no worry about deadlock. 
288756c9cfb1SNaoya Horiguchi */ 28880fe6e20bSNaoya Horiguchi page = pte_page(entry); 288966aebce7SChris Metcalf get_page(page); 289056c9cfb1SNaoya Horiguchi if (page != pagecache_page) 28910fe6e20bSNaoya Horiguchi lock_page(page); 28920fe6e20bSNaoya Horiguchi 28931e8f889bSDavid Gibson spin_lock(&mm->page_table_lock); 28941e8f889bSDavid Gibson /* Check for a racing update before calling hugetlb_cow */ 2895b4d1d99fSDavid Gibson if (unlikely(!pte_same(entry, huge_ptep_get(ptep)))) 2896b4d1d99fSDavid Gibson goto out_page_table_lock; 2897b4d1d99fSDavid Gibson 2898b4d1d99fSDavid Gibson 2899788c7df4SHugh Dickins if (flags & FAULT_FLAG_WRITE) { 2900106c992aSGerald Schaefer if (!huge_pte_write(entry)) { 290157303d80SAndy Whitcroft ret = hugetlb_cow(mm, vma, address, ptep, entry, 290257303d80SAndy Whitcroft pagecache_page); 2903b4d1d99fSDavid Gibson goto out_page_table_lock; 2904b4d1d99fSDavid Gibson } 2905106c992aSGerald Schaefer entry = huge_pte_mkdirty(entry); 2906b4d1d99fSDavid Gibson } 2907b4d1d99fSDavid Gibson entry = pte_mkyoung(entry); 2908788c7df4SHugh Dickins if (huge_ptep_set_access_flags(vma, address, ptep, entry, 2909788c7df4SHugh Dickins flags & FAULT_FLAG_WRITE)) 29104b3073e1SRussell King update_mmu_cache(vma, address, ptep); 2911b4d1d99fSDavid Gibson 2912b4d1d99fSDavid Gibson out_page_table_lock: 29131e8f889bSDavid Gibson spin_unlock(&mm->page_table_lock); 291457303d80SAndy Whitcroft 291557303d80SAndy Whitcroft if (pagecache_page) { 291657303d80SAndy Whitcroft unlock_page(pagecache_page); 291757303d80SAndy Whitcroft put_page(pagecache_page); 291857303d80SAndy Whitcroft } 29191f64d69cSDean Nelson if (page != pagecache_page) 292056c9cfb1SNaoya Horiguchi unlock_page(page); 292166aebce7SChris Metcalf put_page(page); 292257303d80SAndy Whitcroft 2923b4d1d99fSDavid Gibson out_mutex: 29243935baa9SDavid Gibson mutex_unlock(&hugetlb_instantiation_mutex); 29251e8f889bSDavid Gibson 29261e8f889bSDavid Gibson return ret; 292786e5216fSAdam Litke } 292886e5216fSAdam Litke 292928a35716SMichel Lespinasse long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, 293063551ae0SDavid Gibson struct page **pages, struct vm_area_struct **vmas, 293128a35716SMichel Lespinasse unsigned long *position, unsigned long *nr_pages, 293228a35716SMichel Lespinasse long i, unsigned int flags) 293363551ae0SDavid Gibson { 2934d5d4b0aaSChen, Kenneth W unsigned long pfn_offset; 2935d5d4b0aaSChen, Kenneth W unsigned long vaddr = *position; 293628a35716SMichel Lespinasse unsigned long remainder = *nr_pages; 2937a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 293863551ae0SDavid Gibson 29391c59827dSHugh Dickins spin_lock(&mm->page_table_lock); 294063551ae0SDavid Gibson while (vaddr < vma->vm_end && remainder) { 294163551ae0SDavid Gibson pte_t *pte; 29422a15efc9SHugh Dickins int absent; 294363551ae0SDavid Gibson struct page *page; 294463551ae0SDavid Gibson 29454c887265SAdam Litke /* 29464c887265SAdam Litke * Some archs (sparc64, sh*) have multiple pte_ts to 29472a15efc9SHugh Dickins * each hugepage. We have to make sure we get the 29484c887265SAdam Litke * first, for the page indexing below to work. 
29494c887265SAdam Litke */ 2950a5516438SAndi Kleen pte = huge_pte_offset(mm, vaddr & huge_page_mask(h)); 29512a15efc9SHugh Dickins absent = !pte || huge_pte_none(huge_ptep_get(pte)); 295263551ae0SDavid Gibson 29532a15efc9SHugh Dickins /* 29542a15efc9SHugh Dickins * When coredumping, it suits get_dump_page if we just return 29553ae77f43SHugh Dickins * an error where there's an empty slot with no huge pagecache 29563ae77f43SHugh Dickins * to back it. This way, we avoid allocating a hugepage, and 29573ae77f43SHugh Dickins * the sparse dumpfile avoids allocating disk blocks, but its 29583ae77f43SHugh Dickins * huge holes still show up with zeroes where they need to be. 29592a15efc9SHugh Dickins */ 29603ae77f43SHugh Dickins if (absent && (flags & FOLL_DUMP) && 29613ae77f43SHugh Dickins !hugetlbfs_pagecache_present(h, vma, vaddr)) { 29622a15efc9SHugh Dickins remainder = 0; 29632a15efc9SHugh Dickins break; 29642a15efc9SHugh Dickins } 29652a15efc9SHugh Dickins 29669cc3a5bdSNaoya Horiguchi /* 29679cc3a5bdSNaoya Horiguchi * We need call hugetlb_fault for both hugepages under migration 29689cc3a5bdSNaoya Horiguchi * (in which case hugetlb_fault waits for the migration,) and 29699cc3a5bdSNaoya Horiguchi * hwpoisoned hugepages (in which case we need to prevent the 29709cc3a5bdSNaoya Horiguchi * caller from accessing to them.) In order to do this, we use 29719cc3a5bdSNaoya Horiguchi * here is_swap_pte instead of is_hugetlb_entry_migration and 29729cc3a5bdSNaoya Horiguchi * is_hugetlb_entry_hwpoisoned. This is because it simply covers 29739cc3a5bdSNaoya Horiguchi * both cases, and because we can't follow correct pages 29749cc3a5bdSNaoya Horiguchi * directly from any kind of swap entries. 29759cc3a5bdSNaoya Horiguchi */ 29769cc3a5bdSNaoya Horiguchi if (absent || is_swap_pte(huge_ptep_get(pte)) || 2977106c992aSGerald Schaefer ((flags & FOLL_WRITE) && 2978106c992aSGerald Schaefer !huge_pte_write(huge_ptep_get(pte)))) { 29794c887265SAdam Litke int ret; 29804c887265SAdam Litke 29814c887265SAdam Litke spin_unlock(&mm->page_table_lock); 29822a15efc9SHugh Dickins ret = hugetlb_fault(mm, vma, vaddr, 29832a15efc9SHugh Dickins (flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0); 29844c887265SAdam Litke spin_lock(&mm->page_table_lock); 2985a89182c7SAdam Litke if (!(ret & VM_FAULT_ERROR)) 29864c887265SAdam Litke continue; 29874c887265SAdam Litke 29881c59827dSHugh Dickins remainder = 0; 29891c59827dSHugh Dickins break; 29901c59827dSHugh Dickins } 299163551ae0SDavid Gibson 2992a5516438SAndi Kleen pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT; 29937f2e9525SGerald Schaefer page = pte_page(huge_ptep_get(pte)); 2994d5d4b0aaSChen, Kenneth W same_page: 2995d6692183SChen, Kenneth W if (pages) { 299669d177c2SAndy Whitcroft pages[i] = mem_map_offset(page, pfn_offset); 29974b2e38adSKOSAKI Motohiro get_page(pages[i]); 2998d6692183SChen, Kenneth W } 299963551ae0SDavid Gibson 300063551ae0SDavid Gibson if (vmas) 300163551ae0SDavid Gibson vmas[i] = vma; 300263551ae0SDavid Gibson 300363551ae0SDavid Gibson vaddr += PAGE_SIZE; 3004d5d4b0aaSChen, Kenneth W ++pfn_offset; 300563551ae0SDavid Gibson --remainder; 300663551ae0SDavid Gibson ++i; 3007d5d4b0aaSChen, Kenneth W if (vaddr < vma->vm_end && remainder && 3008a5516438SAndi Kleen pfn_offset < pages_per_huge_page(h)) { 3009d5d4b0aaSChen, Kenneth W /* 3010d5d4b0aaSChen, Kenneth W * We use pfn_offset to avoid touching the pageframes 3011d5d4b0aaSChen, Kenneth W * of this compound page. 
3012d5d4b0aaSChen, Kenneth W */ 3013d5d4b0aaSChen, Kenneth W goto same_page; 3014d5d4b0aaSChen, Kenneth W } 301563551ae0SDavid Gibson } 30161c59827dSHugh Dickins spin_unlock(&mm->page_table_lock); 301728a35716SMichel Lespinasse *nr_pages = remainder; 301863551ae0SDavid Gibson *position = vaddr; 301963551ae0SDavid Gibson 30202a15efc9SHugh Dickins return i ? i : -EFAULT; 302163551ae0SDavid Gibson } 30228f860591SZhang, Yanmin 30237da4d641SPeter Zijlstra unsigned long hugetlb_change_protection(struct vm_area_struct *vma, 30248f860591SZhang, Yanmin unsigned long address, unsigned long end, pgprot_t newprot) 30258f860591SZhang, Yanmin { 30268f860591SZhang, Yanmin struct mm_struct *mm = vma->vm_mm; 30278f860591SZhang, Yanmin unsigned long start = address; 30288f860591SZhang, Yanmin pte_t *ptep; 30298f860591SZhang, Yanmin pte_t pte; 3030a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 30317da4d641SPeter Zijlstra unsigned long pages = 0; 30328f860591SZhang, Yanmin 30338f860591SZhang, Yanmin BUG_ON(address >= end); 30348f860591SZhang, Yanmin flush_cache_range(vma, address, end); 30358f860591SZhang, Yanmin 30363d48ae45SPeter Zijlstra mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex); 30378f860591SZhang, Yanmin spin_lock(&mm->page_table_lock); 3038a5516438SAndi Kleen for (; address < end; address += huge_page_size(h)) { 30398f860591SZhang, Yanmin ptep = huge_pte_offset(mm, address); 30408f860591SZhang, Yanmin if (!ptep) 30418f860591SZhang, Yanmin continue; 30427da4d641SPeter Zijlstra if (huge_pmd_unshare(mm, &address, ptep)) { 30437da4d641SPeter Zijlstra pages++; 304439dde65cSChen, Kenneth W continue; 30457da4d641SPeter Zijlstra } 30467f2e9525SGerald Schaefer if (!huge_pte_none(huge_ptep_get(ptep))) { 30478f860591SZhang, Yanmin pte = huge_ptep_get_and_clear(mm, address, ptep); 3048106c992aSGerald Schaefer pte = pte_mkhuge(huge_pte_modify(pte, newprot)); 3049be7517d6STony Lu pte = arch_make_huge_pte(pte, vma, NULL, 0); 30508f860591SZhang, Yanmin set_huge_pte_at(mm, address, ptep, pte); 30517da4d641SPeter Zijlstra pages++; 30528f860591SZhang, Yanmin } 30538f860591SZhang, Yanmin } 30548f860591SZhang, Yanmin spin_unlock(&mm->page_table_lock); 3055d833352aSMel Gorman /* 3056d833352aSMel Gorman * Must flush TLB before releasing i_mmap_mutex: x86's huge_pmd_unshare 3057d833352aSMel Gorman * may have cleared our pud entry and done put_page on the page table: 3058d833352aSMel Gorman * once we release i_mmap_mutex, another task can do the final put_page 3059d833352aSMel Gorman * and that page table be reused and filled with junk. 3060d833352aSMel Gorman */ 30618f860591SZhang, Yanmin flush_tlb_range(vma, start, end); 3062d833352aSMel Gorman mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex); 30637da4d641SPeter Zijlstra 30647da4d641SPeter Zijlstra return pages << h->order; 30658f860591SZhang, Yanmin } 30668f860591SZhang, Yanmin 3067a1e78772SMel Gorman int hugetlb_reserve_pages(struct inode *inode, 3068a1e78772SMel Gorman long from, long to, 30695a6fe125SMel Gorman struct vm_area_struct *vma, 3070ca16d140SKOSAKI Motohiro vm_flags_t vm_flags) 3071e4e574b7SAdam Litke { 307217c9d12eSMel Gorman long ret, chg; 3073a5516438SAndi Kleen struct hstate *h = hstate_inode(inode); 307490481622SDavid Gibson struct hugepage_subpool *spool = subpool_inode(inode); 3075e4e574b7SAdam Litke 3076a1e78772SMel Gorman /* 307717c9d12eSMel Gorman * Only apply hugepage reservation if asked. 
At fault time, an 307817c9d12eSMel Gorman * attempt will be made for VM_NORESERVE to allocate a page 307990481622SDavid Gibson * without using reserves 308017c9d12eSMel Gorman */ 3081ca16d140SKOSAKI Motohiro if (vm_flags & VM_NORESERVE) 308217c9d12eSMel Gorman return 0; 308317c9d12eSMel Gorman 308417c9d12eSMel Gorman /* 3085a1e78772SMel Gorman * Shared mappings base their reservation on the number of pages that 3086a1e78772SMel Gorman * are already allocated on behalf of the file. Private mappings need 3087a1e78772SMel Gorman * to reserve the full area even if read-only as mprotect() may be 3088a1e78772SMel Gorman * called to make the mapping read-write. Assume !vma is a shm mapping 3089a1e78772SMel Gorman */ 3090f83a275dSMel Gorman if (!vma || vma->vm_flags & VM_MAYSHARE) 3091e4e574b7SAdam Litke chg = region_chg(&inode->i_mapping->private_list, from, to); 30925a6fe125SMel Gorman else { 30935a6fe125SMel Gorman struct resv_map *resv_map = resv_map_alloc(); 30945a6fe125SMel Gorman if (!resv_map) 30955a6fe125SMel Gorman return -ENOMEM; 30965a6fe125SMel Gorman 309717c9d12eSMel Gorman chg = to - from; 309817c9d12eSMel Gorman 30995a6fe125SMel Gorman set_vma_resv_map(vma, resv_map); 31005a6fe125SMel Gorman set_vma_resv_flags(vma, HPAGE_RESV_OWNER); 31015a6fe125SMel Gorman } 31025a6fe125SMel Gorman 3103c50ac050SDave Hansen if (chg < 0) { 3104c50ac050SDave Hansen ret = chg; 3105c50ac050SDave Hansen goto out_err; 3106c50ac050SDave Hansen } 310717c9d12eSMel Gorman 310890481622SDavid Gibson /* There must be enough pages in the subpool for the mapping */ 3109c50ac050SDave Hansen if (hugepage_subpool_get_pages(spool, chg)) { 3110c50ac050SDave Hansen ret = -ENOSPC; 3111c50ac050SDave Hansen goto out_err; 3112c50ac050SDave Hansen } 311317c9d12eSMel Gorman 311417c9d12eSMel Gorman /* 311517c9d12eSMel Gorman * Check enough hugepages are available for the reservation. 311690481622SDavid Gibson * Hand the pages back to the subpool if there are not 311717c9d12eSMel Gorman */ 311817c9d12eSMel Gorman ret = hugetlb_acct_memory(h, chg); 311917c9d12eSMel Gorman if (ret < 0) { 312090481622SDavid Gibson hugepage_subpool_put_pages(spool, chg); 3121c50ac050SDave Hansen goto out_err; 312217c9d12eSMel Gorman } 312317c9d12eSMel Gorman 312417c9d12eSMel Gorman /* 312517c9d12eSMel Gorman * Account for the reservations made. Shared mappings record regions 312617c9d12eSMel Gorman * that have reservations as they are shared by multiple VMAs. 312717c9d12eSMel Gorman * When the last VMA disappears, the region map says how much 312817c9d12eSMel Gorman * the reservation was and the page cache tells how much of 312917c9d12eSMel Gorman * the reservation was consumed. Private mappings are per-VMA and 313017c9d12eSMel Gorman * only the consumed reservations are tracked. When the VMA 313117c9d12eSMel Gorman * disappears, the original reservation is the VMA size and the 313217c9d12eSMel Gorman * consumed reservations are stored in the map. 
Hence, nothing 313317c9d12eSMel Gorman * else has to be done for private mappings here 313417c9d12eSMel Gorman */ 3135f83a275dSMel Gorman if (!vma || vma->vm_flags & VM_MAYSHARE) 313617c9d12eSMel Gorman region_add(&inode->i_mapping->private_list, from, to); 3137a43a8c39SChen, Kenneth W return 0; 3138c50ac050SDave Hansen out_err: 31394523e145SDave Hansen if (vma) 3140c50ac050SDave Hansen resv_map_put(vma); 3141c50ac050SDave Hansen return ret; 3142a43a8c39SChen, Kenneth W } 3143a43a8c39SChen, Kenneth W 3144a43a8c39SChen, Kenneth W void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed) 3145a43a8c39SChen, Kenneth W { 3146a5516438SAndi Kleen struct hstate *h = hstate_inode(inode); 3147a43a8c39SChen, Kenneth W long chg = region_truncate(&inode->i_mapping->private_list, offset); 314890481622SDavid Gibson struct hugepage_subpool *spool = subpool_inode(inode); 314945c682a6SKen Chen 315045c682a6SKen Chen spin_lock(&inode->i_lock); 3151e4c6f8beSEric Sandeen inode->i_blocks -= (blocks_per_huge_page(h) * freed); 315245c682a6SKen Chen spin_unlock(&inode->i_lock); 315345c682a6SKen Chen 315490481622SDavid Gibson hugepage_subpool_put_pages(spool, (chg - freed)); 3155a5516438SAndi Kleen hugetlb_acct_memory(h, -(chg - freed)); 3156a43a8c39SChen, Kenneth W } 315793f70f90SNaoya Horiguchi 31583212b535SSteve Capper #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE 31593212b535SSteve Capper static unsigned long page_table_shareable(struct vm_area_struct *svma, 31603212b535SSteve Capper struct vm_area_struct *vma, 31613212b535SSteve Capper unsigned long addr, pgoff_t idx) 31623212b535SSteve Capper { 31633212b535SSteve Capper unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) + 31643212b535SSteve Capper svma->vm_start; 31653212b535SSteve Capper unsigned long sbase = saddr & PUD_MASK; 31663212b535SSteve Capper unsigned long s_end = sbase + PUD_SIZE; 31673212b535SSteve Capper 31683212b535SSteve Capper /* Allow segments to share if only one is marked locked */ 31693212b535SSteve Capper unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED; 31703212b535SSteve Capper unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED; 31713212b535SSteve Capper 31723212b535SSteve Capper /* 31733212b535SSteve Capper * match the virtual addresses, permission and the alignment of the 31743212b535SSteve Capper * page table page. 31753212b535SSteve Capper */ 31763212b535SSteve Capper if (pmd_index(addr) != pmd_index(saddr) || 31773212b535SSteve Capper vm_flags != svm_flags || 31783212b535SSteve Capper sbase < svma->vm_start || svma->vm_end < s_end) 31793212b535SSteve Capper return 0; 31803212b535SSteve Capper 31813212b535SSteve Capper return saddr; 31823212b535SSteve Capper } 31833212b535SSteve Capper 31843212b535SSteve Capper static int vma_shareable(struct vm_area_struct *vma, unsigned long addr) 31853212b535SSteve Capper { 31863212b535SSteve Capper unsigned long base = addr & PUD_MASK; 31873212b535SSteve Capper unsigned long end = base + PUD_SIZE; 31883212b535SSteve Capper 31893212b535SSteve Capper /* 31903212b535SSteve Capper * check on proper vm_flags and page table alignment 31913212b535SSteve Capper */ 31923212b535SSteve Capper if (vma->vm_flags & VM_MAYSHARE && 31933212b535SSteve Capper vma->vm_start <= base && end <= vma->vm_end) 31943212b535SSteve Capper return 1; 31953212b535SSteve Capper return 0; 31963212b535SSteve Capper } 31973212b535SSteve Capper 31983212b535SSteve Capper /* 31993212b535SSteve Capper * Search for a shareable pmd page for hugetlb. 
In any case calls pmd_alloc() 32003212b535SSteve Capper * and returns the corresponding pte. While this is not necessary for the 32013212b535SSteve Capper * !shared pmd case because we can allocate the pmd later as well, it makes the 32023212b535SSteve Capper * code much cleaner. pmd allocation is essential for the shared case because 32033212b535SSteve Capper * pud has to be populated inside the same i_mmap_mutex section - otherwise 32043212b535SSteve Capper * racing tasks could either miss the sharing (see huge_pte_offset) or select a 32053212b535SSteve Capper * bad pmd for sharing. 32063212b535SSteve Capper */ 32073212b535SSteve Capper pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) 32083212b535SSteve Capper { 32093212b535SSteve Capper struct vm_area_struct *vma = find_vma(mm, addr); 32103212b535SSteve Capper struct address_space *mapping = vma->vm_file->f_mapping; 32113212b535SSteve Capper pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + 32123212b535SSteve Capper vma->vm_pgoff; 32133212b535SSteve Capper struct vm_area_struct *svma; 32143212b535SSteve Capper unsigned long saddr; 32153212b535SSteve Capper pte_t *spte = NULL; 32163212b535SSteve Capper pte_t *pte; 32173212b535SSteve Capper 32183212b535SSteve Capper if (!vma_shareable(vma, addr)) 32193212b535SSteve Capper return (pte_t *)pmd_alloc(mm, pud, addr); 32203212b535SSteve Capper 32213212b535SSteve Capper mutex_lock(&mapping->i_mmap_mutex); 32223212b535SSteve Capper vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) { 32233212b535SSteve Capper if (svma == vma) 32243212b535SSteve Capper continue; 32253212b535SSteve Capper 32263212b535SSteve Capper saddr = page_table_shareable(svma, vma, addr, idx); 32273212b535SSteve Capper if (saddr) { 32283212b535SSteve Capper spte = huge_pte_offset(svma->vm_mm, saddr); 32293212b535SSteve Capper if (spte) { 32303212b535SSteve Capper get_page(virt_to_page(spte)); 32313212b535SSteve Capper break; 32323212b535SSteve Capper } 32333212b535SSteve Capper } 32343212b535SSteve Capper } 32353212b535SSteve Capper 32363212b535SSteve Capper if (!spte) 32373212b535SSteve Capper goto out; 32383212b535SSteve Capper 32393212b535SSteve Capper spin_lock(&mm->page_table_lock); 32403212b535SSteve Capper if (pud_none(*pud)) 32413212b535SSteve Capper pud_populate(mm, pud, 32423212b535SSteve Capper (pmd_t *)((unsigned long)spte & PAGE_MASK)); 32433212b535SSteve Capper else 32443212b535SSteve Capper put_page(virt_to_page(spte)); 32453212b535SSteve Capper spin_unlock(&mm->page_table_lock); 32463212b535SSteve Capper out: 32473212b535SSteve Capper pte = (pte_t *)pmd_alloc(mm, pud, addr); 32483212b535SSteve Capper mutex_unlock(&mapping->i_mmap_mutex); 32493212b535SSteve Capper return pte; 32503212b535SSteve Capper } 32513212b535SSteve Capper 32523212b535SSteve Capper /* 32533212b535SSteve Capper * unmap huge page backed by shared pte. 32543212b535SSteve Capper * 32553212b535SSteve Capper * Hugetlb pte page is ref counted at the time of mapping. If pte is shared 32563212b535SSteve Capper * indicated by page_count > 1, unmap is achieved by clearing pud and 32573212b535SSteve Capper * decrementing the ref count. If count == 1, the pte page is not shared. 32583212b535SSteve Capper * 32593212b535SSteve Capper * called with vma->vm_mm->page_table_lock held. 
32603212b535SSteve Capper * 32613212b535SSteve Capper * returns: 1 successfully unmapped a shared pte page 32623212b535SSteve Capper * 0 the underlying pte page is not shared, or it is the last user 32633212b535SSteve Capper */ 32643212b535SSteve Capper int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) 32653212b535SSteve Capper { 32663212b535SSteve Capper pgd_t *pgd = pgd_offset(mm, *addr); 32673212b535SSteve Capper pud_t *pud = pud_offset(pgd, *addr); 32683212b535SSteve Capper 32693212b535SSteve Capper BUG_ON(page_count(virt_to_page(ptep)) == 0); 32703212b535SSteve Capper if (page_count(virt_to_page(ptep)) == 1) 32713212b535SSteve Capper return 0; 32723212b535SSteve Capper 32733212b535SSteve Capper pud_clear(pud); 32743212b535SSteve Capper put_page(virt_to_page(ptep)); 32753212b535SSteve Capper *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE; 32763212b535SSteve Capper return 1; 32773212b535SSteve Capper } 32789e5fc74cSSteve Capper #define want_pmd_share() (1) 32799e5fc74cSSteve Capper #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */ 32809e5fc74cSSteve Capper pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) 32819e5fc74cSSteve Capper { 32829e5fc74cSSteve Capper return NULL; 32839e5fc74cSSteve Capper } 32849e5fc74cSSteve Capper #define want_pmd_share() (0) 32853212b535SSteve Capper #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */ 32863212b535SSteve Capper 32879e5fc74cSSteve Capper #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB 32889e5fc74cSSteve Capper pte_t *huge_pte_alloc(struct mm_struct *mm, 32899e5fc74cSSteve Capper unsigned long addr, unsigned long sz) 32909e5fc74cSSteve Capper { 32919e5fc74cSSteve Capper pgd_t *pgd; 32929e5fc74cSSteve Capper pud_t *pud; 32939e5fc74cSSteve Capper pte_t *pte = NULL; 32949e5fc74cSSteve Capper 32959e5fc74cSSteve Capper pgd = pgd_offset(mm, addr); 32969e5fc74cSSteve Capper pud = pud_alloc(mm, pgd, addr); 32979e5fc74cSSteve Capper if (pud) { 32989e5fc74cSSteve Capper if (sz == PUD_SIZE) { 32999e5fc74cSSteve Capper pte = (pte_t *)pud; 33009e5fc74cSSteve Capper } else { 33019e5fc74cSSteve Capper BUG_ON(sz != PMD_SIZE); 33029e5fc74cSSteve Capper if (want_pmd_share() && pud_none(*pud)) 33039e5fc74cSSteve Capper pte = huge_pmd_share(mm, addr, pud); 33049e5fc74cSSteve Capper else 33059e5fc74cSSteve Capper pte = (pte_t *)pmd_alloc(mm, pud, addr); 33069e5fc74cSSteve Capper } 33079e5fc74cSSteve Capper } 33089e5fc74cSSteve Capper BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte)); 33099e5fc74cSSteve Capper 33109e5fc74cSSteve Capper return pte; 33119e5fc74cSSteve Capper } 33129e5fc74cSSteve Capper 33139e5fc74cSSteve Capper pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) 33149e5fc74cSSteve Capper { 33159e5fc74cSSteve Capper pgd_t *pgd; 33169e5fc74cSSteve Capper pud_t *pud; 33179e5fc74cSSteve Capper pmd_t *pmd = NULL; 33189e5fc74cSSteve Capper 33199e5fc74cSSteve Capper pgd = pgd_offset(mm, addr); 33209e5fc74cSSteve Capper if (pgd_present(*pgd)) { 33219e5fc74cSSteve Capper pud = pud_offset(pgd, addr); 33229e5fc74cSSteve Capper if (pud_present(*pud)) { 33239e5fc74cSSteve Capper if (pud_huge(*pud)) 33249e5fc74cSSteve Capper return (pte_t *)pud; 33259e5fc74cSSteve Capper pmd = pmd_offset(pud, addr); 33269e5fc74cSSteve Capper } 33279e5fc74cSSteve Capper } 33289e5fc74cSSteve Capper return (pte_t *) pmd; 33299e5fc74cSSteve Capper } 33309e5fc74cSSteve Capper 33319e5fc74cSSteve Capper struct page * 33329e5fc74cSSteve Capper follow_huge_pmd(struct mm_struct *mm, unsigned long address, 33339e5fc74cSSteve Capper 
pmd_t *pmd, int write) 33349e5fc74cSSteve Capper { 33359e5fc74cSSteve Capper struct page *page; 33369e5fc74cSSteve Capper 33379e5fc74cSSteve Capper page = pte_page(*(pte_t *)pmd); 33389e5fc74cSSteve Capper if (page) 33399e5fc74cSSteve Capper page += ((address & ~PMD_MASK) >> PAGE_SHIFT); 33409e5fc74cSSteve Capper return page; 33419e5fc74cSSteve Capper } 33429e5fc74cSSteve Capper 33439e5fc74cSSteve Capper struct page * 33449e5fc74cSSteve Capper follow_huge_pud(struct mm_struct *mm, unsigned long address, 33459e5fc74cSSteve Capper pud_t *pud, int write) 33469e5fc74cSSteve Capper { 33479e5fc74cSSteve Capper struct page *page; 33489e5fc74cSSteve Capper 33499e5fc74cSSteve Capper page = pte_page(*(pte_t *)pud); 33509e5fc74cSSteve Capper if (page) 33519e5fc74cSSteve Capper page += ((address & ~PUD_MASK) >> PAGE_SHIFT); 33529e5fc74cSSteve Capper return page; 33539e5fc74cSSteve Capper } 33549e5fc74cSSteve Capper 33559e5fc74cSSteve Capper #else /* !CONFIG_ARCH_WANT_GENERAL_HUGETLB */ 33569e5fc74cSSteve Capper 33579e5fc74cSSteve Capper /* Can be overriden by architectures */ 33589e5fc74cSSteve Capper __attribute__((weak)) struct page * 33599e5fc74cSSteve Capper follow_huge_pud(struct mm_struct *mm, unsigned long address, 33609e5fc74cSSteve Capper pud_t *pud, int write) 33619e5fc74cSSteve Capper { 33629e5fc74cSSteve Capper BUG(); 33639e5fc74cSSteve Capper return NULL; 33649e5fc74cSSteve Capper } 33659e5fc74cSSteve Capper 33669e5fc74cSSteve Capper #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */ 33679e5fc74cSSteve Capper 3368d5bd9106SAndi Kleen #ifdef CONFIG_MEMORY_FAILURE 3369d5bd9106SAndi Kleen 33706de2b1aaSNaoya Horiguchi /* Should be called in hugetlb_lock */ 33716de2b1aaSNaoya Horiguchi static int is_hugepage_on_freelist(struct page *hpage) 33726de2b1aaSNaoya Horiguchi { 33736de2b1aaSNaoya Horiguchi struct page *page; 33746de2b1aaSNaoya Horiguchi struct page *tmp; 33756de2b1aaSNaoya Horiguchi struct hstate *h = page_hstate(hpage); 33766de2b1aaSNaoya Horiguchi int nid = page_to_nid(hpage); 33776de2b1aaSNaoya Horiguchi 33786de2b1aaSNaoya Horiguchi list_for_each_entry_safe(page, tmp, &h->hugepage_freelists[nid], lru) 33796de2b1aaSNaoya Horiguchi if (page == hpage) 33806de2b1aaSNaoya Horiguchi return 1; 33816de2b1aaSNaoya Horiguchi return 0; 33826de2b1aaSNaoya Horiguchi } 33836de2b1aaSNaoya Horiguchi 338493f70f90SNaoya Horiguchi /* 338593f70f90SNaoya Horiguchi * This function is called from memory failure code. 338693f70f90SNaoya Horiguchi * Assume the caller holds page lock of the head page. 338793f70f90SNaoya Horiguchi */ 33886de2b1aaSNaoya Horiguchi int dequeue_hwpoisoned_huge_page(struct page *hpage) 338993f70f90SNaoya Horiguchi { 339093f70f90SNaoya Horiguchi struct hstate *h = page_hstate(hpage); 339193f70f90SNaoya Horiguchi int nid = page_to_nid(hpage); 33926de2b1aaSNaoya Horiguchi int ret = -EBUSY; 339393f70f90SNaoya Horiguchi 339493f70f90SNaoya Horiguchi spin_lock(&hugetlb_lock); 33956de2b1aaSNaoya Horiguchi if (is_hugepage_on_freelist(hpage)) { 339656f2fb14SNaoya Horiguchi /* 339756f2fb14SNaoya Horiguchi * Hwpoisoned hugepage isn't linked to activelist or freelist, 339856f2fb14SNaoya Horiguchi * but dangling hpage->lru can trigger list-debug warnings 339956f2fb14SNaoya Horiguchi * (this happens when we call unpoison_memory() on it), 340056f2fb14SNaoya Horiguchi * so let it point to itself with list_del_init(). 
340156f2fb14SNaoya Horiguchi */ 340256f2fb14SNaoya Horiguchi list_del_init(&hpage->lru); 34038c6c2ecbSNaoya Horiguchi set_page_refcounted(hpage); 340493f70f90SNaoya Horiguchi h->free_huge_pages--; 340593f70f90SNaoya Horiguchi h->free_huge_pages_node[nid]--; 34066de2b1aaSNaoya Horiguchi ret = 0; 340793f70f90SNaoya Horiguchi } 34086de2b1aaSNaoya Horiguchi spin_unlock(&hugetlb_lock); 34096de2b1aaSNaoya Horiguchi return ret; 34106de2b1aaSNaoya Horiguchi } 34116de2b1aaSNaoya Horiguchi #endif 3412
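/*
 * A minimal sketch, assuming a memory-failure style caller, of how
 * dequeue_hwpoisoned_huge_page() above is meant to be used.  The names
 * in the body are placeholders; only the locking rule and the meaning
 * of the return value come from the code above: the caller must hold
 * the head page lock, and a return of 0 means the hugepage was still
 * on a free list and has now been pulled off it with a reference taken
 * via set_page_refcounted().
 *
 *	lock_page(hpage);
 *	if (!dequeue_hwpoisoned_huge_page(hpage))
 *		nr_poisoned += pages_per_huge_page(page_hstate(hpage));
 *	unlock_page(hpage);
 */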