/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <linux/io.h>

#include <linux/hugetlb.h>
#include <linux/node.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;

static int max_hstate;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];

__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static unsigned long __initdata default_hstate_size;

#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[max_hstate]; (h)++)

/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);

static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
{
	bool free = (spool->count == 0) && (spool->used_hpages == 0);

	spin_unlock(&spool->lock);

	/* If no pages are used, and no other handles to the subpool
	 * remain, free the subpool */
	if (free)
		kfree(spool);
}

struct hugepage_subpool *hugepage_new_subpool(long nr_blocks)
{
	struct hugepage_subpool *spool;

	spool = kmalloc(sizeof(*spool), GFP_KERNEL);
	if (!spool)
		return NULL;

	spin_lock_init(&spool->lock);
	spool->count = 1;
	spool->max_hpages = nr_blocks;
	spool->used_hpages = 0;

	return spool;
}

void hugepage_put_subpool(struct hugepage_subpool *spool)
{
	spin_lock(&spool->lock);
	BUG_ON(!spool->count);
	spool->count--;
	unlock_or_release_subpool(spool);
}

static int hugepage_subpool_get_pages(struct hugepage_subpool *spool,
				      long delta)
{
	int ret = 0;

	if (!spool)
		return 0;

	spin_lock(&spool->lock);
	if ((spool->used_hpages + delta) <= spool->max_hpages) {
		spool->used_hpages += delta;
	} else {
		ret = -ENOMEM;
	}
	spin_unlock(&spool->lock);

	return ret;
}

static void hugepage_subpool_put_pages(struct hugepage_subpool *spool,
				       long delta)
{
	if (!spool)
		return;

	spin_lock(&spool->lock);
	spool->used_hpages -= delta;
	/* If hugetlbfs_put_super couldn't free spool due to
	 * an outstanding quota reference, free it now. */
	unlock_or_release_subpool(spool);
}

static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{
	return HUGETLBFS_SB(inode->i_sb)->spool;
}

static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{
	return subpool_inode(vma->vm_file->f_dentry->d_inode);
}
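
/*
 * Illustrative accounting flow for a subpool capped at two huge pages
 * (a sketch derived from the helpers above, not a real caller):
 *
 *	spool = hugepage_new_subpool(2);       count == 1, used_hpages == 0
 *	hugepage_subpool_get_pages(spool, 1);  -> 0,  used_hpages == 1
 *	hugepage_subpool_get_pages(spool, 2);  -> -ENOMEM  (1 + 2 > 2)
 *	hugepage_subpool_put_pages(spool, 1);  used_hpages == 0
 *	hugepage_put_subpool(spool);           last handle gone, spool freed
 */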

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 *                    across the pages in a mapping.
 *
 * The region data structures are protected by a combination of the mmap_sem
 * and the hugetlb_instantiation_mutex. To access or modify a region the
 * caller must either hold the mmap_sem for write, or the mmap_sem for read
 * and the hugetlb_instantiation mutex:
 *
 *	down_write(&mm->mmap_sem);
 * or
 *	down_read(&mm->mmap_sem);
 *	mutex_lock(&hugetlb_instantiation_mutex);
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
};

static long region_add(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg, *trg;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;

	/* Check for and consume any regions we now overlap with. */
	nrg = rg;
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			break;

		/* If this area reaches higher, extend our area to
		 * include it completely. If this is not the first area
		 * which we intend to reuse, free it. */
		if (rg->to > t)
			t = rg->to;
		if (rg != nrg) {
			list_del(&rg->link);
			kfree(rg);
		}
	}
	nrg->from = f;
	nrg->to = t;
	return 0;
}
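
/*
 * For example, given existing regions [1,3) and [5,7), a call
 * region_add(head, 2, 6) locates [1,3), rounds its left edge down so
 * f == 1, absorbs the overlapping [5,7) (extending t to 7 and freeing
 * that entry), and leaves the single merged region [1,7).
 */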

static long region_chg(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg;
	long chg = 0;

	/* Locate the region we are before or in. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* If we are below the current region then a new region is required.
	 * Subtle, allocate a new region at the position but make it zero
	 * size such that we can guarantee to record the reservation. */
	if (&rg->link == head || t < rg->from) {
		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
		if (!nrg)
			return -ENOMEM;
		nrg->from = f;
		nrg->to = f;
		INIT_LIST_HEAD(&nrg->link);
		list_add(&nrg->link, rg->link.prev);

		return t - f;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;
	chg = t - f;

	/* Check for and consume any regions we now overlap with. */
	list_for_each_entry(rg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			return chg;

		/* We overlap with this area; if it extends further than
		 * us then we must extend ourselves. Account for its
		 * existing reservation. */
		if (rg->to > t) {
			chg += rg->to - t;
			t = rg->to;
		}
		chg -= rg->to - rg->from;
	}
	return chg;
}

static long region_truncate(struct list_head *head, long end)
{
	struct file_region *rg, *trg;
	long chg = 0;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (end <= rg->to)
			break;
	if (&rg->link == head)
		return 0;

	/* If we are in the middle of a region then adjust it. */
	if (end > rg->from) {
		chg = rg->to - end;
		rg->to = end;
		rg = list_entry(rg->link.next, typeof(*rg), link);
	}

	/* Drop any remaining regions. */
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		chg += rg->to - rg->from;
		list_del(&rg->link);
		kfree(rg);
	}
	return chg;
}
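
/*
 * region_chg() and region_add() are used as a prepare/commit pair:
 * region_chg() reports how many pages in [f, t) are not yet covered
 * (pre-allocating an entry when the range lies before all existing
 * regions, so the later commit cannot fail), and region_add() commits
 * the range once the pages exist. E.g. with an existing region [0,2),
 * region_chg(head, 0, 4) returns 2 (only pages 2 and 3 are new), and
 * the subsequent region_add(head, 0, 4) merges the list to [0,4).
 */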

static long region_count(struct list_head *head, long f, long t)
{
	struct file_region *rg;
	long chg = 0;

	/* Locate each segment we overlap with, and count that overlap. */
	list_for_each_entry(rg, head, link) {
		long seg_from;
		long seg_to;

		if (rg->to <= f)
			continue;
		if (rg->from >= t)
			break;

		seg_from = max(rg->from, f);
		seg_to = min(rg->to, t);

		chg += seg_to - seg_from;
	}

	return chg;
}

/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	return ((address - vma->vm_start) >> huge_page_shift(h)) +
			(vma->vm_pgoff >> huge_page_order(h));
}

pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address)
{
	return vma_hugecache_offset(hstate_vma(vma), vma, address);
}

/*
 * Return the size of the pages allocated when backing a VMA. In the majority
 * of cases this will be the same size as used by the page table entries.
 */
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	struct hstate *hstate;

	if (!is_vm_hugetlb_page(vma))
		return PAGE_SIZE;

	hstate = hstate_vma(vma);

	return 1UL << (hstate->order + PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);

/*
 * Return the page size being used by the MMU to back a VMA. In the majority
 * of cases, the page size used by the kernel matches the MMU size. On
 * architectures where it differs, an architecture-specific version of this
 * function is required.
 */
#ifndef vma_mmu_pagesize
unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return vma_kernel_pagesize(vma);
}
#endif
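
/*
 * Worked example for vma_hugecache_offset() with 2MB huge pages
 * (huge_page_shift == 21, huge_page_order == 9): a fault at
 * vma->vm_start + 4MB in a mapping with vm_pgoff == 0 yields
 * (4MB >> 21) + (0 >> 9) == 2, i.e. the third huge page in the file.
 */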

/*
 * Flags for MAP_PRIVATE reservations. These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 * is guaranteed to have its future faults succeed.
 *
 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held. It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping. A shared mapping has a region map associated
 * with the underlying file; this region map represents the backing file
 * pages which have ever had a reservation assigned, and it persists even
 * after the page is instantiated. A private mapping has a region map
 * associated with the original mmap() which is attached to all VMAs that
 * reference it; this region map represents those offsets which have
 * consumed a reservation, i.e. where pages have been instantiated.
 */
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
	return (unsigned long)vma->vm_private_data;
}

static void set_vma_private_data(struct vm_area_struct *vma,
			unsigned long value)
{
	vma->vm_private_data = (void *)value;
}
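
/*
 * Since a kmalloc()ed resv_map is at least word-aligned, its two low
 * bits are free to carry the HPAGE_RESV_* flags. For instance, a map
 * at 0x...f000 whose VMA owns the reservation is stored in
 * vm_private_data as 0x...f001; masking with ~HPAGE_RESV_MASK recovers
 * the pointer, masking with HPAGE_RESV_MASK recovers the flags.
 */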

struct resv_map {
	struct kref refs;
	struct list_head regions;
};

static struct resv_map *resv_map_alloc(void)
{
	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
	if (!resv_map)
		return NULL;

	kref_init(&resv_map->refs);
	INIT_LIST_HEAD(&resv_map->regions);

	return resv_map;
}

static void resv_map_release(struct kref *ref)
{
	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);

	/* Clear out any active regions before we release the map. */
	region_truncate(&resv_map->regions, 0);
	kfree(resv_map);
}

static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	if (!(vma->vm_flags & VM_MAYSHARE))
		return (struct resv_map *)(get_vma_private_data(vma) &
							~HPAGE_RESV_MASK);
	return NULL;
}

static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);

	set_vma_private_data(vma, (get_vma_private_data(vma) &
				HPAGE_RESV_MASK) | (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);

	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));

	return (get_vma_private_data(vma) & flag) != 0;
}
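
/*
 * resv_map lifetime sketch: resv_map_alloc() starts the refcount at 1
 * via kref_init(); code that shares the map (e.g. when a VMA carrying
 * it is duplicated) takes kref_get(&map->refs), and the final
 * kref_put(&map->refs, resv_map_release) truncates the region list and
 * frees the map.
 */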

/* Decrement the reserved pages in the hugepage pool by one */
static void decrement_hugepage_resv_vma(struct hstate *h,
			struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_NORESERVE)
		return;

	if (vma->vm_flags & VM_MAYSHARE) {
		/* Shared mappings always use reserves */
		h->resv_huge_pages--;
	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		/*
		 * Only the process that called mmap() has reserves for
		 * private mappings.
		 */
		h->resv_huge_pages--;
	}
}

/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	if (!(vma->vm_flags & VM_MAYSHARE))
		vma->vm_private_data = (void *)0;
}

/* Returns true if the VMA has associated reserve pages */
static int vma_has_reserves(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_MAYSHARE)
		return 1;
	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
		return 1;
	return 0;
}

static void copy_gigantic_page(struct page *dst, struct page *src)
{
	int i;
	struct hstate *h = page_hstate(src);
	struct page *dst_base = dst;
	struct page *src_base = src;

	for (i = 0; i < pages_per_huge_page(h); ) {
		cond_resched();
		copy_highpage(dst, src);

		i++;
		dst = mem_map_next(dst, dst_base, i);
		src = mem_map_next(src, src_base, i);
	}
}

void copy_huge_page(struct page *dst, struct page *src)
{
	int i;
	struct hstate *h = page_hstate(src);

	if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
		copy_gigantic_page(dst, src);
		return;
	}

	might_sleep();
	for (i = 0; i < pages_per_huge_page(h); i++) {
		cond_resched();
		copy_highpage(dst + i, src + i);
	}
}

static void enqueue_huge_page(struct hstate *h, struct page *page)
{
	int nid = page_to_nid(page);
	list_add(&page->lru, &h->hugepage_freelists[nid]);
	h->free_huge_pages++;
	h->free_huge_pages_node[nid]++;
}

static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
{
	struct page *page;

	if (list_empty(&h->hugepage_freelists[nid]))
		return NULL;
	page = list_entry(h->hugepage_freelists[nid].next, struct page, lru);
	list_del(&page->lru);
	set_page_refcounted(page);
	h->free_huge_pages--;
	h->free_huge_pages_node[nid]--;
	return page;
}

static struct page *dequeue_huge_page_vma(struct hstate *h,
				struct vm_area_struct *vma,
				unsigned long address, int avoid_reserve)
{
	struct page *page = NULL;
	struct mempolicy *mpol;
	nodemask_t *nodemask;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;
	unsigned int cpuset_mems_cookie;

retry_cpuset:
	cpuset_mems_cookie = get_mems_allowed();
	zonelist = huge_zonelist(vma, address,
					htlb_alloc_mask, &mpol, &nodemask);
	/*
	 * A child process with MAP_PRIVATE mappings created by its parent
	 * has no page reserves. This check ensures that reservations are
	 * not "stolen". The child may still get SIGKILLed.
	 */
	if (!vma_has_reserves(vma) &&
			h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

	/* If reserves cannot be used, ensure enough pages are in the pool */
	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

	for_each_zone_zonelist_nodemask(zone, z, zonelist,
						MAX_NR_ZONES - 1, nodemask) {
		if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask)) {
			page = dequeue_huge_page_node(h, zone_to_nid(zone));
			if (page) {
				if (!avoid_reserve)
					decrement_hugepage_resv_vma(h, vma);
				break;
			}
		}
	}

	mpol_cond_put(mpol);
	if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
		goto retry_cpuset;
	return page;

err:
	mpol_cond_put(mpol);
	return NULL;
}

static void update_and_free_page(struct hstate *h, struct page *page)
{
	int i;

	VM_BUG_ON(h->order >= MAX_ORDER);

	h->nr_huge_pages--;
	h->nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < pages_per_huge_page(h); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
				1 << PG_referenced | 1 << PG_dirty |
				1 << PG_active | 1 << PG_reserved |
				1 << PG_private | 1 << PG_writeback);
	}
	set_compound_page_dtor(page, NULL);
	set_page_refcounted(page);
	arch_release_hugepage(page);
	__free_pages(page, huge_page_order(h));
}

struct hstate *size_to_hstate(unsigned long size)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (huge_page_size(h) == size)
			return h;
	}
	return NULL;
}

static void free_huge_page(struct page *page)
{
	/*
	 * Can't pass hstate in here because it is called from the
	 * compound page destructor.
	 */
	struct hstate *h = page_hstate(page);
	int nid = page_to_nid(page);
	struct hugepage_subpool *spool =
		(struct hugepage_subpool *)page_private(page);

	set_page_private(page, 0);
	page->mapping = NULL;
	BUG_ON(page_count(page));
	BUG_ON(page_mapcount(page));
	INIT_LIST_HEAD(&page->lru);

	spin_lock(&hugetlb_lock);
	if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
		update_and_free_page(h, page);
		h->surplus_huge_pages--;
		h->surplus_huge_pages_node[nid]--;
	} else {
		enqueue_huge_page(h, page);
	}
	spin_unlock(&hugetlb_lock);
	hugepage_subpool_put_pages(spool, 1);
}

static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
{
	set_compound_page_dtor(page, free_huge_page);
	spin_lock(&hugetlb_lock);
	h->nr_huge_pages++;
	h->nr_huge_pages_node[nid]++;
	spin_unlock(&hugetlb_lock);
	put_page(page); /* free it into the hugepage allocator */
}

static void prep_compound_gigantic_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p = page + 1;

	/* we rely on prep_new_huge_page to set the destructor */
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
		__SetPageTail(p);
		set_page_count(p, 0);
		p->first_page = page;
	}
}

int PageHuge(struct page *page)
{
	compound_page_dtor *dtor;

	if (!PageCompound(page))
		return 0;

	page = compound_head(page);
	dtor = get_compound_page_dtor(page);

	return dtor == free_huge_page;
}
EXPORT_SYMBOL_GPL(PageHuge);

static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
{
	struct page *page;

	if (h->order >= MAX_ORDER)
		return NULL;

	page = alloc_pages_exact_node(nid,
		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
						__GFP_REPEAT|__GFP_NOWARN,
		huge_page_order(h));
	if (page) {
		if (arch_prepare_hugepage(page)) {
			__free_pages(page, huge_page_order(h));
			return NULL;
		}
		prep_new_huge_page(h, page, nid);
	}

	return page;
}

/*
 * common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 * be outside of *nodes_allowed. Ensure that we use an allowed
 * node for alloc or free.
 */
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	nid = next_node(nid, *nodes_allowed);
	if (nid == MAX_NUMNODES)
		nid = first_node(*nodes_allowed);
	VM_BUG_ON(nid >= MAX_NUMNODES);

	return nid;
}

static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	if (!node_isset(nid, *nodes_allowed))
		nid = next_node_allowed(nid, nodes_allowed);
	return nid;
}
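
/*
 * Wrap-around example: with *nodes_allowed == {0, 2} and nid == 2,
 * next_node_allowed() runs off the end of the mask and restarts at
 * first_node(), returning 0; with nid == 1 (not in the mask),
 * get_valid_node_allowed() advances to the next allowed node, 2.
 */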

/*
 * returns the previously saved node ["this node"] from which to
 * allocate a persistent huge page for the pool and advances the
 * next node from which to allocate, handling wrap at end of node
 * mask.
 */
static int hstate_next_node_to_alloc(struct hstate *h,
					nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);

	return nid;
}

static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
{
	struct page *page;
	int start_nid;
	int next_nid;
	int ret = 0;

	start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
	next_nid = start_nid;

	do {
		page = alloc_fresh_huge_page_node(h, next_nid);
		if (page) {
			ret = 1;
			break;
		}
		next_nid = hstate_next_node_to_alloc(h, nodes_allowed);
	} while (next_nid != start_nid);

	if (ret)
		count_vm_event(HTLB_BUDDY_PGALLOC);
	else
		count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);

	return ret;
}

/*
 * helper for free_pool_huge_page() - return the previously saved
 * node ["this node"] from which to free a huge page. Advance the
 * next node id whether or not we find a free huge page to free so
 * that the next attempt to free addresses the next node.
 */
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);

	return nid;
}

/*
 * Free huge page from pool from next node to free.
 * Attempt to keep persistent huge pages more or less
 * balanced over allowed nodes.
 * Called with hugetlb_lock locked.
 */
static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
							 bool acct_surplus)
{
	int start_nid;
	int next_nid;
	int ret = 0;

	start_nid = hstate_next_node_to_free(h, nodes_allowed);
	next_nid = start_nid;

	do {
		/*
		 * If we're returning unused surplus pages, only examine
		 * nodes with surplus pages.
		 */
		if ((!acct_surplus || h->surplus_huge_pages_node[next_nid]) &&
		    !list_empty(&h->hugepage_freelists[next_nid])) {
			struct page *page =
				list_entry(h->hugepage_freelists[next_nid].next,
					  struct page, lru);
			list_del(&page->lru);
			h->free_huge_pages--;
			h->free_huge_pages_node[next_nid]--;
			if (acct_surplus) {
				h->surplus_huge_pages--;
				h->surplus_huge_pages_node[next_nid]--;
			}
			update_and_free_page(h, page);
			ret = 1;
			break;
		}
		next_nid = hstate_next_node_to_free(h, nodes_allowed);
	} while (next_nid != start_nid);

	return ret;
}
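
/*
 * Illustrative behaviour: each call frees at most one huge page,
 * starting the search at the saved next_nid_to_free, so shrinking the
 * pool by four pages with nodes_allowed == {0, 1} removes roughly two
 * pages from each node rather than four from node 0.
 */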

static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
{
	struct page *page;
	unsigned int r_nid;

	if (h->order >= MAX_ORDER)
		return NULL;

	/*
	 * Assume we will successfully allocate the surplus page to
	 * prevent racing processes from causing the surplus to exceed
	 * overcommit.
	 *
	 * This however introduces a different race, where a process B
	 * tries to grow the static hugepage pool while alloc_pages() is
	 * called by process A. B will only examine the per-node
	 * counters in determining if surplus huge pages can be
	 * converted to normal huge pages in adjust_pool_surplus(). A
	 * won't be able to increment the per-node counter until the
	 * lock is dropped by B, but B doesn't drop hugetlb_lock until
	 * no more huge pages can be converted from surplus to normal
	 * state (and doesn't try to convert again). Thus, we have a
	 * case where a surplus huge page exists, the pool is grown, and
	 * the surplus huge page still exists after, even though it
	 * should just have been converted to a normal huge page. This
	 * does not leak memory, though, as the hugepage will be freed
	 * once it is out of use. It also does not allow the counters to
	 * go out of whack in adjust_pool_surplus() as we don't modify
	 * the node values until we've gotten the hugepage and only the
	 * per-node value is checked there.
	 */
	spin_lock(&hugetlb_lock);
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
		spin_unlock(&hugetlb_lock);
		return NULL;
	} else {
		h->nr_huge_pages++;
		h->surplus_huge_pages++;
	}
	spin_unlock(&hugetlb_lock);

	if (nid == NUMA_NO_NODE)
		page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
				   __GFP_REPEAT|__GFP_NOWARN,
				   huge_page_order(h));
	else
		page = alloc_pages_exact_node(nid,
			htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
			__GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));

	if (page && arch_prepare_hugepage(page)) {
		__free_pages(page, huge_page_order(h));
		page = NULL;
	}

	spin_lock(&hugetlb_lock);
	if (page) {
		r_nid = page_to_nid(page);
		set_compound_page_dtor(page, free_huge_page);
		/*
		 * We incremented the global counters already
		 */
		h->nr_huge_pages_node[r_nid]++;
		h->surplus_huge_pages_node[r_nid]++;
		__count_vm_event(HTLB_BUDDY_PGALLOC);
	} else {
		h->nr_huge_pages--;
		h->surplus_huge_pages--;
		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
	}
	spin_unlock(&hugetlb_lock);

	return page;
}

/*
 * This allocation function is useful in the context where vma is irrelevant.
 * E.g. soft-offlining uses this function because it only cares about the
 * physical address of the error page.
 */
struct page *alloc_huge_page_node(struct hstate *h, int nid)
{
	struct page *page;

	spin_lock(&hugetlb_lock);
	page = dequeue_huge_page_node(h, nid);
	spin_unlock(&hugetlb_lock);

	if (!page)
		page = alloc_buddy_huge_page(h, nid);

	return page;
}

/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.
 */
static int gather_surplus_pages(struct hstate *h, int delta)
{
	struct list_head surplus_list;
	struct page *page, *tmp;
	int ret, i;
	int needed, allocated;
	bool alloc_ok = true;

	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
	if (needed <= 0) {
		h->resv_huge_pages += delta;
		return 0;
	}

	allocated = 0;
	INIT_LIST_HEAD(&surplus_list);

	ret = -ENOMEM;
retry:
	spin_unlock(&hugetlb_lock);
	for (i = 0; i < needed; i++) {
		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
		if (!page) {
			alloc_ok = false;
			break;
		}
		list_add(&page->lru, &surplus_list);
	}
	allocated += i;

	/*
	 * After retaking hugetlb_lock, we need to recalculate 'needed'
	 * because either resv_huge_pages or free_huge_pages may have changed.
	 */
	spin_lock(&hugetlb_lock);
	needed = (h->resv_huge_pages + delta) -
			(h->free_huge_pages + allocated);
	if (needed > 0) {
		if (alloc_ok)
			goto retry;
		/*
		 * We were not able to allocate enough pages to
		 * satisfy the entire reservation so we free what
		 * we've allocated so far.
		 */
		goto free;
	}
	/*
	 * The surplus_list now contains _at_least_ the number of extra pages
	 * needed to accommodate the reservation. Add the appropriate number
	 * of pages to the hugetlb pool and free the extras back to the buddy
	 * allocator. Commit the entire reservation here to prevent another
	 * process from stealing the pages as they are added to the pool but
	 * before they are reserved.
	 */
	needed += allocated;
	h->resv_huge_pages += delta;
	ret = 0;

	/* Free the needed pages to the hugetlb pool */
	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
		if ((--needed) < 0)
			break;
		list_del(&page->lru);
		/*
		 * This page is now managed by the hugetlb allocator and has
		 * no users -- drop the buddy allocator's reference.
		 */
		put_page_testzero(page);
		VM_BUG_ON(page_count(page));
		enqueue_huge_page(h, page);
	}
free:
	spin_unlock(&hugetlb_lock);

	/* Free unnecessary surplus pages to the buddy allocator */
	if (!list_empty(&surplus_list)) {
		list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
			list_del(&page->lru);
			put_page(page);
		}
	}
	spin_lock(&hugetlb_lock);

	return ret;
}
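
/*
 * Worked example: with resv_huge_pages == 0, free_huge_pages == 1 and
 * delta == 3, 'needed' starts at (0 + 3) - 1 == 2 surplus pages. If a
 * page is freed by someone else while the lock is dropped, the
 * recalculation can drop 'needed' to zero or below, and any surplus
 * pages that are no longer required are handed straight back to the
 * buddy allocator via the "free:" path.
 */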

/*
 * When releasing a hugetlb pool reservation, any surplus pages that were
 * allocated to satisfy the reservation must be explicitly freed if they were
 * never used.
 * Called with hugetlb_lock held.
 */
static void return_unused_surplus_pages(struct hstate *h,
					unsigned long unused_resv_pages)
{
	unsigned long nr_pages;

	/* Uncommit the reservation */
	h->resv_huge_pages -= unused_resv_pages;

	/* Cannot return gigantic pages currently */
	if (h->order >= MAX_ORDER)
		return;

	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);

	/*
	 * We want to release as many surplus pages as possible, spread
	 * evenly across all nodes with memory. Iterate across these nodes
	 * until we can no longer free unreserved surplus pages. This occurs
	 * when the nodes with surplus pages have no free pages.
	 * free_pool_huge_page() will balance the freed pages across the
	 * on-line nodes with memory and will handle the hstate accounting.
	 */
	while (nr_pages--) {
		if (!free_pool_huge_page(h, &node_states[N_HIGH_MEMORY], 1))
			break;
	}
}

/*
 * Determine if the huge page at addr within the vma has an associated
 * reservation. Where it does not we will need to logically increase
 * reservation and actually increase subpool usage before an allocation
 * can occur. Where any new reservation would be required the
 * reservation change is prepared, but not committed. Once the page
 * has been allocated from the subpool and instantiated the change should
 * be committed via vma_commit_reservation. No action is required on
 * failure.
 */
static long vma_needs_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;

	if (vma->vm_flags & VM_MAYSHARE) {
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		return region_chg(&inode->i_mapping->private_list,
							idx, idx + 1);

	} else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		return 1;

	} else {
		long err;
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		struct resv_map *reservations = vma_resv_map(vma);

		err = region_chg(&reservations->regions, idx, idx + 1);
		if (err < 0)
			return err;
		return 0;
	}
}

static void vma_commit_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;

	if (vma->vm_flags & VM_MAYSHARE) {
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		region_add(&inode->i_mapping->private_list, idx, idx + 1);

	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		struct resv_map *reservations = vma_resv_map(vma);

		/* Mark this page used in the map. */
		region_add(&reservations->regions, idx, idx + 1);
	}
}
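
/*
 * The two helpers above form a prepare/commit pair around allocation:
 * a caller first asks vma_needs_reservation() how many pages (0 or 1
 * here) still need reserving and charges the subpool accordingly, and
 * only once the page has been allocated calls vma_commit_reservation()
 * to record the page as used -- see alloc_huge_page() below.
 */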

static struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr, int avoid_reserve)
{
	struct hugepage_subpool *spool = subpool_vma(vma);
	struct hstate *h = hstate_vma(vma);
	struct page *page;
	long chg;

	/*
	 * Processes that did not create the mapping will have no
	 * reserves and will not have accounted against the subpool
	 * limit. Check that the subpool limit can be made before
	 * satisfying the allocation; MAP_NORESERVE mappings may also
	 * need pages and subpool limit allocated if no reserve
	 * mapping overlaps.
	 */
	chg = vma_needs_reservation(h, vma, addr);
	if (chg < 0)
		return ERR_PTR(-VM_FAULT_OOM);
	if (chg)
		if (hugepage_subpool_get_pages(spool, chg))
			return ERR_PTR(-VM_FAULT_SIGBUS);

	spin_lock(&hugetlb_lock);
	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
	spin_unlock(&hugetlb_lock);

	if (!page) {
		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
		if (!page) {
			hugepage_subpool_put_pages(spool, chg);
			return ERR_PTR(-VM_FAULT_SIGBUS);
		}
	}

	set_page_private(page, (unsigned long)spool);

	vma_commit_reservation(h, vma, addr);

	return page;
}

int __weak alloc_bootmem_huge_page(struct hstate *h)
{
	struct huge_bootmem_page *m;
	int nr_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);

	while (nr_nodes) {
		void *addr;

		addr = __alloc_bootmem_node_nopanic(
				NODE_DATA(hstate_next_node_to_alloc(h,
						&node_states[N_HIGH_MEMORY])),
				huge_page_size(h), huge_page_size(h), 0);

		if (addr) {
			/*
			 * Use the beginning of the huge page to store the
			 * huge_bootmem_page struct (until gather_bootmem
			 * puts them into the mem_map).
			 */
			m = addr;
			goto found;
		}
		nr_nodes--;
	}
	return 0;

found:
	BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
	/* Put them into a private list first because mem_map is not up yet */
	list_add(&m->list, &huge_boot_pages);
	m->hstate = h;
	return 1;
}
1168aa888a74SAndi Kleen */ 1169aa888a74SAndi Kleen m = addr; 1170aa888a74SAndi Kleen goto found; 1171aa888a74SAndi Kleen } 1172aa888a74SAndi Kleen nr_nodes--; 1173aa888a74SAndi Kleen } 1174aa888a74SAndi Kleen return 0; 1175aa888a74SAndi Kleen 1176aa888a74SAndi Kleen found: 1177aa888a74SAndi Kleen BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1)); 1178aa888a74SAndi Kleen /* Put them into a private list first because mem_map is not up yet */ 1179aa888a74SAndi Kleen list_add(&m->list, &huge_boot_pages); 1180aa888a74SAndi Kleen m->hstate = h; 1181aa888a74SAndi Kleen return 1; 1182aa888a74SAndi Kleen } 1183aa888a74SAndi Kleen 118418229df5SAndy Whitcroft static void prep_compound_huge_page(struct page *page, int order) 118518229df5SAndy Whitcroft { 118618229df5SAndy Whitcroft if (unlikely(order > (MAX_ORDER - 1))) 118718229df5SAndy Whitcroft prep_compound_gigantic_page(page, order); 118818229df5SAndy Whitcroft else 118918229df5SAndy Whitcroft prep_compound_page(page, order); 119018229df5SAndy Whitcroft } 119118229df5SAndy Whitcroft 1192aa888a74SAndi Kleen /* Put bootmem huge pages into the standard lists after mem_map is up */ 1193aa888a74SAndi Kleen static void __init gather_bootmem_prealloc(void) 1194aa888a74SAndi Kleen { 1195aa888a74SAndi Kleen struct huge_bootmem_page *m; 1196aa888a74SAndi Kleen 1197aa888a74SAndi Kleen list_for_each_entry(m, &huge_boot_pages, list) { 1198aa888a74SAndi Kleen struct hstate *h = m->hstate; 1199ee8f248dSBecky Bruce struct page *page; 1200ee8f248dSBecky Bruce 1201ee8f248dSBecky Bruce #ifdef CONFIG_HIGHMEM 1202ee8f248dSBecky Bruce page = pfn_to_page(m->phys >> PAGE_SHIFT); 1203ee8f248dSBecky Bruce free_bootmem_late((unsigned long)m, 1204ee8f248dSBecky Bruce sizeof(struct huge_bootmem_page)); 1205ee8f248dSBecky Bruce #else 1206ee8f248dSBecky Bruce page = virt_to_page(m); 1207ee8f248dSBecky Bruce #endif 1208aa888a74SAndi Kleen __ClearPageReserved(page); 1209aa888a74SAndi Kleen WARN_ON(page_count(page) != 1); 121018229df5SAndy Whitcroft prep_compound_huge_page(page, h->order); 1211aa888a74SAndi Kleen prep_new_huge_page(h, page, page_to_nid(page)); 1212b0320c7bSRafael Aquini /* 1213b0320c7bSRafael Aquini * If we had gigantic hugepages allocated at boot time, we need 1214b0320c7bSRafael Aquini * to restore the 'stolen' pages to totalram_pages in order to 1215b0320c7bSRafael Aquini * fix confusing memory reports from free(1) and other 1216b0320c7bSRafael Aquini * side-effects, like CommitLimit going negative. 
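 * The bootmem allocator never credited these pages to totalram_pages, so they are accounted back by hand as the pool adopts them.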
1217b0320c7bSRafael Aquini */ 1218b0320c7bSRafael Aquini if (h->order > (MAX_ORDER - 1)) 1219b0320c7bSRafael Aquini totalram_pages += 1 << h->order; 1220aa888a74SAndi Kleen } 1221aa888a74SAndi Kleen } 1222aa888a74SAndi Kleen 12238faa8b07SAndi Kleen static void __init hugetlb_hstate_alloc_pages(struct hstate *h) 12241da177e4SLinus Torvalds { 12251da177e4SLinus Torvalds unsigned long i; 12261da177e4SLinus Torvalds 1227e5ff2159SAndi Kleen for (i = 0; i < h->max_huge_pages; ++i) { 1228aa888a74SAndi Kleen if (h->order >= MAX_ORDER) { 1229aa888a74SAndi Kleen if (!alloc_bootmem_huge_page(h)) 1230aa888a74SAndi Kleen break; 12319b5e5d0fSLee Schermerhorn } else if (!alloc_fresh_huge_page(h, 12329b5e5d0fSLee Schermerhorn &node_states[N_HIGH_MEMORY])) 12331da177e4SLinus Torvalds break; 12341da177e4SLinus Torvalds } 12358faa8b07SAndi Kleen h->max_huge_pages = i; 1236e5ff2159SAndi Kleen } 1237e5ff2159SAndi Kleen 1238e5ff2159SAndi Kleen static void __init hugetlb_init_hstates(void) 1239e5ff2159SAndi Kleen { 1240e5ff2159SAndi Kleen struct hstate *h; 1241e5ff2159SAndi Kleen 1242e5ff2159SAndi Kleen for_each_hstate(h) { 12438faa8b07SAndi Kleen /* oversize hugepages were init'ed in early boot */ 12448faa8b07SAndi Kleen if (h->order < MAX_ORDER) 12458faa8b07SAndi Kleen hugetlb_hstate_alloc_pages(h); 1246e5ff2159SAndi Kleen } 1247e5ff2159SAndi Kleen } 1248e5ff2159SAndi Kleen 12494abd32dbSAndi Kleen static char * __init memfmt(char *buf, unsigned long n) 12504abd32dbSAndi Kleen { 12514abd32dbSAndi Kleen if (n >= (1UL << 30)) 12524abd32dbSAndi Kleen sprintf(buf, "%lu GB", n >> 30); 12534abd32dbSAndi Kleen else if (n >= (1UL << 20)) 12544abd32dbSAndi Kleen sprintf(buf, "%lu MB", n >> 20); 12554abd32dbSAndi Kleen else 12564abd32dbSAndi Kleen sprintf(buf, "%lu KB", n >> 10); 12574abd32dbSAndi Kleen return buf; 12584abd32dbSAndi Kleen } 12594abd32dbSAndi Kleen 1260e5ff2159SAndi Kleen static void __init report_hugepages(void) 1261e5ff2159SAndi Kleen { 1262e5ff2159SAndi Kleen struct hstate *h; 1263e5ff2159SAndi Kleen 1264e5ff2159SAndi Kleen for_each_hstate(h) { 12654abd32dbSAndi Kleen char buf[32]; 12664abd32dbSAndi Kleen printk(KERN_INFO "HugeTLB registered %s page size, " 12674abd32dbSAndi Kleen "pre-allocated %ld pages\n", 12684abd32dbSAndi Kleen memfmt(buf, huge_page_size(h)), 12694abd32dbSAndi Kleen h->free_huge_pages); 1270e5ff2159SAndi Kleen } 1271e5ff2159SAndi Kleen } 1272e5ff2159SAndi Kleen 12731da177e4SLinus Torvalds #ifdef CONFIG_HIGHMEM 12746ae11b27SLee Schermerhorn static void try_to_free_low(struct hstate *h, unsigned long count, 12756ae11b27SLee Schermerhorn nodemask_t *nodes_allowed) 12761da177e4SLinus Torvalds { 12774415cc8dSChristoph Lameter int i; 12784415cc8dSChristoph Lameter 1279aa888a74SAndi Kleen if (h->order >= MAX_ORDER) 1280aa888a74SAndi Kleen return; 1281aa888a74SAndi Kleen 12826ae11b27SLee Schermerhorn for_each_node_mask(i, *nodes_allowed) { 12831da177e4SLinus Torvalds struct page *page, *next; 1284a5516438SAndi Kleen struct list_head *freel = &h->hugepage_freelists[i]; 1285a5516438SAndi Kleen list_for_each_entry_safe(page, next, freel, lru) { 1286a5516438SAndi Kleen if (count >= h->nr_huge_pages) 12876b0c880dSAdam Litke return; 12881da177e4SLinus Torvalds if (PageHighMem(page)) 12891da177e4SLinus Torvalds continue; 12901da177e4SLinus Torvalds list_del(&page->lru); 1291e5ff2159SAndi Kleen update_and_free_page(h, page); 1292a5516438SAndi Kleen h->free_huge_pages--; 1293a5516438SAndi Kleen h->free_huge_pages_node[page_to_nid(page)]--; 12941da177e4SLinus Torvalds } 12951da177e4SLinus Torvalds 
} 12961da177e4SLinus Torvalds } 12971da177e4SLinus Torvalds #else 12986ae11b27SLee Schermerhorn static inline void try_to_free_low(struct hstate *h, unsigned long count, 12996ae11b27SLee Schermerhorn nodemask_t *nodes_allowed) 13001da177e4SLinus Torvalds { 13011da177e4SLinus Torvalds } 13021da177e4SLinus Torvalds #endif 13031da177e4SLinus Torvalds 130420a0307cSWu Fengguang /* 130520a0307cSWu Fengguang * Increment or decrement surplus_huge_pages. Keep node-specific counters 130620a0307cSWu Fengguang * balanced by operating on them in a round-robin fashion. 130720a0307cSWu Fengguang * Returns 1 if an adjustment was made. 130820a0307cSWu Fengguang */ 13096ae11b27SLee Schermerhorn static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed, 13106ae11b27SLee Schermerhorn int delta) 131120a0307cSWu Fengguang { 1312e8c5c824SLee Schermerhorn int start_nid, next_nid; 131320a0307cSWu Fengguang int ret = 0; 131420a0307cSWu Fengguang 131520a0307cSWu Fengguang VM_BUG_ON(delta != -1 && delta != 1); 131620a0307cSWu Fengguang 1317e8c5c824SLee Schermerhorn if (delta < 0) 13186ae11b27SLee Schermerhorn start_nid = hstate_next_node_to_alloc(h, nodes_allowed); 1319e8c5c824SLee Schermerhorn else 13206ae11b27SLee Schermerhorn start_nid = hstate_next_node_to_free(h, nodes_allowed); 1321e8c5c824SLee Schermerhorn next_nid = start_nid; 1322e8c5c824SLee Schermerhorn 1323e8c5c824SLee Schermerhorn do { 1324e8c5c824SLee Schermerhorn int nid = next_nid; 1325e8c5c824SLee Schermerhorn if (delta < 0) { 1326e8c5c824SLee Schermerhorn /* 1327e8c5c824SLee Schermerhorn * To shrink on this node, there must be a surplus page 1328e8c5c824SLee Schermerhorn */ 13299a76db09SLee Schermerhorn if (!h->surplus_huge_pages_node[nid]) { 13306ae11b27SLee Schermerhorn next_nid = hstate_next_node_to_alloc(h, 13316ae11b27SLee Schermerhorn nodes_allowed); 133220a0307cSWu Fengguang continue; 1333e8c5c824SLee Schermerhorn } 13349a76db09SLee Schermerhorn } 1335e8c5c824SLee Schermerhorn if (delta > 0) { 1336e8c5c824SLee Schermerhorn /* 1337e8c5c824SLee Schermerhorn * Surplus cannot exceed the total number of pages 1338e8c5c824SLee Schermerhorn */ 1339e8c5c824SLee Schermerhorn if (h->surplus_huge_pages_node[nid] >= 13409a76db09SLee Schermerhorn h->nr_huge_pages_node[nid]) { 13416ae11b27SLee Schermerhorn next_nid = hstate_next_node_to_free(h, 13426ae11b27SLee Schermerhorn nodes_allowed); 134320a0307cSWu Fengguang continue; 1344e8c5c824SLee Schermerhorn } 13459a76db09SLee Schermerhorn } 134620a0307cSWu Fengguang 134720a0307cSWu Fengguang h->surplus_huge_pages += delta; 134820a0307cSWu Fengguang h->surplus_huge_pages_node[nid] += delta; 134920a0307cSWu Fengguang ret = 1; 135020a0307cSWu Fengguang break; 1351e8c5c824SLee Schermerhorn } while (next_nid != start_nid); 135220a0307cSWu Fengguang 135320a0307cSWu Fengguang return ret; 135420a0307cSWu Fengguang } 135520a0307cSWu Fengguang 1356a5516438SAndi Kleen #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages) 13576ae11b27SLee Schermerhorn static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count, 13586ae11b27SLee Schermerhorn nodemask_t *nodes_allowed) 13591da177e4SLinus Torvalds { 13607893d1d5SAdam Litke unsigned long min_count, ret; 13611da177e4SLinus Torvalds 1362aa888a74SAndi Kleen if (h->order >= MAX_ORDER) 1363aa888a74SAndi Kleen return h->max_huge_pages; 1364aa888a74SAndi Kleen 13657893d1d5SAdam Litke /* 13667893d1d5SAdam Litke * Increase the pool size 13677893d1d5SAdam Litke * First take pages out of surplus state. 
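 * (No page changes state physically at this point; adjust_pool_surplus() only rebalances the persistent/surplus counters.)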
Then make up the 13687893d1d5SAdam Litke * remaining difference by allocating fresh huge pages. 1369d1c3fb1fSNishanth Aravamudan * 1370d1c3fb1fSNishanth Aravamudan * We might race with alloc_buddy_huge_page() here and be unable 1371d1c3fb1fSNishanth Aravamudan * to convert a surplus huge page to a normal huge page. That is 1372d1c3fb1fSNishanth Aravamudan * not critical, though; it just means the overall size of the 1373d1c3fb1fSNishanth Aravamudan * pool might be one hugepage larger than it needs to be, but 1374d1c3fb1fSNishanth Aravamudan * within all the constraints specified by the sysctls. 13757893d1d5SAdam Litke */ 13761da177e4SLinus Torvalds spin_lock(&hugetlb_lock); 1377a5516438SAndi Kleen while (h->surplus_huge_pages && count > persistent_huge_pages(h)) { 13786ae11b27SLee Schermerhorn if (!adjust_pool_surplus(h, nodes_allowed, -1)) 13797893d1d5SAdam Litke break; 13807893d1d5SAdam Litke } 13817893d1d5SAdam Litke 1382a5516438SAndi Kleen while (count > persistent_huge_pages(h)) { 13837893d1d5SAdam Litke /* 13847893d1d5SAdam Litke * If this allocation races such that we no longer need the 13857893d1d5SAdam Litke * page, free_huge_page will handle it by freeing the page 13867893d1d5SAdam Litke * and reducing the surplus. 13877893d1d5SAdam Litke */ 13887893d1d5SAdam Litke spin_unlock(&hugetlb_lock); 13896ae11b27SLee Schermerhorn ret = alloc_fresh_huge_page(h, nodes_allowed); 13907893d1d5SAdam Litke spin_lock(&hugetlb_lock); 13917893d1d5SAdam Litke if (!ret) 13927893d1d5SAdam Litke goto out; 13937893d1d5SAdam Litke 1394536240f2SMel Gorman /* Bail for signals. Probably ctrl-c from user */ 1395536240f2SMel Gorman if (signal_pending(current)) 1396536240f2SMel Gorman goto out; 13977893d1d5SAdam Litke } 13987893d1d5SAdam Litke 13997893d1d5SAdam Litke /* 14007893d1d5SAdam Litke * Decrease the pool size 14017893d1d5SAdam Litke * First return free pages to the buddy allocator (being careful 14027893d1d5SAdam Litke * to keep enough around to satisfy reservations). Then place 14037893d1d5SAdam Litke * pages into surplus state as needed so the pool will shrink 14047893d1d5SAdam Litke * to the desired size as pages become free. 1405d1c3fb1fSNishanth Aravamudan * 1406d1c3fb1fSNishanth Aravamudan * By placing pages into the surplus state independent of the 1407d1c3fb1fSNishanth Aravamudan * overcommit value, we are allowing the surplus pool size to 1408d1c3fb1fSNishanth Aravamudan * exceed overcommit. There are few sane options here. Since 1409d1c3fb1fSNishanth Aravamudan * alloc_buddy_huge_page() is checking the global counter, 1410d1c3fb1fSNishanth Aravamudan * though, we'll note that we're not allowed to exceed surplus 1411d1c3fb1fSNishanth Aravamudan * and won't grow the pool anywhere else. Not until one of the 1412d1c3fb1fSNishanth Aravamudan * sysctls is changed, or the surplus pages go out of use. 
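 * For example, with nr_huge_pages = 10, free_huge_pages = 4 and resv_huge_pages = 2, min_count below is 2 + 10 - 4 = 8: six pages are in use and two more are promised to reservations, so at most two pages may actually be freed.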
14137893d1d5SAdam Litke */ 1414a5516438SAndi Kleen min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages; 14156b0c880dSAdam Litke min_count = max(count, min_count); 14166ae11b27SLee Schermerhorn try_to_free_low(h, min_count, nodes_allowed); 1417a5516438SAndi Kleen while (min_count < persistent_huge_pages(h)) { 14186ae11b27SLee Schermerhorn if (!free_pool_huge_page(h, nodes_allowed, 0)) 14191da177e4SLinus Torvalds break; 14201da177e4SLinus Torvalds } 1421a5516438SAndi Kleen while (count < persistent_huge_pages(h)) { 14226ae11b27SLee Schermerhorn if (!adjust_pool_surplus(h, nodes_allowed, 1)) 14237893d1d5SAdam Litke break; 14247893d1d5SAdam Litke } 14257893d1d5SAdam Litke out: 1426a5516438SAndi Kleen ret = persistent_huge_pages(h); 14271da177e4SLinus Torvalds spin_unlock(&hugetlb_lock); 14287893d1d5SAdam Litke return ret; 14291da177e4SLinus Torvalds } 14301da177e4SLinus Torvalds 1431a3437870SNishanth Aravamudan #define HSTATE_ATTR_RO(_name) \ 1432a3437870SNishanth Aravamudan static struct kobj_attribute _name##_attr = __ATTR_RO(_name) 1433a3437870SNishanth Aravamudan 1434a3437870SNishanth Aravamudan #define HSTATE_ATTR(_name) \ 1435a3437870SNishanth Aravamudan static struct kobj_attribute _name##_attr = \ 1436a3437870SNishanth Aravamudan __ATTR(_name, 0644, _name##_show, _name##_store) 1437a3437870SNishanth Aravamudan 1438a3437870SNishanth Aravamudan static struct kobject *hugepages_kobj; 1439a3437870SNishanth Aravamudan static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; 1440a3437870SNishanth Aravamudan 14419a305230SLee Schermerhorn static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp); 14429a305230SLee Schermerhorn 14439a305230SLee Schermerhorn static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp) 1444a3437870SNishanth Aravamudan { 1445a3437870SNishanth Aravamudan int i; 14469a305230SLee Schermerhorn 1447a3437870SNishanth Aravamudan for (i = 0; i < HUGE_MAX_HSTATE; i++) 14489a305230SLee Schermerhorn if (hstate_kobjs[i] == kobj) { 14499a305230SLee Schermerhorn if (nidp) 14509a305230SLee Schermerhorn *nidp = NUMA_NO_NODE; 1451a3437870SNishanth Aravamudan return &hstates[i]; 14529a305230SLee Schermerhorn } 14539a305230SLee Schermerhorn 14549a305230SLee Schermerhorn return kobj_to_node_hstate(kobj, nidp); 1455a3437870SNishanth Aravamudan } 1456a3437870SNishanth Aravamudan 145706808b08SLee Schermerhorn static ssize_t nr_hugepages_show_common(struct kobject *kobj, 1458a3437870SNishanth Aravamudan struct kobj_attribute *attr, char *buf) 1459a3437870SNishanth Aravamudan { 14609a305230SLee Schermerhorn struct hstate *h; 14619a305230SLee Schermerhorn unsigned long nr_huge_pages; 14629a305230SLee Schermerhorn int nid; 14639a305230SLee Schermerhorn 14649a305230SLee Schermerhorn h = kobj_to_hstate(kobj, &nid); 14659a305230SLee Schermerhorn if (nid == NUMA_NO_NODE) 14669a305230SLee Schermerhorn nr_huge_pages = h->nr_huge_pages; 14679a305230SLee Schermerhorn else 14689a305230SLee Schermerhorn nr_huge_pages = h->nr_huge_pages_node[nid]; 14699a305230SLee Schermerhorn 14709a305230SLee Schermerhorn return sprintf(buf, "%lu\n", nr_huge_pages); 1471a3437870SNishanth Aravamudan } 1472adbe8726SEric B Munson 147306808b08SLee Schermerhorn static ssize_t nr_hugepages_store_common(bool obey_mempolicy, 147406808b08SLee Schermerhorn struct kobject *kobj, struct kobj_attribute *attr, 147506808b08SLee Schermerhorn const char *buf, size_t len) 1476a3437870SNishanth Aravamudan { 1477a3437870SNishanth Aravamudan int err; 14789a305230SLee Schermerhorn int nid; 
147906808b08SLee Schermerhorn unsigned long count; 14809a305230SLee Schermerhorn struct hstate *h; 1481bad44b5bSDavid Rientjes NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY); 1482a3437870SNishanth Aravamudan 148306808b08SLee Schermerhorn err = strict_strtoul(buf, 10, &count); 148473ae31e5SEric B Munson if (err) 1485adbe8726SEric B Munson goto out; 1486a3437870SNishanth Aravamudan 14879a305230SLee Schermerhorn h = kobj_to_hstate(kobj, &nid); 1488adbe8726SEric B Munson if (h->order >= MAX_ORDER) { 1489adbe8726SEric B Munson err = -EINVAL; 1490adbe8726SEric B Munson goto out; 1491adbe8726SEric B Munson } 1492adbe8726SEric B Munson 14939a305230SLee Schermerhorn if (nid == NUMA_NO_NODE) { 14949a305230SLee Schermerhorn /* 14959a305230SLee Schermerhorn * global hstate attribute 14969a305230SLee Schermerhorn */ 14979a305230SLee Schermerhorn if (!(obey_mempolicy && 14989a305230SLee Schermerhorn init_nodemask_of_mempolicy(nodes_allowed))) { 149906808b08SLee Schermerhorn NODEMASK_FREE(nodes_allowed); 15009a305230SLee Schermerhorn nodes_allowed = &node_states[N_HIGH_MEMORY]; 150106808b08SLee Schermerhorn } 15029a305230SLee Schermerhorn } else if (nodes_allowed) { 15039a305230SLee Schermerhorn /* 15049a305230SLee Schermerhorn * per node hstate attribute: adjust count to global, 15059a305230SLee Schermerhorn * but restrict alloc/free to the specified node. 15069a305230SLee Schermerhorn */ 15079a305230SLee Schermerhorn count += h->nr_huge_pages - h->nr_huge_pages_node[nid]; 15089a305230SLee Schermerhorn init_nodemask_of_node(nodes_allowed, nid); 15099a305230SLee Schermerhorn } else 15109a305230SLee Schermerhorn nodes_allowed = &node_states[N_HIGH_MEMORY]; 15119a305230SLee Schermerhorn 151206808b08SLee Schermerhorn h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed); 1513a3437870SNishanth Aravamudan 15149b5e5d0fSLee Schermerhorn if (nodes_allowed != &node_states[N_HIGH_MEMORY]) 151506808b08SLee Schermerhorn NODEMASK_FREE(nodes_allowed); 151606808b08SLee Schermerhorn 151706808b08SLee Schermerhorn return len; 1518adbe8726SEric B Munson out: 1519adbe8726SEric B Munson NODEMASK_FREE(nodes_allowed); 1520adbe8726SEric B Munson return err; 152106808b08SLee Schermerhorn } 152206808b08SLee Schermerhorn 152306808b08SLee Schermerhorn static ssize_t nr_hugepages_show(struct kobject *kobj, 152406808b08SLee Schermerhorn struct kobj_attribute *attr, char *buf) 152506808b08SLee Schermerhorn { 152606808b08SLee Schermerhorn return nr_hugepages_show_common(kobj, attr, buf); 152706808b08SLee Schermerhorn } 152806808b08SLee Schermerhorn 152906808b08SLee Schermerhorn static ssize_t nr_hugepages_store(struct kobject *kobj, 153006808b08SLee Schermerhorn struct kobj_attribute *attr, const char *buf, size_t len) 153106808b08SLee Schermerhorn { 153206808b08SLee Schermerhorn return nr_hugepages_store_common(false, kobj, attr, buf, len); 1533a3437870SNishanth Aravamudan } 1534a3437870SNishanth Aravamudan HSTATE_ATTR(nr_hugepages); 1535a3437870SNishanth Aravamudan 153606808b08SLee Schermerhorn #ifdef CONFIG_NUMA 153706808b08SLee Schermerhorn 153806808b08SLee Schermerhorn /* 153906808b08SLee Schermerhorn * hstate attribute for optionally mempolicy-based constraint on persistent 154006808b08SLee Schermerhorn * huge page alloc/free. 
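 * A usage sketch (assuming the default 2 MB hstate): writing from a task with a mempolicy, e.g. "numactl -m 2 sh -c 'echo 16 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages_mempolicy'", constrains the adjustment to node 2, whereas plain nr_hugepages ignores the writer's mempolicy.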
154106808b08SLee Schermerhorn */ 154206808b08SLee Schermerhorn static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj, 154306808b08SLee Schermerhorn struct kobj_attribute *attr, char *buf) 154406808b08SLee Schermerhorn { 154506808b08SLee Schermerhorn return nr_hugepages_show_common(kobj, attr, buf); 154606808b08SLee Schermerhorn } 154706808b08SLee Schermerhorn 154806808b08SLee Schermerhorn static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj, 154906808b08SLee Schermerhorn struct kobj_attribute *attr, const char *buf, size_t len) 155006808b08SLee Schermerhorn { 155106808b08SLee Schermerhorn return nr_hugepages_store_common(true, kobj, attr, buf, len); 155206808b08SLee Schermerhorn } 155306808b08SLee Schermerhorn HSTATE_ATTR(nr_hugepages_mempolicy); 155406808b08SLee Schermerhorn #endif 155506808b08SLee Schermerhorn 155606808b08SLee Schermerhorn 1557a3437870SNishanth Aravamudan static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj, 1558a3437870SNishanth Aravamudan struct kobj_attribute *attr, char *buf) 1559a3437870SNishanth Aravamudan { 15609a305230SLee Schermerhorn struct hstate *h = kobj_to_hstate(kobj, NULL); 1561a3437870SNishanth Aravamudan return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages); 1562a3437870SNishanth Aravamudan } 1563adbe8726SEric B Munson 1564a3437870SNishanth Aravamudan static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj, 1565a3437870SNishanth Aravamudan struct kobj_attribute *attr, const char *buf, size_t count) 1566a3437870SNishanth Aravamudan { 1567a3437870SNishanth Aravamudan int err; 1568a3437870SNishanth Aravamudan unsigned long input; 15699a305230SLee Schermerhorn struct hstate *h = kobj_to_hstate(kobj, NULL); 1570a3437870SNishanth Aravamudan 1571adbe8726SEric B Munson if (h->order >= MAX_ORDER) 1572adbe8726SEric B Munson return -EINVAL; 1573adbe8726SEric B Munson 1574a3437870SNishanth Aravamudan err = strict_strtoul(buf, 10, &input); 1575a3437870SNishanth Aravamudan if (err) 157673ae31e5SEric B Munson return err; 1577a3437870SNishanth Aravamudan 1578a3437870SNishanth Aravamudan spin_lock(&hugetlb_lock); 1579a3437870SNishanth Aravamudan h->nr_overcommit_huge_pages = input; 1580a3437870SNishanth Aravamudan spin_unlock(&hugetlb_lock); 1581a3437870SNishanth Aravamudan 1582a3437870SNishanth Aravamudan return count; 1583a3437870SNishanth Aravamudan } 1584a3437870SNishanth Aravamudan HSTATE_ATTR(nr_overcommit_hugepages); 1585a3437870SNishanth Aravamudan 1586a3437870SNishanth Aravamudan static ssize_t free_hugepages_show(struct kobject *kobj, 1587a3437870SNishanth Aravamudan struct kobj_attribute *attr, char *buf) 1588a3437870SNishanth Aravamudan { 15899a305230SLee Schermerhorn struct hstate *h; 15909a305230SLee Schermerhorn unsigned long free_huge_pages; 15919a305230SLee Schermerhorn int nid; 15929a305230SLee Schermerhorn 15939a305230SLee Schermerhorn h = kobj_to_hstate(kobj, &nid); 15949a305230SLee Schermerhorn if (nid == NUMA_NO_NODE) 15959a305230SLee Schermerhorn free_huge_pages = h->free_huge_pages; 15969a305230SLee Schermerhorn else 15979a305230SLee Schermerhorn free_huge_pages = h->free_huge_pages_node[nid]; 15989a305230SLee Schermerhorn 15999a305230SLee Schermerhorn return sprintf(buf, "%lu\n", free_huge_pages); 1600a3437870SNishanth Aravamudan } 1601a3437870SNishanth Aravamudan HSTATE_ATTR_RO(free_hugepages); 1602a3437870SNishanth Aravamudan 1603a3437870SNishanth Aravamudan static ssize_t resv_hugepages_show(struct kobject *kobj, 1604a3437870SNishanth Aravamudan struct kobj_attribute *attr, char *buf) 
1605a3437870SNishanth Aravamudan { 16069a305230SLee Schermerhorn struct hstate *h = kobj_to_hstate(kobj, NULL); 1607a3437870SNishanth Aravamudan return sprintf(buf, "%lu\n", h->resv_huge_pages); 1608a3437870SNishanth Aravamudan } 1609a3437870SNishanth Aravamudan HSTATE_ATTR_RO(resv_hugepages); 1610a3437870SNishanth Aravamudan 1611a3437870SNishanth Aravamudan static ssize_t surplus_hugepages_show(struct kobject *kobj, 1612a3437870SNishanth Aravamudan struct kobj_attribute *attr, char *buf) 1613a3437870SNishanth Aravamudan { 16149a305230SLee Schermerhorn struct hstate *h; 16159a305230SLee Schermerhorn unsigned long surplus_huge_pages; 16169a305230SLee Schermerhorn int nid; 16179a305230SLee Schermerhorn 16189a305230SLee Schermerhorn h = kobj_to_hstate(kobj, &nid); 16199a305230SLee Schermerhorn if (nid == NUMA_NO_NODE) 16209a305230SLee Schermerhorn surplus_huge_pages = h->surplus_huge_pages; 16219a305230SLee Schermerhorn else 16229a305230SLee Schermerhorn surplus_huge_pages = h->surplus_huge_pages_node[nid]; 16239a305230SLee Schermerhorn 16249a305230SLee Schermerhorn return sprintf(buf, "%lu\n", surplus_huge_pages); 1625a3437870SNishanth Aravamudan } 1626a3437870SNishanth Aravamudan HSTATE_ATTR_RO(surplus_hugepages); 1627a3437870SNishanth Aravamudan 1628a3437870SNishanth Aravamudan static struct attribute *hstate_attrs[] = { 1629a3437870SNishanth Aravamudan &nr_hugepages_attr.attr, 1630a3437870SNishanth Aravamudan &nr_overcommit_hugepages_attr.attr, 1631a3437870SNishanth Aravamudan &free_hugepages_attr.attr, 1632a3437870SNishanth Aravamudan &resv_hugepages_attr.attr, 1633a3437870SNishanth Aravamudan &surplus_hugepages_attr.attr, 163406808b08SLee Schermerhorn #ifdef CONFIG_NUMA 163506808b08SLee Schermerhorn &nr_hugepages_mempolicy_attr.attr, 163606808b08SLee Schermerhorn #endif 1637a3437870SNishanth Aravamudan NULL, 1638a3437870SNishanth Aravamudan }; 1639a3437870SNishanth Aravamudan 1640a3437870SNishanth Aravamudan static struct attribute_group hstate_attr_group = { 1641a3437870SNishanth Aravamudan .attrs = hstate_attrs, 1642a3437870SNishanth Aravamudan }; 1643a3437870SNishanth Aravamudan 1644094e9539SJeff Mahoney static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent, 16459a305230SLee Schermerhorn struct kobject **hstate_kobjs, 16469a305230SLee Schermerhorn struct attribute_group *hstate_attr_group) 1647a3437870SNishanth Aravamudan { 1648a3437870SNishanth Aravamudan int retval; 16499a305230SLee Schermerhorn int hi = h - hstates; 1650a3437870SNishanth Aravamudan 16519a305230SLee Schermerhorn hstate_kobjs[hi] = kobject_create_and_add(h->name, parent); 16529a305230SLee Schermerhorn if (!hstate_kobjs[hi]) 1653a3437870SNishanth Aravamudan return -ENOMEM; 1654a3437870SNishanth Aravamudan 16559a305230SLee Schermerhorn retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group); 1656a3437870SNishanth Aravamudan if (retval) 16579a305230SLee Schermerhorn kobject_put(hstate_kobjs[hi]); 1658a3437870SNishanth Aravamudan 1659a3437870SNishanth Aravamudan return retval; 1660a3437870SNishanth Aravamudan } 1661a3437870SNishanth Aravamudan 1662a3437870SNishanth Aravamudan static void __init hugetlb_sysfs_init(void) 1663a3437870SNishanth Aravamudan { 1664a3437870SNishanth Aravamudan struct hstate *h; 1665a3437870SNishanth Aravamudan int err; 1666a3437870SNishanth Aravamudan 1667a3437870SNishanth Aravamudan hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj); 1668a3437870SNishanth Aravamudan if (!hugepages_kobj) 1669a3437870SNishanth Aravamudan return; 1670a3437870SNishanth 
Aravamudan 1671a3437870SNishanth Aravamudan for_each_hstate(h) { 16729a305230SLee Schermerhorn err = hugetlb_sysfs_add_hstate(h, hugepages_kobj, 16739a305230SLee Schermerhorn hstate_kobjs, &hstate_attr_group); 1674a3437870SNishanth Aravamudan if (err) 1675a3437870SNishanth Aravamudan printk(KERN_ERR "Hugetlb: Unable to add hstate %s", 1676a3437870SNishanth Aravamudan h->name); 1677a3437870SNishanth Aravamudan } 1678a3437870SNishanth Aravamudan } 1679a3437870SNishanth Aravamudan 16809a305230SLee Schermerhorn #ifdef CONFIG_NUMA 16819a305230SLee Schermerhorn 16829a305230SLee Schermerhorn /* 16839a305230SLee Schermerhorn * node_hstate/s - associate per node hstate attributes, via their kobjects, 168410fbcf4cSKay Sievers * with node devices in node_devices[] using a parallel array. The array 168510fbcf4cSKay Sievers * index of a node device or _hstate == node id. 168610fbcf4cSKay Sievers * This is here to avoid any static dependency of the node device driver, in 16879a305230SLee Schermerhorn * the base kernel, on the hugetlb module. 16889a305230SLee Schermerhorn */ 16899a305230SLee Schermerhorn struct node_hstate { 16909a305230SLee Schermerhorn struct kobject *hugepages_kobj; 16919a305230SLee Schermerhorn struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; 16929a305230SLee Schermerhorn }; 16939a305230SLee Schermerhorn struct node_hstate node_hstates[MAX_NUMNODES]; 16949a305230SLee Schermerhorn 16959a305230SLee Schermerhorn /* 169610fbcf4cSKay Sievers * A subset of global hstate attributes for node devices 16979a305230SLee Schermerhorn */ 16989a305230SLee Schermerhorn static struct attribute *per_node_hstate_attrs[] = { 16999a305230SLee Schermerhorn &nr_hugepages_attr.attr, 17009a305230SLee Schermerhorn &free_hugepages_attr.attr, 17019a305230SLee Schermerhorn &surplus_hugepages_attr.attr, 17029a305230SLee Schermerhorn NULL, 17039a305230SLee Schermerhorn }; 17049a305230SLee Schermerhorn 17059a305230SLee Schermerhorn static struct attribute_group per_node_hstate_attr_group = { 17069a305230SLee Schermerhorn .attrs = per_node_hstate_attrs, 17079a305230SLee Schermerhorn }; 17089a305230SLee Schermerhorn 17099a305230SLee Schermerhorn /* 171010fbcf4cSKay Sievers * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj. 17119a305230SLee Schermerhorn * Returns node id via non-NULL nidp. 17129a305230SLee Schermerhorn */ 17139a305230SLee Schermerhorn static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp) 17149a305230SLee Schermerhorn { 17159a305230SLee Schermerhorn int nid; 17169a305230SLee Schermerhorn 17179a305230SLee Schermerhorn for (nid = 0; nid < nr_node_ids; nid++) { 17189a305230SLee Schermerhorn struct node_hstate *nhs = &node_hstates[nid]; 17199a305230SLee Schermerhorn int i; 17209a305230SLee Schermerhorn for (i = 0; i < HUGE_MAX_HSTATE; i++) 17219a305230SLee Schermerhorn if (nhs->hstate_kobjs[i] == kobj) { 17229a305230SLee Schermerhorn if (nidp) 17239a305230SLee Schermerhorn *nidp = nid; 17249a305230SLee Schermerhorn return &hstates[i]; 17259a305230SLee Schermerhorn } 17269a305230SLee Schermerhorn } 17279a305230SLee Schermerhorn 17289a305230SLee Schermerhorn BUG(); 17299a305230SLee Schermerhorn return NULL; 17309a305230SLee Schermerhorn } 17319a305230SLee Schermerhorn 17329a305230SLee Schermerhorn /* 173310fbcf4cSKay Sievers * Unregister hstate attributes from a single node device. 17349a305230SLee Schermerhorn * No-op if no hstate attributes attached. 
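 * Also safe after a partial hugetlb_register_node(): each hstate kobject pointer is checked before it is put.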
17359a305230SLee Schermerhorn */ 17369a305230SLee Schermerhorn void hugetlb_unregister_node(struct node *node) 17379a305230SLee Schermerhorn { 17389a305230SLee Schermerhorn struct hstate *h; 173910fbcf4cSKay Sievers struct node_hstate *nhs = &node_hstates[node->dev.id]; 17409a305230SLee Schermerhorn 17419a305230SLee Schermerhorn if (!nhs->hugepages_kobj) 17429b5e5d0fSLee Schermerhorn return; /* no hstate attributes */ 17439a305230SLee Schermerhorn 17449a305230SLee Schermerhorn for_each_hstate(h) 17459a305230SLee Schermerhorn if (nhs->hstate_kobjs[h - hstates]) { 17469a305230SLee Schermerhorn kobject_put(nhs->hstate_kobjs[h - hstates]); 17479a305230SLee Schermerhorn nhs->hstate_kobjs[h - hstates] = NULL; 17489a305230SLee Schermerhorn } 17499a305230SLee Schermerhorn 17509a305230SLee Schermerhorn kobject_put(nhs->hugepages_kobj); 17519a305230SLee Schermerhorn nhs->hugepages_kobj = NULL; 17529a305230SLee Schermerhorn } 17539a305230SLee Schermerhorn 17549a305230SLee Schermerhorn /* 175510fbcf4cSKay Sievers * hugetlb module exit: unregister hstate attributes from node devices 17569a305230SLee Schermerhorn * that have them. 17579a305230SLee Schermerhorn */ 17589a305230SLee Schermerhorn static void hugetlb_unregister_all_nodes(void) 17599a305230SLee Schermerhorn { 17609a305230SLee Schermerhorn int nid; 17619a305230SLee Schermerhorn 17629a305230SLee Schermerhorn /* 176310fbcf4cSKay Sievers * disable node device registrations. 17649a305230SLee Schermerhorn */ 17659a305230SLee Schermerhorn register_hugetlbfs_with_node(NULL, NULL); 17669a305230SLee Schermerhorn 17679a305230SLee Schermerhorn /* 17689a305230SLee Schermerhorn * remove hstate attributes from any nodes that have them. 17699a305230SLee Schermerhorn */ 17709a305230SLee Schermerhorn for (nid = 0; nid < nr_node_ids; nid++) 17719a305230SLee Schermerhorn hugetlb_unregister_node(&node_devices[nid]); 17729a305230SLee Schermerhorn } 17739a305230SLee Schermerhorn 17749a305230SLee Schermerhorn /* 177510fbcf4cSKay Sievers * Register hstate attributes for a single node device. 17769a305230SLee Schermerhorn * No-op if attributes already registered. 
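 * On success the files should appear under /sys/devices/system/node/nodeN/hugepages/hugepages-<size>kB/ (path inferred from the node device kobject).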
17779a305230SLee Schermerhorn */ 17789a305230SLee Schermerhorn void hugetlb_register_node(struct node *node) 17799a305230SLee Schermerhorn { 17809a305230SLee Schermerhorn struct hstate *h; 178110fbcf4cSKay Sievers struct node_hstate *nhs = &node_hstates[node->dev.id]; 17829a305230SLee Schermerhorn int err; 17839a305230SLee Schermerhorn 17849a305230SLee Schermerhorn if (nhs->hugepages_kobj) 17859a305230SLee Schermerhorn return; /* already allocated */ 17869a305230SLee Schermerhorn 17879a305230SLee Schermerhorn nhs->hugepages_kobj = kobject_create_and_add("hugepages", 178810fbcf4cSKay Sievers &node->dev.kobj); 17899a305230SLee Schermerhorn if (!nhs->hugepages_kobj) 17909a305230SLee Schermerhorn return; 17919a305230SLee Schermerhorn 17929a305230SLee Schermerhorn for_each_hstate(h) { 17939a305230SLee Schermerhorn err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj, 17949a305230SLee Schermerhorn nhs->hstate_kobjs, 17959a305230SLee Schermerhorn &per_node_hstate_attr_group); 17969a305230SLee Schermerhorn if (err) { 17979a305230SLee Schermerhorn printk(KERN_ERR "Hugetlb: Unable to add hstate %s" 17989a305230SLee Schermerhorn " for node %d\n", 179910fbcf4cSKay Sievers h->name, node->dev.id); 18009a305230SLee Schermerhorn hugetlb_unregister_node(node); 18019a305230SLee Schermerhorn break; 18029a305230SLee Schermerhorn } 18039a305230SLee Schermerhorn } 18049a305230SLee Schermerhorn } 18059a305230SLee Schermerhorn 18069a305230SLee Schermerhorn /* 18079b5e5d0fSLee Schermerhorn * hugetlb init time: register hstate attributes for all registered node 180810fbcf4cSKay Sievers * devices of nodes that have memory. All on-line nodes should have 180910fbcf4cSKay Sievers * registered their associated device by this time. 18109a305230SLee Schermerhorn */ 18119a305230SLee Schermerhorn static void hugetlb_register_all_nodes(void) 18129a305230SLee Schermerhorn { 18139a305230SLee Schermerhorn int nid; 18149a305230SLee Schermerhorn 18159b5e5d0fSLee Schermerhorn for_each_node_state(nid, N_HIGH_MEMORY) { 18169a305230SLee Schermerhorn struct node *node = &node_devices[nid]; 181710fbcf4cSKay Sievers if (node->dev.id == nid) 18189a305230SLee Schermerhorn hugetlb_register_node(node); 18199a305230SLee Schermerhorn } 18209a305230SLee Schermerhorn 18219a305230SLee Schermerhorn /* 182210fbcf4cSKay Sievers * Let the node device driver know we're here so it can 18239a305230SLee Schermerhorn * [un]register hstate attributes on node hotplug. 
18249a305230SLee Schermerhorn */ 18259a305230SLee Schermerhorn register_hugetlbfs_with_node(hugetlb_register_node, 18269a305230SLee Schermerhorn hugetlb_unregister_node); 18279a305230SLee Schermerhorn } 18289a305230SLee Schermerhorn #else /* !CONFIG_NUMA */ 18299a305230SLee Schermerhorn 18309a305230SLee Schermerhorn static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp) 18319a305230SLee Schermerhorn { 18329a305230SLee Schermerhorn BUG(); 18339a305230SLee Schermerhorn if (nidp) 18349a305230SLee Schermerhorn *nidp = -1; 18359a305230SLee Schermerhorn return NULL; 18369a305230SLee Schermerhorn } 18379a305230SLee Schermerhorn 18389a305230SLee Schermerhorn static void hugetlb_unregister_all_nodes(void) { } 18399a305230SLee Schermerhorn 18409a305230SLee Schermerhorn static void hugetlb_register_all_nodes(void) { } 18419a305230SLee Schermerhorn 18429a305230SLee Schermerhorn #endif 18439a305230SLee Schermerhorn 1844a3437870SNishanth Aravamudan static void __exit hugetlb_exit(void) 1845a3437870SNishanth Aravamudan { 1846a3437870SNishanth Aravamudan struct hstate *h; 1847a3437870SNishanth Aravamudan 18489a305230SLee Schermerhorn hugetlb_unregister_all_nodes(); 18499a305230SLee Schermerhorn 1850a3437870SNishanth Aravamudan for_each_hstate(h) { 1851a3437870SNishanth Aravamudan kobject_put(hstate_kobjs[h - hstates]); 1852a3437870SNishanth Aravamudan } 1853a3437870SNishanth Aravamudan 1854a3437870SNishanth Aravamudan kobject_put(hugepages_kobj); 1855a3437870SNishanth Aravamudan } 1856a3437870SNishanth Aravamudan module_exit(hugetlb_exit); 1857a3437870SNishanth Aravamudan 1858a3437870SNishanth Aravamudan static int __init hugetlb_init(void) 1859a3437870SNishanth Aravamudan { 18600ef89d25SBenjamin Herrenschmidt /* Some platforms decide whether they support huge pages at boot 18610ef89d25SBenjamin Herrenschmidt * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when 18620ef89d25SBenjamin Herrenschmidt * there is no such support. 18630ef89d25SBenjamin Herrenschmidt */ 18640ef89d25SBenjamin Herrenschmidt if (HPAGE_SHIFT == 0) 18650ef89d25SBenjamin Herrenschmidt return 0; 1866a3437870SNishanth Aravamudan 1867e11bfbfcSNick Piggin if (!size_to_hstate(default_hstate_size)) { 1868e11bfbfcSNick Piggin default_hstate_size = HPAGE_SIZE; 1869e11bfbfcSNick Piggin if (!size_to_hstate(default_hstate_size)) 1870a3437870SNishanth Aravamudan hugetlb_add_hstate(HUGETLB_PAGE_ORDER); 1871a3437870SNishanth Aravamudan } 1872e11bfbfcSNick Piggin default_hstate_idx = size_to_hstate(default_hstate_size) - hstates; 1873e11bfbfcSNick Piggin if (default_hstate_max_huge_pages) 1874e11bfbfcSNick Piggin default_hstate.max_huge_pages = default_hstate_max_huge_pages; 1875a3437870SNishanth Aravamudan 1876a3437870SNishanth Aravamudan hugetlb_init_hstates(); 1877a3437870SNishanth Aravamudan 1878aa888a74SAndi Kleen gather_bootmem_prealloc(); 1879aa888a74SAndi Kleen 1880a3437870SNishanth Aravamudan report_hugepages(); 1881a3437870SNishanth Aravamudan 1882a3437870SNishanth Aravamudan hugetlb_sysfs_init(); 1883a3437870SNishanth Aravamudan 18849a305230SLee Schermerhorn hugetlb_register_all_nodes(); 18859a305230SLee Schermerhorn 1886a3437870SNishanth Aravamudan return 0; 1887a3437870SNishanth Aravamudan } 1888a3437870SNishanth Aravamudan module_init(hugetlb_init); 1889a3437870SNishanth Aravamudan 1890a3437870SNishanth Aravamudan /* Should be called on processing a hugepagesz=... 
option */ 1891a3437870SNishanth Aravamudan void __init hugetlb_add_hstate(unsigned order) 1892a3437870SNishanth Aravamudan { 1893a3437870SNishanth Aravamudan struct hstate *h; 18948faa8b07SAndi Kleen unsigned long i; 18958faa8b07SAndi Kleen 1896a3437870SNishanth Aravamudan if (size_to_hstate(PAGE_SIZE << order)) { 1897a3437870SNishanth Aravamudan printk(KERN_WARNING "hugepagesz= specified twice, ignoring\n"); 1898a3437870SNishanth Aravamudan return; 1899a3437870SNishanth Aravamudan } 1900a3437870SNishanth Aravamudan BUG_ON(max_hstate >= HUGE_MAX_HSTATE); 1901a3437870SNishanth Aravamudan BUG_ON(order == 0); 1902a3437870SNishanth Aravamudan h = &hstates[max_hstate++]; 1903a3437870SNishanth Aravamudan h->order = order; 1904a3437870SNishanth Aravamudan h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1); 19058faa8b07SAndi Kleen h->nr_huge_pages = 0; 19068faa8b07SAndi Kleen h->free_huge_pages = 0; 19078faa8b07SAndi Kleen for (i = 0; i < MAX_NUMNODES; ++i) 19088faa8b07SAndi Kleen INIT_LIST_HEAD(&h->hugepage_freelists[i]); 19099b5e5d0fSLee Schermerhorn h->next_nid_to_alloc = first_node(node_states[N_HIGH_MEMORY]); 19109b5e5d0fSLee Schermerhorn h->next_nid_to_free = first_node(node_states[N_HIGH_MEMORY]); 1911a3437870SNishanth Aravamudan snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB", 1912a3437870SNishanth Aravamudan huge_page_size(h)/1024); 19138faa8b07SAndi Kleen 1914a3437870SNishanth Aravamudan parsed_hstate = h; 1915a3437870SNishanth Aravamudan } 1916a3437870SNishanth Aravamudan 1917e11bfbfcSNick Piggin static int __init hugetlb_nrpages_setup(char *s) 1918a3437870SNishanth Aravamudan { 1919a3437870SNishanth Aravamudan unsigned long *mhp; 19208faa8b07SAndi Kleen static unsigned long *last_mhp; 1921a3437870SNishanth Aravamudan 1922a3437870SNishanth Aravamudan /* 1923a3437870SNishanth Aravamudan * !max_hstate means we haven't parsed a hugepagesz= parameter yet, 1924a3437870SNishanth Aravamudan * so this hugepages= parameter goes to the "default hstate". 1925a3437870SNishanth Aravamudan */ 1926a3437870SNishanth Aravamudan if (!max_hstate) 1927a3437870SNishanth Aravamudan mhp = &default_hstate_max_huge_pages; 1928a3437870SNishanth Aravamudan else 1929a3437870SNishanth Aravamudan mhp = &parsed_hstate->max_huge_pages; 1930a3437870SNishanth Aravamudan 19318faa8b07SAndi Kleen if (mhp == last_mhp) { 19328faa8b07SAndi Kleen printk(KERN_WARNING "hugepages= specified twice without " 19338faa8b07SAndi Kleen "interleaving hugepagesz=, ignoring\n"); 19348faa8b07SAndi Kleen return 1; 19358faa8b07SAndi Kleen } 19368faa8b07SAndi Kleen 1937a3437870SNishanth Aravamudan if (sscanf(s, "%lu", mhp) <= 0) 1938a3437870SNishanth Aravamudan *mhp = 0; 1939a3437870SNishanth Aravamudan 19408faa8b07SAndi Kleen /* 19418faa8b07SAndi Kleen * Global state is always initialized later in hugetlb_init. 19428faa8b07SAndi Kleen * But we need to allocate >= MAX_ORDER hstates here early to still 19438faa8b07SAndi Kleen * use the bootmem allocator. 
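 * hstates of order < MAX_ORDER are left for hugetlb_init_hstates(), which can allocate from the buddy allocator once it is up.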
19448faa8b07SAndi Kleen */ 19458faa8b07SAndi Kleen if (max_hstate && parsed_hstate->order >= MAX_ORDER) 19468faa8b07SAndi Kleen hugetlb_hstate_alloc_pages(parsed_hstate); 19478faa8b07SAndi Kleen 19488faa8b07SAndi Kleen last_mhp = mhp; 19498faa8b07SAndi Kleen 1950a3437870SNishanth Aravamudan return 1; 1951a3437870SNishanth Aravamudan } 1952e11bfbfcSNick Piggin __setup("hugepages=", hugetlb_nrpages_setup); 1953e11bfbfcSNick Piggin 1954e11bfbfcSNick Piggin static int __init hugetlb_default_setup(char *s) 1955e11bfbfcSNick Piggin { 1956e11bfbfcSNick Piggin default_hstate_size = memparse(s, &s); 1957e11bfbfcSNick Piggin return 1; 1958e11bfbfcSNick Piggin } 1959e11bfbfcSNick Piggin __setup("default_hugepagesz=", hugetlb_default_setup); 1960a3437870SNishanth Aravamudan 19618a213460SNishanth Aravamudan static unsigned int cpuset_mems_nr(unsigned int *array) 19628a213460SNishanth Aravamudan { 19638a213460SNishanth Aravamudan int node; 19648a213460SNishanth Aravamudan unsigned int nr = 0; 19658a213460SNishanth Aravamudan 19668a213460SNishanth Aravamudan for_each_node_mask(node, cpuset_current_mems_allowed) 19678a213460SNishanth Aravamudan nr += array[node]; 19688a213460SNishanth Aravamudan 19698a213460SNishanth Aravamudan return nr; 19708a213460SNishanth Aravamudan } 19718a213460SNishanth Aravamudan 19728a213460SNishanth Aravamudan #ifdef CONFIG_SYSCTL 197306808b08SLee Schermerhorn static int hugetlb_sysctl_handler_common(bool obey_mempolicy, 197406808b08SLee Schermerhorn struct ctl_table *table, int write, 197506808b08SLee Schermerhorn void __user *buffer, size_t *length, loff_t *ppos) 19761da177e4SLinus Torvalds { 1977e5ff2159SAndi Kleen struct hstate *h = &default_hstate; 1978e5ff2159SAndi Kleen unsigned long tmp; 197908d4a246SMichal Hocko int ret; 1980e5ff2159SAndi Kleen 1981e5ff2159SAndi Kleen tmp = h->max_huge_pages; 1982e5ff2159SAndi Kleen 1983adbe8726SEric B Munson if (write && h->order >= MAX_ORDER) 1984adbe8726SEric B Munson return -EINVAL; 1985adbe8726SEric B Munson 1986e5ff2159SAndi Kleen table->data = &tmp; 1987e5ff2159SAndi Kleen table->maxlen = sizeof(unsigned long); 198808d4a246SMichal Hocko ret = proc_doulongvec_minmax(table, write, buffer, length, ppos); 198908d4a246SMichal Hocko if (ret) 199008d4a246SMichal Hocko goto out; 1991e5ff2159SAndi Kleen 199206808b08SLee Schermerhorn if (write) { 1993bad44b5bSDavid Rientjes NODEMASK_ALLOC(nodemask_t, nodes_allowed, 1994bad44b5bSDavid Rientjes GFP_KERNEL | __GFP_NORETRY); 199506808b08SLee Schermerhorn if (!(obey_mempolicy && 199606808b08SLee Schermerhorn init_nodemask_of_mempolicy(nodes_allowed))) { 199706808b08SLee Schermerhorn NODEMASK_FREE(nodes_allowed); 199806808b08SLee Schermerhorn nodes_allowed = &node_states[N_HIGH_MEMORY]; 199906808b08SLee Schermerhorn } 200006808b08SLee Schermerhorn h->max_huge_pages = set_max_huge_pages(h, tmp, nodes_allowed); 200106808b08SLee Schermerhorn 200206808b08SLee Schermerhorn if (nodes_allowed != &node_states[N_HIGH_MEMORY]) 200306808b08SLee Schermerhorn NODEMASK_FREE(nodes_allowed); 200406808b08SLee Schermerhorn } 200508d4a246SMichal Hocko out: 200608d4a246SMichal Hocko return ret; 20071da177e4SLinus Torvalds } 2008396faf03SMel Gorman 200906808b08SLee Schermerhorn int hugetlb_sysctl_handler(struct ctl_table *table, int write, 201006808b08SLee Schermerhorn void __user *buffer, size_t *length, loff_t *ppos) 201106808b08SLee Schermerhorn { 201206808b08SLee Schermerhorn 201306808b08SLee Schermerhorn return hugetlb_sysctl_handler_common(false, table, write, 201406808b08SLee Schermerhorn buffer, length, 
ppos); 201506808b08SLee Schermerhorn } 201606808b08SLee Schermerhorn 201706808b08SLee Schermerhorn #ifdef CONFIG_NUMA 201806808b08SLee Schermerhorn int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write, 201906808b08SLee Schermerhorn void __user *buffer, size_t *length, loff_t *ppos) 202006808b08SLee Schermerhorn { 202106808b08SLee Schermerhorn return hugetlb_sysctl_handler_common(true, table, write, 202206808b08SLee Schermerhorn buffer, length, ppos); 202306808b08SLee Schermerhorn } 202406808b08SLee Schermerhorn #endif /* CONFIG_NUMA */ 202506808b08SLee Schermerhorn 2026396faf03SMel Gorman int hugetlb_treat_movable_handler(struct ctl_table *table, int write, 20278d65af78SAlexey Dobriyan void __user *buffer, 2028396faf03SMel Gorman size_t *length, loff_t *ppos) 2029396faf03SMel Gorman { 20308d65af78SAlexey Dobriyan proc_dointvec(table, write, buffer, length, ppos); 2031396faf03SMel Gorman if (hugepages_treat_as_movable) 2032396faf03SMel Gorman htlb_alloc_mask = GFP_HIGHUSER_MOVABLE; 2033396faf03SMel Gorman else 2034396faf03SMel Gorman htlb_alloc_mask = GFP_HIGHUSER; 2035396faf03SMel Gorman return 0; 2036396faf03SMel Gorman } 2037396faf03SMel Gorman 2038a3d0c6aaSNishanth Aravamudan int hugetlb_overcommit_handler(struct ctl_table *table, int write, 20398d65af78SAlexey Dobriyan void __user *buffer, 2040a3d0c6aaSNishanth Aravamudan size_t *length, loff_t *ppos) 2041a3d0c6aaSNishanth Aravamudan { 2042a5516438SAndi Kleen struct hstate *h = &default_hstate; 2043e5ff2159SAndi Kleen unsigned long tmp; 204408d4a246SMichal Hocko int ret; 2045e5ff2159SAndi Kleen 2046e5ff2159SAndi Kleen tmp = h->nr_overcommit_huge_pages; 2047e5ff2159SAndi Kleen 2048adbe8726SEric B Munson if (write && h->order >= MAX_ORDER) 2049adbe8726SEric B Munson return -EINVAL; 2050adbe8726SEric B Munson 2051e5ff2159SAndi Kleen table->data = &tmp; 2052e5ff2159SAndi Kleen table->maxlen = sizeof(unsigned long); 205308d4a246SMichal Hocko ret = proc_doulongvec_minmax(table, write, buffer, length, ppos); 205408d4a246SMichal Hocko if (ret) 205508d4a246SMichal Hocko goto out; 2056e5ff2159SAndi Kleen 2057e5ff2159SAndi Kleen if (write) { 2058064d9efeSNishanth Aravamudan spin_lock(&hugetlb_lock); 2059e5ff2159SAndi Kleen h->nr_overcommit_huge_pages = tmp; 2060a3d0c6aaSNishanth Aravamudan spin_unlock(&hugetlb_lock); 2061e5ff2159SAndi Kleen } 206208d4a246SMichal Hocko out: 206308d4a246SMichal Hocko return ret; 2064a3d0c6aaSNishanth Aravamudan } 2065a3d0c6aaSNishanth Aravamudan 20661da177e4SLinus Torvalds #endif /* CONFIG_SYSCTL */ 20671da177e4SLinus Torvalds 2068e1759c21SAlexey Dobriyan void hugetlb_report_meminfo(struct seq_file *m) 20691da177e4SLinus Torvalds { 2070a5516438SAndi Kleen struct hstate *h = &default_hstate; 2071e1759c21SAlexey Dobriyan seq_printf(m, 20721da177e4SLinus Torvalds "HugePages_Total: %5lu\n" 20731da177e4SLinus Torvalds "HugePages_Free: %5lu\n" 2074b45b5bd6SDavid Gibson "HugePages_Rsvd: %5lu\n" 20757893d1d5SAdam Litke "HugePages_Surp: %5lu\n" 20764f98a2feSRik van Riel "Hugepagesize: %8lu kB\n", 2077a5516438SAndi Kleen h->nr_huge_pages, 2078a5516438SAndi Kleen h->free_huge_pages, 2079a5516438SAndi Kleen h->resv_huge_pages, 2080a5516438SAndi Kleen h->surplus_huge_pages, 2081a5516438SAndi Kleen 1UL << (huge_page_order(h) + PAGE_SHIFT - 10)); 20821da177e4SLinus Torvalds } 20831da177e4SLinus Torvalds 20841da177e4SLinus Torvalds int hugetlb_report_node_meminfo(int nid, char *buf) 20851da177e4SLinus Torvalds { 2086a5516438SAndi Kleen struct hstate *h = &default_hstate; 20871da177e4SLinus Torvalds return 
sprintf(buf, 20881da177e4SLinus Torvalds "Node %d HugePages_Total: %5u\n" 2089a1de0919SNishanth Aravamudan "Node %d HugePages_Free: %5u\n" 2090a1de0919SNishanth Aravamudan "Node %d HugePages_Surp: %5u\n", 2091a5516438SAndi Kleen nid, h->nr_huge_pages_node[nid], 2092a5516438SAndi Kleen nid, h->free_huge_pages_node[nid], 2093a5516438SAndi Kleen nid, h->surplus_huge_pages_node[nid]); 20941da177e4SLinus Torvalds } 20951da177e4SLinus Torvalds 20961da177e4SLinus Torvalds /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */ 20971da177e4SLinus Torvalds unsigned long hugetlb_total_pages(void) 20981da177e4SLinus Torvalds { 2099a5516438SAndi Kleen struct hstate *h = &default_hstate; 2100a5516438SAndi Kleen return h->nr_huge_pages * pages_per_huge_page(h); 21011da177e4SLinus Torvalds } 21021da177e4SLinus Torvalds 2103a5516438SAndi Kleen static int hugetlb_acct_memory(struct hstate *h, long delta) 2104fc1b8a73SMel Gorman { 2105fc1b8a73SMel Gorman int ret = -ENOMEM; 2106fc1b8a73SMel Gorman 2107fc1b8a73SMel Gorman spin_lock(&hugetlb_lock); 2108fc1b8a73SMel Gorman /* 2109fc1b8a73SMel Gorman * When cpuset is configured, it breaks the strict hugetlb page 2110fc1b8a73SMel Gorman * reservation as the accounting is done on a global variable. Such 2111fc1b8a73SMel Gorman * reservation is completely rubbish in the presence of cpuset because 2112fc1b8a73SMel Gorman * the reservation is not checked against page availability for the 2113fc1b8a73SMel Gorman * current cpuset. An application can still potentially be OOM'ed by the kernel 2114fc1b8a73SMel Gorman * for lack of free htlb pages in the cpuset that the task is in. 2115fc1b8a73SMel Gorman * Attempting to enforce strict accounting with cpuset is almost 2116fc1b8a73SMel Gorman * impossible (or too ugly) because cpusets are so fluid that 2117fc1b8a73SMel Gorman * tasks or memory nodes can be dynamically moved between them. 2118fc1b8a73SMel Gorman * 2119fc1b8a73SMel Gorman * The change of semantics for shared hugetlb mapping with cpuset is 2120fc1b8a73SMel Gorman * undesirable. However, in order to preserve some of the semantics, 2121fc1b8a73SMel Gorman * we fall back to check against current free page availability as 2122fc1b8a73SMel Gorman * a best attempt and hopefully to minimize the impact of changing 2123fc1b8a73SMel Gorman * semantics that cpuset has. 2124fc1b8a73SMel Gorman */ 2125fc1b8a73SMel Gorman if (delta > 0) { 2126a5516438SAndi Kleen if (gather_surplus_pages(h, delta) < 0) 2127fc1b8a73SMel Gorman goto out; 2128fc1b8a73SMel Gorman 2129a5516438SAndi Kleen if (delta > cpuset_mems_nr(h->free_huge_pages_node)) { 2130a5516438SAndi Kleen return_unused_surplus_pages(h, delta); 2131fc1b8a73SMel Gorman goto out; 2132fc1b8a73SMel Gorman } 2133fc1b8a73SMel Gorman } 2134fc1b8a73SMel Gorman 2135fc1b8a73SMel Gorman ret = 0; 2136fc1b8a73SMel Gorman if (delta < 0) 2137a5516438SAndi Kleen return_unused_surplus_pages(h, (unsigned long) -delta); 2138fc1b8a73SMel Gorman 2139fc1b8a73SMel Gorman out: 2140fc1b8a73SMel Gorman spin_unlock(&hugetlb_lock); 2141fc1b8a73SMel Gorman return ret; 2142fc1b8a73SMel Gorman } 2143fc1b8a73SMel Gorman 214484afd99bSAndy Whitcroft static void hugetlb_vm_op_open(struct vm_area_struct *vma) 214584afd99bSAndy Whitcroft { 214684afd99bSAndy Whitcroft struct resv_map *reservations = vma_resv_map(vma); 214784afd99bSAndy Whitcroft 214884afd99bSAndy Whitcroft /* 214984afd99bSAndy Whitcroft * This new VMA should share its sibling's reservation map if present. 
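 * (The open callback runs whenever a hugetlb VMA is duplicated, e.g. on fork() or when a VMA is split.)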
215084afd99bSAndy Whitcroft * The VMA will only ever have a valid reservation map pointer where 215184afd99bSAndy Whitcroft * it is being copied for another still existing VMA. As that VMA 215225985edcSLucas De Marchi * has a reference to the reservation map, it cannot disappear until 215384afd99bSAndy Whitcroft * after this open call completes. It is therefore safe to take a 215484afd99bSAndy Whitcroft * new reference here without additional locking. 215584afd99bSAndy Whitcroft */ 215684afd99bSAndy Whitcroft if (reservations) 215784afd99bSAndy Whitcroft kref_get(&reservations->refs); 215884afd99bSAndy Whitcroft } 215984afd99bSAndy Whitcroft 2160c50ac050SDave Hansen static void resv_map_put(struct vm_area_struct *vma) 2161c50ac050SDave Hansen { 2162c50ac050SDave Hansen struct resv_map *reservations = vma_resv_map(vma); 2163c50ac050SDave Hansen 2164c50ac050SDave Hansen if (!reservations) 2165c50ac050SDave Hansen return; 2166c50ac050SDave Hansen kref_put(&reservations->refs, resv_map_release); 2167c50ac050SDave Hansen } 2168c50ac050SDave Hansen 2169a1e78772SMel Gorman static void hugetlb_vm_op_close(struct vm_area_struct *vma) 2170a1e78772SMel Gorman { 2171a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 217284afd99bSAndy Whitcroft struct resv_map *reservations = vma_resv_map(vma); 217390481622SDavid Gibson struct hugepage_subpool *spool = subpool_vma(vma); 217484afd99bSAndy Whitcroft unsigned long reserve; 217584afd99bSAndy Whitcroft unsigned long start; 217684afd99bSAndy Whitcroft unsigned long end; 217784afd99bSAndy Whitcroft 217884afd99bSAndy Whitcroft if (reservations) { 2179a5516438SAndi Kleen start = vma_hugecache_offset(h, vma, vma->vm_start); 2180a5516438SAndi Kleen end = vma_hugecache_offset(h, vma, vma->vm_end); 218184afd99bSAndy Whitcroft 218284afd99bSAndy Whitcroft reserve = (end - start) - 218384afd99bSAndy Whitcroft region_count(&reservations->regions, start, end); 218484afd99bSAndy Whitcroft 2185c50ac050SDave Hansen resv_map_put(vma); 218684afd99bSAndy Whitcroft 21877251ff78SAdam Litke if (reserve) { 2188a5516438SAndi Kleen hugetlb_acct_memory(h, -reserve); 218990481622SDavid Gibson hugepage_subpool_put_pages(spool, reserve); 21907251ff78SAdam Litke } 2191a1e78772SMel Gorman } 219284afd99bSAndy Whitcroft } 2193a1e78772SMel Gorman 21941da177e4SLinus Torvalds /* 21951da177e4SLinus Torvalds * We cannot handle pagefaults against hugetlb pages at all. They cause 21961da177e4SLinus Torvalds * handle_mm_fault() to try to instantiate regular-sized pages in the 21971da177e4SLinus Torvalds * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get 21981da177e4SLinus Torvalds * this far. 
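 * (handle_mm_fault() dispatches hugetlb VMAs straight to hugetlb_fault(), so this .fault handler should be unreachable.)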
21991da177e4SLinus Torvalds */ 2200d0217ac0SNick Piggin static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 22011da177e4SLinus Torvalds { 22021da177e4SLinus Torvalds BUG(); 2203d0217ac0SNick Piggin return 0; 22041da177e4SLinus Torvalds } 22051da177e4SLinus Torvalds 2206f0f37e2fSAlexey Dobriyan const struct vm_operations_struct hugetlb_vm_ops = { 2207d0217ac0SNick Piggin .fault = hugetlb_vm_op_fault, 220884afd99bSAndy Whitcroft .open = hugetlb_vm_op_open, 2209a1e78772SMel Gorman .close = hugetlb_vm_op_close, 22101da177e4SLinus Torvalds }; 22111da177e4SLinus Torvalds 22121e8f889bSDavid Gibson static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page, 22131e8f889bSDavid Gibson int writable) 221463551ae0SDavid Gibson { 221563551ae0SDavid Gibson pte_t entry; 221663551ae0SDavid Gibson 22171e8f889bSDavid Gibson if (writable) { 221863551ae0SDavid Gibson entry = 221963551ae0SDavid Gibson pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot))); 222063551ae0SDavid Gibson } else { 22217f2e9525SGerald Schaefer entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot)); 222263551ae0SDavid Gibson } 222363551ae0SDavid Gibson entry = pte_mkyoung(entry); 222463551ae0SDavid Gibson entry = pte_mkhuge(entry); 2225d9ed9faaSChris Metcalf entry = arch_make_huge_pte(entry, vma, page, writable); 222663551ae0SDavid Gibson 222763551ae0SDavid Gibson return entry; 222863551ae0SDavid Gibson } 222963551ae0SDavid Gibson 22301e8f889bSDavid Gibson static void set_huge_ptep_writable(struct vm_area_struct *vma, 22311e8f889bSDavid Gibson unsigned long address, pte_t *ptep) 22321e8f889bSDavid Gibson { 22331e8f889bSDavid Gibson pte_t entry; 22341e8f889bSDavid Gibson 22357f2e9525SGerald Schaefer entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep))); 223632f84528SChris Forbes if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) 22374b3073e1SRussell King update_mmu_cache(vma, address, ptep); 22381e8f889bSDavid Gibson } 22391e8f889bSDavid Gibson 22401e8f889bSDavid Gibson 224163551ae0SDavid Gibson int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, 224263551ae0SDavid Gibson struct vm_area_struct *vma) 224363551ae0SDavid Gibson { 224463551ae0SDavid Gibson pte_t *src_pte, *dst_pte, entry; 224563551ae0SDavid Gibson struct page *ptepage; 22461c59827dSHugh Dickins unsigned long addr; 22471e8f889bSDavid Gibson int cow; 2248a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 2249a5516438SAndi Kleen unsigned long sz = huge_page_size(h); 22501e8f889bSDavid Gibson 22511e8f889bSDavid Gibson cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; 225263551ae0SDavid Gibson 2253a5516438SAndi Kleen for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) { 2254c74df32cSHugh Dickins src_pte = huge_pte_offset(src, addr); 2255c74df32cSHugh Dickins if (!src_pte) 2256c74df32cSHugh Dickins continue; 2257a5516438SAndi Kleen dst_pte = huge_pte_alloc(dst, addr, sz); 225863551ae0SDavid Gibson if (!dst_pte) 225963551ae0SDavid Gibson goto nomem; 2260c5c99429SLarry Woodman 2261c5c99429SLarry Woodman /* If the pagetables are shared don't copy or take references */ 2262c5c99429SLarry Woodman if (dst_pte == src_pte) 2263c5c99429SLarry Woodman continue; 2264c5c99429SLarry Woodman 2265c74df32cSHugh Dickins spin_lock(&dst->page_table_lock); 226646478758SNick Piggin spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING); 22677f2e9525SGerald Schaefer if (!huge_pte_none(huge_ptep_get(src_pte))) { 22681e8f889bSDavid Gibson if (cow) 22697f2e9525SGerald Schaefer 
huge_ptep_set_wrprotect(src, addr, src_pte); 22707f2e9525SGerald Schaefer entry = huge_ptep_get(src_pte); 227163551ae0SDavid Gibson ptepage = pte_page(entry); 227263551ae0SDavid Gibson get_page(ptepage); 22730fe6e20bSNaoya Horiguchi page_dup_rmap(ptepage); 227463551ae0SDavid Gibson set_huge_pte_at(dst, addr, dst_pte, entry); 22751c59827dSHugh Dickins } 22761c59827dSHugh Dickins spin_unlock(&src->page_table_lock); 2277c74df32cSHugh Dickins spin_unlock(&dst->page_table_lock); 227863551ae0SDavid Gibson } 227963551ae0SDavid Gibson return 0; 228063551ae0SDavid Gibson 228163551ae0SDavid Gibson nomem: 228263551ae0SDavid Gibson return -ENOMEM; 228363551ae0SDavid Gibson } 228463551ae0SDavid Gibson 2285290408d4SNaoya Horiguchi static int is_hugetlb_entry_migration(pte_t pte) 2286290408d4SNaoya Horiguchi { 2287290408d4SNaoya Horiguchi swp_entry_t swp; 2288290408d4SNaoya Horiguchi 2289290408d4SNaoya Horiguchi if (huge_pte_none(pte) || pte_present(pte)) 2290290408d4SNaoya Horiguchi return 0; 2291290408d4SNaoya Horiguchi swp = pte_to_swp_entry(pte); 229232f84528SChris Forbes if (non_swap_entry(swp) && is_migration_entry(swp)) 2293290408d4SNaoya Horiguchi return 1; 229432f84528SChris Forbes else 2295290408d4SNaoya Horiguchi return 0; 2296290408d4SNaoya Horiguchi } 2297290408d4SNaoya Horiguchi 2298fd6a03edSNaoya Horiguchi static int is_hugetlb_entry_hwpoisoned(pte_t pte) 2299fd6a03edSNaoya Horiguchi { 2300fd6a03edSNaoya Horiguchi swp_entry_t swp; 2301fd6a03edSNaoya Horiguchi 2302fd6a03edSNaoya Horiguchi if (huge_pte_none(pte) || pte_present(pte)) 2303fd6a03edSNaoya Horiguchi return 0; 2304fd6a03edSNaoya Horiguchi swp = pte_to_swp_entry(pte); 230532f84528SChris Forbes if (non_swap_entry(swp) && is_hwpoison_entry(swp)) 2306fd6a03edSNaoya Horiguchi return 1; 230732f84528SChris Forbes else 2308fd6a03edSNaoya Horiguchi return 0; 2309fd6a03edSNaoya Horiguchi } 2310fd6a03edSNaoya Horiguchi 2311502717f4SChen, Kenneth W void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, 231204f2cbe3SMel Gorman unsigned long end, struct page *ref_page) 231363551ae0SDavid Gibson { 231463551ae0SDavid Gibson struct mm_struct *mm = vma->vm_mm; 231563551ae0SDavid Gibson unsigned long address; 2316c7546f8fSDavid Gibson pte_t *ptep; 231763551ae0SDavid Gibson pte_t pte; 231863551ae0SDavid Gibson struct page *page; 2319fe1668aeSChen, Kenneth W struct page *tmp; 2320a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 2321a5516438SAndi Kleen unsigned long sz = huge_page_size(h); 2322a5516438SAndi Kleen 2323c0a499c2SChen, Kenneth W /* 23243d48ae45SPeter Zijlstra * A page gathering list, protected by per file i_mmap_mutex. The 2325c0a499c2SChen, Kenneth W * lock is used to avoid list corruption from multiple unmapping 2326c0a499c2SChen, Kenneth W * of the same page since we are using page->lru. 
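 *
 * A hedged illustration of why the lock matters: two racing unmappers
 * both doing
 *
 *    list_add(&page->lru, &page_list);
 *
 * on the same page would corrupt both lists, since page->lru is a
 * single list_head embedded in struct page.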
2327c0a499c2SChen, Kenneth W */
2328fe1668aeSChen, Kenneth W LIST_HEAD(page_list);
232963551ae0SDavid Gibson
233063551ae0SDavid Gibson WARN_ON(!is_vm_hugetlb_page(vma));
2331a5516438SAndi Kleen BUG_ON(start & ~huge_page_mask(h));
2332a5516438SAndi Kleen BUG_ON(end & ~huge_page_mask(h));
233363551ae0SDavid Gibson
2334cddb8a5cSAndrea Arcangeli mmu_notifier_invalidate_range_start(mm, start, end);
2335508034a3SHugh Dickins spin_lock(&mm->page_table_lock);
2336a5516438SAndi Kleen for (address = start; address < end; address += sz) {
2337c7546f8fSDavid Gibson ptep = huge_pte_offset(mm, address);
2338c7546f8fSDavid Gibson if (!ptep)
2339c7546f8fSDavid Gibson continue;
2340c7546f8fSDavid Gibson
234139dde65cSChen, Kenneth W if (huge_pmd_unshare(mm, &address, ptep))
234239dde65cSChen, Kenneth W continue;
234339dde65cSChen, Kenneth W
23446629326bSHillf Danton pte = huge_ptep_get(ptep);
23456629326bSHillf Danton if (huge_pte_none(pte))
23466629326bSHillf Danton continue;
23476629326bSHillf Danton
23486629326bSHillf Danton /*
23496629326bSHillf Danton * A HWPoisoned hugepage is already unmapped and its reference dropped
23506629326bSHillf Danton */
23516629326bSHillf Danton if (unlikely(is_hugetlb_entry_hwpoisoned(pte)))
23526629326bSHillf Danton continue;
23536629326bSHillf Danton
23546629326bSHillf Danton page = pte_page(pte);
235504f2cbe3SMel Gorman /*
235604f2cbe3SMel Gorman * If a reference page is supplied, it is because a specific
235704f2cbe3SMel Gorman * page is being unmapped, not a range. Ensure the page we
235804f2cbe3SMel Gorman * are about to unmap is the actual page of interest.
235904f2cbe3SMel Gorman */
236004f2cbe3SMel Gorman if (ref_page) {
236104f2cbe3SMel Gorman if (page != ref_page)
236204f2cbe3SMel Gorman continue;
236304f2cbe3SMel Gorman
236404f2cbe3SMel Gorman /*
236504f2cbe3SMel Gorman * Mark the VMA as having unmapped its page so that
236604f2cbe3SMel Gorman * future faults in this VMA will fail rather than
236704f2cbe3SMel Gorman * looking like data was lost
236804f2cbe3SMel Gorman */
236904f2cbe3SMel Gorman set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
237004f2cbe3SMel Gorman }
237104f2cbe3SMel Gorman
2372c7546f8fSDavid Gibson pte = huge_ptep_get_and_clear(mm, address, ptep);
23736649a386SKen Chen if (pte_dirty(pte))
23746649a386SKen Chen set_page_dirty(page);
2375fe1668aeSChen, Kenneth W list_add(&page->lru, &page_list);
23769e81130bSHillf Danton
23779e81130bSHillf Danton /* Bail out after unmapping reference page if supplied */
23789e81130bSHillf Danton if (ref_page)
23799e81130bSHillf Danton break;
238063551ae0SDavid Gibson }
2381508034a3SHugh Dickins flush_tlb_range(vma, start, end);
2382cd2934a3SAl Viro spin_unlock(&mm->page_table_lock);
2383cddb8a5cSAndrea Arcangeli mmu_notifier_invalidate_range_end(mm, start, end);
2384fe1668aeSChen, Kenneth W list_for_each_entry_safe(page, tmp, &page_list, lru) {
23850fe6e20bSNaoya Horiguchi page_remove_rmap(page);
2386fe1668aeSChen, Kenneth W list_del(&page->lru);
2387fe1668aeSChen, Kenneth W put_page(page);
2388fe1668aeSChen, Kenneth W }
23891da177e4SLinus Torvalds }
239063551ae0SDavid Gibson
2391502717f4SChen, Kenneth W void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
239204f2cbe3SMel Gorman unsigned long end, struct page *ref_page)
2393502717f4SChen, Kenneth W {
23943d48ae45SPeter Zijlstra mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
239504f2cbe3SMel Gorman __unmap_hugepage_range(vma, start, end, ref_page);
23963d48ae45SPeter Zijlstra mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
2397502717f4SChen, Kenneth W }
2398502717f4SChen, Kenneth W
239904f2cbe3SMel Gorman /*
240004f2cbe3SMel Gorman * This is called when the original mapper is failing to COW a MAP_PRIVATE
240104f2cbe3SMel Gorman * mapping it owns the reserve page for. The intention is to unmap the page
240204f2cbe3SMel Gorman * from other VMAs and let the children be SIGKILLed if they are faulting the
240304f2cbe3SMel Gorman * same region.
240404f2cbe3SMel Gorman */
24052a4b3dedSHarvey Harrison static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
24062a4b3dedSHarvey Harrison struct page *page, unsigned long address)
240704f2cbe3SMel Gorman {
24087526674dSAdam Litke struct hstate *h = hstate_vma(vma);
240904f2cbe3SMel Gorman struct vm_area_struct *iter_vma;
241004f2cbe3SMel Gorman struct address_space *mapping;
241104f2cbe3SMel Gorman struct prio_tree_iter iter;
241204f2cbe3SMel Gorman pgoff_t pgoff;
241304f2cbe3SMel Gorman
241404f2cbe3SMel Gorman /*
241504f2cbe3SMel Gorman * vm_pgoff is in PAGE_SIZE units, hence the different calculation
241604f2cbe3SMel Gorman * from page cache lookup which is in HPAGE_SIZE units.
241704f2cbe3SMel Gorman */
24187526674dSAdam Litke address = address & huge_page_mask(h);
24190c176d52SHillf Danton pgoff = vma_hugecache_offset(h, vma, address);
242090481622SDavid Gibson mapping = vma->vm_file->f_dentry->d_inode->i_mapping;
242104f2cbe3SMel Gorman
24224eb2b1dcSMel Gorman /*
24234eb2b1dcSMel Gorman * Take the mapping lock for the duration of the table walk. As
24244eb2b1dcSMel Gorman * this mapping should be shared between all the VMAs,
24254eb2b1dcSMel Gorman * __unmap_hugepage_range() is called as the lock is already held
24264eb2b1dcSMel Gorman */
24273d48ae45SPeter Zijlstra mutex_lock(&mapping->i_mmap_mutex);
242804f2cbe3SMel Gorman vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
242904f2cbe3SMel Gorman /* Do not unmap the current VMA */
243004f2cbe3SMel Gorman if (iter_vma == vma)
243104f2cbe3SMel Gorman continue;
243204f2cbe3SMel Gorman
243304f2cbe3SMel Gorman /*
243404f2cbe3SMel Gorman * Unmap the page from other VMAs without their own reserves.
243504f2cbe3SMel Gorman * They get marked to be SIGKILLed if they fault in these
243604f2cbe3SMel Gorman * areas. This is because a future no-page fault on this VMA
243704f2cbe3SMel Gorman * could insert a zeroed page instead of the data existing
243804f2cbe3SMel Gorman * from the time of fork. This would look like data corruption
243904f2cbe3SMel Gorman */
244004f2cbe3SMel Gorman if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
24414eb2b1dcSMel Gorman __unmap_hugepage_range(iter_vma,
24427526674dSAdam Litke address, address + huge_page_size(h),
244304f2cbe3SMel Gorman page);
244404f2cbe3SMel Gorman }
24453d48ae45SPeter Zijlstra mutex_unlock(&mapping->i_mmap_mutex);
244604f2cbe3SMel Gorman
244704f2cbe3SMel Gorman return 1;
244804f2cbe3SMel Gorman }
244904f2cbe3SMel Gorman
24500fe6e20bSNaoya Horiguchi /*
24510fe6e20bSNaoya Horiguchi * Hugetlb_cow() should be called with page lock of the original hugepage held.
2452ef009b25SMichal Hocko * Called with hugetlb_instantiation_mutex held and pte_page locked so we
2453ef009b25SMichal Hocko * cannot race with other handlers or page migration.
2454ef009b25SMichal Hocko * Keep the pte_same checks anyway to make transition from the mutex easier.
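 *
 * Expected caller context, as a rough sketch (an assumption read out of
 * hugetlb_fault() later in this file, not a separate contract):
 *
 *    mutex_lock(&hugetlb_instantiation_mutex);
 *    lock_page(pte_page(entry));
 *    spin_lock(&mm->page_table_lock);
 *    ret = hugetlb_cow(mm, vma, address, ptep, entry, pagecache_page);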
24550fe6e20bSNaoya Horiguchi */
24561e8f889bSDavid Gibson static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
245704f2cbe3SMel Gorman unsigned long address, pte_t *ptep, pte_t pte,
245804f2cbe3SMel Gorman struct page *pagecache_page)
24591e8f889bSDavid Gibson {
2460a5516438SAndi Kleen struct hstate *h = hstate_vma(vma);
24611e8f889bSDavid Gibson struct page *old_page, *new_page;
246279ac6ba4SDavid Gibson int avoidcopy;
246304f2cbe3SMel Gorman int outside_reserve = 0;
24641e8f889bSDavid Gibson
24651e8f889bSDavid Gibson old_page = pte_page(pte);
24661e8f889bSDavid Gibson
246704f2cbe3SMel Gorman retry_avoidcopy:
24681e8f889bSDavid Gibson /* If no-one else is actually using this page, avoid the copy
24691e8f889bSDavid Gibson * and just make the page writable */
24700fe6e20bSNaoya Horiguchi avoidcopy = (page_mapcount(old_page) == 1);
24711e8f889bSDavid Gibson if (avoidcopy) {
24720fe6e20bSNaoya Horiguchi if (PageAnon(old_page))
24730fe6e20bSNaoya Horiguchi page_move_anon_rmap(old_page, vma, address);
24741e8f889bSDavid Gibson set_huge_ptep_writable(vma, address, ptep);
247583c54070SNick Piggin return 0;
24761e8f889bSDavid Gibson }
24771e8f889bSDavid Gibson
247804f2cbe3SMel Gorman /*
247904f2cbe3SMel Gorman * If the process that created a MAP_PRIVATE mapping is about to
248004f2cbe3SMel Gorman * perform a COW due to a shared page count, attempt to satisfy
248104f2cbe3SMel Gorman * the allocation without using the existing reserves. The pagecache
248204f2cbe3SMel Gorman * page is used to determine if the reserve at this address was
248304f2cbe3SMel Gorman * consumed or not. If reserves were used, a partial faulted mapping
248404f2cbe3SMel Gorman * at the time of fork() could consume its reserves on COW instead
248504f2cbe3SMel Gorman * of the full address range.
248604f2cbe3SMel Gorman */
2487f83a275dSMel Gorman if (!(vma->vm_flags & VM_MAYSHARE) &&
248804f2cbe3SMel Gorman is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
248904f2cbe3SMel Gorman old_page != pagecache_page)
249004f2cbe3SMel Gorman outside_reserve = 1;
249104f2cbe3SMel Gorman
24921e8f889bSDavid Gibson page_cache_get(old_page);
2493b76c8cfbSLarry Woodman
2494b76c8cfbSLarry Woodman /* Drop page_table_lock as buddy allocator may be called */
2495b76c8cfbSLarry Woodman spin_unlock(&mm->page_table_lock);
249604f2cbe3SMel Gorman new_page = alloc_huge_page(vma, address, outside_reserve);
24971e8f889bSDavid Gibson
24982fc39cecSAdam Litke if (IS_ERR(new_page)) {
24991e8f889bSDavid Gibson page_cache_release(old_page);
250004f2cbe3SMel Gorman
250104f2cbe3SMel Gorman /*
250204f2cbe3SMel Gorman * If a process owning a MAP_PRIVATE mapping fails to COW,
250304f2cbe3SMel Gorman * it is due to references held by a child and an insufficient
250404f2cbe3SMel Gorman * huge page pool. To guarantee the original mapper's
250504f2cbe3SMel Gorman * reliability, unmap the page from child processes. The child
250604f2cbe3SMel Gorman * may get SIGKILLed if it later faults.
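 *
 * Concrete scenario (illustrative): a parent touches its MAP_PRIVATE
 * hugepage, forks, and later writes to it when the pool is empty. The
 * COW allocation fails, the page is unmapped from the child, and the
 * retry takes the avoidcopy path since the parent is then the sole
 * mapper; the child is SIGKILLed if it faults the page again.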
250704f2cbe3SMel Gorman */
250804f2cbe3SMel Gorman if (outside_reserve) {
250904f2cbe3SMel Gorman BUG_ON(huge_pte_none(pte));
251004f2cbe3SMel Gorman if (unmap_ref_private(mm, vma, old_page, address)) {
251104f2cbe3SMel Gorman BUG_ON(huge_pte_none(pte));
2512b76c8cfbSLarry Woodman spin_lock(&mm->page_table_lock);
2513a734bcc8SHillf Danton ptep = huge_pte_offset(mm, address & huge_page_mask(h));
2514a734bcc8SHillf Danton if (likely(pte_same(huge_ptep_get(ptep), pte)))
251504f2cbe3SMel Gorman goto retry_avoidcopy;
2516a734bcc8SHillf Danton /*
2517a734bcc8SHillf Danton * A race occurred while re-acquiring page_table_lock;
2518a734bcc8SHillf Danton * our job is done.
2519a734bcc8SHillf Danton */
2520a734bcc8SHillf Danton return 0;
252104f2cbe3SMel Gorman }
252204f2cbe3SMel Gorman WARN_ON_ONCE(1);
252304f2cbe3SMel Gorman }
252404f2cbe3SMel Gorman
2525b76c8cfbSLarry Woodman /* Caller expects lock to be held */
2526b76c8cfbSLarry Woodman spin_lock(&mm->page_table_lock);
25272fc39cecSAdam Litke return -PTR_ERR(new_page);
25281e8f889bSDavid Gibson }
25291e8f889bSDavid Gibson
25300fe6e20bSNaoya Horiguchi /*
25310fe6e20bSNaoya Horiguchi * When the original hugepage is a shared one, it does not have
25320fe6e20bSNaoya Horiguchi * anon_vma prepared.
25330fe6e20bSNaoya Horiguchi */
253444e2aa93SDean Nelson if (unlikely(anon_vma_prepare(vma))) {
2535ea4039a3SHillf Danton page_cache_release(new_page);
2536ea4039a3SHillf Danton page_cache_release(old_page);
253744e2aa93SDean Nelson /* Caller expects lock to be held */
253844e2aa93SDean Nelson spin_lock(&mm->page_table_lock);
25390fe6e20bSNaoya Horiguchi return VM_FAULT_OOM;
254044e2aa93SDean Nelson }
25410fe6e20bSNaoya Horiguchi
254247ad8475SAndrea Arcangeli copy_user_huge_page(new_page, old_page, address, vma,
254347ad8475SAndrea Arcangeli pages_per_huge_page(h));
25440ed361deSNick Piggin __SetPageUptodate(new_page);
25451e8f889bSDavid Gibson
2546b76c8cfbSLarry Woodman /*
2547b76c8cfbSLarry Woodman * Retake the page_table_lock to check for racing updates
2548b76c8cfbSLarry Woodman * before the page tables are altered
2549b76c8cfbSLarry Woodman */
2550b76c8cfbSLarry Woodman spin_lock(&mm->page_table_lock);
2551a5516438SAndi Kleen ptep = huge_pte_offset(mm, address & huge_page_mask(h));
25527f2e9525SGerald Schaefer if (likely(pte_same(huge_ptep_get(ptep), pte))) {
25531e8f889bSDavid Gibson /* Break COW */
25543edd4fc9SDoug Doan mmu_notifier_invalidate_range_start(mm,
25553edd4fc9SDoug Doan address & huge_page_mask(h),
25563edd4fc9SDoug Doan (address & huge_page_mask(h)) + huge_page_size(h));
25578fe627ecSGerald Schaefer huge_ptep_clear_flush(vma, address, ptep);
25581e8f889bSDavid Gibson set_huge_pte_at(mm, address, ptep,
25591e8f889bSDavid Gibson make_huge_pte(vma, new_page, 1));
25600fe6e20bSNaoya Horiguchi page_remove_rmap(old_page);
2561cd67f0d2SNaoya Horiguchi hugepage_add_new_anon_rmap(new_page, vma, address);
25621e8f889bSDavid Gibson /* Make the old page be freed below */
25631e8f889bSDavid Gibson new_page = old_page;
25643edd4fc9SDoug Doan mmu_notifier_invalidate_range_end(mm,
25653edd4fc9SDoug Doan address & huge_page_mask(h),
25663edd4fc9SDoug Doan (address & huge_page_mask(h)) + huge_page_size(h));
25671e8f889bSDavid Gibson }
25681e8f889bSDavid Gibson page_cache_release(new_page);
25691e8f889bSDavid Gibson page_cache_release(old_page);
257083c54070SNick Piggin return 0;
25711e8f889bSDavid Gibson }
25721e8f889bSDavid Gibson
257304f2cbe3SMel Gorman /* Return the pagecache page at a given address within a VMA */
2574a5516438SAndi Kleen static struct
page *hugetlbfs_pagecache_page(struct hstate *h, 2575a5516438SAndi Kleen struct vm_area_struct *vma, unsigned long address) 257604f2cbe3SMel Gorman { 257704f2cbe3SMel Gorman struct address_space *mapping; 2578e7c4b0bfSAndy Whitcroft pgoff_t idx; 257904f2cbe3SMel Gorman 258004f2cbe3SMel Gorman mapping = vma->vm_file->f_mapping; 2581a5516438SAndi Kleen idx = vma_hugecache_offset(h, vma, address); 258204f2cbe3SMel Gorman 258304f2cbe3SMel Gorman return find_lock_page(mapping, idx); 258404f2cbe3SMel Gorman } 258504f2cbe3SMel Gorman 25863ae77f43SHugh Dickins /* 25873ae77f43SHugh Dickins * Return whether there is a pagecache page to back given address within VMA. 25883ae77f43SHugh Dickins * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page. 25893ae77f43SHugh Dickins */ 25903ae77f43SHugh Dickins static bool hugetlbfs_pagecache_present(struct hstate *h, 25912a15efc9SHugh Dickins struct vm_area_struct *vma, unsigned long address) 25922a15efc9SHugh Dickins { 25932a15efc9SHugh Dickins struct address_space *mapping; 25942a15efc9SHugh Dickins pgoff_t idx; 25952a15efc9SHugh Dickins struct page *page; 25962a15efc9SHugh Dickins 25972a15efc9SHugh Dickins mapping = vma->vm_file->f_mapping; 25982a15efc9SHugh Dickins idx = vma_hugecache_offset(h, vma, address); 25992a15efc9SHugh Dickins 26002a15efc9SHugh Dickins page = find_get_page(mapping, idx); 26012a15efc9SHugh Dickins if (page) 26022a15efc9SHugh Dickins put_page(page); 26032a15efc9SHugh Dickins return page != NULL; 26042a15efc9SHugh Dickins } 26052a15efc9SHugh Dickins 2606a1ed3ddaSRobert P. J. Day static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, 2607788c7df4SHugh Dickins unsigned long address, pte_t *ptep, unsigned int flags) 2608ac9b9c66SHugh Dickins { 2609a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 2610ac9b9c66SHugh Dickins int ret = VM_FAULT_SIGBUS; 2611409eb8c2SHillf Danton int anon_rmap = 0; 2612e7c4b0bfSAndy Whitcroft pgoff_t idx; 26134c887265SAdam Litke unsigned long size; 26144c887265SAdam Litke struct page *page; 26154c887265SAdam Litke struct address_space *mapping; 26161e8f889bSDavid Gibson pte_t new_pte; 26174c887265SAdam Litke 261804f2cbe3SMel Gorman /* 261904f2cbe3SMel Gorman * Currently, we are forced to kill the process in the event the 262004f2cbe3SMel Gorman * original mapper has unmapped pages from the child due to a failed 262125985edcSLucas De Marchi * COW. Warn that such a situation has occurred as it may not be obvious 262204f2cbe3SMel Gorman */ 262304f2cbe3SMel Gorman if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) { 262404f2cbe3SMel Gorman printk(KERN_WARNING 262504f2cbe3SMel Gorman "PID %d killed due to inadequate hugepage pool\n", 262604f2cbe3SMel Gorman current->pid); 262704f2cbe3SMel Gorman return ret; 262804f2cbe3SMel Gorman } 262904f2cbe3SMel Gorman 26304c887265SAdam Litke mapping = vma->vm_file->f_mapping; 2631a5516438SAndi Kleen idx = vma_hugecache_offset(h, vma, address); 26324c887265SAdam Litke 26334c887265SAdam Litke /* 26344c887265SAdam Litke * Use page lock to guard against racing truncation 26354c887265SAdam Litke * before we get page_table_lock. 
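 *
 * Worked example of the index math used here (illustrative numbers
 * only): with 2MB huge pages, a fault at vma->vm_start + 0x400000 in
 * a VMA whose vm_pgoff is 0 gives idx = 2 from vma_hugecache_offset(),
 * i.e. the third hugepage-sized block of the file.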
26364c887265SAdam Litke */
26376bda666aSChristoph Lameter retry:
26386bda666aSChristoph Lameter page = find_lock_page(mapping, idx);
26396bda666aSChristoph Lameter if (!page) {
2640a5516438SAndi Kleen size = i_size_read(mapping->host) >> huge_page_shift(h);
2641ebed4bfcSHugh Dickins if (idx >= size)
2642ebed4bfcSHugh Dickins goto out;
264304f2cbe3SMel Gorman page = alloc_huge_page(vma, address, 0);
26442fc39cecSAdam Litke if (IS_ERR(page)) {
26452fc39cecSAdam Litke ret = -PTR_ERR(page);
26466bda666aSChristoph Lameter goto out;
26476bda666aSChristoph Lameter }
264847ad8475SAndrea Arcangeli clear_huge_page(page, address, pages_per_huge_page(h));
26490ed361deSNick Piggin __SetPageUptodate(page);
2650ac9b9c66SHugh Dickins
2651f83a275dSMel Gorman if (vma->vm_flags & VM_MAYSHARE) {
26526bda666aSChristoph Lameter int err;
265345c682a6SKen Chen struct inode *inode = mapping->host;
26546bda666aSChristoph Lameter
26556bda666aSChristoph Lameter err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
26566bda666aSChristoph Lameter if (err) {
26576bda666aSChristoph Lameter put_page(page);
26586bda666aSChristoph Lameter if (err == -EEXIST)
26596bda666aSChristoph Lameter goto retry;
26606bda666aSChristoph Lameter goto out;
26616bda666aSChristoph Lameter }
266245c682a6SKen Chen
266345c682a6SKen Chen spin_lock(&inode->i_lock);
2664a5516438SAndi Kleen inode->i_blocks += blocks_per_huge_page(h);
266545c682a6SKen Chen spin_unlock(&inode->i_lock);
266623be7468SMel Gorman } else {
26676bda666aSChristoph Lameter lock_page(page);
26680fe6e20bSNaoya Horiguchi if (unlikely(anon_vma_prepare(vma))) {
26690fe6e20bSNaoya Horiguchi ret = VM_FAULT_OOM;
26700fe6e20bSNaoya Horiguchi goto backout_unlocked;
267123be7468SMel Gorman }
2672409eb8c2SHillf Danton anon_rmap = 1;
26730fe6e20bSNaoya Horiguchi }
26740fe6e20bSNaoya Horiguchi } else {
267557303d80SAndy Whitcroft /*
2676998b4382SNaoya Horiguchi * If a memory error occurs between mmap() and fault, some process
2677998b4382SNaoya Horiguchi * may not have a hwpoisoned swap entry for the errored virtual address.
2678998b4382SNaoya Horiguchi * So we need to block the hugepage fault by checking the PG_hwpoison bit.
2679fd6a03edSNaoya Horiguchi */
2680fd6a03edSNaoya Horiguchi if (unlikely(PageHWPoison(page))) {
2681aa50d3a7SAndi Kleen ret = VM_FAULT_HWPOISON |
2682aa50d3a7SAndi Kleen VM_FAULT_SET_HINDEX(h - hstates);
2683fd6a03edSNaoya Horiguchi goto backout_unlocked;
26846bda666aSChristoph Lameter }
2685998b4382SNaoya Horiguchi }
26861e8f889bSDavid Gibson
268757303d80SAndy Whitcroft /*
268857303d80SAndy Whitcroft * If we are going to COW a private mapping later, we examine the
268957303d80SAndy Whitcroft * pending reservations for this page now. This will ensure that
269057303d80SAndy Whitcroft * any allocations necessary to record that reservation occur outside
269157303d80SAndy Whitcroft * the spinlock.
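 *
 * The ordering matters because recording a reservation may allocate
 * with GFP_KERNEL and hence sleep (an assumption about region_chg(),
 * drawn from the region tracking code earlier in this file), which is
 * not allowed once page_table_lock is taken below.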
269257303d80SAndy Whitcroft */ 2693788c7df4SHugh Dickins if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) 26942b26736cSAndy Whitcroft if (vma_needs_reservation(h, vma, address) < 0) { 26952b26736cSAndy Whitcroft ret = VM_FAULT_OOM; 26962b26736cSAndy Whitcroft goto backout_unlocked; 26972b26736cSAndy Whitcroft } 269857303d80SAndy Whitcroft 2699ac9b9c66SHugh Dickins spin_lock(&mm->page_table_lock); 2700a5516438SAndi Kleen size = i_size_read(mapping->host) >> huge_page_shift(h); 27014c887265SAdam Litke if (idx >= size) 27024c887265SAdam Litke goto backout; 27034c887265SAdam Litke 270483c54070SNick Piggin ret = 0; 27057f2e9525SGerald Schaefer if (!huge_pte_none(huge_ptep_get(ptep))) 27064c887265SAdam Litke goto backout; 27074c887265SAdam Litke 2708409eb8c2SHillf Danton if (anon_rmap) 2709409eb8c2SHillf Danton hugepage_add_new_anon_rmap(page, vma, address); 2710409eb8c2SHillf Danton else 2711409eb8c2SHillf Danton page_dup_rmap(page); 27121e8f889bSDavid Gibson new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE) 27131e8f889bSDavid Gibson && (vma->vm_flags & VM_SHARED))); 27141e8f889bSDavid Gibson set_huge_pte_at(mm, address, ptep, new_pte); 27151e8f889bSDavid Gibson 2716788c7df4SHugh Dickins if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { 27171e8f889bSDavid Gibson /* Optimization, do the COW without a second fault */ 271804f2cbe3SMel Gorman ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page); 27191e8f889bSDavid Gibson } 27201e8f889bSDavid Gibson 2721ac9b9c66SHugh Dickins spin_unlock(&mm->page_table_lock); 27224c887265SAdam Litke unlock_page(page); 27234c887265SAdam Litke out: 2724ac9b9c66SHugh Dickins return ret; 27254c887265SAdam Litke 27264c887265SAdam Litke backout: 27274c887265SAdam Litke spin_unlock(&mm->page_table_lock); 27282b26736cSAndy Whitcroft backout_unlocked: 27294c887265SAdam Litke unlock_page(page); 27304c887265SAdam Litke put_page(page); 27314c887265SAdam Litke goto out; 2732ac9b9c66SHugh Dickins } 2733ac9b9c66SHugh Dickins 273486e5216fSAdam Litke int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, 2735788c7df4SHugh Dickins unsigned long address, unsigned int flags) 273686e5216fSAdam Litke { 273786e5216fSAdam Litke pte_t *ptep; 273886e5216fSAdam Litke pte_t entry; 27391e8f889bSDavid Gibson int ret; 27400fe6e20bSNaoya Horiguchi struct page *page = NULL; 274157303d80SAndy Whitcroft struct page *pagecache_page = NULL; 27423935baa9SDavid Gibson static DEFINE_MUTEX(hugetlb_instantiation_mutex); 2743a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 274486e5216fSAdam Litke 27451e16a539SKAMEZAWA Hiroyuki address &= huge_page_mask(h); 27461e16a539SKAMEZAWA Hiroyuki 2747fd6a03edSNaoya Horiguchi ptep = huge_pte_offset(mm, address); 2748fd6a03edSNaoya Horiguchi if (ptep) { 2749fd6a03edSNaoya Horiguchi entry = huge_ptep_get(ptep); 2750290408d4SNaoya Horiguchi if (unlikely(is_hugetlb_entry_migration(entry))) { 2751290408d4SNaoya Horiguchi migration_entry_wait(mm, (pmd_t *)ptep, address); 2752290408d4SNaoya Horiguchi return 0; 2753290408d4SNaoya Horiguchi } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) 2754aa50d3a7SAndi Kleen return VM_FAULT_HWPOISON_LARGE | 2755aa50d3a7SAndi Kleen VM_FAULT_SET_HINDEX(h - hstates); 2756fd6a03edSNaoya Horiguchi } 2757fd6a03edSNaoya Horiguchi 2758a5516438SAndi Kleen ptep = huge_pte_alloc(mm, address, huge_page_size(h)); 275986e5216fSAdam Litke if (!ptep) 276086e5216fSAdam Litke return VM_FAULT_OOM; 276186e5216fSAdam Litke 27623935baa9SDavid Gibson /* 27633935baa9SDavid Gibson * 
Serialize hugepage allocation and instantiation, so that we don't
27643935baa9SDavid Gibson * get spurious allocation failures if two CPUs race to instantiate
27653935baa9SDavid Gibson * the same page in the page cache.
27663935baa9SDavid Gibson */
27673935baa9SDavid Gibson mutex_lock(&hugetlb_instantiation_mutex);
27687f2e9525SGerald Schaefer entry = huge_ptep_get(ptep);
27697f2e9525SGerald Schaefer if (huge_pte_none(entry)) {
2770788c7df4SHugh Dickins ret = hugetlb_no_page(mm, vma, address, ptep, flags);
2771b4d1d99fSDavid Gibson goto out_mutex;
27723935baa9SDavid Gibson }
277386e5216fSAdam Litke
277483c54070SNick Piggin ret = 0;
27751e8f889bSDavid Gibson
277657303d80SAndy Whitcroft /*
277757303d80SAndy Whitcroft * If we are going to COW the mapping later, we examine the pending
277857303d80SAndy Whitcroft * reservations for this page now. This will ensure that any
277957303d80SAndy Whitcroft * allocations necessary to record that reservation occur outside the
278057303d80SAndy Whitcroft * spinlock. For private mappings, we also lookup the pagecache
278157303d80SAndy Whitcroft * page now as it is used to determine if a reservation has been
278257303d80SAndy Whitcroft * consumed.
278357303d80SAndy Whitcroft */
2784788c7df4SHugh Dickins if ((flags & FAULT_FLAG_WRITE) && !pte_write(entry)) {
27852b26736cSAndy Whitcroft if (vma_needs_reservation(h, vma, address) < 0) {
27862b26736cSAndy Whitcroft ret = VM_FAULT_OOM;
2787b4d1d99fSDavid Gibson goto out_mutex;
27882b26736cSAndy Whitcroft }
278957303d80SAndy Whitcroft
2790f83a275dSMel Gorman if (!(vma->vm_flags & VM_MAYSHARE))
279157303d80SAndy Whitcroft pagecache_page = hugetlbfs_pagecache_page(h,
279257303d80SAndy Whitcroft vma, address);
279357303d80SAndy Whitcroft }
279457303d80SAndy Whitcroft
279556c9cfb1SNaoya Horiguchi /*
279656c9cfb1SNaoya Horiguchi * hugetlb_cow() requires page locks of pte_page(entry) and
279756c9cfb1SNaoya Horiguchi * pagecache_page, so here we need to take the former one
279856c9cfb1SNaoya Horiguchi * when page != pagecache_page or !pagecache_page.
279956c9cfb1SNaoya Horiguchi * Note that locking order is always pagecache_page -> page,
280056c9cfb1SNaoya Horiguchi * so no worry about deadlock.
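 *
 * Illustrative case (an assumption spelled out from the code paths
 * here): on a write fault to a COWed anonymous copy in a private
 * mapping, pagecache_page was locked above by
 * hugetlbfs_pagecache_page() and the anonymous page is locked below,
 * preserving the pagecache_page -> page order.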
280156c9cfb1SNaoya Horiguchi */
28020fe6e20bSNaoya Horiguchi page = pte_page(entry);
280366aebce7SChris Metcalf get_page(page);
280456c9cfb1SNaoya Horiguchi if (page != pagecache_page)
28050fe6e20bSNaoya Horiguchi lock_page(page);
28060fe6e20bSNaoya Horiguchi
28071e8f889bSDavid Gibson spin_lock(&mm->page_table_lock);
28081e8f889bSDavid Gibson /* Check for a racing update before calling hugetlb_cow */
2809b4d1d99fSDavid Gibson if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
2810b4d1d99fSDavid Gibson goto out_page_table_lock;
2811b4d1d99fSDavid Gibson
2812b4d1d99fSDavid Gibson
2813788c7df4SHugh Dickins if (flags & FAULT_FLAG_WRITE) {
2814b4d1d99fSDavid Gibson if (!pte_write(entry)) {
281557303d80SAndy Whitcroft ret = hugetlb_cow(mm, vma, address, ptep, entry,
281657303d80SAndy Whitcroft pagecache_page);
2817b4d1d99fSDavid Gibson goto out_page_table_lock;
2818b4d1d99fSDavid Gibson }
2819b4d1d99fSDavid Gibson entry = pte_mkdirty(entry);
2820b4d1d99fSDavid Gibson }
2821b4d1d99fSDavid Gibson entry = pte_mkyoung(entry);
2822788c7df4SHugh Dickins if (huge_ptep_set_access_flags(vma, address, ptep, entry,
2823788c7df4SHugh Dickins flags & FAULT_FLAG_WRITE))
28244b3073e1SRussell King update_mmu_cache(vma, address, ptep);
2825b4d1d99fSDavid Gibson
2826b4d1d99fSDavid Gibson out_page_table_lock:
28271e8f889bSDavid Gibson spin_unlock(&mm->page_table_lock);
282857303d80SAndy Whitcroft
282957303d80SAndy Whitcroft if (pagecache_page) {
283057303d80SAndy Whitcroft unlock_page(pagecache_page);
283157303d80SAndy Whitcroft put_page(pagecache_page);
283257303d80SAndy Whitcroft }
28331f64d69cSDean Nelson if (page != pagecache_page)
283456c9cfb1SNaoya Horiguchi unlock_page(page);
283566aebce7SChris Metcalf put_page(page);
283657303d80SAndy Whitcroft
2837b4d1d99fSDavid Gibson out_mutex:
28383935baa9SDavid Gibson mutex_unlock(&hugetlb_instantiation_mutex);
28391e8f889bSDavid Gibson
28401e8f889bSDavid Gibson return ret;
284186e5216fSAdam Litke }
284286e5216fSAdam Litke
2843ceb86879SAndi Kleen /* Can be overridden by architectures */
2844ceb86879SAndi Kleen __attribute__((weak)) struct page *
2845ceb86879SAndi Kleen follow_huge_pud(struct mm_struct *mm, unsigned long address,
2846ceb86879SAndi Kleen pud_t *pud, int write)
2847ceb86879SAndi Kleen {
2848ceb86879SAndi Kleen BUG();
2849ceb86879SAndi Kleen return NULL;
2850ceb86879SAndi Kleen }
2851ceb86879SAndi Kleen
285263551ae0SDavid Gibson int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
285363551ae0SDavid Gibson struct page **pages, struct vm_area_struct **vmas,
28545b23dbe8SAdam Litke unsigned long *position, int *length, int i,
28552a15efc9SHugh Dickins unsigned int flags)
285663551ae0SDavid Gibson {
2857d5d4b0aaSChen, Kenneth W unsigned long pfn_offset;
2858d5d4b0aaSChen, Kenneth W unsigned long vaddr = *position;
285963551ae0SDavid Gibson int remainder = *length;
2860a5516438SAndi Kleen struct hstate *h = hstate_vma(vma);
286163551ae0SDavid Gibson
28621c59827dSHugh Dickins spin_lock(&mm->page_table_lock);
286363551ae0SDavid Gibson while (vaddr < vma->vm_end && remainder) {
286463551ae0SDavid Gibson pte_t *pte;
28652a15efc9SHugh Dickins int absent;
286663551ae0SDavid Gibson struct page *page;
286763551ae0SDavid Gibson
28684c887265SAdam Litke /*
28694c887265SAdam Litke * Some archs (sparc64, sh*) have multiple pte_ts to
28702a15efc9SHugh Dickins * each hugepage. We have to make sure we get the
28714c887265SAdam Litke * first, for the page indexing below to work.
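 *
 * Worked example (illustrative figures, not a statement about any
 * particular arch): with an 8K base page and a 4M hugepage, 512
 * consecutive pte_t slots back one hugepage; masking vaddr with
 * huge_page_mask(h) selects the first slot, and pfn_offset then
 * walks the 512 subpages without another pte lookup.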
28724c887265SAdam Litke */ 2873a5516438SAndi Kleen pte = huge_pte_offset(mm, vaddr & huge_page_mask(h)); 28742a15efc9SHugh Dickins absent = !pte || huge_pte_none(huge_ptep_get(pte)); 287563551ae0SDavid Gibson 28762a15efc9SHugh Dickins /* 28772a15efc9SHugh Dickins * When coredumping, it suits get_dump_page if we just return 28783ae77f43SHugh Dickins * an error where there's an empty slot with no huge pagecache 28793ae77f43SHugh Dickins * to back it. This way, we avoid allocating a hugepage, and 28803ae77f43SHugh Dickins * the sparse dumpfile avoids allocating disk blocks, but its 28813ae77f43SHugh Dickins * huge holes still show up with zeroes where they need to be. 28822a15efc9SHugh Dickins */ 28833ae77f43SHugh Dickins if (absent && (flags & FOLL_DUMP) && 28843ae77f43SHugh Dickins !hugetlbfs_pagecache_present(h, vma, vaddr)) { 28852a15efc9SHugh Dickins remainder = 0; 28862a15efc9SHugh Dickins break; 28872a15efc9SHugh Dickins } 28882a15efc9SHugh Dickins 28892a15efc9SHugh Dickins if (absent || 28902a15efc9SHugh Dickins ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) { 28914c887265SAdam Litke int ret; 28924c887265SAdam Litke 28934c887265SAdam Litke spin_unlock(&mm->page_table_lock); 28942a15efc9SHugh Dickins ret = hugetlb_fault(mm, vma, vaddr, 28952a15efc9SHugh Dickins (flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0); 28964c887265SAdam Litke spin_lock(&mm->page_table_lock); 2897a89182c7SAdam Litke if (!(ret & VM_FAULT_ERROR)) 28984c887265SAdam Litke continue; 28994c887265SAdam Litke 29001c59827dSHugh Dickins remainder = 0; 29011c59827dSHugh Dickins break; 29021c59827dSHugh Dickins } 290363551ae0SDavid Gibson 2904a5516438SAndi Kleen pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT; 29057f2e9525SGerald Schaefer page = pte_page(huge_ptep_get(pte)); 2906d5d4b0aaSChen, Kenneth W same_page: 2907d6692183SChen, Kenneth W if (pages) { 290869d177c2SAndy Whitcroft pages[i] = mem_map_offset(page, pfn_offset); 29094b2e38adSKOSAKI Motohiro get_page(pages[i]); 2910d6692183SChen, Kenneth W } 291163551ae0SDavid Gibson 291263551ae0SDavid Gibson if (vmas) 291363551ae0SDavid Gibson vmas[i] = vma; 291463551ae0SDavid Gibson 291563551ae0SDavid Gibson vaddr += PAGE_SIZE; 2916d5d4b0aaSChen, Kenneth W ++pfn_offset; 291763551ae0SDavid Gibson --remainder; 291863551ae0SDavid Gibson ++i; 2919d5d4b0aaSChen, Kenneth W if (vaddr < vma->vm_end && remainder && 2920a5516438SAndi Kleen pfn_offset < pages_per_huge_page(h)) { 2921d5d4b0aaSChen, Kenneth W /* 2922d5d4b0aaSChen, Kenneth W * We use pfn_offset to avoid touching the pageframes 2923d5d4b0aaSChen, Kenneth W * of this compound page. 2924d5d4b0aaSChen, Kenneth W */ 2925d5d4b0aaSChen, Kenneth W goto same_page; 2926d5d4b0aaSChen, Kenneth W } 292763551ae0SDavid Gibson } 29281c59827dSHugh Dickins spin_unlock(&mm->page_table_lock); 292963551ae0SDavid Gibson *length = remainder; 293063551ae0SDavid Gibson *position = vaddr; 293163551ae0SDavid Gibson 29322a15efc9SHugh Dickins return i ? 
i : -EFAULT; 293363551ae0SDavid Gibson } 29348f860591SZhang, Yanmin 29358f860591SZhang, Yanmin void hugetlb_change_protection(struct vm_area_struct *vma, 29368f860591SZhang, Yanmin unsigned long address, unsigned long end, pgprot_t newprot) 29378f860591SZhang, Yanmin { 29388f860591SZhang, Yanmin struct mm_struct *mm = vma->vm_mm; 29398f860591SZhang, Yanmin unsigned long start = address; 29408f860591SZhang, Yanmin pte_t *ptep; 29418f860591SZhang, Yanmin pte_t pte; 2942a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 29438f860591SZhang, Yanmin 29448f860591SZhang, Yanmin BUG_ON(address >= end); 29458f860591SZhang, Yanmin flush_cache_range(vma, address, end); 29468f860591SZhang, Yanmin 29473d48ae45SPeter Zijlstra mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex); 29488f860591SZhang, Yanmin spin_lock(&mm->page_table_lock); 2949a5516438SAndi Kleen for (; address < end; address += huge_page_size(h)) { 29508f860591SZhang, Yanmin ptep = huge_pte_offset(mm, address); 29518f860591SZhang, Yanmin if (!ptep) 29528f860591SZhang, Yanmin continue; 295339dde65cSChen, Kenneth W if (huge_pmd_unshare(mm, &address, ptep)) 295439dde65cSChen, Kenneth W continue; 29557f2e9525SGerald Schaefer if (!huge_pte_none(huge_ptep_get(ptep))) { 29568f860591SZhang, Yanmin pte = huge_ptep_get_and_clear(mm, address, ptep); 29578f860591SZhang, Yanmin pte = pte_mkhuge(pte_modify(pte, newprot)); 29588f860591SZhang, Yanmin set_huge_pte_at(mm, address, ptep, pte); 29598f860591SZhang, Yanmin } 29608f860591SZhang, Yanmin } 29618f860591SZhang, Yanmin spin_unlock(&mm->page_table_lock); 29623d48ae45SPeter Zijlstra mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex); 29638f860591SZhang, Yanmin 29648f860591SZhang, Yanmin flush_tlb_range(vma, start, end); 29658f860591SZhang, Yanmin } 29668f860591SZhang, Yanmin 2967a1e78772SMel Gorman int hugetlb_reserve_pages(struct inode *inode, 2968a1e78772SMel Gorman long from, long to, 29695a6fe125SMel Gorman struct vm_area_struct *vma, 2970ca16d140SKOSAKI Motohiro vm_flags_t vm_flags) 2971e4e574b7SAdam Litke { 297217c9d12eSMel Gorman long ret, chg; 2973a5516438SAndi Kleen struct hstate *h = hstate_inode(inode); 297490481622SDavid Gibson struct hugepage_subpool *spool = subpool_inode(inode); 2975e4e574b7SAdam Litke 2976a1e78772SMel Gorman /* 297717c9d12eSMel Gorman * Only apply hugepage reservation if asked. At fault time, an 297817c9d12eSMel Gorman * attempt will be made for VM_NORESERVE to allocate a page 297990481622SDavid Gibson * without using reserves 298017c9d12eSMel Gorman */ 2981ca16d140SKOSAKI Motohiro if (vm_flags & VM_NORESERVE) 298217c9d12eSMel Gorman return 0; 298317c9d12eSMel Gorman 298417c9d12eSMel Gorman /* 2985a1e78772SMel Gorman * Shared mappings base their reservation on the number of pages that 2986a1e78772SMel Gorman * are already allocated on behalf of the file. Private mappings need 2987a1e78772SMel Gorman * to reserve the full area even if read-only as mprotect() may be 2988a1e78772SMel Gorman * called to make the mapping read-write. 
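 *
 * Worked example (illustrative numbers only): a private mapping of
 * 8 huge pages always charges chg = to - from = 8, while a shared
 * mapping in which 3 of those 8 offsets already have regions
 * recorded gets chg = 5 back from region_chg().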
Assume !vma is a shm mapping 2989a1e78772SMel Gorman */ 2990f83a275dSMel Gorman if (!vma || vma->vm_flags & VM_MAYSHARE) 2991e4e574b7SAdam Litke chg = region_chg(&inode->i_mapping->private_list, from, to); 29925a6fe125SMel Gorman else { 29935a6fe125SMel Gorman struct resv_map *resv_map = resv_map_alloc(); 29945a6fe125SMel Gorman if (!resv_map) 29955a6fe125SMel Gorman return -ENOMEM; 29965a6fe125SMel Gorman 299717c9d12eSMel Gorman chg = to - from; 299817c9d12eSMel Gorman 29995a6fe125SMel Gorman set_vma_resv_map(vma, resv_map); 30005a6fe125SMel Gorman set_vma_resv_flags(vma, HPAGE_RESV_OWNER); 30015a6fe125SMel Gorman } 30025a6fe125SMel Gorman 3003c50ac050SDave Hansen if (chg < 0) { 3004c50ac050SDave Hansen ret = chg; 3005c50ac050SDave Hansen goto out_err; 3006c50ac050SDave Hansen } 300717c9d12eSMel Gorman 300890481622SDavid Gibson /* There must be enough pages in the subpool for the mapping */ 3009c50ac050SDave Hansen if (hugepage_subpool_get_pages(spool, chg)) { 3010c50ac050SDave Hansen ret = -ENOSPC; 3011c50ac050SDave Hansen goto out_err; 3012c50ac050SDave Hansen } 301317c9d12eSMel Gorman 301417c9d12eSMel Gorman /* 301517c9d12eSMel Gorman * Check enough hugepages are available for the reservation. 301690481622SDavid Gibson * Hand the pages back to the subpool if there are not 301717c9d12eSMel Gorman */ 301817c9d12eSMel Gorman ret = hugetlb_acct_memory(h, chg); 301917c9d12eSMel Gorman if (ret < 0) { 302090481622SDavid Gibson hugepage_subpool_put_pages(spool, chg); 3021c50ac050SDave Hansen goto out_err; 302217c9d12eSMel Gorman } 302317c9d12eSMel Gorman 302417c9d12eSMel Gorman /* 302517c9d12eSMel Gorman * Account for the reservations made. Shared mappings record regions 302617c9d12eSMel Gorman * that have reservations as they are shared by multiple VMAs. 302717c9d12eSMel Gorman * When the last VMA disappears, the region map says how much 302817c9d12eSMel Gorman * the reservation was and the page cache tells how much of 302917c9d12eSMel Gorman * the reservation was consumed. Private mappings are per-VMA and 303017c9d12eSMel Gorman * only the consumed reservations are tracked. When the VMA 303117c9d12eSMel Gorman * disappears, the original reservation is the VMA size and the 303217c9d12eSMel Gorman * consumed reservations are stored in the map. 
Hence, nothing 303317c9d12eSMel Gorman * else has to be done for private mappings here 303417c9d12eSMel Gorman */ 3035f83a275dSMel Gorman if (!vma || vma->vm_flags & VM_MAYSHARE) 303617c9d12eSMel Gorman region_add(&inode->i_mapping->private_list, from, to); 3037a43a8c39SChen, Kenneth W return 0; 3038c50ac050SDave Hansen out_err: 3039c50ac050SDave Hansen resv_map_put(vma); 3040c50ac050SDave Hansen return ret; 3041a43a8c39SChen, Kenneth W } 3042a43a8c39SChen, Kenneth W 3043a43a8c39SChen, Kenneth W void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed) 3044a43a8c39SChen, Kenneth W { 3045a5516438SAndi Kleen struct hstate *h = hstate_inode(inode); 3046a43a8c39SChen, Kenneth W long chg = region_truncate(&inode->i_mapping->private_list, offset); 304790481622SDavid Gibson struct hugepage_subpool *spool = subpool_inode(inode); 304845c682a6SKen Chen 304945c682a6SKen Chen spin_lock(&inode->i_lock); 3050e4c6f8beSEric Sandeen inode->i_blocks -= (blocks_per_huge_page(h) * freed); 305145c682a6SKen Chen spin_unlock(&inode->i_lock); 305245c682a6SKen Chen 305390481622SDavid Gibson hugepage_subpool_put_pages(spool, (chg - freed)); 3054a5516438SAndi Kleen hugetlb_acct_memory(h, -(chg - freed)); 3055a43a8c39SChen, Kenneth W } 305693f70f90SNaoya Horiguchi 3057d5bd9106SAndi Kleen #ifdef CONFIG_MEMORY_FAILURE 3058d5bd9106SAndi Kleen 30596de2b1aaSNaoya Horiguchi /* Should be called in hugetlb_lock */ 30606de2b1aaSNaoya Horiguchi static int is_hugepage_on_freelist(struct page *hpage) 30616de2b1aaSNaoya Horiguchi { 30626de2b1aaSNaoya Horiguchi struct page *page; 30636de2b1aaSNaoya Horiguchi struct page *tmp; 30646de2b1aaSNaoya Horiguchi struct hstate *h = page_hstate(hpage); 30656de2b1aaSNaoya Horiguchi int nid = page_to_nid(hpage); 30666de2b1aaSNaoya Horiguchi 30676de2b1aaSNaoya Horiguchi list_for_each_entry_safe(page, tmp, &h->hugepage_freelists[nid], lru) 30686de2b1aaSNaoya Horiguchi if (page == hpage) 30696de2b1aaSNaoya Horiguchi return 1; 30706de2b1aaSNaoya Horiguchi return 0; 30716de2b1aaSNaoya Horiguchi } 30726de2b1aaSNaoya Horiguchi 307393f70f90SNaoya Horiguchi /* 307493f70f90SNaoya Horiguchi * This function is called from memory failure code. 307593f70f90SNaoya Horiguchi * Assume the caller holds page lock of the head page. 307693f70f90SNaoya Horiguchi */ 30776de2b1aaSNaoya Horiguchi int dequeue_hwpoisoned_huge_page(struct page *hpage) 307893f70f90SNaoya Horiguchi { 307993f70f90SNaoya Horiguchi struct hstate *h = page_hstate(hpage); 308093f70f90SNaoya Horiguchi int nid = page_to_nid(hpage); 30816de2b1aaSNaoya Horiguchi int ret = -EBUSY; 308293f70f90SNaoya Horiguchi 308393f70f90SNaoya Horiguchi spin_lock(&hugetlb_lock); 30846de2b1aaSNaoya Horiguchi if (is_hugepage_on_freelist(hpage)) { 308593f70f90SNaoya Horiguchi list_del(&hpage->lru); 30868c6c2ecbSNaoya Horiguchi set_page_refcounted(hpage); 308793f70f90SNaoya Horiguchi h->free_huge_pages--; 308893f70f90SNaoya Horiguchi h->free_huge_pages_node[nid]--; 30896de2b1aaSNaoya Horiguchi ret = 0; 309093f70f90SNaoya Horiguchi } 30916de2b1aaSNaoya Horiguchi spin_unlock(&hugetlb_lock); 30926de2b1aaSNaoya Horiguchi return ret; 30936de2b1aaSNaoya Horiguchi } 30946de2b1aaSNaoya Horiguchi #endif 3095
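/*
 * Usage sketch (illustrative, not a caller from this file): the
 * memory failure path is expected to isolate a poisoned free
 * hugepage roughly as
 *
 *    lock_page(hpage);
 *    ret = dequeue_hwpoisoned_huge_page(hpage);
 *    unlock_page(hpage);
 *
 * where ret == 0 means the page was taken off its free list and
 * -EBUSY means it was already in use.
 */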