/*
 * Generic hugetlb support.
 * (C) Nadia Yvette Chambers, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/page-isolation.h>
#include <linux/jhash.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>

#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/node.h>
#include "internal.h"

int hugepages_treat_as_movable;

int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];

__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static unsigned long __initdata default_hstate_size;

/*
 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
 * free_huge_pages, and surplus_huge_pages.
 */
DEFINE_SPINLOCK(hugetlb_lock);

/*
 * Serializes faults on the same logical page.  This is used to
 * prevent spurious OOMs when the hugepage pool is fully utilized.
 */
static int num_fault_mutexes;
static struct mutex *htlb_fault_mutex_table ____cacheline_aligned_in_smp;
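/*
 * Usage sketch (an assumption: the consumers of this table live later
 * in the file, outside this excerpt).  A fault picks one mutex,
 * typically by hashing the mapping and page index (note the
 * <linux/jhash.h> include above), e.g.:
 *
 *	idx = hash & (num_fault_mutexes - 1);
 *	mutex_lock(&htlb_fault_mutex_table[idx]);
 *	...handle the fault...
 *	mutex_unlock(&htlb_fault_mutex_table[idx]);
 *
 * The actual hash function is not shown in this excerpt.
 */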
static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
{
	bool free = (spool->count == 0) && (spool->used_hpages == 0);

	spin_unlock(&spool->lock);

	/* If no pages are used, and no other handles to the subpool
	 * remain, free the subpool. */
	if (free)
		kfree(spool);
}

struct hugepage_subpool *hugepage_new_subpool(long nr_blocks)
{
	struct hugepage_subpool *spool;

	spool = kmalloc(sizeof(*spool), GFP_KERNEL);
	if (!spool)
		return NULL;

	spin_lock_init(&spool->lock);
	spool->count = 1;
	spool->max_hpages = nr_blocks;
	spool->used_hpages = 0;

	return spool;
}

void hugepage_put_subpool(struct hugepage_subpool *spool)
{
	spin_lock(&spool->lock);
	BUG_ON(!spool->count);
	spool->count--;
	unlock_or_release_subpool(spool);
}
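/*
 * Typical subpool lifecycle, as a sketch (the real callers live in
 * fs/hugetlbfs, not in this file):
 *
 *	spool = hugepage_new_subpool(max_hpages);   refcount is now 1
 *	hugepage_subpool_get_pages(spool, 1);       charge one huge page
 *	hugepage_subpool_put_pages(spool, 1);       return the charge
 *	hugepage_put_subpool(spool);                drop the ref, maybe free
 */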
static int hugepage_subpool_get_pages(struct hugepage_subpool *spool,
				      long delta)
{
	int ret = 0;

	if (!spool)
		return 0;

	spin_lock(&spool->lock);
	if ((spool->used_hpages + delta) <= spool->max_hpages) {
		spool->used_hpages += delta;
	} else {
		ret = -ENOMEM;
	}
	spin_unlock(&spool->lock);

	return ret;
}

static void hugepage_subpool_put_pages(struct hugepage_subpool *spool,
				       long delta)
{
	if (!spool)
		return;

	spin_lock(&spool->lock);
	spool->used_hpages -= delta;
	/* If hugetlbfs_put_super couldn't free spool due to
	 * an outstanding quota reference, free it now. */
	unlock_or_release_subpool(spool);
}

static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{
	return HUGETLBFS_SB(inode->i_sb)->spool;
}

static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{
	return subpool_inode(file_inode(vma->vm_file));
}

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 *                    across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and
 * protected by the resv_map's lock.
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
};
static long region_add(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *nrg, *trg;

	spin_lock(&resv->lock);
	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;

	/* Check for and consume any regions we now overlap with. */
	nrg = rg;
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			break;

		/* If this area reaches higher then extend our area to
		 * include it completely.  If this is not the first area
		 * which we intend to reuse, free it. */
		if (rg->to > t)
			t = rg->to;
		if (rg != nrg) {
			list_del(&rg->link);
			kfree(rg);
		}
	}
	nrg->from = f;
	nrg->to = t;
	spin_unlock(&resv->lock);
	return 0;
}
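/*
 * A summary of the two-phase protocol, inferred from the pairing of
 * these functions: callers first ask region_chg() how many new pages a
 * [f, t) range would consume, charge their counters, and only then
 * commit the range with region_add().  To guarantee that the later
 * region_add() cannot fail on memory allocation, region_chg() may
 * pre-insert a zero-sized placeholder region; because it has to drop
 * resv->lock around kmalloc(), it revalidates via the retry label
 * below.
 */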
static long region_chg(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *nrg = NULL;
	long chg = 0;

retry:
	spin_lock(&resv->lock);
	/* Locate the region we are before or in. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* If we are below the current region then a new region is required.
	 * Subtle, allocate a new region at the position but make it zero
	 * size such that we can guarantee to record the reservation. */
	if (&rg->link == head || t < rg->from) {
		if (!nrg) {
			spin_unlock(&resv->lock);
			nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
			if (!nrg)
				return -ENOMEM;

			nrg->from = f;
			nrg->to = f;
			INIT_LIST_HEAD(&nrg->link);
			goto retry;
		}

		list_add(&nrg->link, rg->link.prev);
		chg = t - f;
		goto out_nrg;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;
	chg = t - f;

	/* Check for and consume any regions we now overlap with. */
	list_for_each_entry(rg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			goto out;

		/* We overlap with this area, if it extends further than
		 * us then we must extend ourselves.  Account for its
		 * existing reservation.
		 */
		if (rg->to > t) {
			chg += rg->to - t;
			t = rg->to;
		}
		chg -= rg->to - rg->from;
	}

out:
	spin_unlock(&resv->lock);
	/* We already know we raced and no longer need the new region */
	kfree(nrg);
	return chg;
out_nrg:
	spin_unlock(&resv->lock);
	return chg;
}

static long region_truncate(struct resv_map *resv, long end)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *trg;
	long chg = 0;

	spin_lock(&resv->lock);
	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (end <= rg->to)
			break;
	if (&rg->link == head)
		goto out;

	/* If we are in the middle of a region then adjust it. */
	if (end > rg->from) {
		chg = rg->to - end;
		rg->to = end;
		rg = list_entry(rg->link.next, typeof(*rg), link);
	}

	/* Drop any remaining regions. */
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		chg += rg->to - rg->from;
		list_del(&rg->link);
		kfree(rg);
	}

out:
	spin_unlock(&resv->lock);
	return chg;
}
static long region_count(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg;
	long chg = 0;

	spin_lock(&resv->lock);
	/* Locate each segment we overlap with, and count that overlap. */
	list_for_each_entry(rg, head, link) {
		long seg_from;
		long seg_to;

		if (rg->to <= f)
			continue;
		if (rg->from >= t)
			break;

		seg_from = max(rg->from, f);
		seg_to = min(rg->to, t);

		chg += seg_to - seg_from;
	}
	spin_unlock(&resv->lock);

	return chg;
}

/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	return ((address - vma->vm_start) >> huge_page_shift(h)) +
			(vma->vm_pgoff >> huge_page_order(h));
}

pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address)
{
	return vma_hugecache_offset(hstate_vma(vma), vma, address);
}
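/*
 * Worked example (assuming x86_64 with a 2 MiB hstate, so
 * huge_page_shift() == 21 and huge_page_order() == 9): for a vma
 * starting at 0x40000000 with vm_pgoff == 512 (2 MiB into the file in
 * 4 KiB units), an address 4 MiB into the vma gives
 *
 *	((0x40400000 - 0x40000000) >> 21) + (512 >> 9) = 2 + 1 = 3
 *
 * i.e. the fault maps to the fourth huge page of the file.
 */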
/*
 * Return the size of the pages allocated when backing a VMA. In the majority
 * of cases this will be the same size as used by the page table entries.
 */
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	struct hstate *hstate;

	if (!is_vm_hugetlb_page(vma))
		return PAGE_SIZE;

	hstate = hstate_vma(vma);

	return 1UL << huge_page_shift(hstate);
}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);

/*
 * Return the page size being used by the MMU to back a VMA. In the majority
 * of cases, the page size used by the kernel matches the MMU size. On
 * architectures where it differs, an architecture-specific version of this
 * function is required.
 */
#ifndef vma_mmu_pagesize
unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return vma_kernel_pagesize(vma);
}
#endif

/*
 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 * is guaranteed to have their future faults succeed.
 *
 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held. It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping.  A shared mapping has a region map associated
 * with the underlying file, this region map represents the backing file
 * pages which have ever had a reservation assigned, and this persists even
 * after the page is instantiated.  A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it, this region map represents those offsets which have consumed
 * reservation ie. where pages have been instantiated.
 */
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
	return (unsigned long)vma->vm_private_data;
}

static void set_vma_private_data(struct vm_area_struct *vma,
							unsigned long value)
{
	vma->vm_private_data = (void *)value;
}
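/*
 * Layout sketch: for a private mapping, vm_private_data packs a
 * kmalloc()ed resv_map pointer together with the HPAGE_RESV_* flags.
 * Because kmalloc() results are at least word aligned, the two low bits
 * of the pointer are guaranteed zero and are free for flags:
 *
 *	value = (unsigned long)resv_map | HPAGE_RESV_OWNER;
 *	map   = (struct resv_map *)(value & ~HPAGE_RESV_MASK);
 *	owner = value & HPAGE_RESV_OWNER;
 */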
struct resv_map *resv_map_alloc(void)
{
	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
	if (!resv_map)
		return NULL;

	kref_init(&resv_map->refs);
	spin_lock_init(&resv_map->lock);
	INIT_LIST_HEAD(&resv_map->regions);

	return resv_map;
}

void resv_map_release(struct kref *ref)
{
	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);

	/* Clear out any active regions before we release the map. */
	region_truncate(resv_map, 0);
	kfree(resv_map);
}

static inline struct resv_map *inode_resv_map(struct inode *inode)
{
	return inode->i_mapping->private_data;
}

static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	if (vma->vm_flags & VM_MAYSHARE) {
		struct address_space *mapping = vma->vm_file->f_mapping;
		struct inode *inode = mapping->host;

		return inode_resv_map(inode);

	} else {
		return (struct resv_map *)(get_vma_private_data(vma) &
							~HPAGE_RESV_MASK);
	}
}

static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, (get_vma_private_data(vma) &
				HPAGE_RESV_MASK) | (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);

	return (get_vma_private_data(vma) & flag) != 0;
}

/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	if (!(vma->vm_flags & VM_MAYSHARE))
		vma->vm_private_data = (void *)0;
}
/* Returns true if the VMA has associated reserve pages */
static int vma_has_reserves(struct vm_area_struct *vma, long chg)
{
	if (vma->vm_flags & VM_NORESERVE) {
		/*
		 * This address is already reserved by another process
		 * (chg == 0), so we should decrement the reserved count.
		 * Without decrementing, the reserve count remains after
		 * releasing the inode, because the allocated page will go
		 * into the page cache and be regarded as coming from the
		 * reserved pool when released.  Currently, we don't have
		 * any other solution to deal with this situation properly,
		 * so add a work-around here.
		 */
		if (vma->vm_flags & VM_MAYSHARE && chg == 0)
			return 1;
		else
			return 0;
	}

	/* Shared mappings always use reserves */
	if (vma->vm_flags & VM_MAYSHARE)
		return 1;

	/*
	 * Only the process that called mmap() has reserves for
	 * private mappings.
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
		return 1;

	return 0;
}

static void enqueue_huge_page(struct hstate *h, struct page *page)
{
	int nid = page_to_nid(page);
	list_move(&page->lru, &h->hugepage_freelists[nid]);
	h->free_huge_pages++;
	h->free_huge_pages_node[nid]++;
}

static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
{
	struct page *page;

	list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
		if (!is_migrate_isolate_page(page))
			break;
	/*
	 * If no non-isolated free hugepage is found on the list,
	 * the allocation fails.
	 */
	if (&h->hugepage_freelists[nid] == &page->lru)
		return NULL;
	list_move(&page->lru, &h->hugepage_activelist);
	set_page_refcounted(page);
	h->free_huge_pages--;
	h->free_huge_pages_node[nid]--;
	return page;
}
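/*
 * Note on the two helpers above: free_huge_pages stays equal to the sum
 * of free_huge_pages_node[] over all nodes, and both helpers assume the
 * caller holds hugetlb_lock (see the lock's comment near the top of the
 * file), which is what keeps that pair of counters in sync.
 */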
/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	if (hugepages_treat_as_movable || hugepage_migration_supported(h))
		return GFP_HIGHUSER_MOVABLE;
	else
		return GFP_HIGHUSER;
}

static struct page *dequeue_huge_page_vma(struct hstate *h,
				struct vm_area_struct *vma,
				unsigned long address, int avoid_reserve,
				long chg)
{
	struct page *page = NULL;
	struct mempolicy *mpol;
	nodemask_t *nodemask;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;
	unsigned int cpuset_mems_cookie;

	/*
	 * A child process with MAP_PRIVATE mappings created by its parent
	 * has no page reserves. This check ensures that reservations are
	 * not "stolen". The child may still get SIGKILLed.
	 */
	if (!vma_has_reserves(vma, chg) &&
			h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

	/* If reserves cannot be used, ensure enough pages are in the pool */
	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

retry_cpuset:
	cpuset_mems_cookie = read_mems_allowed_begin();
	zonelist = huge_zonelist(vma, address,
					htlb_alloc_mask(h), &mpol, &nodemask);

	for_each_zone_zonelist_nodemask(zone, z, zonelist,
						MAX_NR_ZONES - 1, nodemask) {
		if (cpuset_zone_allowed(zone, htlb_alloc_mask(h))) {
			page = dequeue_huge_page_node(h, zone_to_nid(zone));
			if (page) {
				if (avoid_reserve)
					break;
				if (!vma_has_reserves(vma, chg))
					break;

				SetPagePrivate(page);
				h->resv_huge_pages--;
				break;
			}
		}
	}

	mpol_cond_put(mpol);
	if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
		goto retry_cpuset;
	return page;

err:
	return NULL;
}
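/*
 * The retry_cpuset dance above is a seqcount-style pattern:
 * read_mems_allowed_begin() samples a sequence counter before the
 * zonelist walk, and read_mems_allowed_retry() reports whether the
 * task's allowed node mask changed underneath us.  If it did and no
 * page was found, the walk is redone against the updated mask rather
 * than failing spuriously.
 */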
/*
 * common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 * be outside of *nodes_allowed.  Ensure that we use an allowed
 * node for alloc or free.
 */
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	nid = next_node(nid, *nodes_allowed);
	if (nid == MAX_NUMNODES)
		nid = first_node(*nodes_allowed);
	VM_BUG_ON(nid >= MAX_NUMNODES);

	return nid;
}

static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	if (!node_isset(nid, *nodes_allowed))
		nid = next_node_allowed(nid, nodes_allowed);
	return nid;
}

/*
 * returns the previously saved node ["this node"] from which to
 * allocate a persistent huge page for the pool and advance the
 * next node from which to allocate, handling wrap at end of node
 * mask.
 */
static int hstate_next_node_to_alloc(struct hstate *h,
					nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);

	return nid;
}
/*
 * helper for free_pool_huge_page() - return the previously saved
 * node ["this node"] from which to free a huge page.  Advance the
 * next node id whether or not we find a free huge page to free so
 * that the next attempt to free addresses the next node.
 */
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);

	return nid;
}

#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_alloc(hs, mask)) || 1);	\
		nr_nodes--)

#define for_each_node_mask_to_free(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
		nr_nodes--)
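/*
 * Reading the macros above: the "|| 1" makes the assignment to 'node'
 * unconditional while keeping the loop condition governed solely by
 * nr_nodes, so each iteration visits the next allowed node in
 * round-robin order and the walk stops after nodes_weight(*mask)
 * steps even when node 0 is returned.
 */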
#if defined(CONFIG_CMA) && defined(CONFIG_X86_64)
static void destroy_compound_gigantic_page(struct page *page,
					unsigned long order)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p = page + 1;

	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
		__ClearPageTail(p);
		set_page_refcounted(p);
		p->first_page = NULL;
	}

	set_compound_order(page, 0);
	__ClearPageHead(page);
}

static void free_gigantic_page(struct page *page, unsigned order)
{
	free_contig_range(page_to_pfn(page), 1 << order);
}

static int __alloc_gigantic_page(unsigned long start_pfn,
				unsigned long nr_pages)
{
	unsigned long end_pfn = start_pfn + nr_pages;
	return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
}

static bool pfn_range_valid_gigantic(unsigned long start_pfn,
				unsigned long nr_pages)
{
	unsigned long i, end_pfn = start_pfn + nr_pages;
	struct page *page;

	for (i = start_pfn; i < end_pfn; i++) {
		if (!pfn_valid(i))
			return false;

		page = pfn_to_page(i);

		if (PageReserved(page))
			return false;

		if (page_count(page) > 0)
			return false;

		if (PageHuge(page))
			return false;
	}

	return true;
}

static bool zone_spans_last_pfn(const struct zone *zone,
			unsigned long start_pfn, unsigned long nr_pages)
{
	unsigned long last_pfn = start_pfn + nr_pages - 1;
	return zone_spans_pfn(zone, last_pfn);
}
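/*
 * alloc_gigantic_page() below walks each zone on the node in
 * nr_pages-aligned steps, pre-filtering candidate ranges with
 * pfn_range_valid_gigantic() so that alloc_contig_range() is only
 * attempted on ranges that look free.  The pre-check is advisory:
 * the zone lock is dropped before the actual allocation, so it can
 * still lose the race and fail.
 */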
static struct page *alloc_gigantic_page(int nid, unsigned order)
{
	unsigned long nr_pages = 1 << order;
	unsigned long ret, pfn, flags;
	struct zone *z;

	z = NODE_DATA(nid)->node_zones;
	for (; z - NODE_DATA(nid)->node_zones < MAX_NR_ZONES; z++) {
		spin_lock_irqsave(&z->lock, flags);

		pfn = ALIGN(z->zone_start_pfn, nr_pages);
		while (zone_spans_last_pfn(z, pfn, nr_pages)) {
			if (pfn_range_valid_gigantic(pfn, nr_pages)) {
				/*
				 * We release the zone lock here because
				 * alloc_contig_range() will also lock the zone
				 * at some point. If there's an allocation
				 * spinning on this lock, it may win the race
				 * and cause alloc_contig_range() to fail...
				 */
				spin_unlock_irqrestore(&z->lock, flags);
				ret = __alloc_gigantic_page(pfn, nr_pages);
				if (!ret)
					return pfn_to_page(pfn);
				spin_lock_irqsave(&z->lock, flags);
			}
			pfn += nr_pages;
		}

		spin_unlock_irqrestore(&z->lock, flags);
	}

	return NULL;
}

static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
static void prep_compound_gigantic_page(struct page *page, unsigned long order);

static struct page *alloc_fresh_gigantic_page_node(struct hstate *h, int nid)
{
	struct page *page;

	page = alloc_gigantic_page(nid, huge_page_order(h));
	if (page) {
		prep_compound_gigantic_page(page, huge_page_order(h));
		prep_new_huge_page(h, page, nid);
	}

	return page;
}

static int alloc_fresh_gigantic_page(struct hstate *h,
				nodemask_t *nodes_allowed)
{
	struct page *page = NULL;
	int nr_nodes, node;

	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
		page = alloc_fresh_gigantic_page_node(h, node);
		if (page)
			return 1;
	}

	return 0;
}

static inline bool gigantic_page_supported(void) { return true; }
#else
static inline bool gigantic_page_supported(void) { return false; }
static inline void free_gigantic_page(struct page *page, unsigned order) { }
static inline void destroy_compound_gigantic_page(struct page *page,
						unsigned long order) { }
static inline int alloc_fresh_gigantic_page(struct hstate *h,
					nodemask_t *nodes_allowed) { return 0; }
#endif
static void update_and_free_page(struct hstate *h, struct page *page)
{
	int i;

	if (hstate_is_gigantic(h) && !gigantic_page_supported())
		return;

	h->nr_huge_pages--;
	h->nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < pages_per_huge_page(h); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
				1 << PG_referenced | 1 << PG_dirty |
				1 << PG_active | 1 << PG_private |
				1 << PG_writeback);
	}
	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
	set_compound_page_dtor(page, NULL);
	set_page_refcounted(page);
	if (hstate_is_gigantic(h)) {
		destroy_compound_gigantic_page(page, huge_page_order(h));
		free_gigantic_page(page, huge_page_order(h));
	} else {
		arch_release_hugepage(page);
		__free_pages(page, huge_page_order(h));
	}
}

struct hstate *size_to_hstate(unsigned long size)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (huge_page_size(h) == size)
			return h;
	}
	return NULL;
}

void free_huge_page(struct page *page)
{
	/*
	 * Can't pass hstate in here because it is called from the
	 * compound page destructor.
	 */
	struct hstate *h = page_hstate(page);
	int nid = page_to_nid(page);
	struct hugepage_subpool *spool =
		(struct hugepage_subpool *)page_private(page);
	bool restore_reserve;

	set_page_private(page, 0);
	page->mapping = NULL;
	BUG_ON(page_count(page));
	BUG_ON(page_mapcount(page));
	restore_reserve = PagePrivate(page);
	ClearPagePrivate(page);

	spin_lock(&hugetlb_lock);
	hugetlb_cgroup_uncharge_page(hstate_index(h),
				     pages_per_huge_page(h), page);
	if (restore_reserve)
		h->resv_huge_pages++;

	if (h->surplus_huge_pages_node[nid]) {
		/* remove the page from active list */
		list_del(&page->lru);
		update_and_free_page(h, page);
		h->surplus_huge_pages--;
		h->surplus_huge_pages_node[nid]--;
	} else {
		arch_clear_hugepage_flags(page);
		enqueue_huge_page(h, page);
	}
	spin_unlock(&hugetlb_lock);
	hugepage_subpool_put_pages(spool, 1);
}
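/*
 * Note on the page private state used above, as inferred from this
 * file: the page_private() *value* stores the owning subpool pointer,
 * while the PagePrivate *flag* independently marks pages whose freeing
 * should restore a reservation (set via SetPagePrivate() in
 * dequeue_huge_page_vma()).  The two are distinct, which is why
 * free_huge_page() clears and consumes them separately.
 */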
static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
{
	INIT_LIST_HEAD(&page->lru);
	set_compound_page_dtor(page, free_huge_page);
	spin_lock(&hugetlb_lock);
	set_hugetlb_cgroup(page, NULL);
	h->nr_huge_pages++;
	h->nr_huge_pages_node[nid]++;
	spin_unlock(&hugetlb_lock);
	put_page(page); /* free it into the hugepage allocator */
}

static void prep_compound_gigantic_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p = page + 1;

	/* we rely on prep_new_huge_page to set the destructor */
	set_compound_order(page, order);
	__SetPageHead(page);
	__ClearPageReserved(page);
	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
		__SetPageTail(p);
		/*
		 * For gigantic hugepages allocated through bootmem at
		 * boot, it's safer to be consistent with the not-gigantic
		 * hugepages and clear the PG_reserved bit from all tail pages
		 * too.  Otherwise drivers using get_user_pages() to access
		 * tail pages may get the reference counting wrong if they see
		 * PG_reserved set on a tail page (despite the head page not
		 * having PG_reserved set).  Enforcing this consistency between
		 * head and tail pages allows drivers to optimize away a check
		 * on the head page when they need to know if put_page() is
		 * needed after get_user_pages().
		 */
		__ClearPageReserved(p);
		set_page_count(p, 0);
		p->first_page = page;
	}
}

/*
 * PageHuge() only returns true for hugetlbfs pages, but not for normal or
 * transparent huge pages.  See the PageTransHuge() documentation for more
 * details.
 */
int PageHuge(struct page *page)
{
	if (!PageCompound(page))
		return 0;

	page = compound_head(page);
	return get_compound_page_dtor(page) == free_huge_page;
}
EXPORT_SYMBOL_GPL(PageHuge);

/*
 * PageHeadHuge() only returns true for hugetlbfs head page, but not for
 * normal or transparent huge pages.
 */
int PageHeadHuge(struct page *page_head)
{
	if (!PageHead(page_head))
		return 0;

	return get_compound_page_dtor(page_head) == free_huge_page;
}

pgoff_t __basepage_index(struct page *page)
{
	struct page *page_head = compound_head(page);
	pgoff_t index = page_index(page_head);
	unsigned long compound_idx;

	if (!PageHuge(page_head))
		return page_index(page);

	if (compound_order(page_head) >= MAX_ORDER)
		compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
	else
		compound_idx = page - page_head;

	return (index << compound_order(page_head)) + compound_idx;
}
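/*
 * Worked example (assuming 4 KiB base pages and a 2 MiB huge page, so
 * compound_order() == 9 < MAX_ORDER): hugetlbfs indexes its page cache
 * in huge-page units, so for the base page five pages into the huge
 * page at huge index 3 (compound_idx == 5),
 *
 *	__basepage_index() = (3 << 9) + 5 = 1541
 *
 * i.e. the offset of that 4 KiB base page within the file.
 */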
static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
{
	struct page *page;

	page = alloc_pages_exact_node(nid,
		htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
						__GFP_REPEAT|__GFP_NOWARN,
		huge_page_order(h));
	if (page) {
		if (arch_prepare_hugepage(page)) {
			__free_pages(page, huge_page_order(h));
			return NULL;
		}
		prep_new_huge_page(h, page, nid);
	}

	return page;
}

static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
{
	struct page *page;
	int nr_nodes, node;
	int ret = 0;

	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
		page = alloc_fresh_huge_page_node(h, node);
		if (page) {
			ret = 1;
			break;
		}
	}

	if (ret)
		count_vm_event(HTLB_BUDDY_PGALLOC);
	else
		count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);

	return ret;
}
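/*
 * On the gfp flags used above (general kernel semantics, not specific
 * to this file): __GFP_COMP requests a compound page, __GFP_THISNODE
 * forbids falling back to other nodes (the round-robin caller handles
 * node fallback itself), __GFP_REPEAT asks the allocator to try harder
 * before giving up, and __GFP_NOWARN suppresses the allocation-failure
 * warning since failure here is handled gracefully.
 */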
/*
 * Free huge page from pool from next node to free.
 * Attempt to keep persistent huge pages more or less
 * balanced over allowed nodes.
 * Called with hugetlb_lock locked.
 */
static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
							 bool acct_surplus)
{
	int nr_nodes, node;
	int ret = 0;

	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
		/*
		 * If we're returning unused surplus pages, only examine
		 * nodes with surplus pages.
		 */
		if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
		    !list_empty(&h->hugepage_freelists[node])) {
			struct page *page =
				list_entry(h->hugepage_freelists[node].next,
					  struct page, lru);
			list_del(&page->lru);
			h->free_huge_pages--;
			h->free_huge_pages_node[node]--;
			if (acct_surplus) {
				h->surplus_huge_pages--;
				h->surplus_huge_pages_node[node]--;
			}
			update_and_free_page(h, page);
			ret = 1;
			break;
		}
	}

	return ret;
}

/*
 * Dissolve a given free hugepage into free buddy pages. This function does
 * nothing for in-use (including surplus) hugepages.
 */
static void dissolve_free_huge_page(struct page *page)
{
	spin_lock(&hugetlb_lock);
	if (PageHuge(page) && !page_count(page)) {
		struct hstate *h = page_hstate(page);
		int nid = page_to_nid(page);
		list_del(&page->lru);
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
		update_and_free_page(h, page);
	}
	spin_unlock(&hugetlb_lock);
}
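/*
 * In dissolve_free_huge_pages() below, 'order' starts at
 * 8 * sizeof(void *) (64 on 64-bit), a deliberately over-large value
 * that the for_each_hstate() loop then lowers to the smallest
 * registered huge page order, so the pfn scan steps through the range
 * at minimum-hugepage granularity.
 */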
/*
 * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
 * make specified memory blocks removable from the system.
 * Note that start_pfn should be aligned to the (minimum) hugepage size.
 */
void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned int order = 8 * sizeof(void *);
	unsigned long pfn;
	struct hstate *h;

	if (!hugepages_supported())
		return;

	/* Set scan step to minimum hugepage size */
	for_each_hstate(h)
		if (order > huge_page_order(h))
			order = huge_page_order(h);
	VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << order));
	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order)
		dissolve_free_huge_page(pfn_to_page(pfn));
}

static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
{
	struct page *page;
	unsigned int r_nid;

	if (hstate_is_gigantic(h))
		return NULL;

	/*
	 * Assume we will successfully allocate the surplus page to
	 * prevent racing processes from causing the surplus to exceed
	 * overcommit
	 *
	 * This however introduces a different race, where a process B
	 * tries to grow the static hugepage pool while alloc_pages() is
	 * called by process A. B will only examine the per-node
	 * counters in determining if surplus huge pages can be
	 * converted to normal huge pages in adjust_pool_surplus(). A
	 * won't be able to increment the per-node counter, until the
	 * lock is dropped by B, but B doesn't drop hugetlb_lock until
	 * no more huge pages can be converted from surplus to normal
	 * state (and doesn't try to convert again). Thus, we have a
	 * case where a surplus huge page exists, the pool is grown, and
	 * the surplus huge page still exists after, even though it
	 * should just have been converted to a normal huge page. This
	 * does not leak memory, though, as the hugepage will be freed
	 * once it is out of use. It also does not allow the counters to
	 * go out of whack in adjust_pool_surplus() as we don't modify
	 * the node values until we've gotten the hugepage and only the
	 * per-node value is checked there.
1133d1c3fb1fSNishanth Aravamudan */ 1134d1c3fb1fSNishanth Aravamudan spin_lock(&hugetlb_lock); 1135a5516438SAndi Kleen if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) { 1136d1c3fb1fSNishanth Aravamudan spin_unlock(&hugetlb_lock); 1137d1c3fb1fSNishanth Aravamudan return NULL; 1138d1c3fb1fSNishanth Aravamudan } else { 1139a5516438SAndi Kleen h->nr_huge_pages++; 1140a5516438SAndi Kleen h->surplus_huge_pages++; 1141d1c3fb1fSNishanth Aravamudan } 1142d1c3fb1fSNishanth Aravamudan spin_unlock(&hugetlb_lock); 1143d1c3fb1fSNishanth Aravamudan 1144bf50bab2SNaoya Horiguchi if (nid == NUMA_NO_NODE) 114586cdb465SNaoya Horiguchi page = alloc_pages(htlb_alloc_mask(h)|__GFP_COMP| 1146551883aeSNishanth Aravamudan __GFP_REPEAT|__GFP_NOWARN, 1147a5516438SAndi Kleen huge_page_order(h)); 1148bf50bab2SNaoya Horiguchi else 1149bf50bab2SNaoya Horiguchi page = alloc_pages_exact_node(nid, 115086cdb465SNaoya Horiguchi htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE| 1151bf50bab2SNaoya Horiguchi __GFP_REPEAT|__GFP_NOWARN, huge_page_order(h)); 1152d1c3fb1fSNishanth Aravamudan 1153caff3a2cSGerald Schaefer if (page && arch_prepare_hugepage(page)) { 1154caff3a2cSGerald Schaefer __free_pages(page, huge_page_order(h)); 1155ea5768c7SHillf Danton page = NULL; 1156caff3a2cSGerald Schaefer } 1157caff3a2cSGerald Schaefer 11587893d1d5SAdam Litke spin_lock(&hugetlb_lock); 1159d1c3fb1fSNishanth Aravamudan if (page) { 11600edaecfaSAneesh Kumar K.V INIT_LIST_HEAD(&page->lru); 1161bf50bab2SNaoya Horiguchi r_nid = page_to_nid(page); 1162d1c3fb1fSNishanth Aravamudan set_compound_page_dtor(page, free_huge_page); 11639dd540e2SAneesh Kumar K.V set_hugetlb_cgroup(page, NULL); 1164d1c3fb1fSNishanth Aravamudan /* 1165d1c3fb1fSNishanth Aravamudan * We incremented the global counters already 1166d1c3fb1fSNishanth Aravamudan */ 1167bf50bab2SNaoya Horiguchi h->nr_huge_pages_node[r_nid]++; 1168bf50bab2SNaoya Horiguchi h->surplus_huge_pages_node[r_nid]++; 11693b116300SAdam Litke __count_vm_event(HTLB_BUDDY_PGALLOC); 1170d1c3fb1fSNishanth Aravamudan } else { 1171a5516438SAndi Kleen h->nr_huge_pages--; 1172a5516438SAndi Kleen h->surplus_huge_pages--; 11733b116300SAdam Litke __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL); 11747893d1d5SAdam Litke } 1175d1c3fb1fSNishanth Aravamudan spin_unlock(&hugetlb_lock); 11767893d1d5SAdam Litke 11777893d1d5SAdam Litke return page; 11787893d1d5SAdam Litke } 11797893d1d5SAdam Litke 1180e4e574b7SAdam Litke /* 1181bf50bab2SNaoya Horiguchi * This allocation function is useful in the context where vma is irrelevant. 1182bf50bab2SNaoya Horiguchi * E.g. soft-offlining uses this function because it only cares about the 1183bf50bab2SNaoya Horiguchi * physical address of the error page.
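 *
 * Illustrative call (hypothetical; 'error_page' is not a name from
 * this file):
 *
 *	new = alloc_huge_page_node(h, page_to_nid(error_page));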
1184bf50bab2SNaoya Horiguchi */ 1185bf50bab2SNaoya Horiguchi struct page *alloc_huge_page_node(struct hstate *h, int nid) 1186bf50bab2SNaoya Horiguchi { 11874ef91848SJoonsoo Kim struct page *page = NULL; 1188bf50bab2SNaoya Horiguchi 1189bf50bab2SNaoya Horiguchi spin_lock(&hugetlb_lock); 11904ef91848SJoonsoo Kim if (h->free_huge_pages - h->resv_huge_pages > 0) 1191bf50bab2SNaoya Horiguchi page = dequeue_huge_page_node(h, nid); 1192bf50bab2SNaoya Horiguchi spin_unlock(&hugetlb_lock); 1193bf50bab2SNaoya Horiguchi 119494ae8ba7SAneesh Kumar K.V if (!page) 1195bf50bab2SNaoya Horiguchi page = alloc_buddy_huge_page(h, nid); 1196bf50bab2SNaoya Horiguchi 1197bf50bab2SNaoya Horiguchi return page; 1198bf50bab2SNaoya Horiguchi } 1199bf50bab2SNaoya Horiguchi 1200bf50bab2SNaoya Horiguchi /* 120125985edcSLucas De Marchi * Increase the hugetlb pool such that it can accommodate a reservation 1202e4e574b7SAdam Litke * of size 'delta'. 1203e4e574b7SAdam Litke */ 1204a5516438SAndi Kleen static int gather_surplus_pages(struct hstate *h, int delta) 1205e4e574b7SAdam Litke { 1206e4e574b7SAdam Litke struct list_head surplus_list; 1207e4e574b7SAdam Litke struct page *page, *tmp; 1208e4e574b7SAdam Litke int ret, i; 1209e4e574b7SAdam Litke int needed, allocated; 121028073b02SHillf Danton bool alloc_ok = true; 1211e4e574b7SAdam Litke 1212a5516438SAndi Kleen needed = (h->resv_huge_pages + delta) - h->free_huge_pages; 1213ac09b3a1SAdam Litke if (needed <= 0) { 1214a5516438SAndi Kleen h->resv_huge_pages += delta; 1215e4e574b7SAdam Litke return 0; 1216ac09b3a1SAdam Litke } 1217e4e574b7SAdam Litke 1218e4e574b7SAdam Litke allocated = 0; 1219e4e574b7SAdam Litke INIT_LIST_HEAD(&surplus_list); 1220e4e574b7SAdam Litke 1221e4e574b7SAdam Litke ret = -ENOMEM; 1222e4e574b7SAdam Litke retry: 1223e4e574b7SAdam Litke spin_unlock(&hugetlb_lock); 1224e4e574b7SAdam Litke for (i = 0; i < needed; i++) { 1225bf50bab2SNaoya Horiguchi page = alloc_buddy_huge_page(h, NUMA_NO_NODE); 122628073b02SHillf Danton if (!page) { 122728073b02SHillf Danton alloc_ok = false; 122828073b02SHillf Danton break; 122928073b02SHillf Danton } 1230e4e574b7SAdam Litke list_add(&page->lru, &surplus_list); 1231e4e574b7SAdam Litke } 123228073b02SHillf Danton allocated += i; 1233e4e574b7SAdam Litke 1234e4e574b7SAdam Litke /* 1235e4e574b7SAdam Litke * After retaking hugetlb_lock, we need to recalculate 'needed' 1236e4e574b7SAdam Litke * because either resv_huge_pages or free_huge_pages may have changed. 1237e4e574b7SAdam Litke */ 1238e4e574b7SAdam Litke spin_lock(&hugetlb_lock); 1239a5516438SAndi Kleen needed = (h->resv_huge_pages + delta) - 1240a5516438SAndi Kleen (h->free_huge_pages + allocated); 124128073b02SHillf Danton if (needed > 0) { 124228073b02SHillf Danton if (alloc_ok) 1243e4e574b7SAdam Litke goto retry; 124428073b02SHillf Danton /* 124528073b02SHillf Danton * We were not able to allocate enough pages to 124628073b02SHillf Danton * satisfy the entire reservation so we free what 124728073b02SHillf Danton * we've allocated so far. 124828073b02SHillf Danton */ 124928073b02SHillf Danton goto free; 125028073b02SHillf Danton } 1251e4e574b7SAdam Litke /* 1252e4e574b7SAdam Litke * The surplus_list now contains _at_least_ the number of extra pages 125325985edcSLucas De Marchi * needed to accommodate the reservation. Add the appropriate number 1254e4e574b7SAdam Litke * of pages to the hugetlb pool and free the extras back to the buddy 1255ac09b3a1SAdam Litke * allocator. 
Commit the entire reservation here to prevent another 1256ac09b3a1SAdam Litke * process from stealing the pages as they are added to the pool but 1257ac09b3a1SAdam Litke * before they are reserved. 1258e4e574b7SAdam Litke */ 1259e4e574b7SAdam Litke needed += allocated; 1260a5516438SAndi Kleen h->resv_huge_pages += delta; 1261e4e574b7SAdam Litke ret = 0; 1262a9869b83SNaoya Horiguchi 126319fc3f0aSAdam Litke /* Free the needed pages to the hugetlb pool */ 126419fc3f0aSAdam Litke list_for_each_entry_safe(page, tmp, &surplus_list, lru) { 126519fc3f0aSAdam Litke if ((--needed) < 0) 126619fc3f0aSAdam Litke break; 1267a9869b83SNaoya Horiguchi /* 1268a9869b83SNaoya Horiguchi * This page is now managed by the hugetlb allocator and has 1269a9869b83SNaoya Horiguchi * no users -- drop the buddy allocator's reference. 1270a9869b83SNaoya Horiguchi */ 1271a9869b83SNaoya Horiguchi put_page_testzero(page); 1272309381feSSasha Levin VM_BUG_ON_PAGE(page_count(page), page); 1273a5516438SAndi Kleen enqueue_huge_page(h, page); 127419fc3f0aSAdam Litke } 127528073b02SHillf Danton free: 1276b0365c8dSHillf Danton spin_unlock(&hugetlb_lock); 127719fc3f0aSAdam Litke 127819fc3f0aSAdam Litke /* Free unnecessary surplus pages to the buddy allocator */ 1279c0d934baSJoonsoo Kim list_for_each_entry_safe(page, tmp, &surplus_list, lru) 1280a9869b83SNaoya Horiguchi put_page(page); 128119fc3f0aSAdam Litke spin_lock(&hugetlb_lock); 1282e4e574b7SAdam Litke 1283e4e574b7SAdam Litke return ret; 1284e4e574b7SAdam Litke } 1285e4e574b7SAdam Litke 1286e4e574b7SAdam Litke /* 1287e4e574b7SAdam Litke * When releasing a hugetlb pool reservation, any surplus pages that were 1288e4e574b7SAdam Litke * allocated to satisfy the reservation must be explicitly freed if they were 1289e4e574b7SAdam Litke * never used. 1290685f3457SLee Schermerhorn * Called with hugetlb_lock held. 1291e4e574b7SAdam Litke */ 1292a5516438SAndi Kleen static void return_unused_surplus_pages(struct hstate *h, 1293a5516438SAndi Kleen unsigned long unused_resv_pages) 1294e4e574b7SAdam Litke { 1295e4e574b7SAdam Litke unsigned long nr_pages; 1296e4e574b7SAdam Litke 1297ac09b3a1SAdam Litke /* Uncommit the reservation */ 1298a5516438SAndi Kleen h->resv_huge_pages -= unused_resv_pages; 1299ac09b3a1SAdam Litke 1300aa888a74SAndi Kleen /* Cannot return gigantic pages currently */ 1301bae7f4aeSLuiz Capitulino if (hstate_is_gigantic(h)) 1302aa888a74SAndi Kleen return; 1303aa888a74SAndi Kleen 1304a5516438SAndi Kleen nr_pages = min(unused_resv_pages, h->surplus_huge_pages); 1305e4e574b7SAdam Litke 1306685f3457SLee Schermerhorn /* 1307685f3457SLee Schermerhorn * We want to release as many surplus pages as possible, spread 13089b5e5d0fSLee Schermerhorn * evenly across all nodes with memory. Iterate across these nodes 13099b5e5d0fSLee Schermerhorn * until we can no longer free unreserved surplus pages. This occurs 13109b5e5d0fSLee Schermerhorn * when the nodes with surplus pages have no free pages. 13119b5e5d0fSLee Schermerhorn * free_pool_huge_page() will balance the freed pages across the 13129b5e5d0fSLee Schermerhorn * on-line nodes with memory and will handle the hstate accounting.
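 *
 * Worked example (illustrative): with unused_resv_pages == 10 but only
 * 4 surplus pages in the pool, nr_pages above is min(10, 4) == 4, and
 * the loop below asks free_pool_huge_page() to free one surplus page
 * per iteration, round-robin across nodes, until all four are gone or
 * no node has a free surplus page left.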
1313685f3457SLee Schermerhorn */ 1314685f3457SLee Schermerhorn while (nr_pages--) { 13158cebfcd0SLai Jiangshan if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1)) 1316685f3457SLee Schermerhorn break; 13177848a4bfSMizuma, Masayoshi cond_resched_lock(&hugetlb_lock); 1318e4e574b7SAdam Litke } 1319e4e574b7SAdam Litke } 1320e4e574b7SAdam Litke 1321c37f9fb1SAndy Whitcroft /* 1322c37f9fb1SAndy Whitcroft * Determine if the huge page at addr within the vma has an associated 1323c37f9fb1SAndy Whitcroft * reservation. Where it does not we will need to logically increase 132490481622SDavid Gibson * reservation and actually increase subpool usage before an allocation 132590481622SDavid Gibson * can occur. Where any new reservation would be required the 132690481622SDavid Gibson * reservation change is prepared, but not committed. Once the page 132790481622SDavid Gibson * has been allocated from the subpool and instantiated the change should 132890481622SDavid Gibson * be committed via vma_commit_reservation. No action is required on 132990481622SDavid Gibson * failure. 1330c37f9fb1SAndy Whitcroft */ 1331e2f17d94SRoel Kluin static long vma_needs_reservation(struct hstate *h, 1332a5516438SAndi Kleen struct vm_area_struct *vma, unsigned long addr) 1333c37f9fb1SAndy Whitcroft { 13344e35f483SJoonsoo Kim struct resv_map *resv; 13354e35f483SJoonsoo Kim pgoff_t idx; 13364e35f483SJoonsoo Kim long chg; 1337c37f9fb1SAndy Whitcroft 13384e35f483SJoonsoo Kim resv = vma_resv_map(vma); 13394e35f483SJoonsoo Kim if (!resv) 1340c37f9fb1SAndy Whitcroft return 1; 1341c37f9fb1SAndy Whitcroft 13424e35f483SJoonsoo Kim idx = vma_hugecache_offset(h, vma, addr); 13434e35f483SJoonsoo Kim chg = region_chg(resv, idx, idx + 1); 134484afd99bSAndy Whitcroft 13454e35f483SJoonsoo Kim if (vma->vm_flags & VM_MAYSHARE) 13464e35f483SJoonsoo Kim return chg; 13474e35f483SJoonsoo Kim else 13484e35f483SJoonsoo Kim return chg < 0 ? chg : 0; 134984afd99bSAndy Whitcroft } 1350a5516438SAndi Kleen static void vma_commit_reservation(struct hstate *h, 1351a5516438SAndi Kleen struct vm_area_struct *vma, unsigned long addr) 1352c37f9fb1SAndy Whitcroft { 13534e35f483SJoonsoo Kim struct resv_map *resv; 13544e35f483SJoonsoo Kim pgoff_t idx; 1355c37f9fb1SAndy Whitcroft 13564e35f483SJoonsoo Kim resv = vma_resv_map(vma); 13574e35f483SJoonsoo Kim if (!resv) 13584e35f483SJoonsoo Kim return; 13599119a41eSJoonsoo Kim 13604e35f483SJoonsoo Kim idx = vma_hugecache_offset(h, vma, addr); 13611406ec9bSJoonsoo Kim region_add(resv, idx, idx + 1); 1362c37f9fb1SAndy Whitcroft } 1363c37f9fb1SAndy Whitcroft 1364348ea204SAdam Litke static struct page *alloc_huge_page(struct vm_area_struct *vma, 136504f2cbe3SMel Gorman unsigned long addr, int avoid_reserve) 1366348ea204SAdam Litke { 136790481622SDavid Gibson struct hugepage_subpool *spool = subpool_vma(vma); 1368a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 1369348ea204SAdam Litke struct page *page; 1370e2f17d94SRoel Kluin long chg; 13716d76dcf4SAneesh Kumar K.V int ret, idx; 13726d76dcf4SAneesh Kumar K.V struct hugetlb_cgroup *h_cg; 13732fc39cecSAdam Litke 13746d76dcf4SAneesh Kumar K.V idx = hstate_index(h); 1375a1e78772SMel Gorman /* 137690481622SDavid Gibson * Processes that did not create the mapping will have no 137790481622SDavid Gibson * reserves and will not have accounted against subpool 137890481622SDavid Gibson * limit. 
Check that the subpool limit can be made before 137990481622SDavid Gibson * satisfying the allocation. MAP_NORESERVE mappings may also 138090481622SDavid Gibson * need pages and subpool limit allocated if no reserve 138190481622SDavid Gibson * mapping overlaps. 1382a1e78772SMel Gorman */ 1383a5516438SAndi Kleen chg = vma_needs_reservation(h, vma, addr); 1384c37f9fb1SAndy Whitcroft if (chg < 0) 138576dcee75SAneesh Kumar K.V return ERR_PTR(-ENOMEM); 13868bb3f12eSJoonsoo Kim if (chg || avoid_reserve) 13878bb3f12eSJoonsoo Kim if (hugepage_subpool_get_pages(spool, 1)) 138876dcee75SAneesh Kumar K.V return ERR_PTR(-ENOSPC); 138990d8b7e6SAdam Litke 13906d76dcf4SAneesh Kumar K.V ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg); 13918f34af6fSJianyu Zhan if (ret) 13928f34af6fSJianyu Zhan goto out_subpool_put; 13938f34af6fSJianyu Zhan 1394a1e78772SMel Gorman spin_lock(&hugetlb_lock); 1395af0ed73eSJoonsoo Kim page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, chg); 139681a6fcaeSJoonsoo Kim if (!page) { 139794ae8ba7SAneesh Kumar K.V spin_unlock(&hugetlb_lock); 1398bf50bab2SNaoya Horiguchi page = alloc_buddy_huge_page(h, NUMA_NO_NODE); 13998f34af6fSJianyu Zhan if (!page) 14008f34af6fSJianyu Zhan goto out_uncharge_cgroup; 14018f34af6fSJianyu Zhan 140279dbb236SAneesh Kumar K.V spin_lock(&hugetlb_lock); 140379dbb236SAneesh Kumar K.V list_move(&page->lru, &h->hugepage_activelist); 140481a6fcaeSJoonsoo Kim /* Fall through */ 1405a1e78772SMel Gorman } 140681a6fcaeSJoonsoo Kim hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page); 140781a6fcaeSJoonsoo Kim spin_unlock(&hugetlb_lock); 1408a1e78772SMel Gorman 140990481622SDavid Gibson set_page_private(page, (unsigned long)spool); 1410a1e78772SMel Gorman 1411a5516438SAndi Kleen vma_commit_reservation(h, vma, addr); 14127893d1d5SAdam Litke return page; 14138f34af6fSJianyu Zhan 14148f34af6fSJianyu Zhan out_uncharge_cgroup: 14158f34af6fSJianyu Zhan hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg); 14168f34af6fSJianyu Zhan out_subpool_put: 14178f34af6fSJianyu Zhan if (chg || avoid_reserve) 14188f34af6fSJianyu Zhan hugepage_subpool_put_pages(spool, 1); 14198f34af6fSJianyu Zhan return ERR_PTR(-ENOSPC); 1420b45b5bd6SDavid Gibson } 1421b45b5bd6SDavid Gibson 142274060e4dSNaoya Horiguchi /* 142374060e4dSNaoya Horiguchi * alloc_huge_page()'s wrapper which simply returns the page if allocation 142474060e4dSNaoya Horiguchi * succeeds, otherwise NULL. This function is called from new_vma_page(), 142574060e4dSNaoya Horiguchi * where no ERR_VALUE is expected to be returned.
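 *
 * Illustrative caller pattern (not from the original source):
 *
 *	page = alloc_huge_page_noerr(vma, addr, 0);
 *	if (!page)
 *		...handle failure...	(a plain NULL check, never IS_ERR())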
142674060e4dSNaoya Horiguchi */ 142774060e4dSNaoya Horiguchi struct page *alloc_huge_page_noerr(struct vm_area_struct *vma, 142874060e4dSNaoya Horiguchi unsigned long addr, int avoid_reserve) 142974060e4dSNaoya Horiguchi { 143074060e4dSNaoya Horiguchi struct page *page = alloc_huge_page(vma, addr, avoid_reserve); 143174060e4dSNaoya Horiguchi if (IS_ERR(page)) 143274060e4dSNaoya Horiguchi page = NULL; 143374060e4dSNaoya Horiguchi return page; 143474060e4dSNaoya Horiguchi } 143574060e4dSNaoya Horiguchi 143691f47662SCyrill Gorcunov int __weak alloc_bootmem_huge_page(struct hstate *h) 1437aa888a74SAndi Kleen { 1438aa888a74SAndi Kleen struct huge_bootmem_page *m; 1439b2261026SJoonsoo Kim int nr_nodes, node; 1440aa888a74SAndi Kleen 1441b2261026SJoonsoo Kim for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) { 1442aa888a74SAndi Kleen void *addr; 1443aa888a74SAndi Kleen 14448b89a116SGrygorii Strashko addr = memblock_virt_alloc_try_nid_nopanic( 14458b89a116SGrygorii Strashko huge_page_size(h), huge_page_size(h), 14468b89a116SGrygorii Strashko 0, BOOTMEM_ALLOC_ACCESSIBLE, node); 1447aa888a74SAndi Kleen if (addr) { 1448aa888a74SAndi Kleen /* 1449aa888a74SAndi Kleen * Use the beginning of the huge page to store the 1450aa888a74SAndi Kleen * huge_bootmem_page struct (until gather_bootmem 1451aa888a74SAndi Kleen * puts them into the mem_map). 1452aa888a74SAndi Kleen */ 1453aa888a74SAndi Kleen m = addr; 1454aa888a74SAndi Kleen goto found; 1455aa888a74SAndi Kleen } 1456aa888a74SAndi Kleen } 1457aa888a74SAndi Kleen return 0; 1458aa888a74SAndi Kleen 1459aa888a74SAndi Kleen found: 1460df994eadSLuiz Capitulino BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h))); 1461aa888a74SAndi Kleen /* Put them into a private list first because mem_map is not up yet */ 1462aa888a74SAndi Kleen list_add(&m->list, &huge_boot_pages); 1463aa888a74SAndi Kleen m->hstate = h; 1464aa888a74SAndi Kleen return 1; 1465aa888a74SAndi Kleen } 1466aa888a74SAndi Kleen 1467f412c97aSDavid Rientjes static void __init prep_compound_huge_page(struct page *page, int order) 146818229df5SAndy Whitcroft { 146918229df5SAndy Whitcroft if (unlikely(order > (MAX_ORDER - 1))) 147018229df5SAndy Whitcroft prep_compound_gigantic_page(page, order); 147118229df5SAndy Whitcroft else 147218229df5SAndy Whitcroft prep_compound_page(page, order); 147318229df5SAndy Whitcroft } 147418229df5SAndy Whitcroft 1475aa888a74SAndi Kleen /* Put bootmem huge pages into the standard lists after mem_map is up */ 1476aa888a74SAndi Kleen static void __init gather_bootmem_prealloc(void) 1477aa888a74SAndi Kleen { 1478aa888a74SAndi Kleen struct huge_bootmem_page *m; 1479aa888a74SAndi Kleen 1480aa888a74SAndi Kleen list_for_each_entry(m, &huge_boot_pages, list) { 1481aa888a74SAndi Kleen struct hstate *h = m->hstate; 1482ee8f248dSBecky Bruce struct page *page; 1483ee8f248dSBecky Bruce 1484ee8f248dSBecky Bruce #ifdef CONFIG_HIGHMEM 1485ee8f248dSBecky Bruce page = pfn_to_page(m->phys >> PAGE_SHIFT); 14868b89a116SGrygorii Strashko memblock_free_late(__pa(m), 1487ee8f248dSBecky Bruce sizeof(struct huge_bootmem_page)); 1488ee8f248dSBecky Bruce #else 1489ee8f248dSBecky Bruce page = virt_to_page(m); 1490ee8f248dSBecky Bruce #endif 1491aa888a74SAndi Kleen WARN_ON(page_count(page) != 1); 149218229df5SAndy Whitcroft prep_compound_huge_page(page, h->order); 1493ef5a22beSAndrea Arcangeli WARN_ON(PageReserved(page)); 1494aa888a74SAndi Kleen prep_new_huge_page(h, page, page_to_nid(page)); 1495b0320c7bSRafael Aquini /* 1496b0320c7bSRafael Aquini * If we had gigantic hugepages 
allocated at boot time, we need 1497b0320c7bSRafael Aquini * to restore the 'stolen' pages to totalram_pages in order to 1498b0320c7bSRafael Aquini * fix confusing memory reports from free(1) and other 1499b0320c7bSRafael Aquini * side-effects, like CommitLimit going negative. 1500b0320c7bSRafael Aquini */ 1501bae7f4aeSLuiz Capitulino if (hstate_is_gigantic(h)) 15023dcc0571SJiang Liu adjust_managed_page_count(page, 1 << h->order); 1503aa888a74SAndi Kleen } 1504aa888a74SAndi Kleen } 1505aa888a74SAndi Kleen 15068faa8b07SAndi Kleen static void __init hugetlb_hstate_alloc_pages(struct hstate *h) 15071da177e4SLinus Torvalds { 15081da177e4SLinus Torvalds unsigned long i; 15091da177e4SLinus Torvalds 1510e5ff2159SAndi Kleen for (i = 0; i < h->max_huge_pages; ++i) { 1511bae7f4aeSLuiz Capitulino if (hstate_is_gigantic(h)) { 1512aa888a74SAndi Kleen if (!alloc_bootmem_huge_page(h)) 1513aa888a74SAndi Kleen break; 15149b5e5d0fSLee Schermerhorn } else if (!alloc_fresh_huge_page(h, 15158cebfcd0SLai Jiangshan &node_states[N_MEMORY])) 15161da177e4SLinus Torvalds break; 15171da177e4SLinus Torvalds } 15188faa8b07SAndi Kleen h->max_huge_pages = i; 1519e5ff2159SAndi Kleen } 1520e5ff2159SAndi Kleen 1521e5ff2159SAndi Kleen static void __init hugetlb_init_hstates(void) 1522e5ff2159SAndi Kleen { 1523e5ff2159SAndi Kleen struct hstate *h; 1524e5ff2159SAndi Kleen 1525e5ff2159SAndi Kleen for_each_hstate(h) { 15268faa8b07SAndi Kleen /* oversize hugepages were init'ed in early boot */ 1527bae7f4aeSLuiz Capitulino if (!hstate_is_gigantic(h)) 15288faa8b07SAndi Kleen hugetlb_hstate_alloc_pages(h); 1529e5ff2159SAndi Kleen } 1530e5ff2159SAndi Kleen } 1531e5ff2159SAndi Kleen 15324abd32dbSAndi Kleen static char * __init memfmt(char *buf, unsigned long n) 15334abd32dbSAndi Kleen { 15344abd32dbSAndi Kleen if (n >= (1UL << 30)) 15354abd32dbSAndi Kleen sprintf(buf, "%lu GB", n >> 30); 15364abd32dbSAndi Kleen else if (n >= (1UL << 20)) 15374abd32dbSAndi Kleen sprintf(buf, "%lu MB", n >> 20); 15384abd32dbSAndi Kleen else 15394abd32dbSAndi Kleen sprintf(buf, "%lu KB", n >> 10); 15404abd32dbSAndi Kleen return buf; 15414abd32dbSAndi Kleen } 15424abd32dbSAndi Kleen 1543e5ff2159SAndi Kleen static void __init report_hugepages(void) 1544e5ff2159SAndi Kleen { 1545e5ff2159SAndi Kleen struct hstate *h; 1546e5ff2159SAndi Kleen 1547e5ff2159SAndi Kleen for_each_hstate(h) { 15484abd32dbSAndi Kleen char buf[32]; 1549ffb22af5SAndrew Morton pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n", 15504abd32dbSAndi Kleen memfmt(buf, huge_page_size(h)), 15514abd32dbSAndi Kleen h->free_huge_pages); 1552e5ff2159SAndi Kleen } 1553e5ff2159SAndi Kleen } 1554e5ff2159SAndi Kleen 15551da177e4SLinus Torvalds #ifdef CONFIG_HIGHMEM 15566ae11b27SLee Schermerhorn static void try_to_free_low(struct hstate *h, unsigned long count, 15576ae11b27SLee Schermerhorn nodemask_t *nodes_allowed) 15581da177e4SLinus Torvalds { 15594415cc8dSChristoph Lameter int i; 15604415cc8dSChristoph Lameter 1561bae7f4aeSLuiz Capitulino if (hstate_is_gigantic(h)) 1562aa888a74SAndi Kleen return; 1563aa888a74SAndi Kleen 15646ae11b27SLee Schermerhorn for_each_node_mask(i, *nodes_allowed) { 15651da177e4SLinus Torvalds struct page *page, *next; 1566a5516438SAndi Kleen struct list_head *freel = &h->hugepage_freelists[i]; 1567a5516438SAndi Kleen list_for_each_entry_safe(page, next, freel, lru) { 1568a5516438SAndi Kleen if (count >= h->nr_huge_pages) 15696b0c880dSAdam Litke return; 15701da177e4SLinus Torvalds if (PageHighMem(page)) 15711da177e4SLinus Torvalds continue;
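/* Lowmem huge page: unlink it and hand it back to the buddy allocator */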
15721da177e4SLinus Torvalds list_del(&page->lru); 1573e5ff2159SAndi Kleen update_and_free_page(h, page); 1574a5516438SAndi Kleen h->free_huge_pages--; 1575a5516438SAndi Kleen h->free_huge_pages_node[page_to_nid(page)]--; 15761da177e4SLinus Torvalds } 15771da177e4SLinus Torvalds } 15781da177e4SLinus Torvalds } 15791da177e4SLinus Torvalds #else 15806ae11b27SLee Schermerhorn static inline void try_to_free_low(struct hstate *h, unsigned long count, 15816ae11b27SLee Schermerhorn nodemask_t *nodes_allowed) 15821da177e4SLinus Torvalds { 15831da177e4SLinus Torvalds } 15841da177e4SLinus Torvalds #endif 15851da177e4SLinus Torvalds 158620a0307cSWu Fengguang /* 158720a0307cSWu Fengguang * Increment or decrement surplus_huge_pages. Keep node-specific counters 158820a0307cSWu Fengguang * balanced by operating on them in a round-robin fashion. 158920a0307cSWu Fengguang * Returns 1 if an adjustment was made. 159020a0307cSWu Fengguang */ 15916ae11b27SLee Schermerhorn static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed, 15926ae11b27SLee Schermerhorn int delta) 159320a0307cSWu Fengguang { 1594b2261026SJoonsoo Kim int nr_nodes, node; 159520a0307cSWu Fengguang 159620a0307cSWu Fengguang VM_BUG_ON(delta != -1 && delta != 1); 159720a0307cSWu Fengguang 1598e8c5c824SLee Schermerhorn if (delta < 0) { 1599b2261026SJoonsoo Kim for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) { 1600b2261026SJoonsoo Kim if (h->surplus_huge_pages_node[node]) 1601b2261026SJoonsoo Kim goto found; 1602b2261026SJoonsoo Kim } 1603b2261026SJoonsoo Kim } else { 1604b2261026SJoonsoo Kim for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { 1605b2261026SJoonsoo Kim if (h->surplus_huge_pages_node[node] < 1606b2261026SJoonsoo Kim h->nr_huge_pages_node[node]) 1607b2261026SJoonsoo Kim goto found; 1608e8c5c824SLee Schermerhorn } 16099a76db09SLee Schermerhorn } 1610b2261026SJoonsoo Kim return 0; 161120a0307cSWu Fengguang 1612b2261026SJoonsoo Kim found: 161320a0307cSWu Fengguang h->surplus_huge_pages += delta; 1614b2261026SJoonsoo Kim h->surplus_huge_pages_node[node] += delta; 1615b2261026SJoonsoo Kim return 1; 161620a0307cSWu Fengguang } 161720a0307cSWu Fengguang 1618a5516438SAndi Kleen #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages) 16196ae11b27SLee Schermerhorn static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count, 16206ae11b27SLee Schermerhorn nodemask_t *nodes_allowed) 16211da177e4SLinus Torvalds { 16227893d1d5SAdam Litke unsigned long min_count, ret; 16231da177e4SLinus Torvalds 1624944d9fecSLuiz Capitulino if (hstate_is_gigantic(h) && !gigantic_page_supported()) 1625aa888a74SAndi Kleen return h->max_huge_pages; 1626aa888a74SAndi Kleen 16277893d1d5SAdam Litke /* 16287893d1d5SAdam Litke * Increase the pool size 16297893d1d5SAdam Litke * First take pages out of surplus state. Then make up the 16307893d1d5SAdam Litke * remaining difference by allocating fresh huge pages. 1631d1c3fb1fSNishanth Aravamudan * 1632d1c3fb1fSNishanth Aravamudan * We might race with alloc_buddy_huge_page() here and be unable 1633d1c3fb1fSNishanth Aravamudan * to convert a surplus huge page to a normal huge page. That is 1634d1c3fb1fSNishanth Aravamudan * not critical, though, it just means the overall size of the 1635d1c3fb1fSNishanth Aravamudan * pool might be one hugepage larger than it needs to be, but 1636d1c3fb1fSNishanth Aravamudan * within all the constraints specified by the sysctls. 
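 *
 * Illustrative trigger (not part of the original source):
 *
 *	echo 1024 > /proc/sys/vm/nr_hugepages
 *
 * first converts any surplus pages back to persistent state, then
 * allocates fresh huge pages for whatever remains of the request.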
16377893d1d5SAdam Litke */ 16381da177e4SLinus Torvalds spin_lock(&hugetlb_lock); 1639a5516438SAndi Kleen while (h->surplus_huge_pages && count > persistent_huge_pages(h)) { 16406ae11b27SLee Schermerhorn if (!adjust_pool_surplus(h, nodes_allowed, -1)) 16417893d1d5SAdam Litke break; 16427893d1d5SAdam Litke } 16437893d1d5SAdam Litke 1644a5516438SAndi Kleen while (count > persistent_huge_pages(h)) { 16457893d1d5SAdam Litke /* 16467893d1d5SAdam Litke * If this allocation races such that we no longer need the 16477893d1d5SAdam Litke * page, free_huge_page will handle it by freeing the page 16487893d1d5SAdam Litke * and reducing the surplus. 16497893d1d5SAdam Litke */ 16507893d1d5SAdam Litke spin_unlock(&hugetlb_lock); 1651944d9fecSLuiz Capitulino if (hstate_is_gigantic(h)) 1652944d9fecSLuiz Capitulino ret = alloc_fresh_gigantic_page(h, nodes_allowed); 1653944d9fecSLuiz Capitulino else 16546ae11b27SLee Schermerhorn ret = alloc_fresh_huge_page(h, nodes_allowed); 16557893d1d5SAdam Litke spin_lock(&hugetlb_lock); 16567893d1d5SAdam Litke if (!ret) 16577893d1d5SAdam Litke goto out; 16587893d1d5SAdam Litke 1659536240f2SMel Gorman /* Bail for signals. Probably ctrl-c from user */ 1660536240f2SMel Gorman if (signal_pending(current)) 1661536240f2SMel Gorman goto out; 16627893d1d5SAdam Litke } 16637893d1d5SAdam Litke 16647893d1d5SAdam Litke /* 16657893d1d5SAdam Litke * Decrease the pool size 16667893d1d5SAdam Litke * First return free pages to the buddy allocator (being careful 16677893d1d5SAdam Litke * to keep enough around to satisfy reservations). Then place 16687893d1d5SAdam Litke * pages into surplus state as needed so the pool will shrink 16697893d1d5SAdam Litke * to the desired size as pages become free. 1670d1c3fb1fSNishanth Aravamudan * 1671d1c3fb1fSNishanth Aravamudan * By placing pages into the surplus state independent of the 1672d1c3fb1fSNishanth Aravamudan * overcommit value, we are allowing the surplus pool size to 1673d1c3fb1fSNishanth Aravamudan * exceed overcommit. There are few sane options here. Since 1674d1c3fb1fSNishanth Aravamudan * alloc_buddy_huge_page() is checking the global counter, 1675d1c3fb1fSNishanth Aravamudan * though, we'll note that we're not allowed to exceed surplus 1676d1c3fb1fSNishanth Aravamudan * and won't grow the pool anywhere else. Not until one of the 1677d1c3fb1fSNishanth Aravamudan * sysctls are changed, or the surplus pages go out of use. 
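 *
 * Worked example (illustrative): shrinking a 100-page pool to
 * count == 10 with resv_huge_pages == 30 and free_huge_pages == 80
 * gives min_count = 30 + 100 - 80 = 50, so at most 50 pages are freed
 * outright below; the remaining 40 are flipped to surplus and trickle
 * back to the buddy allocator as their users release them.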
16787893d1d5SAdam Litke */ 1679a5516438SAndi Kleen min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages; 16806b0c880dSAdam Litke min_count = max(count, min_count); 16816ae11b27SLee Schermerhorn try_to_free_low(h, min_count, nodes_allowed); 1682a5516438SAndi Kleen while (min_count < persistent_huge_pages(h)) { 16836ae11b27SLee Schermerhorn if (!free_pool_huge_page(h, nodes_allowed, 0)) 16841da177e4SLinus Torvalds break; 168555f67141SMizuma, Masayoshi cond_resched_lock(&hugetlb_lock); 16861da177e4SLinus Torvalds } 1687a5516438SAndi Kleen while (count < persistent_huge_pages(h)) { 16886ae11b27SLee Schermerhorn if (!adjust_pool_surplus(h, nodes_allowed, 1)) 16897893d1d5SAdam Litke break; 16907893d1d5SAdam Litke } 16917893d1d5SAdam Litke out: 1692a5516438SAndi Kleen ret = persistent_huge_pages(h); 16931da177e4SLinus Torvalds spin_unlock(&hugetlb_lock); 16947893d1d5SAdam Litke return ret; 16951da177e4SLinus Torvalds } 16961da177e4SLinus Torvalds 1697a3437870SNishanth Aravamudan #define HSTATE_ATTR_RO(_name) \ 1698a3437870SNishanth Aravamudan static struct kobj_attribute _name##_attr = __ATTR_RO(_name) 1699a3437870SNishanth Aravamudan 1700a3437870SNishanth Aravamudan #define HSTATE_ATTR(_name) \ 1701a3437870SNishanth Aravamudan static struct kobj_attribute _name##_attr = \ 1702a3437870SNishanth Aravamudan __ATTR(_name, 0644, _name##_show, _name##_store) 1703a3437870SNishanth Aravamudan 1704a3437870SNishanth Aravamudan static struct kobject *hugepages_kobj; 1705a3437870SNishanth Aravamudan static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; 1706a3437870SNishanth Aravamudan 17079a305230SLee Schermerhorn static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp); 17089a305230SLee Schermerhorn 17099a305230SLee Schermerhorn static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp) 1710a3437870SNishanth Aravamudan { 1711a3437870SNishanth Aravamudan int i; 17129a305230SLee Schermerhorn 1713a3437870SNishanth Aravamudan for (i = 0; i < HUGE_MAX_HSTATE; i++) 17149a305230SLee Schermerhorn if (hstate_kobjs[i] == kobj) { 17159a305230SLee Schermerhorn if (nidp) 17169a305230SLee Schermerhorn *nidp = NUMA_NO_NODE; 1717a3437870SNishanth Aravamudan return &hstates[i]; 17189a305230SLee Schermerhorn } 17199a305230SLee Schermerhorn 17209a305230SLee Schermerhorn return kobj_to_node_hstate(kobj, nidp); 1721a3437870SNishanth Aravamudan } 1722a3437870SNishanth Aravamudan 172306808b08SLee Schermerhorn static ssize_t nr_hugepages_show_common(struct kobject *kobj, 1724a3437870SNishanth Aravamudan struct kobj_attribute *attr, char *buf) 1725a3437870SNishanth Aravamudan { 17269a305230SLee Schermerhorn struct hstate *h; 17279a305230SLee Schermerhorn unsigned long nr_huge_pages; 17289a305230SLee Schermerhorn int nid; 17299a305230SLee Schermerhorn 17309a305230SLee Schermerhorn h = kobj_to_hstate(kobj, &nid); 17319a305230SLee Schermerhorn if (nid == NUMA_NO_NODE) 17329a305230SLee Schermerhorn nr_huge_pages = h->nr_huge_pages; 17339a305230SLee Schermerhorn else 17349a305230SLee Schermerhorn nr_huge_pages = h->nr_huge_pages_node[nid]; 17359a305230SLee Schermerhorn 17369a305230SLee Schermerhorn return sprintf(buf, "%lu\n", nr_huge_pages); 1737a3437870SNishanth Aravamudan } 1738adbe8726SEric B Munson 1739238d3c13SDavid Rientjes static ssize_t __nr_hugepages_store_common(bool obey_mempolicy, 1740238d3c13SDavid Rientjes struct hstate *h, int nid, 1741238d3c13SDavid Rientjes unsigned long count, size_t len) 1742a3437870SNishanth Aravamudan { 1743a3437870SNishanth Aravamudan int err; 
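	/*
	 * On configurations with many possible nodes the nodemask is
	 * heap-allocated, so NODEMASK_ALLOC can fail; a NULL
	 * nodes_allowed is handled below by falling back to all nodes
	 * with memory.
	 */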
1744bad44b5bSDavid Rientjes NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY); 1745a3437870SNishanth Aravamudan 1746944d9fecSLuiz Capitulino if (hstate_is_gigantic(h) && !gigantic_page_supported()) { 1747adbe8726SEric B Munson err = -EINVAL; 1748adbe8726SEric B Munson goto out; 1749adbe8726SEric B Munson } 1750adbe8726SEric B Munson 17519a305230SLee Schermerhorn if (nid == NUMA_NO_NODE) { 17529a305230SLee Schermerhorn /* 17539a305230SLee Schermerhorn * global hstate attribute 17549a305230SLee Schermerhorn */ 17559a305230SLee Schermerhorn if (!(obey_mempolicy && 17569a305230SLee Schermerhorn init_nodemask_of_mempolicy(nodes_allowed))) { 175706808b08SLee Schermerhorn NODEMASK_FREE(nodes_allowed); 17588cebfcd0SLai Jiangshan nodes_allowed = &node_states[N_MEMORY]; 175906808b08SLee Schermerhorn } 17609a305230SLee Schermerhorn } else if (nodes_allowed) { 17619a305230SLee Schermerhorn /* 17629a305230SLee Schermerhorn * per node hstate attribute: adjust count to global, 17639a305230SLee Schermerhorn * but restrict alloc/free to the specified node. 17649a305230SLee Schermerhorn */ 17659a305230SLee Schermerhorn count += h->nr_huge_pages - h->nr_huge_pages_node[nid]; 17669a305230SLee Schermerhorn init_nodemask_of_node(nodes_allowed, nid); 17679a305230SLee Schermerhorn } else 17688cebfcd0SLai Jiangshan nodes_allowed = &node_states[N_MEMORY]; 17699a305230SLee Schermerhorn 177006808b08SLee Schermerhorn h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed); 1771a3437870SNishanth Aravamudan 17728cebfcd0SLai Jiangshan if (nodes_allowed != &node_states[N_MEMORY]) 177306808b08SLee Schermerhorn NODEMASK_FREE(nodes_allowed); 177406808b08SLee Schermerhorn 177506808b08SLee Schermerhorn return len; 1776adbe8726SEric B Munson out: 1777adbe8726SEric B Munson NODEMASK_FREE(nodes_allowed); 1778adbe8726SEric B Munson return err; 177906808b08SLee Schermerhorn } 178006808b08SLee Schermerhorn 1781238d3c13SDavid Rientjes static ssize_t nr_hugepages_store_common(bool obey_mempolicy, 1782238d3c13SDavid Rientjes struct kobject *kobj, const char *buf, 1783238d3c13SDavid Rientjes size_t len) 1784238d3c13SDavid Rientjes { 1785238d3c13SDavid Rientjes struct hstate *h; 1786238d3c13SDavid Rientjes unsigned long count; 1787238d3c13SDavid Rientjes int nid; 1788238d3c13SDavid Rientjes int err; 1789238d3c13SDavid Rientjes 1790238d3c13SDavid Rientjes err = kstrtoul(buf, 10, &count); 1791238d3c13SDavid Rientjes if (err) 1792238d3c13SDavid Rientjes return err; 1793238d3c13SDavid Rientjes 1794238d3c13SDavid Rientjes h = kobj_to_hstate(kobj, &nid); 1795238d3c13SDavid Rientjes return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len); 1796238d3c13SDavid Rientjes } 1797238d3c13SDavid Rientjes 179806808b08SLee Schermerhorn static ssize_t nr_hugepages_show(struct kobject *kobj, 179906808b08SLee Schermerhorn struct kobj_attribute *attr, char *buf) 180006808b08SLee Schermerhorn { 180106808b08SLee Schermerhorn return nr_hugepages_show_common(kobj, attr, buf); 180206808b08SLee Schermerhorn } 180306808b08SLee Schermerhorn 180406808b08SLee Schermerhorn static ssize_t nr_hugepages_store(struct kobject *kobj, 180506808b08SLee Schermerhorn struct kobj_attribute *attr, const char *buf, size_t len) 180606808b08SLee Schermerhorn { 1807238d3c13SDavid Rientjes return nr_hugepages_store_common(false, kobj, buf, len); 1808a3437870SNishanth Aravamudan } 1809a3437870SNishanth Aravamudan HSTATE_ATTR(nr_hugepages); 1810a3437870SNishanth Aravamudan 181106808b08SLee Schermerhorn #ifdef CONFIG_NUMA 181206808b08SLee 
Schermerhorn 181306808b08SLee Schermerhorn /* 181406808b08SLee Schermerhorn * hstate attribute for optionally mempolicy-based constraint on persistent 181506808b08SLee Schermerhorn * huge page alloc/free. 181606808b08SLee Schermerhorn */ 181706808b08SLee Schermerhorn static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj, 181806808b08SLee Schermerhorn struct kobj_attribute *attr, char *buf) 181906808b08SLee Schermerhorn { 182006808b08SLee Schermerhorn return nr_hugepages_show_common(kobj, attr, buf); 182106808b08SLee Schermerhorn } 182206808b08SLee Schermerhorn 182306808b08SLee Schermerhorn static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj, 182406808b08SLee Schermerhorn struct kobj_attribute *attr, const char *buf, size_t len) 182506808b08SLee Schermerhorn { 1826238d3c13SDavid Rientjes return nr_hugepages_store_common(true, kobj, buf, len); 182706808b08SLee Schermerhorn } 182806808b08SLee Schermerhorn HSTATE_ATTR(nr_hugepages_mempolicy); 182906808b08SLee Schermerhorn #endif 183006808b08SLee Schermerhorn 183106808b08SLee Schermerhorn 1832a3437870SNishanth Aravamudan static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj, 1833a3437870SNishanth Aravamudan struct kobj_attribute *attr, char *buf) 1834a3437870SNishanth Aravamudan { 18359a305230SLee Schermerhorn struct hstate *h = kobj_to_hstate(kobj, NULL); 1836a3437870SNishanth Aravamudan return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages); 1837a3437870SNishanth Aravamudan } 1838adbe8726SEric B Munson 1839a3437870SNishanth Aravamudan static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj, 1840a3437870SNishanth Aravamudan struct kobj_attribute *attr, const char *buf, size_t count) 1841a3437870SNishanth Aravamudan { 1842a3437870SNishanth Aravamudan int err; 1843a3437870SNishanth Aravamudan unsigned long input; 18449a305230SLee Schermerhorn struct hstate *h = kobj_to_hstate(kobj, NULL); 1845a3437870SNishanth Aravamudan 1846bae7f4aeSLuiz Capitulino if (hstate_is_gigantic(h)) 1847adbe8726SEric B Munson return -EINVAL; 1848adbe8726SEric B Munson 18493dbb95f7SJingoo Han err = kstrtoul(buf, 10, &input); 1850a3437870SNishanth Aravamudan if (err) 185173ae31e5SEric B Munson return err; 1852a3437870SNishanth Aravamudan 1853a3437870SNishanth Aravamudan spin_lock(&hugetlb_lock); 1854a3437870SNishanth Aravamudan h->nr_overcommit_huge_pages = input; 1855a3437870SNishanth Aravamudan spin_unlock(&hugetlb_lock); 1856a3437870SNishanth Aravamudan 1857a3437870SNishanth Aravamudan return count; 1858a3437870SNishanth Aravamudan } 1859a3437870SNishanth Aravamudan HSTATE_ATTR(nr_overcommit_hugepages); 1860a3437870SNishanth Aravamudan 1861a3437870SNishanth Aravamudan static ssize_t free_hugepages_show(struct kobject *kobj, 1862a3437870SNishanth Aravamudan struct kobj_attribute *attr, char *buf) 1863a3437870SNishanth Aravamudan { 18649a305230SLee Schermerhorn struct hstate *h; 18659a305230SLee Schermerhorn unsigned long free_huge_pages; 18669a305230SLee Schermerhorn int nid; 18679a305230SLee Schermerhorn 18689a305230SLee Schermerhorn h = kobj_to_hstate(kobj, &nid); 18699a305230SLee Schermerhorn if (nid == NUMA_NO_NODE) 18709a305230SLee Schermerhorn free_huge_pages = h->free_huge_pages; 18719a305230SLee Schermerhorn else 18729a305230SLee Schermerhorn free_huge_pages = h->free_huge_pages_node[nid]; 18739a305230SLee Schermerhorn 18749a305230SLee Schermerhorn return sprintf(buf, "%lu\n", free_huge_pages); 1875a3437870SNishanth Aravamudan } 1876a3437870SNishanth Aravamudan HSTATE_ATTR_RO(free_hugepages); 1877a3437870SNishanth 
Aravamudan 1878a3437870SNishanth Aravamudan static ssize_t resv_hugepages_show(struct kobject *kobj, 1879a3437870SNishanth Aravamudan struct kobj_attribute *attr, char *buf) 1880a3437870SNishanth Aravamudan { 18819a305230SLee Schermerhorn struct hstate *h = kobj_to_hstate(kobj, NULL); 1882a3437870SNishanth Aravamudan return sprintf(buf, "%lu\n", h->resv_huge_pages); 1883a3437870SNishanth Aravamudan } 1884a3437870SNishanth Aravamudan HSTATE_ATTR_RO(resv_hugepages); 1885a3437870SNishanth Aravamudan 1886a3437870SNishanth Aravamudan static ssize_t surplus_hugepages_show(struct kobject *kobj, 1887a3437870SNishanth Aravamudan struct kobj_attribute *attr, char *buf) 1888a3437870SNishanth Aravamudan { 18899a305230SLee Schermerhorn struct hstate *h; 18909a305230SLee Schermerhorn unsigned long surplus_huge_pages; 18919a305230SLee Schermerhorn int nid; 18929a305230SLee Schermerhorn 18939a305230SLee Schermerhorn h = kobj_to_hstate(kobj, &nid); 18949a305230SLee Schermerhorn if (nid == NUMA_NO_NODE) 18959a305230SLee Schermerhorn surplus_huge_pages = h->surplus_huge_pages; 18969a305230SLee Schermerhorn else 18979a305230SLee Schermerhorn surplus_huge_pages = h->surplus_huge_pages_node[nid]; 18989a305230SLee Schermerhorn 18999a305230SLee Schermerhorn return sprintf(buf, "%lu\n", surplus_huge_pages); 1900a3437870SNishanth Aravamudan } 1901a3437870SNishanth Aravamudan HSTATE_ATTR_RO(surplus_hugepages); 1902a3437870SNishanth Aravamudan 1903a3437870SNishanth Aravamudan static struct attribute *hstate_attrs[] = { 1904a3437870SNishanth Aravamudan &nr_hugepages_attr.attr, 1905a3437870SNishanth Aravamudan &nr_overcommit_hugepages_attr.attr, 1906a3437870SNishanth Aravamudan &free_hugepages_attr.attr, 1907a3437870SNishanth Aravamudan &resv_hugepages_attr.attr, 1908a3437870SNishanth Aravamudan &surplus_hugepages_attr.attr, 190906808b08SLee Schermerhorn #ifdef CONFIG_NUMA 191006808b08SLee Schermerhorn &nr_hugepages_mempolicy_attr.attr, 191106808b08SLee Schermerhorn #endif 1912a3437870SNishanth Aravamudan NULL, 1913a3437870SNishanth Aravamudan }; 1914a3437870SNishanth Aravamudan 1915a3437870SNishanth Aravamudan static struct attribute_group hstate_attr_group = { 1916a3437870SNishanth Aravamudan .attrs = hstate_attrs, 1917a3437870SNishanth Aravamudan }; 1918a3437870SNishanth Aravamudan 1919094e9539SJeff Mahoney static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent, 19209a305230SLee Schermerhorn struct kobject **hstate_kobjs, 19219a305230SLee Schermerhorn struct attribute_group *hstate_attr_group) 1922a3437870SNishanth Aravamudan { 1923a3437870SNishanth Aravamudan int retval; 1924972dc4deSAneesh Kumar K.V int hi = hstate_index(h); 1925a3437870SNishanth Aravamudan 19269a305230SLee Schermerhorn hstate_kobjs[hi] = kobject_create_and_add(h->name, parent); 19279a305230SLee Schermerhorn if (!hstate_kobjs[hi]) 1928a3437870SNishanth Aravamudan return -ENOMEM; 1929a3437870SNishanth Aravamudan 19309a305230SLee Schermerhorn retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group); 1931a3437870SNishanth Aravamudan if (retval) 19329a305230SLee Schermerhorn kobject_put(hstate_kobjs[hi]); 1933a3437870SNishanth Aravamudan 1934a3437870SNishanth Aravamudan return retval; 1935a3437870SNishanth Aravamudan } 1936a3437870SNishanth Aravamudan 1937a3437870SNishanth Aravamudan static void __init hugetlb_sysfs_init(void) 1938a3437870SNishanth Aravamudan { 1939a3437870SNishanth Aravamudan struct hstate *h; 1940a3437870SNishanth Aravamudan int err; 1941a3437870SNishanth Aravamudan 1942a3437870SNishanth Aravamudan 
hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj); 1943a3437870SNishanth Aravamudan if (!hugepages_kobj) 1944a3437870SNishanth Aravamudan return; 1945a3437870SNishanth Aravamudan 1946a3437870SNishanth Aravamudan for_each_hstate(h) { 19479a305230SLee Schermerhorn err = hugetlb_sysfs_add_hstate(h, hugepages_kobj, 19489a305230SLee Schermerhorn hstate_kobjs, &hstate_attr_group); 1949a3437870SNishanth Aravamudan if (err) 1950ffb22af5SAndrew Morton pr_err("Hugetlb: Unable to add hstate %s", h->name); 1951a3437870SNishanth Aravamudan } 1952a3437870SNishanth Aravamudan } 1953a3437870SNishanth Aravamudan 19549a305230SLee Schermerhorn #ifdef CONFIG_NUMA 19559a305230SLee Schermerhorn 19569a305230SLee Schermerhorn /* 19579a305230SLee Schermerhorn * node_hstate/s - associate per node hstate attributes, via their kobjects, 195810fbcf4cSKay Sievers * with node devices in node_devices[] using a parallel array. The array 195910fbcf4cSKay Sievers * index of a node device or _hstate == node id. 196010fbcf4cSKay Sievers * This is here to avoid any static dependency of the node device driver, in 19619a305230SLee Schermerhorn * the base kernel, on the hugetlb module. 19629a305230SLee Schermerhorn */ 19639a305230SLee Schermerhorn struct node_hstate { 19649a305230SLee Schermerhorn struct kobject *hugepages_kobj; 19659a305230SLee Schermerhorn struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; 19669a305230SLee Schermerhorn }; 19679a305230SLee Schermerhorn struct node_hstate node_hstates[MAX_NUMNODES]; 19689a305230SLee Schermerhorn 19699a305230SLee Schermerhorn /* 197010fbcf4cSKay Sievers * A subset of global hstate attributes for node devices 19719a305230SLee Schermerhorn */ 19729a305230SLee Schermerhorn static struct attribute *per_node_hstate_attrs[] = { 19739a305230SLee Schermerhorn &nr_hugepages_attr.attr, 19749a305230SLee Schermerhorn &free_hugepages_attr.attr, 19759a305230SLee Schermerhorn &surplus_hugepages_attr.attr, 19769a305230SLee Schermerhorn NULL, 19779a305230SLee Schermerhorn }; 19789a305230SLee Schermerhorn 19799a305230SLee Schermerhorn static struct attribute_group per_node_hstate_attr_group = { 19809a305230SLee Schermerhorn .attrs = per_node_hstate_attrs, 19819a305230SLee Schermerhorn }; 19829a305230SLee Schermerhorn 19839a305230SLee Schermerhorn /* 198410fbcf4cSKay Sievers * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj. 19859a305230SLee Schermerhorn * Returns node id via non-NULL nidp. 19869a305230SLee Schermerhorn */ 19879a305230SLee Schermerhorn static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp) 19889a305230SLee Schermerhorn { 19899a305230SLee Schermerhorn int nid; 19909a305230SLee Schermerhorn 19919a305230SLee Schermerhorn for (nid = 0; nid < nr_node_ids; nid++) { 19929a305230SLee Schermerhorn struct node_hstate *nhs = &node_hstates[nid]; 19939a305230SLee Schermerhorn int i; 19949a305230SLee Schermerhorn for (i = 0; i < HUGE_MAX_HSTATE; i++) 19959a305230SLee Schermerhorn if (nhs->hstate_kobjs[i] == kobj) { 19969a305230SLee Schermerhorn if (nidp) 19979a305230SLee Schermerhorn *nidp = nid; 19989a305230SLee Schermerhorn return &hstates[i]; 19999a305230SLee Schermerhorn } 20009a305230SLee Schermerhorn } 20019a305230SLee Schermerhorn 20029a305230SLee Schermerhorn BUG(); 20039a305230SLee Schermerhorn return NULL; 20049a305230SLee Schermerhorn } 20059a305230SLee Schermerhorn 20069a305230SLee Schermerhorn /* 200710fbcf4cSKay Sievers * Unregister hstate attributes from a single node device. 
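 * (i.e. the directories under /sys/devices/system/node/nodeN/hugepages/;
 * path shown for illustration).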
20089a305230SLee Schermerhorn * No-op if no hstate attributes attached. 20099a305230SLee Schermerhorn */ 20103cd8b44fSClaudiu Ghioc static void hugetlb_unregister_node(struct node *node) 20119a305230SLee Schermerhorn { 20129a305230SLee Schermerhorn struct hstate *h; 201310fbcf4cSKay Sievers struct node_hstate *nhs = &node_hstates[node->dev.id]; 20149a305230SLee Schermerhorn 20159a305230SLee Schermerhorn if (!nhs->hugepages_kobj) 20169b5e5d0fSLee Schermerhorn return; /* no hstate attributes */ 20179a305230SLee Schermerhorn 2018972dc4deSAneesh Kumar K.V for_each_hstate(h) { 2019972dc4deSAneesh Kumar K.V int idx = hstate_index(h); 2020972dc4deSAneesh Kumar K.V if (nhs->hstate_kobjs[idx]) { 2021972dc4deSAneesh Kumar K.V kobject_put(nhs->hstate_kobjs[idx]); 2022972dc4deSAneesh Kumar K.V nhs->hstate_kobjs[idx] = NULL; 2023972dc4deSAneesh Kumar K.V } 20249a305230SLee Schermerhorn } 20259a305230SLee Schermerhorn 20269a305230SLee Schermerhorn kobject_put(nhs->hugepages_kobj); 20279a305230SLee Schermerhorn nhs->hugepages_kobj = NULL; 20289a305230SLee Schermerhorn } 20299a305230SLee Schermerhorn 20309a305230SLee Schermerhorn /* 203110fbcf4cSKay Sievers * hugetlb module exit: unregister hstate attributes from node devices 20329a305230SLee Schermerhorn * that have them. 20339a305230SLee Schermerhorn */ 20349a305230SLee Schermerhorn static void hugetlb_unregister_all_nodes(void) 20359a305230SLee Schermerhorn { 20369a305230SLee Schermerhorn int nid; 20379a305230SLee Schermerhorn 20389a305230SLee Schermerhorn /* 203910fbcf4cSKay Sievers * disable node device registrations. 20409a305230SLee Schermerhorn */ 20419a305230SLee Schermerhorn register_hugetlbfs_with_node(NULL, NULL); 20429a305230SLee Schermerhorn 20439a305230SLee Schermerhorn /* 20449a305230SLee Schermerhorn * remove hstate attributes from any nodes that have them. 20459a305230SLee Schermerhorn */ 20469a305230SLee Schermerhorn for (nid = 0; nid < nr_node_ids; nid++) 20478732794bSWen Congyang hugetlb_unregister_node(node_devices[nid]); 20489a305230SLee Schermerhorn } 20499a305230SLee Schermerhorn 20509a305230SLee Schermerhorn /* 205110fbcf4cSKay Sievers * Register hstate attributes for a single node device. 20529a305230SLee Schermerhorn * No-op if attributes already registered. 
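 *
 * Once registered, per-node pools can be tuned via paths such as
 * /sys/devices/system/node/node1/hugepages/hugepages-2048kB/nr_hugepages
 * (illustrative path; the actual entries depend on the configured
 * hstates).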
20539a305230SLee Schermerhorn */ 20543cd8b44fSClaudiu Ghioc static void hugetlb_register_node(struct node *node) 20559a305230SLee Schermerhorn { 20569a305230SLee Schermerhorn struct hstate *h; 205710fbcf4cSKay Sievers struct node_hstate *nhs = &node_hstates[node->dev.id]; 20589a305230SLee Schermerhorn int err; 20599a305230SLee Schermerhorn 20609a305230SLee Schermerhorn if (nhs->hugepages_kobj) 20619a305230SLee Schermerhorn return; /* already allocated */ 20629a305230SLee Schermerhorn 20639a305230SLee Schermerhorn nhs->hugepages_kobj = kobject_create_and_add("hugepages", 206410fbcf4cSKay Sievers &node->dev.kobj); 20659a305230SLee Schermerhorn if (!nhs->hugepages_kobj) 20669a305230SLee Schermerhorn return; 20679a305230SLee Schermerhorn 20689a305230SLee Schermerhorn for_each_hstate(h) { 20699a305230SLee Schermerhorn err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj, 20709a305230SLee Schermerhorn nhs->hstate_kobjs, 20719a305230SLee Schermerhorn &per_node_hstate_attr_group); 20729a305230SLee Schermerhorn if (err) { 2073ffb22af5SAndrew Morton pr_err("Hugetlb: Unable to add hstate %s for node %d\n", 207410fbcf4cSKay Sievers h->name, node->dev.id); 20759a305230SLee Schermerhorn hugetlb_unregister_node(node); 20769a305230SLee Schermerhorn break; 20779a305230SLee Schermerhorn } 20789a305230SLee Schermerhorn } 20799a305230SLee Schermerhorn } 20809a305230SLee Schermerhorn 20819a305230SLee Schermerhorn /* 20829b5e5d0fSLee Schermerhorn * hugetlb init time: register hstate attributes for all registered node 208310fbcf4cSKay Sievers * devices of nodes that have memory. All on-line nodes should have 208410fbcf4cSKay Sievers * registered their associated device by this time. 20859a305230SLee Schermerhorn */ 20867d9ca000SLuiz Capitulino static void __init hugetlb_register_all_nodes(void) 20879a305230SLee Schermerhorn { 20889a305230SLee Schermerhorn int nid; 20899a305230SLee Schermerhorn 20908cebfcd0SLai Jiangshan for_each_node_state(nid, N_MEMORY) { 20918732794bSWen Congyang struct node *node = node_devices[nid]; 209210fbcf4cSKay Sievers if (node->dev.id == nid) 20939a305230SLee Schermerhorn hugetlb_register_node(node); 20949a305230SLee Schermerhorn } 20959a305230SLee Schermerhorn 20969a305230SLee Schermerhorn /* 209710fbcf4cSKay Sievers * Let the node device driver know we're here so it can 20989a305230SLee Schermerhorn * [un]register hstate attributes on node hotplug. 
20999a305230SLee Schermerhorn */ 21009a305230SLee Schermerhorn register_hugetlbfs_with_node(hugetlb_register_node, 21019a305230SLee Schermerhorn hugetlb_unregister_node); 21029a305230SLee Schermerhorn } 21039a305230SLee Schermerhorn #else /* !CONFIG_NUMA */ 21049a305230SLee Schermerhorn 21059a305230SLee Schermerhorn static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp) 21069a305230SLee Schermerhorn { 21079a305230SLee Schermerhorn BUG(); 21089a305230SLee Schermerhorn if (nidp) 21099a305230SLee Schermerhorn *nidp = -1; 21109a305230SLee Schermerhorn return NULL; 21119a305230SLee Schermerhorn } 21129a305230SLee Schermerhorn 21139a305230SLee Schermerhorn static void hugetlb_unregister_all_nodes(void) { } 21149a305230SLee Schermerhorn 21159a305230SLee Schermerhorn static void hugetlb_register_all_nodes(void) { } 21169a305230SLee Schermerhorn 21179a305230SLee Schermerhorn #endif 21189a305230SLee Schermerhorn 2119a3437870SNishanth Aravamudan static void __exit hugetlb_exit(void) 2120a3437870SNishanth Aravamudan { 2121a3437870SNishanth Aravamudan struct hstate *h; 2122a3437870SNishanth Aravamudan 21239a305230SLee Schermerhorn hugetlb_unregister_all_nodes(); 21249a305230SLee Schermerhorn 2125a3437870SNishanth Aravamudan for_each_hstate(h) { 2126972dc4deSAneesh Kumar K.V kobject_put(hstate_kobjs[hstate_index(h)]); 2127a3437870SNishanth Aravamudan } 2128a3437870SNishanth Aravamudan 2129a3437870SNishanth Aravamudan kobject_put(hugepages_kobj); 21308382d914SDavidlohr Bueso kfree(htlb_fault_mutex_table); 2131a3437870SNishanth Aravamudan } 2132a3437870SNishanth Aravamudan module_exit(hugetlb_exit); 2133a3437870SNishanth Aravamudan 2134a3437870SNishanth Aravamudan static int __init hugetlb_init(void) 2135a3437870SNishanth Aravamudan { 21368382d914SDavidlohr Bueso int i; 21378382d914SDavidlohr Bueso 2138457c1b27SNishanth Aravamudan if (!hugepages_supported()) 21390ef89d25SBenjamin Herrenschmidt return 0; 2140a3437870SNishanth Aravamudan 2141e11bfbfcSNick Piggin if (!size_to_hstate(default_hstate_size)) { 2142e11bfbfcSNick Piggin default_hstate_size = HPAGE_SIZE; 2143e11bfbfcSNick Piggin if (!size_to_hstate(default_hstate_size)) 2144a3437870SNishanth Aravamudan hugetlb_add_hstate(HUGETLB_PAGE_ORDER); 2145a3437870SNishanth Aravamudan } 2146972dc4deSAneesh Kumar K.V default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size)); 2147e11bfbfcSNick Piggin if (default_hstate_max_huge_pages) 2148e11bfbfcSNick Piggin default_hstate.max_huge_pages = default_hstate_max_huge_pages; 2149a3437870SNishanth Aravamudan 2150a3437870SNishanth Aravamudan hugetlb_init_hstates(); 2151aa888a74SAndi Kleen gather_bootmem_prealloc(); 2152a3437870SNishanth Aravamudan report_hugepages(); 2153a3437870SNishanth Aravamudan 2154a3437870SNishanth Aravamudan hugetlb_sysfs_init(); 21559a305230SLee Schermerhorn hugetlb_register_all_nodes(); 21567179e7bfSJianguo Wu hugetlb_cgroup_file_init(); 21579a305230SLee Schermerhorn 21588382d914SDavidlohr Bueso #ifdef CONFIG_SMP 21598382d914SDavidlohr Bueso num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus()); 21608382d914SDavidlohr Bueso #else 21618382d914SDavidlohr Bueso num_fault_mutexes = 1; 21628382d914SDavidlohr Bueso #endif 21638382d914SDavidlohr Bueso htlb_fault_mutex_table = 21648382d914SDavidlohr Bueso kmalloc(sizeof(struct mutex) * num_fault_mutexes, GFP_KERNEL); 21658382d914SDavidlohr Bueso BUG_ON(!htlb_fault_mutex_table); 21668382d914SDavidlohr Bueso 21678382d914SDavidlohr Bueso for (i = 0; i < num_fault_mutexes; i++) 21688382d914SDavidlohr Bueso 
mutex_init(&htlb_fault_mutex_table[i]); 2169a3437870SNishanth Aravamudan return 0; 2170a3437870SNishanth Aravamudan } 2171a3437870SNishanth Aravamudan module_init(hugetlb_init); 2172a3437870SNishanth Aravamudan 2173a3437870SNishanth Aravamudan /* Should be called on processing a hugepagesz=... option */ 2174a3437870SNishanth Aravamudan void __init hugetlb_add_hstate(unsigned order) 2175a3437870SNishanth Aravamudan { 2176a3437870SNishanth Aravamudan struct hstate *h; 21778faa8b07SAndi Kleen unsigned long i; 21788faa8b07SAndi Kleen 2179a3437870SNishanth Aravamudan if (size_to_hstate(PAGE_SIZE << order)) { 2180ffb22af5SAndrew Morton pr_warning("hugepagesz= specified twice, ignoring\n"); 2181a3437870SNishanth Aravamudan return; 2182a3437870SNishanth Aravamudan } 218347d38344SAneesh Kumar K.V BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE); 2184a3437870SNishanth Aravamudan BUG_ON(order == 0); 218547d38344SAneesh Kumar K.V h = &hstates[hugetlb_max_hstate++]; 2186a3437870SNishanth Aravamudan h->order = order; 2187a3437870SNishanth Aravamudan h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1); 21888faa8b07SAndi Kleen h->nr_huge_pages = 0; 21898faa8b07SAndi Kleen h->free_huge_pages = 0; 21908faa8b07SAndi Kleen for (i = 0; i < MAX_NUMNODES; ++i) 21918faa8b07SAndi Kleen INIT_LIST_HEAD(&h->hugepage_freelists[i]); 21920edaecfaSAneesh Kumar K.V INIT_LIST_HEAD(&h->hugepage_activelist); 21938cebfcd0SLai Jiangshan h->next_nid_to_alloc = first_node(node_states[N_MEMORY]); 21948cebfcd0SLai Jiangshan h->next_nid_to_free = first_node(node_states[N_MEMORY]); 2195a3437870SNishanth Aravamudan snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB", 2196a3437870SNishanth Aravamudan huge_page_size(h)/1024); 21978faa8b07SAndi Kleen 2198a3437870SNishanth Aravamudan parsed_hstate = h; 2199a3437870SNishanth Aravamudan } 2200a3437870SNishanth Aravamudan 2201e11bfbfcSNick Piggin static int __init hugetlb_nrpages_setup(char *s) 2202a3437870SNishanth Aravamudan { 2203a3437870SNishanth Aravamudan unsigned long *mhp; 22048faa8b07SAndi Kleen static unsigned long *last_mhp; 2205a3437870SNishanth Aravamudan 2206a3437870SNishanth Aravamudan /* 220747d38344SAneesh Kumar K.V * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet, 2208a3437870SNishanth Aravamudan * so this hugepages= parameter goes to the "default hstate". 2209a3437870SNishanth Aravamudan */ 221047d38344SAneesh Kumar K.V if (!hugetlb_max_hstate) 2211a3437870SNishanth Aravamudan mhp = &default_hstate_max_huge_pages; 2212a3437870SNishanth Aravamudan else 2213a3437870SNishanth Aravamudan mhp = &parsed_hstate->max_huge_pages; 2214a3437870SNishanth Aravamudan 22158faa8b07SAndi Kleen if (mhp == last_mhp) { 2216ffb22af5SAndrew Morton pr_warning("hugepages= specified twice without " 22178faa8b07SAndi Kleen "interleaving hugepagesz=, ignoring\n"); 22188faa8b07SAndi Kleen return 1; 22198faa8b07SAndi Kleen } 22208faa8b07SAndi Kleen 2221a3437870SNishanth Aravamudan if (sscanf(s, "%lu", mhp) <= 0) 2222a3437870SNishanth Aravamudan *mhp = 0; 2223a3437870SNishanth Aravamudan 22248faa8b07SAndi Kleen /* 22258faa8b07SAndi Kleen * Global state is always initialized later in hugetlb_init. 22268faa8b07SAndi Kleen * But we need to allocate >= MAX_ORDER hstates here early to still 22278faa8b07SAndi Kleen * use the bootmem allocator. 
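 *
 * Illustrative command line (not from the original source):
 *
 *	hugepagesz=1G hugepages=16 hugepagesz=2M hugepages=512
 *
 * allocates the sixteen 1 GB pages right here from bootmem (their
 * order is >= MAX_ORDER on a typical x86-64 config), while the 2 MB
 * pages are allocated later from hugetlb_init().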
22288faa8b07SAndi Kleen */ 222947d38344SAneesh Kumar K.V if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER) 22308faa8b07SAndi Kleen hugetlb_hstate_alloc_pages(parsed_hstate); 22318faa8b07SAndi Kleen 22328faa8b07SAndi Kleen last_mhp = mhp; 22338faa8b07SAndi Kleen 2234a3437870SNishanth Aravamudan return 1; 2235a3437870SNishanth Aravamudan } 2236e11bfbfcSNick Piggin __setup("hugepages=", hugetlb_nrpages_setup); 2237e11bfbfcSNick Piggin 2238e11bfbfcSNick Piggin static int __init hugetlb_default_setup(char *s) 2239e11bfbfcSNick Piggin { 2240e11bfbfcSNick Piggin default_hstate_size = memparse(s, &s); 2241e11bfbfcSNick Piggin return 1; 2242e11bfbfcSNick Piggin } 2243e11bfbfcSNick Piggin __setup("default_hugepagesz=", hugetlb_default_setup); 2244a3437870SNishanth Aravamudan 22458a213460SNishanth Aravamudan static unsigned int cpuset_mems_nr(unsigned int *array) 22468a213460SNishanth Aravamudan { 22478a213460SNishanth Aravamudan int node; 22488a213460SNishanth Aravamudan unsigned int nr = 0; 22498a213460SNishanth Aravamudan 22508a213460SNishanth Aravamudan for_each_node_mask(node, cpuset_current_mems_allowed) 22518a213460SNishanth Aravamudan nr += array[node]; 22528a213460SNishanth Aravamudan 22538a213460SNishanth Aravamudan return nr; 22548a213460SNishanth Aravamudan } 22558a213460SNishanth Aravamudan 22568a213460SNishanth Aravamudan #ifdef CONFIG_SYSCTL 225706808b08SLee Schermerhorn static int hugetlb_sysctl_handler_common(bool obey_mempolicy, 225806808b08SLee Schermerhorn struct ctl_table *table, int write, 225906808b08SLee Schermerhorn void __user *buffer, size_t *length, loff_t *ppos) 22601da177e4SLinus Torvalds { 2261e5ff2159SAndi Kleen struct hstate *h = &default_hstate; 2262238d3c13SDavid Rientjes unsigned long tmp = h->max_huge_pages; 226308d4a246SMichal Hocko int ret; 2264e5ff2159SAndi Kleen 2265457c1b27SNishanth Aravamudan if (!hugepages_supported()) 2266457c1b27SNishanth Aravamudan return -ENOTSUPP; 2267457c1b27SNishanth Aravamudan 2268e5ff2159SAndi Kleen table->data = &tmp; 2269e5ff2159SAndi Kleen table->maxlen = sizeof(unsigned long); 227008d4a246SMichal Hocko ret = proc_doulongvec_minmax(table, write, buffer, length, ppos); 227108d4a246SMichal Hocko if (ret) 227208d4a246SMichal Hocko goto out; 2273e5ff2159SAndi Kleen 2274238d3c13SDavid Rientjes if (write) 2275238d3c13SDavid Rientjes ret = __nr_hugepages_store_common(obey_mempolicy, h, 2276238d3c13SDavid Rientjes NUMA_NO_NODE, tmp, *length); 227708d4a246SMichal Hocko out: 227808d4a246SMichal Hocko return ret; 22791da177e4SLinus Torvalds } 2280396faf03SMel Gorman 228106808b08SLee Schermerhorn int hugetlb_sysctl_handler(struct ctl_table *table, int write, 228206808b08SLee Schermerhorn void __user *buffer, size_t *length, loff_t *ppos) 228306808b08SLee Schermerhorn { 228406808b08SLee Schermerhorn 228506808b08SLee Schermerhorn return hugetlb_sysctl_handler_common(false, table, write, 228606808b08SLee Schermerhorn buffer, length, ppos); 228706808b08SLee Schermerhorn } 228806808b08SLee Schermerhorn 228906808b08SLee Schermerhorn #ifdef CONFIG_NUMA 229006808b08SLee Schermerhorn int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write, 229106808b08SLee Schermerhorn void __user *buffer, size_t *length, loff_t *ppos) 229206808b08SLee Schermerhorn { 229306808b08SLee Schermerhorn return hugetlb_sysctl_handler_common(true, table, write, 229406808b08SLee Schermerhorn buffer, length, ppos); 229506808b08SLee Schermerhorn } 229606808b08SLee Schermerhorn #endif /* CONFIG_NUMA */ 229706808b08SLee Schermerhorn 
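/*
 * Editor's note: a minimal userspace sketch (not part of this file)
 * showing how the handlers above are driven. A write to
 * /proc/sys/vm/nr_hugepages lands in hugetlb_sysctl_handler(), and the
 * resized pool is visible through the HugePages_* lines that
 * hugetlb_report_meminfo() below emits into /proc/meminfo. Needs root;
 * the pool size of 64 is an arbitrary example value.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/proc/sys/vm/nr_hugepages", "w");

	if (!f) {
		perror("nr_hugepages (need root?)");
		return EXIT_FAILURE;
	}
	fprintf(f, "64\n");	/* resize the default hstate's pool */
	fclose(f);

	f = fopen("/proc/meminfo", "r");
	if (!f)
		return EXIT_FAILURE;
	while (fgets(line, sizeof(line), f))
		if (strncmp(line, "HugePages", 9) == 0)
			fputs(line, stdout);
	fclose(f);
	return EXIT_SUCCESS;
}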
2298a3d0c6aaSNishanth Aravamudan int hugetlb_overcommit_handler(struct ctl_table *table, int write, 22998d65af78SAlexey Dobriyan void __user *buffer, 2300a3d0c6aaSNishanth Aravamudan size_t *length, loff_t *ppos) 2301a3d0c6aaSNishanth Aravamudan { 2302a5516438SAndi Kleen struct hstate *h = &default_hstate; 2303e5ff2159SAndi Kleen unsigned long tmp; 230408d4a246SMichal Hocko int ret; 2305e5ff2159SAndi Kleen 2306457c1b27SNishanth Aravamudan if (!hugepages_supported()) 2307457c1b27SNishanth Aravamudan return -ENOTSUPP; 2308457c1b27SNishanth Aravamudan 2309e5ff2159SAndi Kleen tmp = h->nr_overcommit_huge_pages; 2310e5ff2159SAndi Kleen 2311bae7f4aeSLuiz Capitulino if (write && hstate_is_gigantic(h)) 2312adbe8726SEric B Munson return -EINVAL; 2313adbe8726SEric B Munson 2314e5ff2159SAndi Kleen table->data = &tmp; 2315e5ff2159SAndi Kleen table->maxlen = sizeof(unsigned long); 231608d4a246SMichal Hocko ret = proc_doulongvec_minmax(table, write, buffer, length, ppos); 231708d4a246SMichal Hocko if (ret) 231808d4a246SMichal Hocko goto out; 2319e5ff2159SAndi Kleen 2320e5ff2159SAndi Kleen if (write) { 2321064d9efeSNishanth Aravamudan spin_lock(&hugetlb_lock); 2322e5ff2159SAndi Kleen h->nr_overcommit_huge_pages = tmp; 2323a3d0c6aaSNishanth Aravamudan spin_unlock(&hugetlb_lock); 2324e5ff2159SAndi Kleen } 232508d4a246SMichal Hocko out: 232608d4a246SMichal Hocko return ret; 2327a3d0c6aaSNishanth Aravamudan } 2328a3d0c6aaSNishanth Aravamudan 23291da177e4SLinus Torvalds #endif /* CONFIG_SYSCTL */ 23301da177e4SLinus Torvalds 2331e1759c21SAlexey Dobriyan void hugetlb_report_meminfo(struct seq_file *m) 23321da177e4SLinus Torvalds { 2333a5516438SAndi Kleen struct hstate *h = &default_hstate; 2334457c1b27SNishanth Aravamudan if (!hugepages_supported()) 2335457c1b27SNishanth Aravamudan return; 2336e1759c21SAlexey Dobriyan seq_printf(m, 23371da177e4SLinus Torvalds "HugePages_Total: %5lu\n" 23381da177e4SLinus Torvalds "HugePages_Free: %5lu\n" 2339b45b5bd6SDavid Gibson "HugePages_Rsvd: %5lu\n" 23407893d1d5SAdam Litke "HugePages_Surp: %5lu\n" 23414f98a2feSRik van Riel "Hugepagesize: %8lu kB\n", 2342a5516438SAndi Kleen h->nr_huge_pages, 2343a5516438SAndi Kleen h->free_huge_pages, 2344a5516438SAndi Kleen h->resv_huge_pages, 2345a5516438SAndi Kleen h->surplus_huge_pages, 2346a5516438SAndi Kleen 1UL << (huge_page_order(h) + PAGE_SHIFT - 10)); 23471da177e4SLinus Torvalds } 23481da177e4SLinus Torvalds 23491da177e4SLinus Torvalds int hugetlb_report_node_meminfo(int nid, char *buf) 23501da177e4SLinus Torvalds { 2351a5516438SAndi Kleen struct hstate *h = &default_hstate; 2352457c1b27SNishanth Aravamudan if (!hugepages_supported()) 2353457c1b27SNishanth Aravamudan return 0; 23541da177e4SLinus Torvalds return sprintf(buf, 23551da177e4SLinus Torvalds "Node %d HugePages_Total: %5u\n" 2356a1de0919SNishanth Aravamudan "Node %d HugePages_Free: %5u\n" 2357a1de0919SNishanth Aravamudan "Node %d HugePages_Surp: %5u\n", 2358a5516438SAndi Kleen nid, h->nr_huge_pages_node[nid], 2359a5516438SAndi Kleen nid, h->free_huge_pages_node[nid], 2360a5516438SAndi Kleen nid, h->surplus_huge_pages_node[nid]); 23611da177e4SLinus Torvalds } 23621da177e4SLinus Torvalds 2363949f7ec5SDavid Rientjes void hugetlb_show_meminfo(void) 2364949f7ec5SDavid Rientjes { 2365949f7ec5SDavid Rientjes struct hstate *h; 2366949f7ec5SDavid Rientjes int nid; 2367949f7ec5SDavid Rientjes 2368457c1b27SNishanth Aravamudan if (!hugepages_supported()) 2369457c1b27SNishanth Aravamudan return; 2370457c1b27SNishanth Aravamudan 2371949f7ec5SDavid Rientjes for_each_node_state(nid, 
N_MEMORY)
2372949f7ec5SDavid Rientjes for_each_hstate(h)
2373949f7ec5SDavid Rientjes pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
2374949f7ec5SDavid Rientjes nid,
2375949f7ec5SDavid Rientjes h->nr_huge_pages_node[nid],
2376949f7ec5SDavid Rientjes h->free_huge_pages_node[nid],
2377949f7ec5SDavid Rientjes h->surplus_huge_pages_node[nid],
2378949f7ec5SDavid Rientjes 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2379949f7ec5SDavid Rientjes }
2380949f7ec5SDavid Rientjes 
23811da177e4SLinus Torvalds /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
23821da177e4SLinus Torvalds unsigned long hugetlb_total_pages(void)
23831da177e4SLinus Torvalds {
2384d0028588SWanpeng Li struct hstate *h;
2385d0028588SWanpeng Li unsigned long nr_total_pages = 0;
2386d0028588SWanpeng Li 
2387d0028588SWanpeng Li for_each_hstate(h)
2388d0028588SWanpeng Li nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
2389d0028588SWanpeng Li return nr_total_pages;
23901da177e4SLinus Torvalds }
23911da177e4SLinus Torvalds 
2392a5516438SAndi Kleen static int hugetlb_acct_memory(struct hstate *h, long delta)
2393fc1b8a73SMel Gorman {
2394fc1b8a73SMel Gorman int ret = -ENOMEM;
2395fc1b8a73SMel Gorman 
2396fc1b8a73SMel Gorman spin_lock(&hugetlb_lock);
2397fc1b8a73SMel Gorman /*
2398fc1b8a73SMel Gorman * When cpuset is configured, it breaks the strict hugetlb page
2399fc1b8a73SMel Gorman * reservation as the accounting is done on a global variable. Such
2400fc1b8a73SMel Gorman * reservation is completely rubbish in the presence of cpuset because
2401fc1b8a73SMel Gorman * the reservation is not checked against page availability for the
2402fc1b8a73SMel Gorman * current cpuset. Applications can still potentially be OOM'ed by the
2403fc1b8a73SMel Gorman * kernel for lack of free htlb pages in the cpuset that the task is in.
2404fc1b8a73SMel Gorman * Attempting to enforce strict accounting with cpuset is almost
2405fc1b8a73SMel Gorman * impossible (or too ugly) because cpuset is so fluid that a
2406fc1b8a73SMel Gorman * task or memory node can be dynamically moved between cpusets.
2407fc1b8a73SMel Gorman *
2408fc1b8a73SMel Gorman * The change of semantics for shared hugetlb mapping with cpuset is
2409fc1b8a73SMel Gorman * undesirable. However, in order to preserve some of the semantics,
2410fc1b8a73SMel Gorman * we fall back to checking against the current free page availability as
2411fc1b8a73SMel Gorman * a best attempt and hopefully to minimize the impact of changing
2412fc1b8a73SMel Gorman * semantics that cpuset has.
2413fc1b8a73SMel Gorman */
2414fc1b8a73SMel Gorman if (delta > 0) {
2415a5516438SAndi Kleen if (gather_surplus_pages(h, delta) < 0)
2416fc1b8a73SMel Gorman goto out;
2417fc1b8a73SMel Gorman 
2418a5516438SAndi Kleen if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
2419a5516438SAndi Kleen return_unused_surplus_pages(h, delta);
2420fc1b8a73SMel Gorman goto out;
2421fc1b8a73SMel Gorman }
2422fc1b8a73SMel Gorman }
2423fc1b8a73SMel Gorman 
2424fc1b8a73SMel Gorman ret = 0;
2425fc1b8a73SMel Gorman if (delta < 0)
2426a5516438SAndi Kleen return_unused_surplus_pages(h, (unsigned long) -delta);
2427fc1b8a73SMel Gorman 
2428fc1b8a73SMel Gorman out:
2429fc1b8a73SMel Gorman spin_unlock(&hugetlb_lock);
2430fc1b8a73SMel Gorman return ret;
2431fc1b8a73SMel Gorman }
2432fc1b8a73SMel Gorman 
243384afd99bSAndy Whitcroft static void hugetlb_vm_op_open(struct vm_area_struct *vma)
243484afd99bSAndy Whitcroft {
2435f522c3acSJoonsoo Kim struct resv_map *resv = vma_resv_map(vma);
243684afd99bSAndy Whitcroft 
243784afd99bSAndy Whitcroft /*
243884afd99bSAndy Whitcroft * This new VMA should share its sibling's reservation map if present.
243984afd99bSAndy Whitcroft * The VMA will only ever have a valid reservation map pointer where
244084afd99bSAndy Whitcroft * it is being copied for another still existing VMA. As that VMA
244125985edcSLucas De Marchi * has a reference to the reservation map it cannot disappear until
244284afd99bSAndy Whitcroft * after this open call completes. It is therefore safe to take a
244384afd99bSAndy Whitcroft * new reference here without additional locking.
244484afd99bSAndy Whitcroft */
24454e35f483SJoonsoo Kim if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
2446f522c3acSJoonsoo Kim kref_get(&resv->refs);
244784afd99bSAndy Whitcroft }
244884afd99bSAndy Whitcroft 
2449a1e78772SMel Gorman static void hugetlb_vm_op_close(struct vm_area_struct *vma)
2450a1e78772SMel Gorman {
2451a5516438SAndi Kleen struct hstate *h = hstate_vma(vma);
2452f522c3acSJoonsoo Kim struct resv_map *resv = vma_resv_map(vma);
245390481622SDavid Gibson struct hugepage_subpool *spool = subpool_vma(vma);
24544e35f483SJoonsoo Kim unsigned long reserve, start, end;
245584afd99bSAndy Whitcroft 
24564e35f483SJoonsoo Kim if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
24574e35f483SJoonsoo Kim return;
24584e35f483SJoonsoo Kim 
2459a5516438SAndi Kleen start = vma_hugecache_offset(h, vma, vma->vm_start);
2460a5516438SAndi Kleen end = vma_hugecache_offset(h, vma, vma->vm_end);
246184afd99bSAndy Whitcroft 
24624e35f483SJoonsoo Kim reserve = (end - start) - region_count(resv, start, end);
246384afd99bSAndy Whitcroft 
2464f031dd27SJoonsoo Kim kref_put(&resv->refs, resv_map_release);
246584afd99bSAndy Whitcroft 
24667251ff78SAdam Litke if (reserve) {
2467a5516438SAndi Kleen hugetlb_acct_memory(h, -reserve);
246890481622SDavid Gibson hugepage_subpool_put_pages(spool, reserve);
24697251ff78SAdam Litke }
2470a1e78772SMel Gorman }
2471a1e78772SMel Gorman 
24721da177e4SLinus Torvalds /*
24731da177e4SLinus Torvalds * We cannot handle pagefaults against hugetlb pages at all. They cause
24741da177e4SLinus Torvalds * handle_mm_fault() to try to instantiate regular-sized pages in the
24751da177e4SLinus Torvalds * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get
24761da177e4SLinus Torvalds * this far.
24771da177e4SLinus Torvalds */ 2478d0217ac0SNick Piggin static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 24791da177e4SLinus Torvalds { 24801da177e4SLinus Torvalds BUG(); 2481d0217ac0SNick Piggin return 0; 24821da177e4SLinus Torvalds } 24831da177e4SLinus Torvalds 2484f0f37e2fSAlexey Dobriyan const struct vm_operations_struct hugetlb_vm_ops = { 2485d0217ac0SNick Piggin .fault = hugetlb_vm_op_fault, 248684afd99bSAndy Whitcroft .open = hugetlb_vm_op_open, 2487a1e78772SMel Gorman .close = hugetlb_vm_op_close, 24881da177e4SLinus Torvalds }; 24891da177e4SLinus Torvalds 24901e8f889bSDavid Gibson static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page, 24911e8f889bSDavid Gibson int writable) 249263551ae0SDavid Gibson { 249363551ae0SDavid Gibson pte_t entry; 249463551ae0SDavid Gibson 24951e8f889bSDavid Gibson if (writable) { 2496106c992aSGerald Schaefer entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page, 2497106c992aSGerald Schaefer vma->vm_page_prot))); 249863551ae0SDavid Gibson } else { 2499106c992aSGerald Schaefer entry = huge_pte_wrprotect(mk_huge_pte(page, 2500106c992aSGerald Schaefer vma->vm_page_prot)); 250163551ae0SDavid Gibson } 250263551ae0SDavid Gibson entry = pte_mkyoung(entry); 250363551ae0SDavid Gibson entry = pte_mkhuge(entry); 2504d9ed9faaSChris Metcalf entry = arch_make_huge_pte(entry, vma, page, writable); 250563551ae0SDavid Gibson 250663551ae0SDavid Gibson return entry; 250763551ae0SDavid Gibson } 250863551ae0SDavid Gibson 25091e8f889bSDavid Gibson static void set_huge_ptep_writable(struct vm_area_struct *vma, 25101e8f889bSDavid Gibson unsigned long address, pte_t *ptep) 25111e8f889bSDavid Gibson { 25121e8f889bSDavid Gibson pte_t entry; 25131e8f889bSDavid Gibson 2514106c992aSGerald Schaefer entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep))); 251532f84528SChris Forbes if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) 25164b3073e1SRussell King update_mmu_cache(vma, address, ptep); 25171e8f889bSDavid Gibson } 25181e8f889bSDavid Gibson 25194a705fefSNaoya Horiguchi static int is_hugetlb_entry_migration(pte_t pte) 25204a705fefSNaoya Horiguchi { 25214a705fefSNaoya Horiguchi swp_entry_t swp; 25224a705fefSNaoya Horiguchi 25234a705fefSNaoya Horiguchi if (huge_pte_none(pte) || pte_present(pte)) 25244a705fefSNaoya Horiguchi return 0; 25254a705fefSNaoya Horiguchi swp = pte_to_swp_entry(pte); 25264a705fefSNaoya Horiguchi if (non_swap_entry(swp) && is_migration_entry(swp)) 25274a705fefSNaoya Horiguchi return 1; 25284a705fefSNaoya Horiguchi else 25294a705fefSNaoya Horiguchi return 0; 25304a705fefSNaoya Horiguchi } 25314a705fefSNaoya Horiguchi 25324a705fefSNaoya Horiguchi static int is_hugetlb_entry_hwpoisoned(pte_t pte) 25334a705fefSNaoya Horiguchi { 25344a705fefSNaoya Horiguchi swp_entry_t swp; 25354a705fefSNaoya Horiguchi 25364a705fefSNaoya Horiguchi if (huge_pte_none(pte) || pte_present(pte)) 25374a705fefSNaoya Horiguchi return 0; 25384a705fefSNaoya Horiguchi swp = pte_to_swp_entry(pte); 25394a705fefSNaoya Horiguchi if (non_swap_entry(swp) && is_hwpoison_entry(swp)) 25404a705fefSNaoya Horiguchi return 1; 25414a705fefSNaoya Horiguchi else 25424a705fefSNaoya Horiguchi return 0; 25434a705fefSNaoya Horiguchi } 25441e8f889bSDavid Gibson 254563551ae0SDavid Gibson int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, 254663551ae0SDavid Gibson struct vm_area_struct *vma) 254763551ae0SDavid Gibson { 254863551ae0SDavid Gibson pte_t *src_pte, *dst_pte, entry; 254963551ae0SDavid Gibson struct page 
*ptepage; 25501c59827dSHugh Dickins unsigned long addr; 25511e8f889bSDavid Gibson int cow; 2552a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 2553a5516438SAndi Kleen unsigned long sz = huge_page_size(h); 2554e8569dd2SAndreas Sandberg unsigned long mmun_start; /* For mmu_notifiers */ 2555e8569dd2SAndreas Sandberg unsigned long mmun_end; /* For mmu_notifiers */ 2556e8569dd2SAndreas Sandberg int ret = 0; 25571e8f889bSDavid Gibson 25581e8f889bSDavid Gibson cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; 255963551ae0SDavid Gibson 2560e8569dd2SAndreas Sandberg mmun_start = vma->vm_start; 2561e8569dd2SAndreas Sandberg mmun_end = vma->vm_end; 2562e8569dd2SAndreas Sandberg if (cow) 2563e8569dd2SAndreas Sandberg mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end); 2564e8569dd2SAndreas Sandberg 2565a5516438SAndi Kleen for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) { 2566cb900f41SKirill A. Shutemov spinlock_t *src_ptl, *dst_ptl; 2567c74df32cSHugh Dickins src_pte = huge_pte_offset(src, addr); 2568c74df32cSHugh Dickins if (!src_pte) 2569c74df32cSHugh Dickins continue; 2570a5516438SAndi Kleen dst_pte = huge_pte_alloc(dst, addr, sz); 2571e8569dd2SAndreas Sandberg if (!dst_pte) { 2572e8569dd2SAndreas Sandberg ret = -ENOMEM; 2573e8569dd2SAndreas Sandberg break; 2574e8569dd2SAndreas Sandberg } 2575c5c99429SLarry Woodman 2576c5c99429SLarry Woodman /* If the pagetables are shared don't copy or take references */ 2577c5c99429SLarry Woodman if (dst_pte == src_pte) 2578c5c99429SLarry Woodman continue; 2579c5c99429SLarry Woodman 2580cb900f41SKirill A. Shutemov dst_ptl = huge_pte_lock(h, dst, dst_pte); 2581cb900f41SKirill A. Shutemov src_ptl = huge_pte_lockptr(h, src, src_pte); 2582cb900f41SKirill A. Shutemov spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 25834a705fefSNaoya Horiguchi entry = huge_ptep_get(src_pte); 25844a705fefSNaoya Horiguchi if (huge_pte_none(entry)) { /* skip none entry */ 25854a705fefSNaoya Horiguchi ; 25864a705fefSNaoya Horiguchi } else if (unlikely(is_hugetlb_entry_migration(entry) || 25874a705fefSNaoya Horiguchi is_hugetlb_entry_hwpoisoned(entry))) { 25884a705fefSNaoya Horiguchi swp_entry_t swp_entry = pte_to_swp_entry(entry); 25894a705fefSNaoya Horiguchi 25904a705fefSNaoya Horiguchi if (is_write_migration_entry(swp_entry) && cow) { 25914a705fefSNaoya Horiguchi /* 25924a705fefSNaoya Horiguchi * COW mappings require pages in both 25934a705fefSNaoya Horiguchi * parent and child to be set to read. 25944a705fefSNaoya Horiguchi */ 25954a705fefSNaoya Horiguchi make_migration_entry_read(&swp_entry); 25964a705fefSNaoya Horiguchi entry = swp_entry_to_pte(swp_entry); 25974a705fefSNaoya Horiguchi set_huge_pte_at(src, addr, src_pte, entry); 25984a705fefSNaoya Horiguchi } 25994a705fefSNaoya Horiguchi set_huge_pte_at(dst, addr, dst_pte, entry); 26004a705fefSNaoya Horiguchi } else { 260134ee645eSJoerg Roedel if (cow) { 26027f2e9525SGerald Schaefer huge_ptep_set_wrprotect(src, addr, src_pte); 260334ee645eSJoerg Roedel mmu_notifier_invalidate_range(src, mmun_start, 260434ee645eSJoerg Roedel mmun_end); 260534ee645eSJoerg Roedel } 26060253d634SNaoya Horiguchi entry = huge_ptep_get(src_pte); 260763551ae0SDavid Gibson ptepage = pte_page(entry); 260863551ae0SDavid Gibson get_page(ptepage); 26090fe6e20bSNaoya Horiguchi page_dup_rmap(ptepage); 261063551ae0SDavid Gibson set_huge_pte_at(dst, addr, dst_pte, entry); 26111c59827dSHugh Dickins } 2612cb900f41SKirill A. Shutemov spin_unlock(src_ptl); 2613cb900f41SKirill A. 
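/*
 * Editor's note: at this point one huge PTE has been handled for the
 * child: duplicated with an extra page and rmap reference in the common
 * case, copied as-is (after downgrading a writable migration entry) for
 * migration/hwpoison swap entries, or skipped for none entries and
 * shared page tables. The locks are released in the reverse of the
 * order they were taken.
 */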
Shutemov spin_unlock(dst_ptl); 261463551ae0SDavid Gibson } 261563551ae0SDavid Gibson 2616e8569dd2SAndreas Sandberg if (cow) 2617e8569dd2SAndreas Sandberg mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end); 2618e8569dd2SAndreas Sandberg 2619e8569dd2SAndreas Sandberg return ret; 262063551ae0SDavid Gibson } 262163551ae0SDavid Gibson 262224669e58SAneesh Kumar K.V void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, 262324669e58SAneesh Kumar K.V unsigned long start, unsigned long end, 262424669e58SAneesh Kumar K.V struct page *ref_page) 262563551ae0SDavid Gibson { 262624669e58SAneesh Kumar K.V int force_flush = 0; 262763551ae0SDavid Gibson struct mm_struct *mm = vma->vm_mm; 262863551ae0SDavid Gibson unsigned long address; 2629c7546f8fSDavid Gibson pte_t *ptep; 263063551ae0SDavid Gibson pte_t pte; 2631cb900f41SKirill A. Shutemov spinlock_t *ptl; 263263551ae0SDavid Gibson struct page *page; 2633a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 2634a5516438SAndi Kleen unsigned long sz = huge_page_size(h); 26352ec74c3eSSagi Grimberg const unsigned long mmun_start = start; /* For mmu_notifiers */ 26362ec74c3eSSagi Grimberg const unsigned long mmun_end = end; /* For mmu_notifiers */ 2637a5516438SAndi Kleen 263863551ae0SDavid Gibson WARN_ON(!is_vm_hugetlb_page(vma)); 2639a5516438SAndi Kleen BUG_ON(start & ~huge_page_mask(h)); 2640a5516438SAndi Kleen BUG_ON(end & ~huge_page_mask(h)); 264163551ae0SDavid Gibson 264224669e58SAneesh Kumar K.V tlb_start_vma(tlb, vma); 26432ec74c3eSSagi Grimberg mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 2644569f48b8SHillf Danton address = start; 264524669e58SAneesh Kumar K.V again: 2646569f48b8SHillf Danton for (; address < end; address += sz) { 2647c7546f8fSDavid Gibson ptep = huge_pte_offset(mm, address); 2648c7546f8fSDavid Gibson if (!ptep) 2649c7546f8fSDavid Gibson continue; 2650c7546f8fSDavid Gibson 2651cb900f41SKirill A. Shutemov ptl = huge_pte_lock(h, mm, ptep); 265239dde65cSChen, Kenneth W if (huge_pmd_unshare(mm, &address, ptep)) 2653cb900f41SKirill A. Shutemov goto unlock; 265439dde65cSChen, Kenneth W 26556629326bSHillf Danton pte = huge_ptep_get(ptep); 26566629326bSHillf Danton if (huge_pte_none(pte)) 2657cb900f41SKirill A. Shutemov goto unlock; 26586629326bSHillf Danton 26596629326bSHillf Danton /* 26609fbc1f63SNaoya Horiguchi * Migrating hugepage or HWPoisoned hugepage is already 26619fbc1f63SNaoya Horiguchi * unmapped and its refcount is dropped, so just clear pte here. 26626629326bSHillf Danton */ 26639fbc1f63SNaoya Horiguchi if (unlikely(!pte_present(pte))) { 2664106c992aSGerald Schaefer huge_pte_clear(mm, address, ptep); 2665cb900f41SKirill A. Shutemov goto unlock; 26668c4894c6SNaoya Horiguchi } 26676629326bSHillf Danton 26686629326bSHillf Danton page = pte_page(pte); 266904f2cbe3SMel Gorman /* 267004f2cbe3SMel Gorman * If a reference page is supplied, it is because a specific 267104f2cbe3SMel Gorman * page is being unmapped, not a range. Ensure the page we 267204f2cbe3SMel Gorman * are about to unmap is the actual page of interest. 267304f2cbe3SMel Gorman */ 267404f2cbe3SMel Gorman if (ref_page) { 267504f2cbe3SMel Gorman if (page != ref_page) 2676cb900f41SKirill A. 
Shutemov goto unlock; 267704f2cbe3SMel Gorman 267804f2cbe3SMel Gorman /* 267904f2cbe3SMel Gorman * Mark the VMA as having unmapped its page so that 268004f2cbe3SMel Gorman * future faults in this VMA will fail rather than 268104f2cbe3SMel Gorman * looking like data was lost 268204f2cbe3SMel Gorman */ 268304f2cbe3SMel Gorman set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED); 268404f2cbe3SMel Gorman } 268504f2cbe3SMel Gorman 2686c7546f8fSDavid Gibson pte = huge_ptep_get_and_clear(mm, address, ptep); 268724669e58SAneesh Kumar K.V tlb_remove_tlb_entry(tlb, ptep, address); 2688106c992aSGerald Schaefer if (huge_pte_dirty(pte)) 26896649a386SKen Chen set_page_dirty(page); 26909e81130bSHillf Danton 269124669e58SAneesh Kumar K.V page_remove_rmap(page); 269224669e58SAneesh Kumar K.V force_flush = !__tlb_remove_page(tlb, page); 2693cb900f41SKirill A. Shutemov if (force_flush) { 2694569f48b8SHillf Danton address += sz; 2695cb900f41SKirill A. Shutemov spin_unlock(ptl); 26969e81130bSHillf Danton break; 269763551ae0SDavid Gibson } 2698cb900f41SKirill A. Shutemov /* Bail out after unmapping reference page if supplied */ 2699cb900f41SKirill A. Shutemov if (ref_page) { 2700cb900f41SKirill A. Shutemov spin_unlock(ptl); 2701cb900f41SKirill A. Shutemov break; 2702cb900f41SKirill A. Shutemov } 2703cb900f41SKirill A. Shutemov unlock: 2704cb900f41SKirill A. Shutemov spin_unlock(ptl); 2705cb900f41SKirill A. Shutemov } 270624669e58SAneesh Kumar K.V /* 270724669e58SAneesh Kumar K.V * mmu_gather ran out of room to batch pages, we break out of 270824669e58SAneesh Kumar K.V * the PTE lock to avoid doing the potential expensive TLB invalidate 270924669e58SAneesh Kumar K.V * and page-free while holding it. 271024669e58SAneesh Kumar K.V */ 271124669e58SAneesh Kumar K.V if (force_flush) { 271224669e58SAneesh Kumar K.V force_flush = 0; 271324669e58SAneesh Kumar K.V tlb_flush_mmu(tlb); 271424669e58SAneesh Kumar K.V if (address < end && !ref_page) 271524669e58SAneesh Kumar K.V goto again; 2716fe1668aeSChen, Kenneth W } 27172ec74c3eSSagi Grimberg mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 271824669e58SAneesh Kumar K.V tlb_end_vma(tlb, vma); 27191da177e4SLinus Torvalds } 272063551ae0SDavid Gibson 2721d833352aSMel Gorman void __unmap_hugepage_range_final(struct mmu_gather *tlb, 2722d833352aSMel Gorman struct vm_area_struct *vma, unsigned long start, 2723d833352aSMel Gorman unsigned long end, struct page *ref_page) 2724d833352aSMel Gorman { 2725d833352aSMel Gorman __unmap_hugepage_range(tlb, vma, start, end, ref_page); 2726d833352aSMel Gorman 2727d833352aSMel Gorman /* 2728d833352aSMel Gorman * Clear this flag so that x86's huge_pmd_share page_table_shareable 2729d833352aSMel Gorman * test will fail on a vma being torn down, and not grab a page table 2730d833352aSMel Gorman * on its way out. We're lucky that the flag has such an appropriate 2731d833352aSMel Gorman * name, and can in fact be safely cleared here. We could clear it 2732d833352aSMel Gorman * before the __unmap_hugepage_range above, but all that's necessary 2733c8c06efaSDavidlohr Bueso * is to clear it before releasing the i_mmap_rwsem. This works 2734d833352aSMel Gorman * because in the context this is called, the VMA is about to be 2735c8c06efaSDavidlohr Bueso * destroyed and the i_mmap_rwsem is held. 
2736d833352aSMel Gorman */
2737d833352aSMel Gorman vma->vm_flags &= ~VM_MAYSHARE;
2738d833352aSMel Gorman }
2739d833352aSMel Gorman 
2740502717f4SChen, Kenneth W void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
274104f2cbe3SMel Gorman unsigned long end, struct page *ref_page)
2742502717f4SChen, Kenneth W {
274324669e58SAneesh Kumar K.V struct mm_struct *mm;
274424669e58SAneesh Kumar K.V struct mmu_gather tlb;
274524669e58SAneesh Kumar K.V 
274624669e58SAneesh Kumar K.V mm = vma->vm_mm;
274724669e58SAneesh Kumar K.V 
27482b047252SLinus Torvalds tlb_gather_mmu(&tlb, mm, start, end);
274924669e58SAneesh Kumar K.V __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
275024669e58SAneesh Kumar K.V tlb_finish_mmu(&tlb, start, end);
2751502717f4SChen, Kenneth W }
2752502717f4SChen, Kenneth W 
275304f2cbe3SMel Gorman /*
275404f2cbe3SMel Gorman * This is called when the original mapper is failing to COW a MAP_PRIVATE
275504f2cbe3SMel Gorman * mapping it owns the reserve page for. The intention is to unmap the page
275604f2cbe3SMel Gorman * from other VMAs and let the children be SIGKILLed if they are faulting the
275704f2cbe3SMel Gorman * same region.
275804f2cbe3SMel Gorman */
27592f4612afSDavidlohr Bueso static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
27602a4b3dedSHarvey Harrison struct page *page, unsigned long address)
276104f2cbe3SMel Gorman {
27627526674dSAdam Litke struct hstate *h = hstate_vma(vma);
276304f2cbe3SMel Gorman struct vm_area_struct *iter_vma;
276404f2cbe3SMel Gorman struct address_space *mapping;
276504f2cbe3SMel Gorman pgoff_t pgoff;
276604f2cbe3SMel Gorman 
276704f2cbe3SMel Gorman /*
276804f2cbe3SMel Gorman * vm_pgoff is in PAGE_SIZE units, hence the different calculation
276904f2cbe3SMel Gorman * from page cache lookup which is in HPAGE_SIZE units.
277004f2cbe3SMel Gorman */
27717526674dSAdam Litke address = address & huge_page_mask(h);
277236e4f20aSMichal Hocko pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
277336e4f20aSMichal Hocko vma->vm_pgoff;
2774496ad9aaSAl Viro mapping = file_inode(vma->vm_file)->i_mapping;
277504f2cbe3SMel Gorman 
27764eb2b1dcSMel Gorman /*
27774eb2b1dcSMel Gorman * Take the mapping lock for the duration of the table walk. As
27784eb2b1dcSMel Gorman * this mapping should be shared between all the VMAs,
27794eb2b1dcSMel Gorman * __unmap_hugepage_range() is called as the lock is already held
27804eb2b1dcSMel Gorman */
278183cde9e8SDavidlohr Bueso i_mmap_lock_write(mapping);
27826b2dbba8SMichel Lespinasse vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
278304f2cbe3SMel Gorman /* Do not unmap the current VMA */
278404f2cbe3SMel Gorman if (iter_vma == vma)
278504f2cbe3SMel Gorman continue;
278604f2cbe3SMel Gorman 
278704f2cbe3SMel Gorman /*
278804f2cbe3SMel Gorman * Unmap the page from other VMAs without their own reserves.
278904f2cbe3SMel Gorman * They get marked to be SIGKILLed if they fault in these
279004f2cbe3SMel Gorman * areas. This is because a future no-page fault on this VMA
279104f2cbe3SMel Gorman * could insert a zeroed page instead of the data existing
279204f2cbe3SMel Gorman * from the time of fork.
This would look like data corruption 279304f2cbe3SMel Gorman */ 279404f2cbe3SMel Gorman if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER)) 279524669e58SAneesh Kumar K.V unmap_hugepage_range(iter_vma, address, 279624669e58SAneesh Kumar K.V address + huge_page_size(h), page); 279704f2cbe3SMel Gorman } 279883cde9e8SDavidlohr Bueso i_mmap_unlock_write(mapping); 279904f2cbe3SMel Gorman } 280004f2cbe3SMel Gorman 28010fe6e20bSNaoya Horiguchi /* 28020fe6e20bSNaoya Horiguchi * Hugetlb_cow() should be called with page lock of the original hugepage held. 2803ef009b25SMichal Hocko * Called with hugetlb_instantiation_mutex held and pte_page locked so we 2804ef009b25SMichal Hocko * cannot race with other handlers or page migration. 2805ef009b25SMichal Hocko * Keep the pte_same checks anyway to make transition from the mutex easier. 28060fe6e20bSNaoya Horiguchi */ 28071e8f889bSDavid Gibson static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma, 280804f2cbe3SMel Gorman unsigned long address, pte_t *ptep, pte_t pte, 2809cb900f41SKirill A. Shutemov struct page *pagecache_page, spinlock_t *ptl) 28101e8f889bSDavid Gibson { 2811a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 28121e8f889bSDavid Gibson struct page *old_page, *new_page; 2813ad4404a2SDavidlohr Bueso int ret = 0, outside_reserve = 0; 28142ec74c3eSSagi Grimberg unsigned long mmun_start; /* For mmu_notifiers */ 28152ec74c3eSSagi Grimberg unsigned long mmun_end; /* For mmu_notifiers */ 28161e8f889bSDavid Gibson 28171e8f889bSDavid Gibson old_page = pte_page(pte); 28181e8f889bSDavid Gibson 281904f2cbe3SMel Gorman retry_avoidcopy: 28201e8f889bSDavid Gibson /* If no-one else is actually using this page, avoid the copy 28211e8f889bSDavid Gibson * and just make the page writable */ 282237a2140dSJoonsoo Kim if (page_mapcount(old_page) == 1 && PageAnon(old_page)) { 28230fe6e20bSNaoya Horiguchi page_move_anon_rmap(old_page, vma, address); 28241e8f889bSDavid Gibson set_huge_ptep_writable(vma, address, ptep); 282583c54070SNick Piggin return 0; 28261e8f889bSDavid Gibson } 28271e8f889bSDavid Gibson 282804f2cbe3SMel Gorman /* 282904f2cbe3SMel Gorman * If the process that created a MAP_PRIVATE mapping is about to 283004f2cbe3SMel Gorman * perform a COW due to a shared page count, attempt to satisfy 283104f2cbe3SMel Gorman * the allocation without using the existing reserves. The pagecache 283204f2cbe3SMel Gorman * page is used to determine if the reserve at this address was 283304f2cbe3SMel Gorman * consumed or not. If reserves were used, a partial faulted mapping 283404f2cbe3SMel Gorman * at the time of fork() could consume its reserves on COW instead 283504f2cbe3SMel Gorman * of the full address range. 283604f2cbe3SMel Gorman */ 28375944d011SJoonsoo Kim if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && 283804f2cbe3SMel Gorman old_page != pagecache_page) 283904f2cbe3SMel Gorman outside_reserve = 1; 284004f2cbe3SMel Gorman 28411e8f889bSDavid Gibson page_cache_get(old_page); 2842b76c8cfbSLarry Woodman 2843ad4404a2SDavidlohr Bueso /* 2844ad4404a2SDavidlohr Bueso * Drop page table lock as buddy allocator may be called. It will 2845ad4404a2SDavidlohr Bueso * be acquired again before returning to the caller, as expected. 2846ad4404a2SDavidlohr Bueso */ 2847cb900f41SKirill A. 
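/*
 * Editor's note: alloc_huge_page() may sleep and takes hugetlb_lock
 * internally, so the page table spinlock cannot be held across it. The
 * pte_same() checks performed after the lock is retaken below catch any
 * update that raced in while it was dropped.
 */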
Shutemov spin_unlock(ptl); 284804f2cbe3SMel Gorman new_page = alloc_huge_page(vma, address, outside_reserve); 28491e8f889bSDavid Gibson 28502fc39cecSAdam Litke if (IS_ERR(new_page)) { 285104f2cbe3SMel Gorman /* 285204f2cbe3SMel Gorman * If a process owning a MAP_PRIVATE mapping fails to COW, 285304f2cbe3SMel Gorman * it is due to references held by a child and an insufficient 285404f2cbe3SMel Gorman * huge page pool. To guarantee the original mappers 285504f2cbe3SMel Gorman * reliability, unmap the page from child processes. The child 285604f2cbe3SMel Gorman * may get SIGKILLed if it later faults. 285704f2cbe3SMel Gorman */ 285804f2cbe3SMel Gorman if (outside_reserve) { 2859ad4404a2SDavidlohr Bueso page_cache_release(old_page); 286004f2cbe3SMel Gorman BUG_ON(huge_pte_none(pte)); 28612f4612afSDavidlohr Bueso unmap_ref_private(mm, vma, old_page, address); 286204f2cbe3SMel Gorman BUG_ON(huge_pte_none(pte)); 2863cb900f41SKirill A. Shutemov spin_lock(ptl); 2864a734bcc8SHillf Danton ptep = huge_pte_offset(mm, address & huge_page_mask(h)); 2865a9af0c5dSNaoya Horiguchi if (likely(ptep && 2866a9af0c5dSNaoya Horiguchi pte_same(huge_ptep_get(ptep), pte))) 286704f2cbe3SMel Gorman goto retry_avoidcopy; 2868a734bcc8SHillf Danton /* 2869cb900f41SKirill A. Shutemov * race occurs while re-acquiring page table 2870cb900f41SKirill A. Shutemov * lock, and our job is done. 2871a734bcc8SHillf Danton */ 2872a734bcc8SHillf Danton return 0; 287304f2cbe3SMel Gorman } 287404f2cbe3SMel Gorman 2875ad4404a2SDavidlohr Bueso ret = (PTR_ERR(new_page) == -ENOMEM) ? 2876ad4404a2SDavidlohr Bueso VM_FAULT_OOM : VM_FAULT_SIGBUS; 2877ad4404a2SDavidlohr Bueso goto out_release_old; 28781e8f889bSDavid Gibson } 28791e8f889bSDavid Gibson 28800fe6e20bSNaoya Horiguchi /* 28810fe6e20bSNaoya Horiguchi * When the original hugepage is shared one, it does not have 28820fe6e20bSNaoya Horiguchi * anon_vma prepared. 28830fe6e20bSNaoya Horiguchi */ 288444e2aa93SDean Nelson if (unlikely(anon_vma_prepare(vma))) { 2885ad4404a2SDavidlohr Bueso ret = VM_FAULT_OOM; 2886ad4404a2SDavidlohr Bueso goto out_release_all; 288744e2aa93SDean Nelson } 28880fe6e20bSNaoya Horiguchi 288947ad8475SAndrea Arcangeli copy_user_huge_page(new_page, old_page, address, vma, 289047ad8475SAndrea Arcangeli pages_per_huge_page(h)); 28910ed361deSNick Piggin __SetPageUptodate(new_page); 28921e8f889bSDavid Gibson 28932ec74c3eSSagi Grimberg mmun_start = address & huge_page_mask(h); 28942ec74c3eSSagi Grimberg mmun_end = mmun_start + huge_page_size(h); 28952ec74c3eSSagi Grimberg mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 2896ad4404a2SDavidlohr Bueso 2897b76c8cfbSLarry Woodman /* 2898cb900f41SKirill A. Shutemov * Retake the page table lock to check for racing updates 2899b76c8cfbSLarry Woodman * before the page tables are altered 2900b76c8cfbSLarry Woodman */ 2901cb900f41SKirill A. 
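/*
 * Editor's note: if the recheck below fails, another thread resolved
 * (or unmapped) the fault while the copy was in progress; new_page is
 * then simply released on the common exit path rather than mapped.
 */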
Shutemov spin_lock(ptl); 2902a5516438SAndi Kleen ptep = huge_pte_offset(mm, address & huge_page_mask(h)); 2903a9af0c5dSNaoya Horiguchi if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) { 290407443a85SJoonsoo Kim ClearPagePrivate(new_page); 290507443a85SJoonsoo Kim 29061e8f889bSDavid Gibson /* Break COW */ 29078fe627ecSGerald Schaefer huge_ptep_clear_flush(vma, address, ptep); 290834ee645eSJoerg Roedel mmu_notifier_invalidate_range(mm, mmun_start, mmun_end); 29091e8f889bSDavid Gibson set_huge_pte_at(mm, address, ptep, 29101e8f889bSDavid Gibson make_huge_pte(vma, new_page, 1)); 29110fe6e20bSNaoya Horiguchi page_remove_rmap(old_page); 2912cd67f0d2SNaoya Horiguchi hugepage_add_new_anon_rmap(new_page, vma, address); 29131e8f889bSDavid Gibson /* Make the old page be freed below */ 29141e8f889bSDavid Gibson new_page = old_page; 29151e8f889bSDavid Gibson } 2916cb900f41SKirill A. Shutemov spin_unlock(ptl); 29172ec74c3eSSagi Grimberg mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 2918ad4404a2SDavidlohr Bueso out_release_all: 29191e8f889bSDavid Gibson page_cache_release(new_page); 2920ad4404a2SDavidlohr Bueso out_release_old: 29211e8f889bSDavid Gibson page_cache_release(old_page); 29228312034fSJoonsoo Kim 2923ad4404a2SDavidlohr Bueso spin_lock(ptl); /* Caller expects lock to be held */ 2924ad4404a2SDavidlohr Bueso return ret; 29251e8f889bSDavid Gibson } 29261e8f889bSDavid Gibson 292704f2cbe3SMel Gorman /* Return the pagecache page at a given address within a VMA */ 2928a5516438SAndi Kleen static struct page *hugetlbfs_pagecache_page(struct hstate *h, 2929a5516438SAndi Kleen struct vm_area_struct *vma, unsigned long address) 293004f2cbe3SMel Gorman { 293104f2cbe3SMel Gorman struct address_space *mapping; 2932e7c4b0bfSAndy Whitcroft pgoff_t idx; 293304f2cbe3SMel Gorman 293404f2cbe3SMel Gorman mapping = vma->vm_file->f_mapping; 2935a5516438SAndi Kleen idx = vma_hugecache_offset(h, vma, address); 293604f2cbe3SMel Gorman 293704f2cbe3SMel Gorman return find_lock_page(mapping, idx); 293804f2cbe3SMel Gorman } 293904f2cbe3SMel Gorman 29403ae77f43SHugh Dickins /* 29413ae77f43SHugh Dickins * Return whether there is a pagecache page to back given address within VMA. 29423ae77f43SHugh Dickins * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page. 29433ae77f43SHugh Dickins */ 29443ae77f43SHugh Dickins static bool hugetlbfs_pagecache_present(struct hstate *h, 29452a15efc9SHugh Dickins struct vm_area_struct *vma, unsigned long address) 29462a15efc9SHugh Dickins { 29472a15efc9SHugh Dickins struct address_space *mapping; 29482a15efc9SHugh Dickins pgoff_t idx; 29492a15efc9SHugh Dickins struct page *page; 29502a15efc9SHugh Dickins 29512a15efc9SHugh Dickins mapping = vma->vm_file->f_mapping; 29522a15efc9SHugh Dickins idx = vma_hugecache_offset(h, vma, address); 29532a15efc9SHugh Dickins 29542a15efc9SHugh Dickins page = find_get_page(mapping, idx); 29552a15efc9SHugh Dickins if (page) 29562a15efc9SHugh Dickins put_page(page); 29572a15efc9SHugh Dickins return page != NULL; 29582a15efc9SHugh Dickins } 29592a15efc9SHugh Dickins 2960a1ed3ddaSRobert P. J. 
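/*
 * Editor's note: a minimal userspace sketch (not part of this file)
 * that drives hugetlb_no_page() below. The first touch of each huge
 * page in an anonymous MAP_HUGETLB mapping faults with no PTE present,
 * so the kernel allocates and zeroes a huge page from the pool. The
 * 2 MB page size is an assumption about the default hstate; mmap()
 * fails with ENOMEM if the pool is empty.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

#define HPAGE_2M	(2UL * 1024 * 1024)	/* assumed default size */

int main(void)
{
	size_t len = 4 * HPAGE_2M;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB)");
		return EXIT_FAILURE;
	}
	/* Each first write takes the hugetlb_no_page() path. */
	for (size_t off = 0; off < len; off += HPAGE_2M)
		p[off] = 1;
	printf("faulted in %zu huge pages\n", len / HPAGE_2M);
	munmap(p, len);
	return EXIT_SUCCESS;
}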
Day static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, 29618382d914SDavidlohr Bueso struct address_space *mapping, pgoff_t idx, 2962788c7df4SHugh Dickins unsigned long address, pte_t *ptep, unsigned int flags) 2963ac9b9c66SHugh Dickins { 2964a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 2965ac9b9c66SHugh Dickins int ret = VM_FAULT_SIGBUS; 2966409eb8c2SHillf Danton int anon_rmap = 0; 29674c887265SAdam Litke unsigned long size; 29684c887265SAdam Litke struct page *page; 29691e8f889bSDavid Gibson pte_t new_pte; 2970cb900f41SKirill A. Shutemov spinlock_t *ptl; 29714c887265SAdam Litke 297204f2cbe3SMel Gorman /* 297304f2cbe3SMel Gorman * Currently, we are forced to kill the process in the event the 297404f2cbe3SMel Gorman * original mapper has unmapped pages from the child due to a failed 297525985edcSLucas De Marchi * COW. Warn that such a situation has occurred as it may not be obvious 297604f2cbe3SMel Gorman */ 297704f2cbe3SMel Gorman if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) { 2978ffb22af5SAndrew Morton pr_warning("PID %d killed due to inadequate hugepage pool\n", 297904f2cbe3SMel Gorman current->pid); 298004f2cbe3SMel Gorman return ret; 298104f2cbe3SMel Gorman } 298204f2cbe3SMel Gorman 29834c887265SAdam Litke /* 29844c887265SAdam Litke * Use page lock to guard against racing truncation 29854c887265SAdam Litke * before we get page_table_lock. 29864c887265SAdam Litke */ 29876bda666aSChristoph Lameter retry: 29886bda666aSChristoph Lameter page = find_lock_page(mapping, idx); 29896bda666aSChristoph Lameter if (!page) { 2990a5516438SAndi Kleen size = i_size_read(mapping->host) >> huge_page_shift(h); 2991ebed4bfcSHugh Dickins if (idx >= size) 2992ebed4bfcSHugh Dickins goto out; 299304f2cbe3SMel Gorman page = alloc_huge_page(vma, address, 0); 29942fc39cecSAdam Litke if (IS_ERR(page)) { 299576dcee75SAneesh Kumar K.V ret = PTR_ERR(page); 299676dcee75SAneesh Kumar K.V if (ret == -ENOMEM) 299776dcee75SAneesh Kumar K.V ret = VM_FAULT_OOM; 299876dcee75SAneesh Kumar K.V else 299976dcee75SAneesh Kumar K.V ret = VM_FAULT_SIGBUS; 30006bda666aSChristoph Lameter goto out; 30016bda666aSChristoph Lameter } 300247ad8475SAndrea Arcangeli clear_huge_page(page, address, pages_per_huge_page(h)); 30030ed361deSNick Piggin __SetPageUptodate(page); 3004ac9b9c66SHugh Dickins 3005f83a275dSMel Gorman if (vma->vm_flags & VM_MAYSHARE) { 30066bda666aSChristoph Lameter int err; 300745c682a6SKen Chen struct inode *inode = mapping->host; 30086bda666aSChristoph Lameter 30096bda666aSChristoph Lameter err = add_to_page_cache(page, mapping, idx, GFP_KERNEL); 30106bda666aSChristoph Lameter if (err) { 30116bda666aSChristoph Lameter put_page(page); 30126bda666aSChristoph Lameter if (err == -EEXIST) 30136bda666aSChristoph Lameter goto retry; 30146bda666aSChristoph Lameter goto out; 30156bda666aSChristoph Lameter } 301607443a85SJoonsoo Kim ClearPagePrivate(page); 301745c682a6SKen Chen 301845c682a6SKen Chen spin_lock(&inode->i_lock); 3019a5516438SAndi Kleen inode->i_blocks += blocks_per_huge_page(h); 302045c682a6SKen Chen spin_unlock(&inode->i_lock); 302123be7468SMel Gorman } else { 30226bda666aSChristoph Lameter lock_page(page); 30230fe6e20bSNaoya Horiguchi if (unlikely(anon_vma_prepare(vma))) { 30240fe6e20bSNaoya Horiguchi ret = VM_FAULT_OOM; 30250fe6e20bSNaoya Horiguchi goto backout_unlocked; 302623be7468SMel Gorman } 3027409eb8c2SHillf Danton anon_rmap = 1; 30280fe6e20bSNaoya Horiguchi } 30290fe6e20bSNaoya Horiguchi } else { 303057303d80SAndy Whitcroft /* 3031998b4382SNaoya Horiguchi * If 
a memory error occurs between mmap() and fault, some processes
3032998b4382SNaoya Horiguchi * won't have a hwpoisoned swap entry for the errored virtual address.
3033998b4382SNaoya Horiguchi * So we need to block the hugepage fault by a PG_hwpoison bit check.
3034fd6a03edSNaoya Horiguchi */
3035fd6a03edSNaoya Horiguchi if (unlikely(PageHWPoison(page))) {
3036aa50d3a7SAndi Kleen ret = VM_FAULT_HWPOISON |
3037972dc4deSAneesh Kumar K.V VM_FAULT_SET_HINDEX(hstate_index(h));
3038fd6a03edSNaoya Horiguchi goto backout_unlocked;
30396bda666aSChristoph Lameter }
3040998b4382SNaoya Horiguchi }
30411e8f889bSDavid Gibson 
304257303d80SAndy Whitcroft /*
304357303d80SAndy Whitcroft * If we are going to COW a private mapping later, we examine the
304457303d80SAndy Whitcroft * pending reservations for this page now. This will ensure that
304557303d80SAndy Whitcroft * any allocations necessary to record that reservation occur outside
304657303d80SAndy Whitcroft * the spinlock.
304757303d80SAndy Whitcroft */
3048788c7df4SHugh Dickins if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
30492b26736cSAndy Whitcroft if (vma_needs_reservation(h, vma, address) < 0) {
30502b26736cSAndy Whitcroft ret = VM_FAULT_OOM;
30512b26736cSAndy Whitcroft goto backout_unlocked;
30522b26736cSAndy Whitcroft }
305357303d80SAndy Whitcroft 
3054cb900f41SKirill A. Shutemov ptl = huge_pte_lockptr(h, mm, ptep);
3055cb900f41SKirill A. Shutemov spin_lock(ptl);
3056a5516438SAndi Kleen size = i_size_read(mapping->host) >> huge_page_shift(h);
30574c887265SAdam Litke if (idx >= size)
30584c887265SAdam Litke goto backout;
30594c887265SAdam Litke 
306083c54070SNick Piggin ret = 0;
30617f2e9525SGerald Schaefer if (!huge_pte_none(huge_ptep_get(ptep)))
30624c887265SAdam Litke goto backout;
30634c887265SAdam Litke 
306407443a85SJoonsoo Kim if (anon_rmap) {
306507443a85SJoonsoo Kim ClearPagePrivate(page);
3066409eb8c2SHillf Danton hugepage_add_new_anon_rmap(page, vma, address);
3067ac714904SChoi Gi-yong } else
3068409eb8c2SHillf Danton page_dup_rmap(page);
30691e8f889bSDavid Gibson new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
30701e8f889bSDavid Gibson && (vma->vm_flags & VM_SHARED)));
30711e8f889bSDavid Gibson set_huge_pte_at(mm, address, ptep, new_pte);
30721e8f889bSDavid Gibson 
3073788c7df4SHugh Dickins if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
30741e8f889bSDavid Gibson /* Optimization, do the COW without a second fault */
3075cb900f41SKirill A. Shutemov ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
30761e8f889bSDavid Gibson }
30771e8f889bSDavid Gibson 
3078cb900f41SKirill A. Shutemov spin_unlock(ptl);
30794c887265SAdam Litke unlock_page(page);
30804c887265SAdam Litke out:
3081ac9b9c66SHugh Dickins return ret;
30824c887265SAdam Litke 
30834c887265SAdam Litke backout:
3084cb900f41SKirill A.
Shutemov spin_unlock(ptl);
30852b26736cSAndy Whitcroft backout_unlocked:
30864c887265SAdam Litke unlock_page(page);
30874c887265SAdam Litke put_page(page);
30884c887265SAdam Litke goto out;
3089ac9b9c66SHugh Dickins }
3090ac9b9c66SHugh Dickins 
30918382d914SDavidlohr Bueso #ifdef CONFIG_SMP
30928382d914SDavidlohr Bueso static u32 fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
30938382d914SDavidlohr Bueso struct vm_area_struct *vma,
30948382d914SDavidlohr Bueso struct address_space *mapping,
30958382d914SDavidlohr Bueso pgoff_t idx, unsigned long address)
30968382d914SDavidlohr Bueso {
30978382d914SDavidlohr Bueso unsigned long key[2];
30988382d914SDavidlohr Bueso u32 hash;
30998382d914SDavidlohr Bueso 
31008382d914SDavidlohr Bueso if (vma->vm_flags & VM_SHARED) {
31018382d914SDavidlohr Bueso key[0] = (unsigned long) mapping;
31028382d914SDavidlohr Bueso key[1] = idx;
31038382d914SDavidlohr Bueso } else {
31048382d914SDavidlohr Bueso key[0] = (unsigned long) mm;
31058382d914SDavidlohr Bueso key[1] = address >> huge_page_shift(h);
31068382d914SDavidlohr Bueso }
31078382d914SDavidlohr Bueso 
31088382d914SDavidlohr Bueso hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
31098382d914SDavidlohr Bueso 
31108382d914SDavidlohr Bueso return hash & (num_fault_mutexes - 1);
31118382d914SDavidlohr Bueso }
31128382d914SDavidlohr Bueso #else
31138382d914SDavidlohr Bueso /*
31148382d914SDavidlohr Bueso * For uniprocessor systems we always use a single mutex, so just
31158382d914SDavidlohr Bueso * return 0 and avoid the hashing overhead.
31168382d914SDavidlohr Bueso */
31178382d914SDavidlohr Bueso static u32 fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
31188382d914SDavidlohr Bueso struct vm_area_struct *vma,
31198382d914SDavidlohr Bueso struct address_space *mapping,
31208382d914SDavidlohr Bueso pgoff_t idx, unsigned long address)
31218382d914SDavidlohr Bueso {
31228382d914SDavidlohr Bueso return 0;
31238382d914SDavidlohr Bueso }
31248382d914SDavidlohr Bueso #endif
31258382d914SDavidlohr Bueso 
312686e5216fSAdam Litke int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3127788c7df4SHugh Dickins unsigned long address, unsigned int flags)
312886e5216fSAdam Litke {
31298382d914SDavidlohr Bueso pte_t *ptep, entry;
3130cb900f41SKirill A. Shutemov spinlock_t *ptl;
31311e8f889bSDavid Gibson int ret;
31328382d914SDavidlohr Bueso u32 hash;
31338382d914SDavidlohr Bueso pgoff_t idx;
31340fe6e20bSNaoya Horiguchi struct page *page = NULL;
313557303d80SAndy Whitcroft struct page *pagecache_page = NULL;
3136a5516438SAndi Kleen struct hstate *h = hstate_vma(vma);
31378382d914SDavidlohr Bueso struct address_space *mapping;
31380f792cf9SNaoya Horiguchi int need_wait_lock = 0;
313986e5216fSAdam Litke 
31401e16a539SKAMEZAWA Hiroyuki address &= huge_page_mask(h);
31411e16a539SKAMEZAWA Hiroyuki 
3142fd6a03edSNaoya Horiguchi ptep = huge_pte_offset(mm, address);
3143fd6a03edSNaoya Horiguchi if (ptep) {
3144fd6a03edSNaoya Horiguchi entry = huge_ptep_get(ptep);
3145290408d4SNaoya Horiguchi if (unlikely(is_hugetlb_entry_migration(entry))) {
3146cb900f41SKirill A.
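/*
 * Editor's note: a migration entry here means another task is currently
 * moving this huge page; wait for the migration to finish and return so
 * the fault is retried against the relocated page.
 */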
Shutemov migration_entry_wait_huge(vma, mm, ptep); 3147290408d4SNaoya Horiguchi return 0; 3148290408d4SNaoya Horiguchi } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) 3149aa50d3a7SAndi Kleen return VM_FAULT_HWPOISON_LARGE | 3150972dc4deSAneesh Kumar K.V VM_FAULT_SET_HINDEX(hstate_index(h)); 3151fd6a03edSNaoya Horiguchi } 3152fd6a03edSNaoya Horiguchi 3153a5516438SAndi Kleen ptep = huge_pte_alloc(mm, address, huge_page_size(h)); 315486e5216fSAdam Litke if (!ptep) 315586e5216fSAdam Litke return VM_FAULT_OOM; 315686e5216fSAdam Litke 31578382d914SDavidlohr Bueso mapping = vma->vm_file->f_mapping; 31588382d914SDavidlohr Bueso idx = vma_hugecache_offset(h, vma, address); 31598382d914SDavidlohr Bueso 31603935baa9SDavid Gibson /* 31613935baa9SDavid Gibson * Serialize hugepage allocation and instantiation, so that we don't 31623935baa9SDavid Gibson * get spurious allocation failures if two CPUs race to instantiate 31633935baa9SDavid Gibson * the same page in the page cache. 31643935baa9SDavid Gibson */ 31658382d914SDavidlohr Bueso hash = fault_mutex_hash(h, mm, vma, mapping, idx, address); 31668382d914SDavidlohr Bueso mutex_lock(&htlb_fault_mutex_table[hash]); 31678382d914SDavidlohr Bueso 31687f2e9525SGerald Schaefer entry = huge_ptep_get(ptep); 31697f2e9525SGerald Schaefer if (huge_pte_none(entry)) { 31708382d914SDavidlohr Bueso ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags); 3171b4d1d99fSDavid Gibson goto out_mutex; 31723935baa9SDavid Gibson } 317386e5216fSAdam Litke 317483c54070SNick Piggin ret = 0; 31751e8f889bSDavid Gibson 317657303d80SAndy Whitcroft /* 31770f792cf9SNaoya Horiguchi * entry could be a migration/hwpoison entry at this point, so this 31780f792cf9SNaoya Horiguchi * check prevents the kernel from going below assuming that we have 31790f792cf9SNaoya Horiguchi * a active hugepage in pagecache. This goto expects the 2nd page fault, 31800f792cf9SNaoya Horiguchi * and is_hugetlb_entry_(migration|hwpoisoned) check will properly 31810f792cf9SNaoya Horiguchi * handle it. 31820f792cf9SNaoya Horiguchi */ 31830f792cf9SNaoya Horiguchi if (!pte_present(entry)) 31840f792cf9SNaoya Horiguchi goto out_mutex; 31850f792cf9SNaoya Horiguchi 31860f792cf9SNaoya Horiguchi /* 318757303d80SAndy Whitcroft * If we are going to COW the mapping later, we examine the pending 318857303d80SAndy Whitcroft * reservations for this page now. This will ensure that any 318957303d80SAndy Whitcroft * allocations necessary to record that reservation occur outside the 319057303d80SAndy Whitcroft * spinlock. For private mappings, we also lookup the pagecache 319157303d80SAndy Whitcroft * page now as it is used to determine if a reservation has been 319257303d80SAndy Whitcroft * consumed. 
319357303d80SAndy Whitcroft */ 3194106c992aSGerald Schaefer if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) { 31952b26736cSAndy Whitcroft if (vma_needs_reservation(h, vma, address) < 0) { 31962b26736cSAndy Whitcroft ret = VM_FAULT_OOM; 3197b4d1d99fSDavid Gibson goto out_mutex; 31982b26736cSAndy Whitcroft } 319957303d80SAndy Whitcroft 3200f83a275dSMel Gorman if (!(vma->vm_flags & VM_MAYSHARE)) 320157303d80SAndy Whitcroft pagecache_page = hugetlbfs_pagecache_page(h, 320257303d80SAndy Whitcroft vma, address); 320357303d80SAndy Whitcroft } 320457303d80SAndy Whitcroft 32050f792cf9SNaoya Horiguchi ptl = huge_pte_lock(h, mm, ptep); 32060fe6e20bSNaoya Horiguchi 32071e8f889bSDavid Gibson /* Check for a racing update before calling hugetlb_cow */ 3208b4d1d99fSDavid Gibson if (unlikely(!pte_same(entry, huge_ptep_get(ptep)))) 3209cb900f41SKirill A. Shutemov goto out_ptl; 3210b4d1d99fSDavid Gibson 32110f792cf9SNaoya Horiguchi /* 32120f792cf9SNaoya Horiguchi * hugetlb_cow() requires page locks of pte_page(entry) and 32130f792cf9SNaoya Horiguchi * pagecache_page, so here we need take the former one 32140f792cf9SNaoya Horiguchi * when page != pagecache_page or !pagecache_page. 32150f792cf9SNaoya Horiguchi */ 32160f792cf9SNaoya Horiguchi page = pte_page(entry); 32170f792cf9SNaoya Horiguchi if (page != pagecache_page) 32180f792cf9SNaoya Horiguchi if (!trylock_page(page)) { 32190f792cf9SNaoya Horiguchi need_wait_lock = 1; 32200f792cf9SNaoya Horiguchi goto out_ptl; 32210f792cf9SNaoya Horiguchi } 32220f792cf9SNaoya Horiguchi 32230f792cf9SNaoya Horiguchi get_page(page); 3224b4d1d99fSDavid Gibson 3225788c7df4SHugh Dickins if (flags & FAULT_FLAG_WRITE) { 3226106c992aSGerald Schaefer if (!huge_pte_write(entry)) { 322757303d80SAndy Whitcroft ret = hugetlb_cow(mm, vma, address, ptep, entry, 3228cb900f41SKirill A. Shutemov pagecache_page, ptl); 32290f792cf9SNaoya Horiguchi goto out_put_page; 3230b4d1d99fSDavid Gibson } 3231106c992aSGerald Schaefer entry = huge_pte_mkdirty(entry); 3232b4d1d99fSDavid Gibson } 3233b4d1d99fSDavid Gibson entry = pte_mkyoung(entry); 3234788c7df4SHugh Dickins if (huge_ptep_set_access_flags(vma, address, ptep, entry, 3235788c7df4SHugh Dickins flags & FAULT_FLAG_WRITE)) 32364b3073e1SRussell King update_mmu_cache(vma, address, ptep); 32370f792cf9SNaoya Horiguchi out_put_page: 32380f792cf9SNaoya Horiguchi if (page != pagecache_page) 32390f792cf9SNaoya Horiguchi unlock_page(page); 32400f792cf9SNaoya Horiguchi put_page(page); 3241cb900f41SKirill A. Shutemov out_ptl: 3242cb900f41SKirill A. Shutemov spin_unlock(ptl); 324357303d80SAndy Whitcroft 324457303d80SAndy Whitcroft if (pagecache_page) { 324557303d80SAndy Whitcroft unlock_page(pagecache_page); 324657303d80SAndy Whitcroft put_page(pagecache_page); 324757303d80SAndy Whitcroft } 3248b4d1d99fSDavid Gibson out_mutex: 32498382d914SDavidlohr Bueso mutex_unlock(&htlb_fault_mutex_table[hash]); 32500f792cf9SNaoya Horiguchi /* 32510f792cf9SNaoya Horiguchi * Generally it's safe to hold refcount during waiting page lock. But 32520f792cf9SNaoya Horiguchi * here we just wait to defer the next page fault to avoid busy loop and 32530f792cf9SNaoya Horiguchi * the page is not used after unlocked before returning from the current 32540f792cf9SNaoya Horiguchi * page fault. So we are safe from accessing freed page, even if we wait 32550f792cf9SNaoya Horiguchi * here without taking refcount. 
32560f792cf9SNaoya Horiguchi */ 32570f792cf9SNaoya Horiguchi if (need_wait_lock) 32580f792cf9SNaoya Horiguchi wait_on_page_locked(page); 32591e8f889bSDavid Gibson return ret; 326086e5216fSAdam Litke } 326186e5216fSAdam Litke 326228a35716SMichel Lespinasse long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, 326363551ae0SDavid Gibson struct page **pages, struct vm_area_struct **vmas, 326428a35716SMichel Lespinasse unsigned long *position, unsigned long *nr_pages, 326528a35716SMichel Lespinasse long i, unsigned int flags) 326663551ae0SDavid Gibson { 3267d5d4b0aaSChen, Kenneth W unsigned long pfn_offset; 3268d5d4b0aaSChen, Kenneth W unsigned long vaddr = *position; 326928a35716SMichel Lespinasse unsigned long remainder = *nr_pages; 3270a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 327163551ae0SDavid Gibson 327263551ae0SDavid Gibson while (vaddr < vma->vm_end && remainder) { 327363551ae0SDavid Gibson pte_t *pte; 3274cb900f41SKirill A. Shutemov spinlock_t *ptl = NULL; 32752a15efc9SHugh Dickins int absent; 327663551ae0SDavid Gibson struct page *page; 327763551ae0SDavid Gibson 32784c887265SAdam Litke /* 32794c887265SAdam Litke * Some archs (sparc64, sh*) have multiple pte_ts to 32802a15efc9SHugh Dickins * each hugepage. We have to make sure we get the 32814c887265SAdam Litke * first, for the page indexing below to work. 3282cb900f41SKirill A. Shutemov * 3283cb900f41SKirill A. Shutemov * Note that page table lock is not held when pte is null. 32844c887265SAdam Litke */ 3285a5516438SAndi Kleen pte = huge_pte_offset(mm, vaddr & huge_page_mask(h)); 3286cb900f41SKirill A. Shutemov if (pte) 3287cb900f41SKirill A. Shutemov ptl = huge_pte_lock(h, mm, pte); 32882a15efc9SHugh Dickins absent = !pte || huge_pte_none(huge_ptep_get(pte)); 328963551ae0SDavid Gibson 32902a15efc9SHugh Dickins /* 32912a15efc9SHugh Dickins * When coredumping, it suits get_dump_page if we just return 32923ae77f43SHugh Dickins * an error where there's an empty slot with no huge pagecache 32933ae77f43SHugh Dickins * to back it. This way, we avoid allocating a hugepage, and 32943ae77f43SHugh Dickins * the sparse dumpfile avoids allocating disk blocks, but its 32953ae77f43SHugh Dickins * huge holes still show up with zeroes where they need to be. 32962a15efc9SHugh Dickins */ 32973ae77f43SHugh Dickins if (absent && (flags & FOLL_DUMP) && 32983ae77f43SHugh Dickins !hugetlbfs_pagecache_present(h, vma, vaddr)) { 3299cb900f41SKirill A. Shutemov if (pte) 3300cb900f41SKirill A. Shutemov spin_unlock(ptl); 33012a15efc9SHugh Dickins remainder = 0; 33022a15efc9SHugh Dickins break; 33032a15efc9SHugh Dickins } 33042a15efc9SHugh Dickins 33059cc3a5bdSNaoya Horiguchi /* 33069cc3a5bdSNaoya Horiguchi * We need call hugetlb_fault for both hugepages under migration 33079cc3a5bdSNaoya Horiguchi * (in which case hugetlb_fault waits for the migration,) and 33089cc3a5bdSNaoya Horiguchi * hwpoisoned hugepages (in which case we need to prevent the 33099cc3a5bdSNaoya Horiguchi * caller from accessing to them.) In order to do this, we use 33109cc3a5bdSNaoya Horiguchi * here is_swap_pte instead of is_hugetlb_entry_migration and 33119cc3a5bdSNaoya Horiguchi * is_hugetlb_entry_hwpoisoned. This is because it simply covers 33129cc3a5bdSNaoya Horiguchi * both cases, and because we can't follow correct pages 33139cc3a5bdSNaoya Horiguchi * directly from any kind of swap entries. 
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long start = address;
	pte_t *ptep;
	pte_t pte;
	struct hstate *h = hstate_vma(vma);
	unsigned long pages = 0;

	BUG_ON(address >= end);
	flush_cache_range(vma, address, end);

	mmu_notifier_invalidate_range_start(mm, start, end);
	i_mmap_lock_write(vma->vm_file->f_mapping);
	for (; address < end; address += huge_page_size(h)) {
		spinlock_t *ptl;
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;
		ptl = huge_pte_lock(h, mm, ptep);
		if (huge_pmd_unshare(mm, &address, ptep)) {
			pages++;
			spin_unlock(ptl);
			continue;
		}
		pte = huge_ptep_get(ptep);
		if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
			spin_unlock(ptl);
			continue;
		}
		if (unlikely(is_hugetlb_entry_migration(pte))) {
			swp_entry_t entry = pte_to_swp_entry(pte);

			if (is_write_migration_entry(entry)) {
				pte_t newpte;

				make_migration_entry_read(&entry);
				newpte = swp_entry_to_pte(entry);
				set_huge_pte_at(mm, address, ptep, newpte);
				pages++;
			}
			spin_unlock(ptl);
			continue;
		}
		if (!huge_pte_none(pte)) {
			pte = huge_ptep_get_and_clear(mm, address, ptep);
			pte = pte_mkhuge(huge_pte_modify(pte, newprot));
			pte = arch_make_huge_pte(pte, vma, NULL, 0);
			set_huge_pte_at(mm, address, ptep, pte);
			pages++;
		}
		spin_unlock(ptl);
	}
	/*
	 * Must flush the TLB before releasing i_mmap_rwsem: x86's
	 * huge_pmd_unshare may have cleared our pud entry and done put_page
	 * on the page table: once we release i_mmap_rwsem, another task can
	 * do the final put_page and that page table can be reused and filled
	 * with junk.
	 */
	flush_tlb_range(vma, start, end);
	mmu_notifier_invalidate_range(mm, start, end);
	i_mmap_unlock_write(vma->vm_file->f_mapping);
	mmu_notifier_invalidate_range_end(mm, start, end);

	return pages << h->order;
}
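/*
 * Editor's illustration (an addition, not built): hugetlb_change_protection()
 * above is reached through an ordinary mprotect() on a hugetlb VMA. A
 * minimal sketch, assuming one free 2MB huge page:
 */
#if 0
#include <stdio.h>
#include <sys/mman.h>

#define LEN (2UL * 1024 * 1024)

int main(void)
{
	char *p = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	p[0] = 1;			/* populate the huge page */
	mprotect(p, LEN, PROT_READ);	/* rewrites the huge PTE above */
	printf("%d\n", p[0]);		/* reads still work */
	/* p[0] = 2; would now raise SIGSEGV */
	return 0;
}
#endif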
int hugetlb_reserve_pages(struct inode *inode,
					long from, long to,
					struct vm_area_struct *vma,
					vm_flags_t vm_flags)
{
	long ret, chg;
	struct hstate *h = hstate_inode(inode);
	struct hugepage_subpool *spool = subpool_inode(inode);
	struct resv_map *resv_map;

	/*
	 * Only apply the hugepage reservation if asked. At fault time, an
	 * attempt will be made for VM_NORESERVE to allocate a page
	 * without using reserves.
	 */
	if (vm_flags & VM_NORESERVE)
		return 0;

	/*
	 * Shared mappings base their reservation on the number of pages that
	 * are already allocated on behalf of the file. Private mappings need
	 * to reserve the full area even if read-only, as mprotect() may be
	 * called to make the mapping read-write. Assume !vma is a shm mapping.
	 */
	if (!vma || vma->vm_flags & VM_MAYSHARE) {
		resv_map = inode_resv_map(inode);

		chg = region_chg(resv_map, from, to);

	} else {
		resv_map = resv_map_alloc();
		if (!resv_map)
			return -ENOMEM;

		chg = to - from;

		set_vma_resv_map(vma, resv_map);
		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
	}

	if (chg < 0) {
		ret = chg;
		goto out_err;
	}
	/* There must be enough pages in the subpool for the mapping */
	if (hugepage_subpool_get_pages(spool, chg)) {
		ret = -ENOSPC;
		goto out_err;
	}

	/*
	 * Check that enough hugepages are available for the reservation.
	 * Hand the pages back to the subpool if there are not.
	 */
	ret = hugetlb_acct_memory(h, chg);
	if (ret < 0) {
		hugepage_subpool_put_pages(spool, chg);
		goto out_err;
	}

	/*
	 * Account for the reservations made. Shared mappings record regions
	 * that have reservations as they are shared by multiple VMAs.
	 * When the last VMA disappears, the region map says how much
	 * the reservation was and the page cache tells how much of
	 * the reservation was consumed. Private mappings are per-VMA and
	 * only the consumed reservations are tracked. When the VMA
	 * disappears, the original reservation is the VMA size and the
	 * consumed reservations are stored in the map. Hence, nothing
	 * else has to be done for private mappings here.
	 */
	if (!vma || vma->vm_flags & VM_MAYSHARE)
		region_add(resv_map, from, to);
	return 0;
out_err:
	if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
		kref_put(&resv_map->refs, resv_map_release);
	return ret;
}
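/*
 * Editor's illustration (an addition, not built): because the reservation is
 * taken here at mmap() time, a shared mapping that exceeds the pool fails
 * with ENOMEM up front instead of delivering SIGBUS at first touch. A sketch;
 * the mount point and the oversized length are assumptions:
 */
#if 0
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	/* assumes a hugetlbfs mount at /dev/hugepages */
	int fd = open("/dev/hugepages/demo", O_CREAT | O_RDWR, 0600);
	size_t len = 1024UL * 2 * 1024 * 1024;	/* 1024 x 2MB pages */

	if (fd < 0)
		return 1;
	/* with a smaller pool, the reservation fails immediately: */
	if (mmap(NULL, len, PROT_READ | PROT_WRITE,
		 MAP_SHARED, fd, 0) == MAP_FAILED)
		printf("mmap failed: errno=%d (ENOMEM=%d)\n", errno, ENOMEM);
	close(fd);
	return 0;
}
#endif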
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
{
	struct hstate *h = hstate_inode(inode);
	struct resv_map *resv_map = inode_resv_map(inode);
	long chg = 0;
	struct hugepage_subpool *spool = subpool_inode(inode);

	if (resv_map)
		chg = region_truncate(resv_map, offset);
	spin_lock(&inode->i_lock);
	inode->i_blocks -= (blocks_per_huge_page(h) * freed);
	spin_unlock(&inode->i_lock);

	hugepage_subpool_put_pages(spool, (chg - freed));
	hugetlb_acct_memory(h, -(chg - freed));
}

#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
static unsigned long page_table_shareable(struct vm_area_struct *svma,
				struct vm_area_struct *vma,
				unsigned long addr, pgoff_t idx)
{
	unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
				svma->vm_start;
	unsigned long sbase = saddr & PUD_MASK;
	unsigned long s_end = sbase + PUD_SIZE;

	/* Allow segments to share if only one is marked locked */
	unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED;
	unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED;

	/*
	 * Match the virtual addresses, permissions and the alignment of the
	 * page table page.
	 */
	if (pmd_index(addr) != pmd_index(saddr) ||
	    vm_flags != svm_flags ||
	    sbase < svma->vm_start || svma->vm_end < s_end)
		return 0;

	return saddr;
}
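/*
 * Editor's illustration (an addition, not built): page_table_shareable()
 * above only matches when the candidate VMA spans the whole PUD-aligned
 * range around the shared address. A worked sketch of that alignment check,
 * assuming x86_64's 1GB PUD_SIZE; the sample address is arbitrary:
 */
#if 0
#include <assert.h>

#define PUD_SHIFT	30		/* 1GB on x86_64 */
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE - 1))

int main(void)
{
	unsigned long saddr = 0x7f0040200000UL;	/* address in svma */
	unsigned long sbase = saddr & PUD_MASK;	/* shared pmd page covers */
	unsigned long s_end = sbase + PUD_SIZE;	/* [sbase, s_end) */

	assert(sbase == 0x7f0040000000UL);
	assert(s_end == 0x7f0080000000UL);
	/* sharing requires svma->vm_start <= sbase && s_end <= svma->vm_end */
	return 0;
}
#endif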
static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
{
	unsigned long base = addr & PUD_MASK;
	unsigned long end = base + PUD_SIZE;

	/*
	 * Check for proper vm_flags and page table alignment.
	 */
	if (vma->vm_flags & VM_MAYSHARE &&
	    vma->vm_start <= base && end <= vma->vm_end)
		return 1;
	return 0;
}

/*
 * Search for a shareable pmd page for hugetlb. In any case it calls
 * pmd_alloc() and returns the corresponding pte. While this is not necessary
 * for the !shared pmd case, because we can allocate the pmd later as well, it
 * makes the code much cleaner. pmd allocation is essential for the shared
 * case, because the pud has to be populated inside the same i_mmap_rwsem
 * section - otherwise racing tasks could either miss the sharing (see
 * huge_pte_offset) or select a bad pmd for sharing.
 */
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
{
	struct vm_area_struct *vma = find_vma(mm, addr);
	struct address_space *mapping = vma->vm_file->f_mapping;
	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
			vma->vm_pgoff;
	struct vm_area_struct *svma;
	unsigned long saddr;
	pte_t *spte = NULL;
	pte_t *pte;
	spinlock_t *ptl;

	if (!vma_shareable(vma, addr))
		return (pte_t *)pmd_alloc(mm, pud, addr);

	i_mmap_lock_write(mapping);
	vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
		if (svma == vma)
			continue;

		saddr = page_table_shareable(svma, vma, addr, idx);
		if (saddr) {
			spte = huge_pte_offset(svma->vm_mm, saddr);
			if (spte) {
				get_page(virt_to_page(spte));
				break;
			}
		}
	}

	if (!spte)
		goto out;

	ptl = huge_pte_lockptr(hstate_vma(vma), mm, spte);
	spin_lock(ptl);
	if (pud_none(*pud))
		pud_populate(mm, pud,
				(pmd_t *)((unsigned long)spte & PAGE_MASK));
	else
		put_page(virt_to_page(spte));
	spin_unlock(ptl);
out:
	pte = (pte_t *)pmd_alloc(mm, pud, addr);
	i_mmap_unlock_write(mapping);
	return pte;
}
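/*
 * Editor's illustration (an addition, not built): huge_pmd_share() above
 * lets two processes that map the same hugetlbfs file reuse one PMD page. A
 * sketch; the mount point is an assumption, and whether sharing actually
 * happens depends on both mappings being PUD-aligned, which the address
 * hint below merely encourages:
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

#define LEN (1UL << 30)	/* one PUD's worth of 2MB pages */

int main(void)
{
	int fd = open("/dev/hugepages/shared", O_CREAT | O_RDWR, 0600);
	char *p;

	if (fd < 0)
		return 1;
	p = mmap((void *)(64UL << 30), LEN, PROT_READ | PROT_WRITE,
		 MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;
	if (fork() == 0) {
		memset(p, 1, LEN);	/* child faults through the pmd */
		_exit(0);
	}
	memset(p, 1, LEN);		/* parent can reuse the same pmd page */
	wait(NULL);
	return 0;
}
#endif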
/*
 * Unmap a huge page backed by a shared pte.
 *
 * The hugetlb pte page is refcounted at the time of mapping. If the pte is
 * shared, indicated by page_count > 1, unmap is achieved by clearing the pud
 * and decrementing the ref count. If count == 1, the pte page is not shared.
 *
 * Called with the page table lock held.
 *
 * returns: 1 successfully unmapped a shared pte page
 *	    0 the underlying pte page is not shared, or it is the last user
 */
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	pgd_t *pgd = pgd_offset(mm, *addr);
	pud_t *pud = pud_offset(pgd, *addr);

	BUG_ON(page_count(virt_to_page(ptep)) == 0);
	if (page_count(virt_to_page(ptep)) == 1)
		return 0;

	pud_clear(pud);
	put_page(virt_to_page(ptep));
	*addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
	return 1;
}
#define want_pmd_share()	(1)
#else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
{
	return NULL;
}
#define want_pmd_share()	(0)
#endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */

#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		if (sz == PUD_SIZE) {
			pte = (pte_t *)pud;
		} else {
			BUG_ON(sz != PMD_SIZE);
			if (want_pmd_share() && pud_none(*pud))
				pte = huge_pmd_share(mm, addr, pud);
			else
				pte = (pte_t *)pmd_alloc(mm, pud, addr);
		}
	}
	BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));

	return pte;
}
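/*
 * Editor's illustration (an addition, not built): huge_pte_alloc() above
 * stops the walk at the PUD level for 1GB pages and at the PMD level for
 * 2MB pages. A worked sketch of the x86_64 index split those walks rely on;
 * the sample address is arbitrary:
 */
#if 0
#include <assert.h>

/* x86_64 4-level split: bits 47-39 PGD, 38-30 PUD, 29-21 PMD, 20-12 PTE */
#define PGDIR_SHIFT	39
#define PUD_SHIFT	30
#define PMD_SHIFT	21
#define PTRS_PER	512

int main(void)
{
	unsigned long addr = 0x00007f1234600000UL;

	assert(((addr >> PGDIR_SHIFT) & (PTRS_PER - 1)) == 254);
	assert(((addr >> PUD_SHIFT) & (PTRS_PER - 1)) == 72);
	assert(((addr >> PMD_SHIFT) & (PTRS_PER - 1)) == 419);
	/* a 2MB mapping consumes one PMD entry; a 1GB one, one PUD entry */
	return 0;
}
#endif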
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd_present(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (pud_present(*pud)) {
			if (pud_huge(*pud))
				return (pte_t *)pud;
			pmd = pmd_offset(pud, addr);
		}
	}
	return (pte_t *) pmd;
}

#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */

/*
 * These functions are overridable if your architecture needs its own
 * behavior.
 */
struct page * __weak
follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write)
{
	return ERR_PTR(-EINVAL);
}

struct page * __weak
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int flags)
{
	struct page *page = NULL;
	spinlock_t *ptl;
retry:
	ptl = pmd_lockptr(mm, pmd);
	spin_lock(ptl);
	/*
	 * Make sure that the address range covered by this pmd is not
	 * unmapped from other threads.
	 */
	if (!pmd_huge(*pmd))
		goto out;
	if (pmd_present(*pmd)) {
		page = pte_page(*(pte_t *)pmd) +
			((address & ~PMD_MASK) >> PAGE_SHIFT);
		if (flags & FOLL_GET)
			get_page(page);
	} else {
		if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) {
			spin_unlock(ptl);
			__migration_entry_wait(mm, (pte_t *)pmd, ptl);
			goto retry;
		}
		/*
		 * A hwpoisoned entry is treated as no_page_table in
		 * follow_page_mask().
		 */
	}
out:
	spin_unlock(ptl);
	return page;
}
struct page * __weak
follow_huge_pud(struct mm_struct *mm, unsigned long address,
		pud_t *pud, int flags)
{
	if (flags & FOLL_GET)
		return NULL;

	return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
}

#ifdef CONFIG_MEMORY_FAILURE

/* Should be called with hugetlb_lock held */
static int is_hugepage_on_freelist(struct page *hpage)
{
	struct page *page;
	struct page *tmp;
	struct hstate *h = page_hstate(hpage);
	int nid = page_to_nid(hpage);

	list_for_each_entry_safe(page, tmp, &h->hugepage_freelists[nid], lru)
		if (page == hpage)
			return 1;
	return 0;
}

/*
 * This function is called from the memory-failure code.
 * Assume the caller holds the page lock of the head page.
 */
int dequeue_hwpoisoned_huge_page(struct page *hpage)
{
	struct hstate *h = page_hstate(hpage);
	int nid = page_to_nid(hpage);
	int ret = -EBUSY;

	spin_lock(&hugetlb_lock);
	if (is_hugepage_on_freelist(hpage)) {
		/*
		 * A hwpoisoned hugepage isn't linked to the activelist or
		 * freelist, but a dangling hpage->lru can trigger list-debug
		 * warnings (this happens when we call unpoison_memory() on
		 * it), so let it point to itself with list_del_init().
		 */
		list_del_init(&hpage->lru);
		set_page_refcounted(hpage);
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
		ret = 0;
	}
	spin_unlock(&hugetlb_lock);
	return ret;
}
#endif
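/*
 * Editor's illustration (an addition, not built): with CONFIG_MEMORY_FAILURE,
 * the dequeue path above can be exercised from privileged test code by
 * injecting poison with madvise(MADV_HWPOISON). A sketch; it needs
 * CAP_SYS_ADMIN, and the MADV_HWPOISON constant may require feature-test
 * macros depending on the libc:
 */
#if 0
#include <sys/mman.h>

#define LEN (2UL * 1024 * 1024)

int main(void)
{
	char *p = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	p[0] = 1;			/* instantiate the huge page */
	/* poison it; later accesses raise SIGBUS, the page leaves the pool */
	madvise(p, LEN, MADV_HWPOISON);
	return 0;
}
#endif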
bool isolate_huge_page(struct page *page, struct list_head *list)
{
	VM_BUG_ON_PAGE(!PageHead(page), page);
	if (!get_page_unless_zero(page))
		return false;
	spin_lock(&hugetlb_lock);
	list_move_tail(&page->lru, list);
	spin_unlock(&hugetlb_lock);
	return true;
}

void putback_active_hugepage(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHead(page), page);
	spin_lock(&hugetlb_lock);
	list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
	spin_unlock(&hugetlb_lock);
	put_page(page);
}

bool is_hugepage_active(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	/*
	 * This function can be called for a tail page because the caller,
	 * scan_movable_pages, scans through a given pfn-range which typically
	 * covers one memory block. In systems using gigantic hugepages (1GB
	 * on x86_64), a hugepage is larger than a memory block, and we don't
	 * support migrating such large hugepages for now, so return false
	 * when called for tail pages.
	 */
	if (PageTail(page))
		return false;
	/*
	 * The refcount of a hwpoisoned hugepage is 1, but such pages are not
	 * active, so we should return false for them.
	 */
	if (unlikely(PageHWPoison(page)))
		return false;
	return page_count(page) > 0;
}
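/*
 * Editor's illustration (an addition, not built): isolate_huge_page() and
 * putback_active_hugepage() above are the hooks hugepage migration uses;
 * from userspace that path can be driven with move_pages(2) on a hugetlb
 * region. A sketch; it assumes a NUMA machine with a node 1 and libnuma
 * (link with -lnuma):
 */
#if 0
#include <numaif.h>
#include <sys/mman.h>

#define LEN (2UL * 1024 * 1024)

int main(void)
{
	char *p = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	void *pages[1] = { p };
	int node = 1, status = 0;

	if (p == MAP_FAILED)
		return 1;
	p[0] = 1;			/* allocate the page on some node */
	/* the migration core isolates, copies and puts back the hugepage */
	move_pages(0, 1, pages, &node, &status, MPOL_MF_MOVE);
	return 0;
}
#endif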