/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include <linux/hugetlb.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages;
unsigned long max_huge_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static unsigned int nr_huge_pages_node[MAX_NUMNODES];
static unsigned int free_huge_pages_node[MAX_NUMNODES];
/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);

static void clear_huge_page(struct page *page, unsigned long addr)
{
	int i;

	might_sleep();
	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
		cond_resched();
		clear_user_highpage(page + i, addr);
	}
}

static void copy_huge_page(struct page *dst, struct page *src,
			   unsigned long addr, struct vm_area_struct *vma)
{
	int i;

	might_sleep();
	for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
		cond_resched();
		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
	}
}

static void enqueue_huge_page(struct page *page)
{
	int nid = page_to_nid(page);
	list_add(&page->lru, &hugepage_freelists[nid]);
	free_huge_pages++;
	free_huge_pages_node[nid]++;
}

static struct page *dequeue_huge_page(struct vm_area_struct *vma,
				unsigned long address)
{
	int nid;
	struct page *page = NULL;
	struct zonelist *zonelist = huge_zonelist(vma, address);
	struct zone **z;

	for (z = zonelist->zones; *z; z++) {
		nid = zone_to_nid(*z);
		if (cpuset_zone_allowed_softwall(*z, GFP_HIGHUSER) &&
		    !list_empty(&hugepage_freelists[nid]))
			break;
	}

	if (*z) {
		page = list_entry(hugepage_freelists[nid].next,
				  struct page, lru);
		list_del(&page->lru);
		free_huge_pages--;
		free_huge_pages_node[nid]--;
	}
	return page;
}

static void free_huge_page(struct page *page)
{
	BUG_ON(page_count(page));

	INIT_LIST_HEAD(&page->lru);

	spin_lock(&hugetlb_lock);
	enqueue_huge_page(page);
	spin_unlock(&hugetlb_lock);
}

static int alloc_fresh_huge_page(void)
{
	static int prev_nid;
	struct page *page;
	static DEFINE_SPINLOCK(nid_lock);
	int nid;

	spin_lock(&nid_lock);
	nid = next_node(prev_nid, node_online_map);
	if (nid == MAX_NUMNODES)
		nid = first_node(node_online_map);
	prev_nid = nid;
	spin_unlock(&nid_lock);

	page = alloc_pages_node(nid, GFP_HIGHUSER|__GFP_COMP|__GFP_NOWARN,
				HUGETLB_PAGE_ORDER);
	if (page) {
		set_compound_page_dtor(page, free_huge_page);
		spin_lock(&hugetlb_lock);
		nr_huge_pages++;
		nr_huge_pages_node[page_to_nid(page)]++;
		spin_unlock(&hugetlb_lock);
		put_page(page); /* free it into the hugepage allocator */
		return 1;
	}
	return 0;
}

static struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr)
{
	struct page *page;

	spin_lock(&hugetlb_lock);
	if (vma->vm_flags & VM_MAYSHARE)
		resv_huge_pages--;
	else if (free_huge_pages <= resv_huge_pages)
		goto fail;

	page = dequeue_huge_page(vma, addr);
	if (!page)
		goto fail;

	spin_unlock(&hugetlb_lock);
	set_page_refcounted(page);
	return page;

fail:
	if (vma->vm_flags & VM_MAYSHARE)
		resv_huge_pages++;
	spin_unlock(&hugetlb_lock);
	return NULL;
}
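
/*
 * Worked example of the reservation check in alloc_huge_page() above
 * (illustrative, derived from the code): with free_huge_pages == 3 and
 * resv_huge_pages == 3, every free page is already spoken for by a
 * VM_MAYSHARE reservation, so a non-reserving (private) allocation
 * fails even though the free lists are not empty.  A shared mapping
 * instead consumes one unit of its reservation (resv_huge_pages--) and
 * proceeds to dequeue_huge_page(); on failure the unit is given back.
 */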

static int __init hugetlb_init(void)
{
	unsigned long i;

	if (HPAGE_SHIFT == 0)
		return 0;

	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&hugepage_freelists[i]);

	for (i = 0; i < max_huge_pages; ++i) {
		if (!alloc_fresh_huge_page())
			break;
	}
	max_huge_pages = free_huge_pages = nr_huge_pages = i;
	printk("Total HugeTLB memory allocated, %ld\n", free_huge_pages);
	return 0;
}
module_init(hugetlb_init);

static int __init hugetlb_setup(char *s)
{
	if (sscanf(s, "%lu", &max_huge_pages) <= 0)
		max_huge_pages = 0;
	return 1;
}
__setup("hugepages=", hugetlb_setup);

static unsigned int cpuset_mems_nr(unsigned int *array)
{
	int node;
	unsigned int nr = 0;

	for_each_node_mask(node, cpuset_current_mems_allowed)
		nr += array[node];

	return nr;
}

#ifdef CONFIG_SYSCTL
static void update_and_free_page(struct page *page)
{
	int i;

	nr_huge_pages--;
	nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
				1 << PG_referenced | 1 << PG_dirty |
				1 << PG_active | 1 << PG_reserved |
				1 << PG_private | 1 << PG_writeback);
	}
	page[1].lru.next = NULL;
	set_page_refcounted(page);
	__free_pages(page, HUGETLB_PAGE_ORDER);
}

#ifdef CONFIG_HIGHMEM
static void try_to_free_low(unsigned long count)
{
	int i;

	for (i = 0; i < MAX_NUMNODES; ++i) {
		struct page *page, *next;
		list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(page);
			free_huge_pages--;
			free_huge_pages_node[page_to_nid(page)]--;
			if (count >= nr_huge_pages)
				return;
		}
	}
}
#else
static inline void try_to_free_low(unsigned long count)
{
}
#endif

static unsigned long set_max_huge_pages(unsigned long count)
{
	while (count > nr_huge_pages) {
		if (!alloc_fresh_huge_page())
			return nr_huge_pages;
	}
	if (count >= nr_huge_pages)
		return nr_huge_pages;

	spin_lock(&hugetlb_lock);
	count = max(count, resv_huge_pages);
	try_to_free_low(count);
	while (count < nr_huge_pages) {
		struct page *page = dequeue_huge_page(NULL, 0);
		if (!page)
			break;
		update_and_free_page(page);
	}
	spin_unlock(&hugetlb_lock);
	return nr_huge_pages;
}

int hugetlb_sysctl_handler(struct ctl_table *table, int write,
			   struct file *file, void __user *buffer,
			   size_t *length, loff_t *ppos)
{
	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
	max_huge_pages = set_max_huge_pages(max_huge_pages);
	return 0;
}
#endif /* CONFIG_SYSCTL */

int hugetlb_report_meminfo(char *buf)
{
	return sprintf(buf,
			"HugePages_Total: %5lu\n"
			"HugePages_Free:  %5lu\n"
			"HugePages_Rsvd:  %5lu\n"
			"Hugepagesize:    %5lu kB\n",
			nr_huge_pages,
			free_huge_pages,
			resv_huge_pages,
			HPAGE_SIZE/1024);
}

int hugetlb_report_node_meminfo(int nid, char *buf)
{
	return sprintf(buf,
		"Node %d HugePages_Total: %5u\n"
		"Node %d HugePages_Free:  %5u\n",
		nid, nr_huge_pages_node[nid],
		nid, free_huge_pages_node[nid]);
}

/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
	return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
}

/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we
 * get this far.
 */
static struct page *hugetlb_nopage(struct vm_area_struct *vma,
				unsigned long address, int *unused)
{
	BUG();
	return NULL;
}

struct vm_operations_struct hugetlb_vm_ops = {
	.nopage = hugetlb_nopage,
};

static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
				int writable)
{
	pte_t entry;

	if (writable) {
		entry =
		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	} else {
		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	}
	entry = pte_mkyoung(entry);
	entry = pte_mkhuge(entry);

	return entry;
}

static void set_huge_ptep_writable(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep)
{
	pte_t entry;

	entry = pte_mkwrite(pte_mkdirty(*ptep));
	if (ptep_set_access_flags(vma, address, ptep, entry, 1)) {
		update_mmu_cache(vma, address, entry);
		lazy_mmu_prot_update(entry);
	}
}
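
/*
 * Note (illustrative, derived from the code below): the "cow" test in
 * copy_hugetlb_page_range() is true only for private writable mappings,
 * i.e. VM_MAYWRITE set with VM_SHARED clear.  For those, both parent
 * and child PTEs end up write-protected at fork() time, so the first
 * write from either side faults and triggers hugetlb_cow().
 */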

int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr;
	int cow;

	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		src_pte = huge_pte_offset(src, addr);
		if (!src_pte)
			continue;
		dst_pte = huge_pte_alloc(dst, addr);
		if (!dst_pte)
			goto nomem;
		spin_lock(&dst->page_table_lock);
		spin_lock(&src->page_table_lock);
		if (!pte_none(*src_pte)) {
			if (cow)
				ptep_set_wrprotect(src, addr, src_pte);
			entry = *src_pte;
			ptepage = pte_page(entry);
			get_page(ptepage);
			set_huge_pte_at(dst, addr, dst_pte, entry);
		}
		spin_unlock(&src->page_table_lock);
		spin_unlock(&dst->page_table_lock);
	}
	return 0;

nomem:
	return -ENOMEM;
}

void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *ptep;
	pte_t pte;
	struct page *page;
	struct page *tmp;
	/*
	 * A page gathering list, protected by per file i_mmap_lock. The
	 * lock is used to avoid list corruption from multiple unmapping
	 * of the same page since we are using page->lru.
	 */
	LIST_HEAD(page_list);

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON(start & ~HPAGE_MASK);
	BUG_ON(end & ~HPAGE_MASK);

	spin_lock(&mm->page_table_lock);
	for (address = start; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;

		if (huge_pmd_unshare(mm, &address, ptep))
			continue;

		pte = huge_ptep_get_and_clear(mm, address, ptep);
		if (pte_none(pte))
			continue;

		page = pte_page(pte);
		if (pte_dirty(pte))
			set_page_dirty(page);
		list_add(&page->lru, &page_list);
	}
	spin_unlock(&mm->page_table_lock);
	flush_tlb_range(vma, start, end);
	list_for_each_entry_safe(page, tmp, &page_list, lru) {
		list_del(&page->lru);
		put_page(page);
	}
}

void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end)
{
	/*
	 * It is undesirable to test vma->vm_file as it should be non-null
	 * for a valid hugetlb area. However, vm_file will be NULL in the
	 * error cleanup path of do_mmap_pgoff. When the hugetlbfs ->mmap
	 * method fails, do_mmap_pgoff() nullifies vma->vm_file before
	 * calling this function to clean up. Since no pte has actually
	 * been set up, it is safe to do nothing in this case.
	 */
	if (vma->vm_file) {
		spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
		__unmap_hugepage_range(vma, start, end);
		spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
	}
}

static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, pte_t pte)
{
	struct page *old_page, *new_page;
	int avoidcopy;

	old_page = pte_page(pte);

	/* If no-one else is actually using this page, avoid the copy
	 * and just make the page writable */
	avoidcopy = (page_count(old_page) == 1);
	if (avoidcopy) {
		set_huge_ptep_writable(vma, address, ptep);
		return VM_FAULT_MINOR;
	}

	page_cache_get(old_page);
	new_page = alloc_huge_page(vma, address);

	if (!new_page) {
		page_cache_release(old_page);
		return VM_FAULT_OOM;
	}

	spin_unlock(&mm->page_table_lock);
	copy_huge_page(new_page, old_page, address, vma);
	spin_lock(&mm->page_table_lock);

	ptep = huge_pte_offset(mm, address & HPAGE_MASK);
	if (likely(pte_same(*ptep, pte))) {
		/* Break COW */
		set_huge_pte_at(mm, address, ptep,
				make_huge_pte(vma, new_page, 1));
		/* Make the old page be freed below */
		new_page = old_page;
	}
	page_cache_release(new_page);
	page_cache_release(old_page);
	return VM_FAULT_MINOR;
}
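
/*
 * Note (illustrative, derived from the code above): hugetlb_cow() drops
 * mm->page_table_lock around copy_huge_page() because copying a huge
 * page calls cond_resched() and may sleep.  The pte_same() recheck
 * after retaking the lock detects a racing update; if the PTE changed,
 * the freshly copied page is simply released again.
 */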

int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, int write_access)
{
	int ret = VM_FAULT_SIGBUS;
	unsigned long idx;
	unsigned long size;
	struct page *page;
	struct address_space *mapping;
	pte_t new_pte;

	mapping = vma->vm_file->f_mapping;
	idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
		+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));

	/*
	 * Use page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */
retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
		size = i_size_read(mapping->host) >> HPAGE_SHIFT;
		if (idx >= size)
			goto out;
		if (hugetlb_get_quota(mapping))
			goto out;
		page = alloc_huge_page(vma, address);
		if (!page) {
			hugetlb_put_quota(mapping);
			ret = VM_FAULT_OOM;
			goto out;
		}
		clear_huge_page(page, address);

		if (vma->vm_flags & VM_SHARED) {
			int err;

			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
			if (err) {
				put_page(page);
				hugetlb_put_quota(mapping);
				if (err == -EEXIST)
					goto retry;
				goto out;
			}
		} else
			lock_page(page);
	}

	spin_lock(&mm->page_table_lock);
	size = i_size_read(mapping->host) >> HPAGE_SHIFT;
	if (idx >= size)
		goto backout;

	ret = VM_FAULT_MINOR;
	if (!pte_none(*ptep))
		goto backout;

	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, address, ptep, new_pte);

	if (write_access && !(vma->vm_flags & VM_SHARED)) {
		/* Optimization, do the COW without a second fault */
		ret = hugetlb_cow(mm, vma, address, ptep, new_pte);
	}

	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(&mm->page_table_lock);
	hugetlb_put_quota(mapping);
	unlock_page(page);
	put_page(page);
	goto out;
}

int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, int write_access)
{
	pte_t *ptep;
	pte_t entry;
	int ret;
	static DEFINE_MUTEX(hugetlb_instantiation_mutex);

	ptep = huge_pte_alloc(mm, address);
	if (!ptep)
		return VM_FAULT_OOM;

	/*
	 * Serialize hugepage allocation and instantiation, so that we don't
	 * get spurious allocation failures if two CPUs race to instantiate
	 * the same page in the page cache.
	 */
	mutex_lock(&hugetlb_instantiation_mutex);
	entry = *ptep;
	if (pte_none(entry)) {
		ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
		mutex_unlock(&hugetlb_instantiation_mutex);
		return ret;
	}

	ret = VM_FAULT_MINOR;

	spin_lock(&mm->page_table_lock);
	/* Check for a racing update before calling hugetlb_cow */
	if (likely(pte_same(entry, *ptep)))
		if (write_access && !pte_write(entry))
			ret = hugetlb_cow(mm, vma, address, ptep, entry);
	spin_unlock(&mm->page_table_lock);
	mutex_unlock(&hugetlb_instantiation_mutex);

	return ret;
}

int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
			struct page **pages, struct vm_area_struct **vmas,
			unsigned long *position, int *length, int i)
{
	unsigned long pfn_offset;
	unsigned long vaddr = *position;
	int remainder = *length;

	spin_lock(&mm->page_table_lock);
	while (vaddr < vma->vm_end && remainder) {
		pte_t *pte;
		struct page *page;

		/*
		 * Some archs (sparc64, sh*) have multiple pte_ts to
		 * each hugepage.  We have to make sure we get the
		 * first, for the page indexing below to work.
		 */
		pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);

		if (!pte || pte_none(*pte)) {
			int ret;

			spin_unlock(&mm->page_table_lock);
			ret = hugetlb_fault(mm, vma, vaddr, 0);
			spin_lock(&mm->page_table_lock);
			if (ret == VM_FAULT_MINOR)
				continue;

			remainder = 0;
			if (!i)
				i = -EFAULT;
			break;
		}

		pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
		page = pte_page(*pte);
same_page:
		if (pages) {
			get_page(page);
			pages[i] = page + pfn_offset;
		}

		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		++pfn_offset;
		--remainder;
		++i;
		if (vaddr < vma->vm_end && remainder &&
				pfn_offset < HPAGE_SIZE/PAGE_SIZE) {
			/*
			 * We use pfn_offset to avoid touching the pageframes
			 * of this compound page.
			 */
			goto same_page;
		}
	}
	spin_unlock(&mm->page_table_lock);
	*length = remainder;
	*position = vaddr;

	return i;
}

void hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long start = address;
	pte_t *ptep;
	pte_t pte;

	BUG_ON(address >= end);
	flush_cache_range(vma, address, end);

	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
	spin_lock(&mm->page_table_lock);
	for (; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;
		if (huge_pmd_unshare(mm, &address, ptep))
			continue;
		if (!pte_none(*ptep)) {
			pte = huge_ptep_get_and_clear(mm, address, ptep);
			pte = pte_mkhuge(pte_modify(pte, newprot));
			set_huge_pte_at(mm, address, ptep, pte);
			lazy_mmu_prot_update(pte);
		}
	}
	spin_unlock(&mm->page_table_lock);
	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);

	flush_tlb_range(vma, start, end);
}
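
/*
 * Reservation map bookkeeping (illustrative summary of the code below):
 * hugetlbfs tracks reserved file offsets as a sorted list of [from, to)
 * file_region entries on inode->i_mapping->private_list.  region_chg()
 * computes how many pages a proposed reservation would add beyond what
 * is already reserved, region_add() commits it (merging overlapping
 * entries), and region_truncate() releases reservations past a new end.
 */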
struct file_region {
	struct list_head link;
	long from;
	long to;
};

static long region_add(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg, *trg;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;

	/* Check for and consume any regions we now overlap with. */
	nrg = rg;
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			break;

		/* If this area reaches higher, extend our area to
		 * include it completely.  If this is not the first area
		 * which we intend to reuse, free it. */
		if (rg->to > t)
			t = rg->to;
		if (rg != nrg) {
			list_del(&rg->link);
			kfree(rg);
		}
	}
	nrg->from = f;
	nrg->to = t;
	return 0;
}

static long region_chg(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg;
	long chg = 0;

	/* Locate the region we are before or in. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* If we are below the current region then a new region is required.
	 * Subtle: allocate a new region at the position but make it zero
	 * size such that we can guarantee to record the reservation.
	 */
	if (&rg->link == head || t < rg->from) {
		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
		if (!nrg)
			return -ENOMEM;
		nrg->from = f;
		nrg->to = f;
		INIT_LIST_HEAD(&nrg->link);
		list_add(&nrg->link, rg->link.prev);

		return t - f;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;
	chg = t - f;

	/* Check for and consume any regions we now overlap with. */
	list_for_each_entry(rg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			return chg;

		/* We overlap with this area: if it extends further than
		 * us then we must extend ourselves.  Account for its
		 * existing reservation. */
		if (rg->to > t) {
			chg += rg->to - t;
			t = rg->to;
		}
		chg -= rg->to - rg->from;
	}
	return chg;
}

static long region_truncate(struct list_head *head, long end)
{
	struct file_region *rg, *trg;
	long chg = 0;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (end <= rg->to)
			break;
	if (&rg->link == head)
		return 0;

	/* If we are in the middle of a region then adjust it. */
	if (end > rg->from) {
		chg = rg->to - end;
		rg->to = end;
		rg = list_entry(rg->link.next, typeof(*rg), link);
	}

	/* Drop any remaining regions. */
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		chg += rg->to - rg->from;
		list_del(&rg->link);
		kfree(rg);
	}
	return chg;
}

static int hugetlb_acct_memory(long delta)
{
	int ret = -ENOMEM;

	spin_lock(&hugetlb_lock);
	if ((delta + resv_huge_pages) <= free_huge_pages) {
		resv_huge_pages += delta;
		ret = 0;
	}
	spin_unlock(&hugetlb_lock);
	return ret;
}

int hugetlb_reserve_pages(struct inode *inode, long from, long to)
{
	long ret, chg;

	chg = region_chg(&inode->i_mapping->private_list, from, to);
	if (chg < 0)
		return chg;
	/*
	 * When cpuset is configured, it breaks strict hugetlb page
	 * reservation because the accounting is done on a global variable.
	 * Such a reservation is meaningless in the presence of cpusets,
	 * since it is not checked against page availability for the
	 * current cpuset. An application can still be OOM-killed by the
	 * kernel for lack of free hugetlb pages in the cpuset the task
	 * runs in. Enforcing strict accounting with cpusets is almost
	 * impossible (or too ugly) because cpusets are too fluid: tasks
	 * and memory nodes can be moved between cpusets dynamically.
	 *
	 * The change of semantics for shared hugetlb mappings with
	 * cpusets is undesirable. However, in order to preserve some of
	 * the semantics, we fall back to checking against the current
	 * free page availability as a best attempt, hopefully minimizing
	 * the impact of the semantics change that cpusets introduce.
	 */
	if (chg > cpuset_mems_nr(free_huge_pages_node))
		return -ENOMEM;

	ret = hugetlb_acct_memory(chg);
	if (ret < 0)
		return ret;
	region_add(&inode->i_mapping->private_list, from, to);
	return 0;
}

void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
{
	long chg = region_truncate(&inode->i_mapping->private_list, offset);
	hugetlb_acct_memory(freed - chg);
}
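
/*
 * Worked example of the reservation map (illustrative, derived from the
 * code above): with a single region [0,4) on private_list, a call to
 * region_chg(head, 2, 6) overlaps it, so only 6 - 4 = 2 additional
 * pages need reserving and 2 is returned; the following
 * region_add(head, 2, 6) merges the ranges into one region [0,6).  A
 * later region_truncate(head, 3) trims that region back to [0,3) and
 * returns 3, the size of the dropped tail, which
 * hugetlb_unreserve_pages() uses to adjust resv_huge_pages.
 */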