/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include <linux/hugetlb.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages;
static unsigned long surplus_huge_pages;
unsigned long max_huge_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static unsigned int nr_huge_pages_node[MAX_NUMNODES];
static unsigned int free_huge_pages_node[MAX_NUMNODES];
static unsigned int surplus_huge_pages_node[MAX_NUMNODES];
static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;

/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);

static void clear_huge_page(struct page *page, unsigned long addr)
{
	int i;

	might_sleep();
	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
		cond_resched();
		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
	}
}

static void copy_huge_page(struct page *dst, struct page *src,
			   unsigned long addr, struct vm_area_struct *vma)
{
	int i;

	might_sleep();
	for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
		cond_resched();
		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
	}
}

static void enqueue_huge_page(struct page *page)
{
	int nid = page_to_nid(page);
	list_add(&page->lru, &hugepage_freelists[nid]);
	free_huge_pages++;
	free_huge_pages_node[nid]++;
}
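/*
 * Take a free huge page off the freelist of a node allowed by the VMA's
 * mempolicy and cpuset, updating the free page counters.  Returns NULL
 * if no suitable page is available.  Called with hugetlb_lock held.
 */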
static struct page *dequeue_huge_page(struct vm_area_struct *vma,
				unsigned long address)
{
	int nid;
	struct page *page = NULL;
	struct mempolicy *mpol;
	struct zonelist *zonelist = huge_zonelist(vma, address,
					htlb_alloc_mask, &mpol);
	struct zone **z;

	for (z = zonelist->zones; *z; z++) {
		nid = zone_to_nid(*z);
		if (cpuset_zone_allowed_softwall(*z, htlb_alloc_mask) &&
		    !list_empty(&hugepage_freelists[nid])) {
			page = list_entry(hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			free_huge_pages--;
			free_huge_pages_node[nid]--;
			break;
		}
	}
	mpol_free(mpol);	/* unref if mpol !NULL */
	return page;
}

static void update_and_free_page(struct page *page)
{
	int i;
	nr_huge_pages--;
	nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
				1 << PG_referenced | 1 << PG_dirty |
				1 << PG_active | 1 << PG_reserved |
				1 << PG_private | 1 << PG_writeback);
	}
	set_compound_page_dtor(page, NULL);
	set_page_refcounted(page);
	__free_pages(page, HUGETLB_PAGE_ORDER);
}

static void free_huge_page(struct page *page)
{
	int nid = page_to_nid(page);

	BUG_ON(page_count(page));
	INIT_LIST_HEAD(&page->lru);

	spin_lock(&hugetlb_lock);
	if (surplus_huge_pages_node[nid]) {
		update_and_free_page(page);
		surplus_huge_pages--;
		surplus_huge_pages_node[nid]--;
	} else {
		enqueue_huge_page(page);
	}
	spin_unlock(&hugetlb_lock);
}
/*
 * Increment or decrement surplus_huge_pages.  Keep node-specific counters
 * balanced by operating on them in a round-robin fashion.
 * Returns 1 if an adjustment was made.
 */
static int adjust_pool_surplus(int delta)
{
	static int prev_nid;
	int nid = prev_nid;
	int ret = 0;

	VM_BUG_ON(delta != -1 && delta != 1);
	do {
		nid = next_node(nid, node_online_map);
		if (nid == MAX_NUMNODES)
			nid = first_node(node_online_map);

		/* To shrink on this node, there must be a surplus page */
		if (delta < 0 && !surplus_huge_pages_node[nid])
			continue;
		/* Surplus cannot exceed the total number of pages */
		if (delta > 0 && surplus_huge_pages_node[nid] >=
						nr_huge_pages_node[nid])
			continue;

		surplus_huge_pages += delta;
		surplus_huge_pages_node[nid] += delta;
		ret = 1;
		break;
	} while (nid != prev_nid);

	prev_nid = nid;
	return ret;
}
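/*
 * Allocate a fresh huge page from the buddy allocator, spreading
 * allocations across online nodes round-robin, and release it into the
 * hugetlb pool via put_page()/free_huge_page().  Returns 1 on success,
 * 0 on failure.
 */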
static int alloc_fresh_huge_page(void)
{
	static int prev_nid;
	struct page *page;
	int nid;

	/*
	 * Copy static prev_nid to local nid, work on that, then copy it
	 * back to prev_nid afterwards: otherwise there's a window in which
	 * a racer might pass invalid nid MAX_NUMNODES to alloc_pages_node.
	 * But we don't need to use a spin_lock here: it really doesn't
	 * matter if occasionally a racer chooses the same nid as we do.
	 */
	nid = next_node(prev_nid, node_online_map);
	if (nid == MAX_NUMNODES)
		nid = first_node(node_online_map);
	prev_nid = nid;

	page = alloc_pages_node(nid, htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN,
					HUGETLB_PAGE_ORDER);
	if (page) {
		set_compound_page_dtor(page, free_huge_page);
		spin_lock(&hugetlb_lock);
		nr_huge_pages++;
		nr_huge_pages_node[page_to_nid(page)]++;
		spin_unlock(&hugetlb_lock);
		put_page(page); /* free it into the hugepage allocator */
		return 1;
	}
	return 0;
}

static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
						unsigned long address)
{
	struct page *page;

	page = alloc_pages(htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN,
					HUGETLB_PAGE_ORDER);
	if (page) {
		set_compound_page_dtor(page, free_huge_page);
		spin_lock(&hugetlb_lock);
		nr_huge_pages++;
		nr_huge_pages_node[page_to_nid(page)]++;
		surplus_huge_pages++;
		surplus_huge_pages_node[page_to_nid(page)]++;
		spin_unlock(&hugetlb_lock);
	}

	return page;
}
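/*
 * Allocate a huge page for the given mapping.  Shared mappings draw on
 * their reservation; private mappings may only dip into the free pool
 * above the reserve, and fall back to a surplus page from the buddy
 * allocator if the pool is exhausted.
 */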
static struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr)
{
	struct page *page = NULL;

	spin_lock(&hugetlb_lock);
	if (vma->vm_flags & VM_MAYSHARE)
		resv_huge_pages--;
	else if (free_huge_pages <= resv_huge_pages)
		goto fail;

	page = dequeue_huge_page(vma, addr);
	if (!page)
		goto fail;

	spin_unlock(&hugetlb_lock);
	set_page_refcounted(page);
	return page;

fail:
	if (vma->vm_flags & VM_MAYSHARE)
		resv_huge_pages++;
	spin_unlock(&hugetlb_lock);

	/*
	 * Private mappings do not use reserved huge pages so the allocation
	 * may have failed due to an undersized hugetlb pool.  Try to grab a
	 * surplus huge page from the buddy allocator.
	 */
	if (!(vma->vm_flags & VM_MAYSHARE))
		page = alloc_buddy_huge_page(vma, addr);

	return page;
}

static int __init hugetlb_init(void)
{
	unsigned long i;

	if (HPAGE_SHIFT == 0)
		return 0;

	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&hugepage_freelists[i]);

	for (i = 0; i < max_huge_pages; ++i) {
		if (!alloc_fresh_huge_page())
			break;
	}
	max_huge_pages = free_huge_pages = nr_huge_pages = i;
	printk("Total HugeTLB memory allocated, %ld\n", free_huge_pages);
	return 0;
}
module_init(hugetlb_init);

static int __init hugetlb_setup(char *s)
{
	if (sscanf(s, "%lu", &max_huge_pages) <= 0)
		max_huge_pages = 0;
	return 1;
}
__setup("hugepages=", hugetlb_setup);

static unsigned int cpuset_mems_nr(unsigned int *array)
{
	int node;
	unsigned int nr = 0;

	for_each_node_mask(node, cpuset_current_mems_allowed)
		nr += array[node];

	return nr;
}

#ifdef CONFIG_SYSCTL
#ifdef CONFIG_HIGHMEM
static void try_to_free_low(unsigned long count)
{
	int i;

	for (i = 0; i < MAX_NUMNODES; ++i) {
		struct page *page, *next;
		list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(page);
			free_huge_pages--;
			free_huge_pages_node[page_to_nid(page)]--;
			if (count >= nr_huge_pages)
				return;
		}
	}
}
#else
static inline void try_to_free_low(unsigned long count)
{
}
#endif

#define persistent_huge_pages (nr_huge_pages - surplus_huge_pages)
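/*
 * Grow or shrink the persistent (non-surplus) huge page pool to @count
 * pages, returning the resulting persistent pool size.
 */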
static unsigned long set_max_huge_pages(unsigned long count)
{
	unsigned long min_count, ret;

	/*
	 * Increase the pool size
	 * First take pages out of surplus state.  Then make up the
	 * remaining difference by allocating fresh huge pages.
	 */
	spin_lock(&hugetlb_lock);
	while (surplus_huge_pages && count > persistent_huge_pages) {
		if (!adjust_pool_surplus(-1))
			break;
	}

	while (count > persistent_huge_pages) {
		int ret;
		/*
		 * If this allocation races such that we no longer need the
		 * page, free_huge_page will handle it by freeing the page
		 * and reducing the surplus.
		 */
		spin_unlock(&hugetlb_lock);
		ret = alloc_fresh_huge_page();
		spin_lock(&hugetlb_lock);
		if (!ret)
			goto out;

	}
	if (count >= persistent_huge_pages)
		goto out;

	/*
	 * Decrease the pool size
	 * First return free pages to the buddy allocator (being careful
	 * to keep enough around to satisfy reservations).  Then place
	 * pages into surplus state as needed so the pool will shrink
	 * to the desired size as pages become free.
	 */
	min_count = max(count, resv_huge_pages);
	try_to_free_low(min_count);
	while (min_count < persistent_huge_pages) {
		struct page *page = dequeue_huge_page(NULL, 0);
		if (!page)
			break;
		update_and_free_page(page);
	}
	while (count < persistent_huge_pages) {
		if (!adjust_pool_surplus(1))
			break;
	}
out:
	ret = persistent_huge_pages;
	spin_unlock(&hugetlb_lock);
	return ret;
}

int hugetlb_sysctl_handler(struct ctl_table *table, int write,
			   struct file *file, void __user *buffer,
			   size_t *length, loff_t *ppos)
{
	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
	max_huge_pages = set_max_huge_pages(max_huge_pages);
	return 0;
}

int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
			struct file *file, void __user *buffer,
			size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, file, buffer, length, ppos);
	if (hugepages_treat_as_movable)
		htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
	else
		htlb_alloc_mask = GFP_HIGHUSER;
	return 0;
}
#endif /* CONFIG_SYSCTL */

int hugetlb_report_meminfo(char *buf)
{
	return sprintf(buf,
			"HugePages_Total: %5lu\n"
			"HugePages_Free:  %5lu\n"
			"HugePages_Rsvd:  %5lu\n"
			"HugePages_Surp:  %5lu\n"
			"Hugepagesize:    %5lu kB\n",
			nr_huge_pages,
			free_huge_pages,
			resv_huge_pages,
			surplus_huge_pages,
			HPAGE_SIZE/1024);
}

int hugetlb_report_node_meminfo(int nid, char *buf)
{
	return sprintf(buf,
		"Node %d HugePages_Total: %5u\n"
		"Node %d HugePages_Free:  %5u\n",
		nid, nr_huge_pages_node[nid],
		nid, free_huge_pages_node[nid]);
}

/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
	return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
}
/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we
 * get this far.
 */
static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
	return 0;
}

struct vm_operations_struct hugetlb_vm_ops = {
	.fault = hugetlb_vm_op_fault,
};

static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
				int writable)
{
	pte_t entry;

	if (writable) {
		entry =
		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	} else {
		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	}
	entry = pte_mkyoung(entry);
	entry = pte_mkhuge(entry);

	return entry;
}

static void set_huge_ptep_writable(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep)
{
	pte_t entry;

	entry = pte_mkwrite(pte_mkdirty(*ptep));
	if (ptep_set_access_flags(vma, address, ptep, entry, 1)) {
		update_mmu_cache(vma, address, entry);
	}
}


int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr;
	int cow;

	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		src_pte = huge_pte_offset(src, addr);
		if (!src_pte)
			continue;
		dst_pte = huge_pte_alloc(dst, addr);
		if (!dst_pte)
			goto nomem;
		spin_lock(&dst->page_table_lock);
		spin_lock(&src->page_table_lock);
		if (!pte_none(*src_pte)) {
			if (cow)
				ptep_set_wrprotect(src, addr, src_pte);
			entry = *src_pte;
			ptepage = pte_page(entry);
			get_page(ptepage);
			set_huge_pte_at(dst, addr, dst_pte, entry);
		}
		spin_unlock(&src->page_table_lock);
		spin_unlock(&dst->page_table_lock);
	}
	return 0;

nomem:
	return -ENOMEM;
}
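/*
 * Unmap all huge pages in [start, end), gathering them on a local list
 * and dropping their references only after the TLB flush.  The caller
 * must hold the per-file i_mmap_lock, which protects the gathering
 * list below.
 */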
void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *ptep;
	pte_t pte;
	struct page *page;
	struct page *tmp;
	/*
	 * A page gathering list, protected by per file i_mmap_lock. The
	 * lock is used to avoid list corruption from multiple unmapping
	 * of the same page since we are using page->lru.
	 */
	LIST_HEAD(page_list);

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON(start & ~HPAGE_MASK);
	BUG_ON(end & ~HPAGE_MASK);

	spin_lock(&mm->page_table_lock);
	for (address = start; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;

		if (huge_pmd_unshare(mm, &address, ptep))
			continue;

		pte = huge_ptep_get_and_clear(mm, address, ptep);
		if (pte_none(pte))
			continue;

		page = pte_page(pte);
		if (pte_dirty(pte))
			set_page_dirty(page);
		list_add(&page->lru, &page_list);
	}
	spin_unlock(&mm->page_table_lock);
	flush_tlb_range(vma, start, end);
	list_for_each_entry_safe(page, tmp, &page_list, lru) {
		list_del(&page->lru);
		put_page(page);
	}
}
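/*
 * Locking wrapper around __unmap_hugepage_range(): takes the per-file
 * i_mmap_lock that the page gathering list relies on.
 */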
void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end)
{
	/*
	 * It is undesirable to test vma->vm_file, as it should be non-NULL
	 * for any valid hugetlb area.  However, vm_file will be NULL on the
	 * error cleanup path of do_mmap_pgoff(): when the hugetlbfs ->mmap
	 * method fails, do_mmap_pgoff() nullifies vma->vm_file before
	 * calling this function to clean up.  Since no pte has actually
	 * been set up, it is safe to do nothing in this case.
	 */
	if (vma->vm_file) {
		spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
		__unmap_hugepage_range(vma, start, end);
		spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
	}
}

static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, pte_t pte)
{
	struct page *old_page, *new_page;
	int avoidcopy;

	old_page = pte_page(pte);

	/* If no-one else is actually using this page, avoid the copy
	 * and just make the page writable */
	avoidcopy = (page_count(old_page) == 1);
	if (avoidcopy) {
		set_huge_ptep_writable(vma, address, ptep);
		return 0;
	}

	page_cache_get(old_page);
	new_page = alloc_huge_page(vma, address);

	if (!new_page) {
		page_cache_release(old_page);
		return VM_FAULT_OOM;
	}

	spin_unlock(&mm->page_table_lock);
	copy_huge_page(new_page, old_page, address, vma);
	spin_lock(&mm->page_table_lock);

	ptep = huge_pte_offset(mm, address & HPAGE_MASK);
	if (likely(pte_same(*ptep, pte))) {
		/* Break COW */
		set_huge_pte_at(mm, address, ptep,
				make_huge_pte(vma, new_page, 1));
		/* Make the old page be freed below */
		new_page = old_page;
	}
	page_cache_release(new_page);
	page_cache_release(old_page);
	return 0;
}
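/*
 * Handle a fault on a not-present huge pte: find the page in the page
 * cache, or allocate one and (for shared mappings) insert it, then
 * install the pte.  Called with the hugetlb_instantiation_mutex held.
 */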
static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, int write_access)
{
	int ret = VM_FAULT_SIGBUS;
	unsigned long idx;
	unsigned long size;
	struct page *page;
	struct address_space *mapping;
	pte_t new_pte;

	mapping = vma->vm_file->f_mapping;
	idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
		+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));

	/*
	 * Use page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */
retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
		size = i_size_read(mapping->host) >> HPAGE_SHIFT;
		if (idx >= size)
			goto out;
		if (hugetlb_get_quota(mapping))
			goto out;
		page = alloc_huge_page(vma, address);
		if (!page) {
			hugetlb_put_quota(mapping);
			ret = VM_FAULT_OOM;
			goto out;
		}
		clear_huge_page(page, address);

		if (vma->vm_flags & VM_SHARED) {
			int err;

			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
			if (err) {
				put_page(page);
				hugetlb_put_quota(mapping);
				if (err == -EEXIST)
					goto retry;
				goto out;
			}
		} else
			lock_page(page);
	}

	spin_lock(&mm->page_table_lock);
	size = i_size_read(mapping->host) >> HPAGE_SHIFT;
	if (idx >= size)
		goto backout;

	ret = 0;
	if (!pte_none(*ptep))
		goto backout;

	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, address, ptep, new_pte);

	if (write_access && !(vma->vm_flags & VM_SHARED)) {
		/* Optimization, do the COW without a second fault */
		ret = hugetlb_cow(mm, vma, address, ptep, new_pte);
	}

	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(&mm->page_table_lock);
	hugetlb_put_quota(mapping);
	unlock_page(page);
	put_page(page);
	goto out;
}
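/*
 * Main huge page fault entry point: instantiate a missing pte via
 * hugetlb_no_page(), or break COW on a write to a read-only pte.
 */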
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, int write_access)
{
	pte_t *ptep;
	pte_t entry;
	int ret;
	static DEFINE_MUTEX(hugetlb_instantiation_mutex);

	ptep = huge_pte_alloc(mm, address);
	if (!ptep)
		return VM_FAULT_OOM;

	/*
	 * Serialize hugepage allocation and instantiation, so that we don't
	 * get spurious allocation failures if two CPUs race to instantiate
	 * the same page in the page cache.
	 */
	mutex_lock(&hugetlb_instantiation_mutex);
	entry = *ptep;
	if (pte_none(entry)) {
		ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
		mutex_unlock(&hugetlb_instantiation_mutex);
		return ret;
	}

	ret = 0;

	spin_lock(&mm->page_table_lock);
	/* Check for a racing update before calling hugetlb_cow */
	if (likely(pte_same(entry, *ptep)))
		if (write_access && !pte_write(entry))
			ret = hugetlb_cow(mm, vma, address, ptep, entry);
	spin_unlock(&mm->page_table_lock);
	mutex_unlock(&hugetlb_instantiation_mutex);

	return ret;
}
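/*
 * The hugetlb side of get_user_pages(): walk the range, faulting in
 * missing pages, and fill in the pages[]/vmas[] arrays.  Returns the
 * updated index i (or -EFAULT if the first page could not be faulted
 * in) and updates *position and *length for the caller.
 */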
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
			struct page **pages, struct vm_area_struct **vmas,
			unsigned long *position, int *length, int i)
{
	unsigned long pfn_offset;
	unsigned long vaddr = *position;
	int remainder = *length;

	spin_lock(&mm->page_table_lock);
	while (vaddr < vma->vm_end && remainder) {
		pte_t *pte;
		struct page *page;

		/*
		 * Some archs (sparc64, sh*) have multiple pte_ts to
		 * each hugepage.  We have to make sure we get the
		 * first, for the page indexing below to work.
		 */
		pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);

		if (!pte || pte_none(*pte)) {
			int ret;

			spin_unlock(&mm->page_table_lock);
			ret = hugetlb_fault(mm, vma, vaddr, 0);
			spin_lock(&mm->page_table_lock);
			if (!(ret & VM_FAULT_ERROR))
				continue;

			remainder = 0;
			if (!i)
				i = -EFAULT;
			break;
		}

		pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
		page = pte_page(*pte);
same_page:
		if (pages) {
			get_page(page);
			pages[i] = page + pfn_offset;
		}

		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		++pfn_offset;
		--remainder;
		++i;
		if (vaddr < vma->vm_end && remainder &&
				pfn_offset < HPAGE_SIZE/PAGE_SIZE) {
			/*
			 * We use pfn_offset to avoid touching the pageframes
			 * of this compound page.
			 */
			goto same_page;
		}
	}
	spin_unlock(&mm->page_table_lock);
	*length = remainder;
	*position = vaddr;

	return i;
}

void hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long start = address;
	pte_t *ptep;
	pte_t pte;

	BUG_ON(address >= end);
	flush_cache_range(vma, address, end);

	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
	spin_lock(&mm->page_table_lock);
	for (; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;
		if (huge_pmd_unshare(mm, &address, ptep))
			continue;
		if (!pte_none(*ptep)) {
			pte = huge_ptep_get_and_clear(mm, address, ptep);
			pte = pte_mkhuge(pte_modify(pte, newprot));
			set_huge_pte_at(mm, address, ptep, pte);
		}
	}
	spin_unlock(&mm->page_table_lock);
	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);

	flush_tlb_range(vma, start, end);
}
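/*
 * Huge page reservations are tracked as a list of file_region
 * [from, to) ranges hung off the mapping's private_list.  region_add()
 * commits a reservation, region_chg() computes how many new pages a
 * proposed reservation would need, and region_truncate() drops
 * reservations at or beyond a given offset.
 */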
struct file_region {
	struct list_head link;
	long from;
	long to;
};

static long region_add(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg, *trg;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;

	/* Check for and consume any regions we now overlap with. */
	nrg = rg;
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			break;

		/* If this area reaches higher then extend our area to
		 * include it completely.  If this is not the first area
		 * which we intend to reuse, free it. */
		if (rg->to > t)
			t = rg->to;
		if (rg != nrg) {
			list_del(&rg->link);
			kfree(rg);
		}
	}
	nrg->from = f;
	nrg->to = t;
	return 0;
}
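/*
 * Return how many pages a reservation of [f, t) would add over what is
 * already reserved, inserting a zero-size placeholder region where
 * needed so that a following region_add() cannot fail.
 */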
static long region_chg(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg;
	long chg = 0;

	/* Locate the region we are before or in. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* If we are below the current region then a new region is required.
	 * Subtle, allocate a new region at the position but make it zero
	 * size such that we can guarantee to record the reservation. */
	if (&rg->link == head || t < rg->from) {
		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
		if (!nrg)
			return -ENOMEM;
		nrg->from = f;
		nrg->to = f;
		INIT_LIST_HEAD(&nrg->link);
		list_add(&nrg->link, rg->link.prev);

		return t - f;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;
	chg = t - f;

	/* Check for and consume any regions we now overlap with. */
	list_for_each_entry(rg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			return chg;

		/* We overlap with this area, if it extends further than
		 * us then we must extend ourselves.  Account for its
		 * existing reservation. */
		if (rg->to > t) {
			chg += rg->to - t;
			t = rg->to;
		}
		chg -= rg->to - rg->from;
	}
	return chg;
}
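/*
 * Drop all reservations at or beyond @end, returning the number of
 * reserved pages released.
 */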
static long region_truncate(struct list_head *head, long end)
{
	struct file_region *rg, *trg;
	long chg = 0;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (end <= rg->to)
			break;
	if (&rg->link == head)
		return 0;

	/* If we are in the middle of a region then adjust it. */
	if (end > rg->from) {
		chg = rg->to - end;
		rg->to = end;
		rg = list_entry(rg->link.next, typeof(*rg), link);
	}

	/* Drop any remaining regions. */
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		chg += rg->to - rg->from;
		list_del(&rg->link);
		kfree(rg);
	}
	return chg;
}

static int hugetlb_acct_memory(long delta)
{
	int ret = -ENOMEM;

	spin_lock(&hugetlb_lock);
	if ((delta + resv_huge_pages) <= free_huge_pages) {
		resv_huge_pages += delta;
		ret = 0;
	}
	spin_unlock(&hugetlb_lock);
	return ret;
}

int hugetlb_reserve_pages(struct inode *inode, long from, long to)
{
	long ret, chg;

	chg = region_chg(&inode->i_mapping->private_list, from, to);
	if (chg < 0)
		return chg;
	/*
	 * When cpusets are configured, strict hugetlb page reservation
	 * breaks down, because the accounting is done on a global variable:
	 * the reservation is never checked against page availability in the
	 * current cpuset, so an application can still be OOM-killed for
	 * lack of free hugetlb pages in the cpuset its task runs in.
	 * Enforcing strict accounting per cpuset is nearly impossible (or
	 * too ugly) because cpusets are fluid: tasks and memory nodes can
	 * be moved between cpusets at any time.
	 *
	 * Changing the semantics of shared hugetlb mappings under cpusets
	 * is undesirable.  To preserve some of them, we fall back to
	 * checking against the current free page count as a best effort,
	 * hopefully minimizing the impact of the semantic change that
	 * cpusets introduce.
	 */
	if (chg > cpuset_mems_nr(free_huge_pages_node))
		return -ENOMEM;

	ret = hugetlb_acct_memory(chg);
	if (ret < 0)
		return ret;
	region_add(&inode->i_mapping->private_list, from, to);
	return 0;
}

void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
{
	long chg = region_truncate(&inode->i_mapping->private_list, offset);
	hugetlb_acct_memory(freed - chg);
}