/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include <linux/hugetlb.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages;
static unsigned long surplus_huge_pages;
static unsigned long nr_overcommit_huge_pages;
unsigned long max_huge_pages;
unsigned long sysctl_overcommit_huge_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static unsigned int nr_huge_pages_node[MAX_NUMNODES];
static unsigned int free_huge_pages_node[MAX_NUMNODES];
static unsigned int surplus_huge_pages_node[MAX_NUMNODES];
static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;
static int hugetlb_next_nid;

/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 * is guaranteed to have their future faults succeed.
 *
 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held. It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 */
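/*
 * Illustrative sketch of how the reservation helpers below cooperate for a
 * MAP_PRIVATE mapping (the mmap()-time caller lives outside this file, so
 * the first step is an assumption about the setup path, not code shown here):
 *
 *	mmap() time:  set_vma_resv_huge_pages(vma, n) stashes the reserve
 *	              count in vma->vm_private_data.
 *	fault time:   dequeue_huge_page_vma() consumes one reserve through
 *	              decrement_hugepage_resv_vma().
 *	fork() time:  reset_vma_resv_huge_pages() clears the child's copy, so
 *	              only the original mmap() caller keeps its reserves.
 */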
static unsigned long vma_resv_huge_pages(struct vm_area_struct *vma)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	if (!(vma->vm_flags & VM_SHARED))
		return (unsigned long)vma->vm_private_data;
	return 0;
}

static void set_vma_resv_huge_pages(struct vm_area_struct *vma,
							unsigned long reserve)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	VM_BUG_ON(vma->vm_flags & VM_SHARED);

	vma->vm_private_data = (void *)reserve;
}

/* Decrement the reserved pages in the hugepage pool by one */
static void decrement_hugepage_resv_vma(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_SHARED) {
		/* Shared mappings always use reserves */
		resv_huge_pages--;
	} else {
		/*
		 * Only the process that called mmap() has reserves for
		 * private mappings.
		 */
		if (vma_resv_huge_pages(vma)) {
			unsigned long reserve;

			resv_huge_pages--;
			reserve = (unsigned long)vma->vm_private_data - 1;
			vma->vm_private_data = (void *)reserve;
		}
	}
}

void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	if (!(vma->vm_flags & VM_SHARED))
		vma->vm_private_data = (void *)0;
}

/* Returns true if the VMA has associated reserve pages */
static int vma_has_private_reserves(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_SHARED)
		return 0;
	if (!vma_resv_huge_pages(vma))
		return 0;
	return 1;
}

static void clear_huge_page(struct page *page, unsigned long addr)
{
	int i;

	might_sleep();
	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
		cond_resched();
		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
	}
}

static void copy_huge_page(struct page *dst, struct page *src,
			   unsigned long addr, struct vm_area_struct *vma)
{
	int i;

	might_sleep();
	for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
		cond_resched();
		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
	}
}
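/*
 * Note on the two helpers above: they touch the huge page one base page at a
 * time and call cond_resched() on every iteration.  With 2MB huge pages over
 * 4KB base pages, for example, that is HPAGE_SIZE/PAGE_SIZE = 512 iterations,
 * so the loops stay preemption-friendly instead of clearing or copying the
 * whole huge page in one long burst.
 */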
static void enqueue_huge_page(struct page *page)
{
	int nid = page_to_nid(page);
	list_add(&page->lru, &hugepage_freelists[nid]);
	free_huge_pages++;
	free_huge_pages_node[nid]++;
}

static struct page *dequeue_huge_page(void)
{
	int nid;
	struct page *page = NULL;

	for (nid = 0; nid < MAX_NUMNODES; ++nid) {
		if (!list_empty(&hugepage_freelists[nid])) {
			page = list_entry(hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			free_huge_pages--;
			free_huge_pages_node[nid]--;
			break;
		}
	}
	return page;
}

static struct page *dequeue_huge_page_vma(struct vm_area_struct *vma,
				unsigned long address)
{
	int nid;
	struct page *page = NULL;
	struct mempolicy *mpol;
	nodemask_t *nodemask;
	struct zonelist *zonelist = huge_zonelist(vma, address,
					htlb_alloc_mask, &mpol, &nodemask);
	struct zone *zone;
	struct zoneref *z;

	/*
	 * A child process with MAP_PRIVATE mappings created by its parent
	 * has no page reserves. This check ensures that reservations are
	 * not "stolen". The child may still get SIGKILLed
	 */
	if (!vma_has_private_reserves(vma) &&
			free_huge_pages - resv_huge_pages == 0)
		return NULL;

	for_each_zone_zonelist_nodemask(zone, z, zonelist,
						MAX_NR_ZONES - 1, nodemask) {
		nid = zone_to_nid(zone);
		if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask) &&
		    !list_empty(&hugepage_freelists[nid])) {
			page = list_entry(hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			free_huge_pages--;
			free_huge_pages_node[nid]--;
			decrement_hugepage_resv_vma(vma);

			break;
		}
	}
	mpol_cond_put(mpol);
	return page;
}

static void update_and_free_page(struct page *page)
{
	int i;

	nr_huge_pages--;
	nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
				1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
				1 << PG_private | 1 << PG_writeback);
	}
	set_compound_page_dtor(page, NULL);
	set_page_refcounted(page);
	arch_release_hugepage(page);
	__free_pages(page, HUGETLB_PAGE_ORDER);
}

static void free_huge_page(struct page *page)
{
	int nid = page_to_nid(page);
	struct address_space *mapping;

	mapping = (struct address_space *) page_private(page);
	set_page_private(page, 0);
	BUG_ON(page_count(page));
	INIT_LIST_HEAD(&page->lru);

	spin_lock(&hugetlb_lock);
	if (surplus_huge_pages_node[nid]) {
		update_and_free_page(page);
		surplus_huge_pages--;
		surplus_huge_pages_node[nid]--;
	} else {
		enqueue_huge_page(page);
	}
	spin_unlock(&hugetlb_lock);
	if (mapping)
		hugetlb_put_quota(mapping, 1);
}
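/*
 * Orientation note, summarising the longer comments in
 * alloc_buddy_huge_page() and set_max_huge_pages() below rather than adding
 * new behaviour: "persistent" huge pages are the ones sized via
 * max_huge_pages, while "surplus" huge pages are pulled from the buddy
 * allocator on demand and are bounded by nr_overcommit_huge_pages.
 * free_huge_page() above hands a surplus page straight back to the buddy
 * allocator instead of the per-node free lists.
 */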
/*
 * Increment or decrement surplus_huge_pages.  Keep node-specific counters
 * balanced by operating on them in a round-robin fashion.
 * Returns 1 if an adjustment was made.
 */
static int adjust_pool_surplus(int delta)
{
	static int prev_nid;
	int nid = prev_nid;
	int ret = 0;

	VM_BUG_ON(delta != -1 && delta != 1);
	do {
		nid = next_node(nid, node_online_map);
		if (nid == MAX_NUMNODES)
			nid = first_node(node_online_map);

		/* To shrink on this node, there must be a surplus page */
		if (delta < 0 && !surplus_huge_pages_node[nid])
			continue;
		/* Surplus cannot exceed the total number of pages */
		if (delta > 0 && surplus_huge_pages_node[nid] >=
						nr_huge_pages_node[nid])
			continue;

		surplus_huge_pages += delta;
		surplus_huge_pages_node[nid] += delta;
		ret = 1;
		break;
	} while (nid != prev_nid);

	prev_nid = nid;
	return ret;
}

static struct page *alloc_fresh_huge_page_node(int nid)
{
	struct page *page;

	page = alloc_pages_node(nid,
		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
						__GFP_REPEAT|__GFP_NOWARN,
		HUGETLB_PAGE_ORDER);
	if (page) {
		if (arch_prepare_hugepage(page)) {
			__free_pages(page, HUGETLB_PAGE_ORDER);
			return NULL;
		}
		set_compound_page_dtor(page, free_huge_page);
		spin_lock(&hugetlb_lock);
		nr_huge_pages++;
		nr_huge_pages_node[nid]++;
		spin_unlock(&hugetlb_lock);
		put_page(page); /* free it into the hugepage allocator */
	}

	return page;
}

static int alloc_fresh_huge_page(void)
{
	struct page *page;
	int start_nid;
	int next_nid;
	int ret = 0;

	start_nid = hugetlb_next_nid;

	do {
		page = alloc_fresh_huge_page_node(hugetlb_next_nid);
		if (page)
			ret = 1;
		/*
		 * Use a helper variable to find the next node and then
		 * copy it back to hugetlb_next_nid afterwards:
		 * otherwise there's a window in which a racer might
		 * pass invalid nid MAX_NUMNODES to alloc_pages_node.
		 * But we don't need to use a spin_lock here: it really
		 * doesn't matter if occasionally a racer chooses the
		 * same nid as we do.  Move nid forward in the mask even
		 * if we just successfully allocated a hugepage so that
		 * the next caller gets hugepages on the next node.
		 */
		next_nid = next_node(hugetlb_next_nid, node_online_map);
		if (next_nid == MAX_NUMNODES)
			next_nid = first_node(node_online_map);
		hugetlb_next_nid = next_nid;
	} while (!page && hugetlb_next_nid != start_nid);

	if (ret)
		count_vm_event(HTLB_BUDDY_PGALLOC);
	else
		count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);

	return ret;
}

static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
						unsigned long address)
{
	struct page *page;
	unsigned int nid;

	/*
	 * Assume we will successfully allocate the surplus page to
	 * prevent racing processes from causing the surplus to exceed
	 * overcommit
	 *
	 * This however introduces a different race, where a process B
	 * tries to grow the static hugepage pool while alloc_pages() is
	 * called by process A. B will only examine the per-node
	 * counters in determining if surplus huge pages can be
	 * converted to normal huge pages in adjust_pool_surplus(). A
	 * won't be able to increment the per-node counter, until the
	 * lock is dropped by B, but B doesn't drop hugetlb_lock until
	 * no more huge pages can be converted from surplus to normal
	 * state (and doesn't try to convert again). Thus, we have a
	 * case where a surplus huge page exists, the pool is grown, and
	 * the surplus huge page still exists after, even though it
	 * should just have been converted to a normal huge page. This
	 * does not leak memory, though, as the hugepage will be freed
	 * once it is out of use. It also does not allow the counters to
	 * go out of whack in adjust_pool_surplus() as we don't modify
	 * the node values until we've gotten the hugepage and only the
	 * per-node value is checked there.
	 */
	spin_lock(&hugetlb_lock);
	if (surplus_huge_pages >= nr_overcommit_huge_pages) {
		spin_unlock(&hugetlb_lock);
		return NULL;
	} else {
		nr_huge_pages++;
		surplus_huge_pages++;
	}
	spin_unlock(&hugetlb_lock);

	page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
					__GFP_REPEAT|__GFP_NOWARN,
					HUGETLB_PAGE_ORDER);

	spin_lock(&hugetlb_lock);
	if (page) {
		/*
		 * This page is now managed by the hugetlb allocator and has
		 * no users -- drop the buddy allocator's reference.
		 */
		put_page_testzero(page);
		VM_BUG_ON(page_count(page));
		nid = page_to_nid(page);
		set_compound_page_dtor(page, free_huge_page);
		/*
		 * We incremented the global counters already
		 */
		nr_huge_pages_node[nid]++;
		surplus_huge_pages_node[nid]++;
		__count_vm_event(HTLB_BUDDY_PGALLOC);
	} else {
		nr_huge_pages--;
		surplus_huge_pages--;
		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
	}
	spin_unlock(&hugetlb_lock);

	return page;
}

/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.
 */
static int gather_surplus_pages(int delta)
{
	struct list_head surplus_list;
	struct page *page, *tmp;
	int ret, i;
	int needed, allocated;

	needed = (resv_huge_pages + delta) - free_huge_pages;
	if (needed <= 0) {
		resv_huge_pages += delta;
		return 0;
	}

	allocated = 0;
	INIT_LIST_HEAD(&surplus_list);

	ret = -ENOMEM;
retry:
	spin_unlock(&hugetlb_lock);
	for (i = 0; i < needed; i++) {
		page = alloc_buddy_huge_page(NULL, 0);
		if (!page) {
			/*
			 * We were not able to allocate enough pages to
			 * satisfy the entire reservation so we free what
			 * we've allocated so far.
			 */
			spin_lock(&hugetlb_lock);
			needed = 0;
			goto free;
		}

		list_add(&page->lru, &surplus_list);
	}
	allocated += needed;

	/*
	 * After retaking hugetlb_lock, we need to recalculate 'needed'
	 * because either resv_huge_pages or free_huge_pages may have changed.
	 */
	spin_lock(&hugetlb_lock);
	needed = (resv_huge_pages + delta) - (free_huge_pages + allocated);
	if (needed > 0)
		goto retry;

	/*
	 * The surplus_list now contains _at_least_ the number of extra pages
	 * needed to accommodate the reservation.  Add the appropriate number
	 * of pages to the hugetlb pool and free the extras back to the buddy
	 * allocator.  Commit the entire reservation here to prevent another
	 * process from stealing the pages as they are added to the pool but
	 * before they are reserved.
	 */
	needed += allocated;
	resv_huge_pages += delta;
	ret = 0;
free:
	/* Free the needed pages to the hugetlb pool */
	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
		if ((--needed) < 0)
			break;
		list_del(&page->lru);
		enqueue_huge_page(page);
	}

	/* Free unnecessary surplus pages to the buddy allocator */
	if (!list_empty(&surplus_list)) {
		spin_unlock(&hugetlb_lock);
		list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
			list_del(&page->lru);
			/*
			 * The page has a reference count of zero already, so
			 * call free_huge_page directly instead of using
			 * put_page.  This must be done with hugetlb_lock
			 * unlocked which is safe because free_huge_page takes
			 * hugetlb_lock before deciding how to free the page.
			 */
			free_huge_page(page);
		}
		spin_lock(&hugetlb_lock);
	}

	return ret;
}
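/*
 * Worked example for gather_surplus_pages() above, with illustrative numbers
 * only: if free_huge_pages == 2, resv_huge_pages == 1 and delta == 3, then
 * needed = (1 + 3) - 2 = 2, so two surplus pages are requested from
 * alloc_buddy_huge_page().  Once enough pages sit on surplus_list the whole
 * reservation is committed and any excess pages go back to the buddy
 * allocator via free_huge_page().
 */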
/*
 * When releasing a hugetlb pool reservation, any surplus pages that were
 * allocated to satisfy the reservation must be explicitly freed if they were
 * never used.
 */
static void return_unused_surplus_pages(unsigned long unused_resv_pages)
{
	static int nid = -1;
	struct page *page;
	unsigned long nr_pages;

	/*
	 * We want to release as many surplus pages as possible, spread
	 * evenly across all nodes. Iterate across all nodes until we
	 * can no longer free unreserved surplus pages. This occurs when
	 * the nodes with surplus pages have no free pages.
	 */
	unsigned long remaining_iterations = num_online_nodes();

	/* Uncommit the reservation */
	resv_huge_pages -= unused_resv_pages;

	nr_pages = min(unused_resv_pages, surplus_huge_pages);

	while (remaining_iterations-- && nr_pages) {
		nid = next_node(nid, node_online_map);
		if (nid == MAX_NUMNODES)
			nid = first_node(node_online_map);

		if (!surplus_huge_pages_node[nid])
			continue;

		if (!list_empty(&hugepage_freelists[nid])) {
			page = list_entry(hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			update_and_free_page(page);
			free_huge_pages--;
			free_huge_pages_node[nid]--;
			surplus_huge_pages--;
			surplus_huge_pages_node[nid]--;
			nr_pages--;
			remaining_iterations = num_online_nodes();
		}
	}
}

static struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr)
{
	struct page *page;
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned int chg = 0;

	/*
	 * Processes that did not create the mapping will have no reserves and
	 * will not have accounted against quota.
	 * Check that the quota can be made before satisfying the allocation.
	 */
	if (!vma_has_private_reserves(vma)) {
		chg = 1;
		if (hugetlb_get_quota(inode->i_mapping, chg))
			return ERR_PTR(-ENOSPC);
	}

	spin_lock(&hugetlb_lock);
	page = dequeue_huge_page_vma(vma, addr);
	spin_unlock(&hugetlb_lock);

	if (!page) {
		page = alloc_buddy_huge_page(vma, addr);
		if (!page) {
			hugetlb_put_quota(inode->i_mapping, chg);
			return ERR_PTR(-VM_FAULT_OOM);
		}
	}

	set_page_refcounted(page);
	set_page_private(page, (unsigned long) mapping);

	return page;
}

static int __init hugetlb_init(void)
{
	unsigned long i;

	if (HPAGE_SHIFT == 0)
		return 0;

	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&hugepage_freelists[i]);

	hugetlb_next_nid = first_node(node_online_map);

	for (i = 0; i < max_huge_pages; ++i) {
		if (!alloc_fresh_huge_page())
			break;
	}
	max_huge_pages = free_huge_pages = nr_huge_pages = i;
	printk("Total HugeTLB memory allocated, %ld\n", free_huge_pages);
	return 0;
}
module_init(hugetlb_init);

static int __init hugetlb_setup(char *s)
{
	if (sscanf(s, "%lu", &max_huge_pages) <= 0)
		max_huge_pages = 0;
	return 1;
}
__setup("hugepages=", hugetlb_setup);

static unsigned int cpuset_mems_nr(unsigned int *array)
{
	int node;
	unsigned int nr = 0;

	for_each_node_mask(node, cpuset_current_mems_allowed)
		nr += array[node];

	return nr;
}

#ifdef CONFIG_SYSCTL
#ifdef CONFIG_HIGHMEM
static void try_to_free_low(unsigned long count)
{
	int i;

	for (i = 0; i < MAX_NUMNODES; ++i) {
		struct page *page, *next;
		list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
			if (count >= nr_huge_pages)
				return;
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(page);
			free_huge_pages--;
			free_huge_pages_node[page_to_nid(page)]--;
		}
	}
}
#else
static inline void try_to_free_low(unsigned long count)
{
}
#endif

#define persistent_huge_pages (nr_huge_pages - surplus_huge_pages)
static unsigned long set_max_huge_pages(unsigned long count)
{
	unsigned long min_count, ret;

	/*
	 * Increase the pool size
	 * First take pages out of surplus state.  Then make up the
	 * remaining difference by allocating fresh huge pages.
	 *
	 * We might race with alloc_buddy_huge_page() here and be unable
	 * to convert a surplus huge page to a normal huge page. That is
	 * not critical, though, it just means the overall size of the
	 * pool might be one hugepage larger than it needs to be, but
	 * within all the constraints specified by the sysctls.
	 */
	spin_lock(&hugetlb_lock);
	while (surplus_huge_pages && count > persistent_huge_pages) {
		if (!adjust_pool_surplus(-1))
			break;
	}

	while (count > persistent_huge_pages) {
		/*
		 * If this allocation races such that we no longer need the
		 * page, free_huge_page will handle it by freeing the page
		 * and reducing the surplus.
		 */
		spin_unlock(&hugetlb_lock);
		ret = alloc_fresh_huge_page();
		spin_lock(&hugetlb_lock);
		if (!ret)
			goto out;
	}

	/*
	 * Decrease the pool size
	 * First return free pages to the buddy allocator (being careful
	 * to keep enough around to satisfy reservations).  Then place
	 * pages into surplus state as needed so the pool will shrink
	 * to the desired size as pages become free.
	 *
	 * By placing pages into the surplus state independent of the
	 * overcommit value, we are allowing the surplus pool size to
	 * exceed overcommit. There are few sane options here. Since
	 * alloc_buddy_huge_page() is checking the global counter,
	 * though, we'll note that we're not allowed to exceed surplus
	 * and won't grow the pool anywhere else.
	 * Not until one of the
	 * sysctls is changed, or the surplus pages go out of use.
	 */
	min_count = resv_huge_pages + nr_huge_pages - free_huge_pages;
	min_count = max(count, min_count);
	try_to_free_low(min_count);
	while (min_count < persistent_huge_pages) {
		struct page *page = dequeue_huge_page();
		if (!page)
			break;
		update_and_free_page(page);
	}
	while (count < persistent_huge_pages) {
		if (!adjust_pool_surplus(1))
			break;
	}
out:
	ret = persistent_huge_pages;
	spin_unlock(&hugetlb_lock);
	return ret;
}

int hugetlb_sysctl_handler(struct ctl_table *table, int write,
			   struct file *file, void __user *buffer,
			   size_t *length, loff_t *ppos)
{
	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
	max_huge_pages = set_max_huge_pages(max_huge_pages);
	return 0;
}

int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
			struct file *file, void __user *buffer,
			size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, file, buffer, length, ppos);
	if (hugepages_treat_as_movable)
		htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
	else
		htlb_alloc_mask = GFP_HIGHUSER;
	return 0;
}

int hugetlb_overcommit_handler(struct ctl_table *table, int write,
			struct file *file, void __user *buffer,
			size_t *length, loff_t *ppos)
{
	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
	spin_lock(&hugetlb_lock);
	nr_overcommit_huge_pages = sysctl_overcommit_huge_pages;
	spin_unlock(&hugetlb_lock);
	return 0;
}

#endif /* CONFIG_SYSCTL */

int hugetlb_report_meminfo(char *buf)
{
	return sprintf(buf,
			"HugePages_Total: %5lu\n"
			"HugePages_Free:  %5lu\n"
			"HugePages_Rsvd:  %5lu\n"
			"HugePages_Surp:  %5lu\n"
			"Hugepagesize:    %5lu kB\n",
			nr_huge_pages,
			free_huge_pages,
			resv_huge_pages,
			surplus_huge_pages,
			HPAGE_SIZE/1024);
}
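/*
 * Example of the block hugetlb_report_meminfo() above contributes to
 * /proc/meminfo (the numbers are purely illustrative):
 *
 *	HugePages_Total:      20
 *	HugePages_Free:       18
 *	HugePages_Rsvd:        4
 *	HugePages_Surp:        0
 *	Hugepagesize:       2048 kB
 */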
int hugetlb_report_node_meminfo(int nid, char *buf)
{
	return sprintf(buf,
		"Node %d HugePages_Total: %5u\n"
		"Node %d HugePages_Free:  %5u\n"
		"Node %d HugePages_Surp:  %5u\n",
		nid, nr_huge_pages_node[nid],
		nid, free_huge_pages_node[nid],
		nid, surplus_huge_pages_node[nid]);
}

/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
	return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
}

static int hugetlb_acct_memory(long delta)
{
	int ret = -ENOMEM;

	spin_lock(&hugetlb_lock);
	/*
	 * When cpuset is configured, it breaks the strict hugetlb page
	 * reservation as the accounting is done on a global variable. Such
	 * reservation is completely rubbish in the presence of cpuset because
	 * the reservation is not checked against page availability for the
	 * current cpuset. An application can still be OOM'ed by the kernel
	 * if there is no free huge page in the cpuset that the task is in.
	 * Enforcing strict accounting with cpusets is almost impossible (or
	 * too ugly) because cpusets are so fluid that a task or memory node
	 * can be moved between cpusets dynamically.
	 *
	 * The change of semantics for shared hugetlb mapping with cpuset is
	 * undesirable. However, in order to preserve some of the semantics,
	 * we fall back to check against current free page availability as
	 * a best attempt and hopefully to minimize the impact of changing
	 * semantics that cpuset has.
	 */
	if (delta > 0) {
		if (gather_surplus_pages(delta) < 0)
			goto out;

		if (delta > cpuset_mems_nr(free_huge_pages_node)) {
			return_unused_surplus_pages(delta);
			goto out;
		}
	}

	ret = 0;
	if (delta < 0)
		return_unused_surplus_pages((unsigned long) -delta);

out:
	spin_unlock(&hugetlb_lock);
	return ret;
}

static void hugetlb_vm_op_close(struct vm_area_struct *vma)
{
	unsigned long reserve = vma_resv_huge_pages(vma);

	if (reserve)
		hugetlb_acct_memory(-reserve);
}

/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
 * this far.
 */
static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
	return 0;
}

struct vm_operations_struct hugetlb_vm_ops = {
	.fault = hugetlb_vm_op_fault,
	.close = hugetlb_vm_op_close,
};

static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
				int writable)
{
	pte_t entry;

	if (writable) {
		entry =
		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	} else {
		entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	}
	entry = pte_mkyoung(entry);
	entry = pte_mkhuge(entry);

	return entry;
}

static void set_huge_ptep_writable(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep)
{
	pte_t entry;

	entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) {
		update_mmu_cache(vma, address, entry);
	}
}

int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr;
	int cow;

	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		src_pte = huge_pte_offset(src, addr);
		if (!src_pte)
			continue;
		dst_pte = huge_pte_alloc(dst, addr);
		if (!dst_pte)
			goto nomem;

		/* If the pagetables are shared don't copy or take references */
		if (dst_pte == src_pte)
			continue;

		spin_lock(&dst->page_table_lock);
		spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
		if (!huge_pte_none(huge_ptep_get(src_pte))) {
			if (cow)
				huge_ptep_set_wrprotect(src, addr, src_pte);
			entry = huge_ptep_get(src_pte);
			ptepage = pte_page(entry);
			get_page(ptepage);
			set_huge_pte_at(dst, addr, dst_pte, entry);
		}
		spin_unlock(&src->page_table_lock);
		spin_unlock(&dst->page_table_lock);
	}
	return 0;

nomem:
	return -ENOMEM;
}

void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *ptep;
	pte_t pte;
	struct page *page;
	struct page *tmp;
	/*
	 * A page gathering list, protected by per file i_mmap_lock. The
	 * lock is used to avoid list corruption from multiple unmapping
	 * of the same page since we are using page->lru.
	 */
	LIST_HEAD(page_list);

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON(start & ~HPAGE_MASK);
	BUG_ON(end & ~HPAGE_MASK);

	spin_lock(&mm->page_table_lock);
	for (address = start; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;

		if (huge_pmd_unshare(mm, &address, ptep))
			continue;

		pte = huge_ptep_get_and_clear(mm, address, ptep);
		if (huge_pte_none(pte))
			continue;

		page = pte_page(pte);
		if (pte_dirty(pte))
			set_page_dirty(page);
		list_add(&page->lru, &page_list);
	}
	spin_unlock(&mm->page_table_lock);
	flush_tlb_range(vma, start, end);
	list_for_each_entry_safe(page, tmp, &page_list, lru) {
		list_del(&page->lru);
		put_page(page);
	}
}

void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end)
{
	/*
	 * It is undesirable to test vma->vm_file as it should be non-null
	 * for a valid hugetlb area. However, vm_file will be NULL in the
	 * error cleanup path of do_mmap_pgoff. When the hugetlbfs ->mmap
	 * method fails, do_mmap_pgoff() nullifies vma->vm_file before calling
	 * this function to clean up. Since no pte has actually been set up,
	 * it is safe to do nothing in this case.
	 */
	if (vma->vm_file) {
		spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
		__unmap_hugepage_range(vma, start, end);
		spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
	}
}

static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, pte_t pte)
{
	struct page *old_page, *new_page;
	int avoidcopy;

	old_page = pte_page(pte);

	/* If no-one else is actually using this page, avoid the copy
	 * and just make the page writable */
	avoidcopy = (page_count(old_page) == 1);
	if (avoidcopy) {
		set_huge_ptep_writable(vma, address, ptep);
		return 0;
	}

	page_cache_get(old_page);
	new_page = alloc_huge_page(vma, address);

	if (IS_ERR(new_page)) {
		page_cache_release(old_page);
		return -PTR_ERR(new_page);
	}

	spin_unlock(&mm->page_table_lock);
	copy_huge_page(new_page, old_page, address, vma);
	__SetPageUptodate(new_page);
	spin_lock(&mm->page_table_lock);

	ptep = huge_pte_offset(mm, address & HPAGE_MASK);
	if (likely(pte_same(huge_ptep_get(ptep), pte))) {
		/* Break COW */
		huge_ptep_clear_flush(vma, address, ptep);
		set_huge_pte_at(mm, address, ptep,
				make_huge_pte(vma, new_page, 1));
		/* Make the old page be freed below */
		new_page = old_page;
	}
	page_cache_release(new_page);
	page_cache_release(old_page);
	return 0;
}

static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, int write_access)
{
	int ret = VM_FAULT_SIGBUS;
	unsigned long idx;
	unsigned long size;
	struct page *page;
	struct address_space *mapping;
	pte_t new_pte;

	mapping = vma->vm_file->f_mapping;
	idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
		+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));

	/*
	 * Use page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */
retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
		size = i_size_read(mapping->host) >> HPAGE_SHIFT;
		if (idx >= size)
			goto out;
		page = alloc_huge_page(vma, address);
		if (IS_ERR(page)) {
			ret = -PTR_ERR(page);
			goto out;
		}
		clear_huge_page(page, address);
		__SetPageUptodate(page);

		if (vma->vm_flags & VM_SHARED) {
			int err;
			struct inode *inode = mapping->host;

			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
			if (err) {
				put_page(page);
				if (err == -EEXIST)
					goto retry;
				goto out;
			}

			spin_lock(&inode->i_lock);
			inode->i_blocks += BLOCKS_PER_HUGEPAGE;
			spin_unlock(&inode->i_lock);
		} else
			lock_page(page);
	}

	spin_lock(&mm->page_table_lock);
	size = i_size_read(mapping->host) >> HPAGE_SHIFT;
	if (idx >= size)
		goto backout;

	ret = 0;
	if (!huge_pte_none(huge_ptep_get(ptep)))
		goto backout;

	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, address, ptep, new_pte);

	if (write_access && !(vma->vm_flags & VM_SHARED)) {
		/* Optimization, do the COW without a second fault */
		ret = hugetlb_cow(mm, vma, address, ptep, new_pte);
	}

	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
	put_page(page);
	goto out;
}

int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, int write_access)
{
	pte_t *ptep;
	pte_t entry;
	int ret;
	static DEFINE_MUTEX(hugetlb_instantiation_mutex);

	ptep = huge_pte_alloc(mm, address);
	if (!ptep)
		return VM_FAULT_OOM;

	/*
	 * Serialize hugepage allocation and instantiation, so that we don't
	 * get spurious allocation failures if two CPUs race to instantiate
	 * the same page in the page cache.
	 */
	mutex_lock(&hugetlb_instantiation_mutex);
	entry = huge_ptep_get(ptep);
	if (huge_pte_none(entry)) {
		ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
		mutex_unlock(&hugetlb_instantiation_mutex);
		return ret;
	}

	ret = 0;

	spin_lock(&mm->page_table_lock);
	/* Check for a racing update before calling hugetlb_cow */
	if (likely(pte_same(entry, huge_ptep_get(ptep))))
		if (write_access && !pte_write(entry))
			ret = hugetlb_cow(mm, vma, address, ptep, entry);
	spin_unlock(&mm->page_table_lock);
	mutex_unlock(&hugetlb_instantiation_mutex);

	return ret;
}

int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
			struct page **pages, struct vm_area_struct **vmas,
			unsigned long *position, int *length, int i,
			int write)
{
	unsigned long pfn_offset;
	unsigned long vaddr = *position;
	int remainder = *length;

	spin_lock(&mm->page_table_lock);
	while (vaddr < vma->vm_end && remainder) {
		pte_t *pte;
		struct page *page;

		/*
		 * Some archs (sparc64, sh*) have multiple pte_ts to
		 * each hugepage.  We have to make sure we get the
113763551ae0SDavid Gibson int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
113863551ae0SDavid Gibson 			struct page **pages, struct vm_area_struct **vmas,
11395b23dbe8SAdam Litke 			unsigned long *position, int *length, int i,
11405b23dbe8SAdam Litke 			int write)
114163551ae0SDavid Gibson {
1142d5d4b0aaSChen, Kenneth W 	unsigned long pfn_offset;
1143d5d4b0aaSChen, Kenneth W 	unsigned long vaddr = *position;
114463551ae0SDavid Gibson 	int remainder = *length;
114563551ae0SDavid Gibson 
11461c59827dSHugh Dickins 	spin_lock(&mm->page_table_lock);
114763551ae0SDavid Gibson 	while (vaddr < vma->vm_end && remainder) {
114863551ae0SDavid Gibson 		pte_t *pte;
114963551ae0SDavid Gibson 		struct page *page;
115063551ae0SDavid Gibson 
11514c887265SAdam Litke 		/*
11524c887265SAdam Litke 		 * Some archs (sparc64, sh*) have multiple pte_ts to
11534c887265SAdam Litke 		 * each hugepage.  We have to make sure we get the
11544c887265SAdam Litke 		 * first, for the page indexing below to work.
11554c887265SAdam Litke 		 */
115663551ae0SDavid Gibson 		pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);
115763551ae0SDavid Gibson 
11587f2e9525SGerald Schaefer 		if (!pte || huge_pte_none(huge_ptep_get(pte)) ||
11597f2e9525SGerald Schaefer 		    (write && !pte_write(huge_ptep_get(pte)))) {
11604c887265SAdam Litke 			int ret;
11614c887265SAdam Litke 
11624c887265SAdam Litke 			spin_unlock(&mm->page_table_lock);
11635b23dbe8SAdam Litke 			ret = hugetlb_fault(mm, vma, vaddr, write);
11644c887265SAdam Litke 			spin_lock(&mm->page_table_lock);
1165a89182c7SAdam Litke 			if (!(ret & VM_FAULT_ERROR))
11664c887265SAdam Litke 				continue;
11674c887265SAdam Litke 
11681c59827dSHugh Dickins 			remainder = 0;
11691c59827dSHugh Dickins 			if (!i)
11701c59827dSHugh Dickins 				i = -EFAULT;
11711c59827dSHugh Dickins 			break;
11721c59827dSHugh Dickins 		}
117363551ae0SDavid Gibson 
1174d5d4b0aaSChen, Kenneth W 		pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
11757f2e9525SGerald Schaefer 		page = pte_page(huge_ptep_get(pte));
1176d5d4b0aaSChen, Kenneth W same_page:
1177d6692183SChen, Kenneth W 		if (pages) {
117863551ae0SDavid Gibson 			get_page(page);
1179d5d4b0aaSChen, Kenneth W 			pages[i] = page + pfn_offset;
1180d6692183SChen, Kenneth W 		}
118163551ae0SDavid Gibson 
118263551ae0SDavid Gibson 		if (vmas)
118363551ae0SDavid Gibson 			vmas[i] = vma;
118463551ae0SDavid Gibson 
118563551ae0SDavid Gibson 		vaddr += PAGE_SIZE;
1186d5d4b0aaSChen, Kenneth W 		++pfn_offset;
118763551ae0SDavid Gibson 		--remainder;
118863551ae0SDavid Gibson 		++i;
1189d5d4b0aaSChen, Kenneth W 		if (vaddr < vma->vm_end && remainder &&
1190d5d4b0aaSChen, Kenneth W 		    pfn_offset < HPAGE_SIZE/PAGE_SIZE) {
1191d5d4b0aaSChen, Kenneth W 			/*
1192d5d4b0aaSChen, Kenneth W 			 * We use pfn_offset to avoid touching the pageframes
1193d5d4b0aaSChen, Kenneth W 			 * of this compound page.
1194d5d4b0aaSChen, Kenneth W 			 */
1195d5d4b0aaSChen, Kenneth W 			goto same_page;
1196d5d4b0aaSChen, Kenneth W 		}
119763551ae0SDavid Gibson 	}
11981c59827dSHugh Dickins 	spin_unlock(&mm->page_table_lock);
119963551ae0SDavid Gibson 	*length = remainder;
120063551ae0SDavid Gibson 	*position = vaddr;
120163551ae0SDavid Gibson 
120263551ae0SDavid Gibson 	return i;
120363551ae0SDavid Gibson }
12048f860591SZhang, Yanmin 
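/*
 * Change the protection of every huge PTE in [address, end), e.g. for
 * mprotect().  The walk is done under the mapping's i_mmap_lock and the
 * page_table_lock; shared PMDs are unshared rather than modified in
 * place, and the TLB for the range is flushed once at the end.
 */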
12058f860591SZhang, Yanmin void hugetlb_change_protection(struct vm_area_struct *vma,
12068f860591SZhang, Yanmin 		unsigned long address, unsigned long end, pgprot_t newprot)
12078f860591SZhang, Yanmin {
12088f860591SZhang, Yanmin 	struct mm_struct *mm = vma->vm_mm;
12098f860591SZhang, Yanmin 	unsigned long start = address;
12108f860591SZhang, Yanmin 	pte_t *ptep;
12118f860591SZhang, Yanmin 	pte_t pte;
12128f860591SZhang, Yanmin 
12138f860591SZhang, Yanmin 	BUG_ON(address >= end);
12148f860591SZhang, Yanmin 	flush_cache_range(vma, address, end);
12158f860591SZhang, Yanmin 
121639dde65cSChen, Kenneth W 	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
12178f860591SZhang, Yanmin 	spin_lock(&mm->page_table_lock);
12188f860591SZhang, Yanmin 	for (; address < end; address += HPAGE_SIZE) {
12198f860591SZhang, Yanmin 		ptep = huge_pte_offset(mm, address);
12208f860591SZhang, Yanmin 		if (!ptep)
12218f860591SZhang, Yanmin 			continue;
122239dde65cSChen, Kenneth W 		if (huge_pmd_unshare(mm, &address, ptep))
122339dde65cSChen, Kenneth W 			continue;
12247f2e9525SGerald Schaefer 		if (!huge_pte_none(huge_ptep_get(ptep))) {
12258f860591SZhang, Yanmin 			pte = huge_ptep_get_and_clear(mm, address, ptep);
12268f860591SZhang, Yanmin 			pte = pte_mkhuge(pte_modify(pte, newprot));
12278f860591SZhang, Yanmin 			set_huge_pte_at(mm, address, ptep, pte);
12288f860591SZhang, Yanmin 		}
12298f860591SZhang, Yanmin 	}
12308f860591SZhang, Yanmin 	spin_unlock(&mm->page_table_lock);
123139dde65cSChen, Kenneth W 	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
12328f860591SZhang, Yanmin 
12338f860591SZhang, Yanmin 	flush_tlb_range(vma, start, end);
12348f860591SZhang, Yanmin }
12358f860591SZhang, Yanmin 
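/*
 * Huge page reservations against a file are tracked as a sorted list of
 * [from, to) file_region ranges (in units of huge pages) hanging off the
 * mapping's private_list.  region_chg() reports how many extra pages a
 * new reservation would consume and pre-inserts a zero-sized placeholder
 * so that the follow-up region_add() of the same range cannot fail;
 * region_add() then merges the range into the map.
 */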
1236a43a8c39SChen, Kenneth W struct file_region {
1237a43a8c39SChen, Kenneth W 	struct list_head link;
1238a43a8c39SChen, Kenneth W 	long from;
1239a43a8c39SChen, Kenneth W 	long to;
1240a43a8c39SChen, Kenneth W };
1241a43a8c39SChen, Kenneth W 
1242a43a8c39SChen, Kenneth W static long region_add(struct list_head *head, long f, long t)
1243a43a8c39SChen, Kenneth W {
1244a43a8c39SChen, Kenneth W 	struct file_region *rg, *nrg, *trg;
1245a43a8c39SChen, Kenneth W 
1246a43a8c39SChen, Kenneth W 	/* Locate the region we are either in or before. */
1247a43a8c39SChen, Kenneth W 	list_for_each_entry(rg, head, link)
1248a43a8c39SChen, Kenneth W 		if (f <= rg->to)
1249a43a8c39SChen, Kenneth W 			break;
1250a43a8c39SChen, Kenneth W 
1251a43a8c39SChen, Kenneth W 	/* Round our left edge to the current segment if it encloses us. */
1252a43a8c39SChen, Kenneth W 	if (f > rg->from)
1253a43a8c39SChen, Kenneth W 		f = rg->from;
1254a43a8c39SChen, Kenneth W 
1255a43a8c39SChen, Kenneth W 	/* Check for and consume any regions we now overlap with. */
1256a43a8c39SChen, Kenneth W 	nrg = rg;
1257a43a8c39SChen, Kenneth W 	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
1258a43a8c39SChen, Kenneth W 		if (&rg->link == head)
1259a43a8c39SChen, Kenneth W 			break;
1260a43a8c39SChen, Kenneth W 		if (rg->from > t)
1261a43a8c39SChen, Kenneth W 			break;
1262a43a8c39SChen, Kenneth W 
1263a43a8c39SChen, Kenneth W 		/* If this area reaches higher, then extend our area to
1264a43a8c39SChen, Kenneth W 		 * include it completely.  If this is not the first area
1265a43a8c39SChen, Kenneth W 		 * which we intend to reuse, free it. */
1266a43a8c39SChen, Kenneth W 		if (rg->to > t)
1267a43a8c39SChen, Kenneth W 			t = rg->to;
1268a43a8c39SChen, Kenneth W 		if (rg != nrg) {
1269a43a8c39SChen, Kenneth W 			list_del(&rg->link);
1270a43a8c39SChen, Kenneth W 			kfree(rg);
1271a43a8c39SChen, Kenneth W 		}
1272a43a8c39SChen, Kenneth W 	}
1273a43a8c39SChen, Kenneth W 	nrg->from = f;
1274a43a8c39SChen, Kenneth W 	nrg->to = t;
1275a43a8c39SChen, Kenneth W 	return 0;
1276a43a8c39SChen, Kenneth W }
1277a43a8c39SChen, Kenneth W 
1278a43a8c39SChen, Kenneth W static long region_chg(struct list_head *head, long f, long t)
1279a43a8c39SChen, Kenneth W {
1280a43a8c39SChen, Kenneth W 	struct file_region *rg, *nrg;
1281a43a8c39SChen, Kenneth W 	long chg = 0;
1282a43a8c39SChen, Kenneth W 
1283a43a8c39SChen, Kenneth W 	/* Locate the region we are before or in. */
1284a43a8c39SChen, Kenneth W 	list_for_each_entry(rg, head, link)
1285a43a8c39SChen, Kenneth W 		if (f <= rg->to)
1286a43a8c39SChen, Kenneth W 			break;
1287a43a8c39SChen, Kenneth W 
1288a43a8c39SChen, Kenneth W 	/* If we are below the current region then a new region is required.
1289a43a8c39SChen, Kenneth W 	 * Subtle: allocate a new region at the position but make it zero
1290183ff22bSSimon Arlott 	 * size such that we can guarantee to record the reservation. */
1291a43a8c39SChen, Kenneth W 	if (&rg->link == head || t < rg->from) {
1292a43a8c39SChen, Kenneth W 		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
1293c80544dcSStephen Hemminger 		if (!nrg)
1294a43a8c39SChen, Kenneth W 			return -ENOMEM;
1295a43a8c39SChen, Kenneth W 		nrg->from = f;
1296a43a8c39SChen, Kenneth W 		nrg->to = f;
1297a43a8c39SChen, Kenneth W 		INIT_LIST_HEAD(&nrg->link);
1298a43a8c39SChen, Kenneth W 		list_add(&nrg->link, rg->link.prev);
1299a43a8c39SChen, Kenneth W 
1300a43a8c39SChen, Kenneth W 		return t - f;
1301a43a8c39SChen, Kenneth W 	}
1302a43a8c39SChen, Kenneth W 
1303a43a8c39SChen, Kenneth W 	/* Round our left edge to the current segment if it encloses us. */
1304a43a8c39SChen, Kenneth W 	if (f > rg->from)
1305a43a8c39SChen, Kenneth W 		f = rg->from;
1306a43a8c39SChen, Kenneth W 	chg = t - f;
1307a43a8c39SChen, Kenneth W 
1308a43a8c39SChen, Kenneth W 	/* Check for and consume any regions we now overlap with. */
1309a43a8c39SChen, Kenneth W 	list_for_each_entry(rg, rg->link.prev, link) {
1310a43a8c39SChen, Kenneth W 		if (&rg->link == head)
1311a43a8c39SChen, Kenneth W 			break;
1312a43a8c39SChen, Kenneth W 		if (rg->from > t)
1313a43a8c39SChen, Kenneth W 			return chg;
1314a43a8c39SChen, Kenneth W 
1315a43a8c39SChen, Kenneth W 		/* We overlap with this area; if it extends further than
1316a43a8c39SChen, Kenneth W 		 * us then we must extend ourselves.  Account for its
1317a43a8c39SChen, Kenneth W 		 * existing reservation. */
1318a43a8c39SChen, Kenneth W 		if (rg->to > t) {
1319a43a8c39SChen, Kenneth W 			chg += rg->to - t;
1320a43a8c39SChen, Kenneth W 			t = rg->to;
1321a43a8c39SChen, Kenneth W 		}
1322a43a8c39SChen, Kenneth W 		chg -= rg->to - rg->from;
1323a43a8c39SChen, Kenneth W 	}
1324a43a8c39SChen, Kenneth W 	return chg;
1325a43a8c39SChen, Kenneth W }
1326a43a8c39SChen, Kenneth W 
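/*
 * Trim the region map back to 'end' huge pages: clip the region that
 * straddles 'end', drop every region beyond it and return the number of
 * reserved pages released.
 */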
1327a43a8c39SChen, Kenneth W static long region_truncate(struct list_head *head, long end)
1328a43a8c39SChen, Kenneth W {
1329a43a8c39SChen, Kenneth W 	struct file_region *rg, *trg;
1330a43a8c39SChen, Kenneth W 	long chg = 0;
1331a43a8c39SChen, Kenneth W 
1332a43a8c39SChen, Kenneth W 	/* Locate the region we are either in or before. */
1333a43a8c39SChen, Kenneth W 	list_for_each_entry(rg, head, link)
1334a43a8c39SChen, Kenneth W 		if (end <= rg->to)
1335a43a8c39SChen, Kenneth W 			break;
1336a43a8c39SChen, Kenneth W 	if (&rg->link == head)
1337a43a8c39SChen, Kenneth W 		return 0;
1338a43a8c39SChen, Kenneth W 
1339a43a8c39SChen, Kenneth W 	/* If we are in the middle of a region then adjust it. */
1340a43a8c39SChen, Kenneth W 	if (end > rg->from) {
1341a43a8c39SChen, Kenneth W 		chg = rg->to - end;
1342a43a8c39SChen, Kenneth W 		rg->to = end;
1343a43a8c39SChen, Kenneth W 		rg = list_entry(rg->link.next, typeof(*rg), link);
1344a43a8c39SChen, Kenneth W 	}
1345a43a8c39SChen, Kenneth W 
1346a43a8c39SChen, Kenneth W 	/* Drop any remaining regions. */
1347a43a8c39SChen, Kenneth W 	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
1348a43a8c39SChen, Kenneth W 		if (&rg->link == head)
1349a43a8c39SChen, Kenneth W 			break;
1350a43a8c39SChen, Kenneth W 		chg += rg->to - rg->from;
1351a43a8c39SChen, Kenneth W 		list_del(&rg->link);
1352a43a8c39SChen, Kenneth W 		kfree(rg);
1353a43a8c39SChen, Kenneth W 	}
1354a43a8c39SChen, Kenneth W 	return chg;
1355a43a8c39SChen, Kenneth W }
1356a43a8c39SChen, Kenneth W 
1357a1e78772SMel Gorman int hugetlb_reserve_pages(struct inode *inode,
1358a1e78772SMel Gorman 					long from, long to,
1359a1e78772SMel Gorman 					struct vm_area_struct *vma)
1360e4e574b7SAdam Litke {
1361e4e574b7SAdam Litke 	long ret, chg;
1362e4e574b7SAdam Litke 
1363a1e78772SMel Gorman 	/*
1364a1e78772SMel Gorman 	 * Shared mappings base their reservation on the number of pages that
1365a1e78772SMel Gorman 	 * are already allocated on behalf of the file. Private mappings need
1366a1e78772SMel Gorman 	 * to reserve the full area even if read-only as mprotect() may be
1367a1e78772SMel Gorman 	 * called to make the mapping read-write. Assume !vma is a shm mapping.
1368a1e78772SMel Gorman 	 */
1369a1e78772SMel Gorman 	if (!vma || vma->vm_flags & VM_SHARED)
1370e4e574b7SAdam Litke 		chg = region_chg(&inode->i_mapping->private_list, from, to);
1371a1e78772SMel Gorman 	else {
1372a1e78772SMel Gorman 		chg = to - from;
1373a1e78772SMel Gorman 		set_vma_resv_huge_pages(vma, chg);
1374a1e78772SMel Gorman 	}
1375a1e78772SMel Gorman 
1376e4e574b7SAdam Litke 	if (chg < 0)
1377e4e574b7SAdam Litke 		return chg;
13788a630112SKen Chen 
137990d8b7e6SAdam Litke 	if (hugetlb_get_quota(inode->i_mapping, chg))
138090d8b7e6SAdam Litke 		return -ENOSPC;
1381a43a8c39SChen, Kenneth W 	ret = hugetlb_acct_memory(chg);
138268842c9bSKen Chen 	if (ret < 0) {
138368842c9bSKen Chen 		hugetlb_put_quota(inode->i_mapping, chg);
1384a43a8c39SChen, Kenneth W 		return ret;
138568842c9bSKen Chen 	}
1386a1e78772SMel Gorman 	if (!vma || vma->vm_flags & VM_SHARED)
1387a43a8c39SChen, Kenneth W 		region_add(&inode->i_mapping->private_list, from, to);
1388a43a8c39SChen, Kenneth W 	return 0;
1389a43a8c39SChen, Kenneth W }
1390a43a8c39SChen, Kenneth W 
1391a43a8c39SChen, Kenneth W void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
1392a43a8c39SChen, Kenneth W {
1393a43a8c39SChen, Kenneth W 	long chg = region_truncate(&inode->i_mapping->private_list, offset);
139445c682a6SKen Chen 
139545c682a6SKen Chen 	spin_lock(&inode->i_lock);
139645c682a6SKen Chen 	inode->i_blocks -= BLOCKS_PER_HUGEPAGE * freed;
139745c682a6SKen Chen 	spin_unlock(&inode->i_lock);
139845c682a6SKen Chen 
139990d8b7e6SAdam Litke 	hugetlb_put_quota(inode->i_mapping, (chg - freed));
140090d8b7e6SAdam Litke 	hugetlb_acct_memory(-(chg - freed));
1401a43a8c39SChen, Kenneth W }
1402