/*
 * Generic hugetlb support.
 * (C) Nadia Yvette Chambers, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/page-isolation.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>

#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/node.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
unsigned long hugepages_treat_as_movable;

int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];

__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static unsigned long __initdata default_hstate_size;

/*
 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
 * free_huge_pages, and surplus_huge_pages.
 */
DEFINE_SPINLOCK(hugetlb_lock);

static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
{
	bool free = (spool->count == 0) && (spool->used_hpages == 0);

	spin_unlock(&spool->lock);

	/* If no pages are used, and no other handles to the subpool
	 * remain, free the subpool. */
	if (free)
		kfree(spool);
}

struct hugepage_subpool *hugepage_new_subpool(long nr_blocks)
{
	struct hugepage_subpool *spool;

	spool = kmalloc(sizeof(*spool), GFP_KERNEL);
	if (!spool)
		return NULL;

	spin_lock_init(&spool->lock);
	spool->count = 1;
	spool->max_hpages = nr_blocks;
	spool->used_hpages = 0;

	return spool;
}

void hugepage_put_subpool(struct hugepage_subpool *spool)
{
	spin_lock(&spool->lock);
	BUG_ON(!spool->count);
	spool->count--;
	unlock_or_release_subpool(spool);
}

static int hugepage_subpool_get_pages(struct hugepage_subpool *spool,
				      long delta)
{
	int ret = 0;

	if (!spool)
		return 0;

	spin_lock(&spool->lock);
	if ((spool->used_hpages + delta) <= spool->max_hpages) {
		spool->used_hpages += delta;
	} else {
		ret = -ENOMEM;
	}
	spin_unlock(&spool->lock);

	return ret;
}

static void hugepage_subpool_put_pages(struct hugepage_subpool *spool,
				       long delta)
{
	if (!spool)
		return;

	spin_lock(&spool->lock);
	spool->used_hpages -= delta;
	/* If hugetlbfs_put_super couldn't free spool due to
	 * an outstanding quota reference, free it now. */
	unlock_or_release_subpool(spool);
}

static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{
	return HUGETLBFS_SB(inode->i_sb)->spool;
}

static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{
	return subpool_inode(file_inode(vma->vm_file));
}

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 * across the pages in a mapping.
137 * 138 * The region data structures are protected by a combination of the mmap_sem 139 * and the hugetlb_instantiation_mutex. To access or modify a region the caller 140 * must either hold the mmap_sem for write, or the mmap_sem for read and 141 * the hugetlb_instantiation_mutex: 142 * 143 * down_write(&mm->mmap_sem); 144 * or 145 * down_read(&mm->mmap_sem); 146 * mutex_lock(&hugetlb_instantiation_mutex); 147 */ 148 struct file_region { 149 struct list_head link; 150 long from; 151 long to; 152 }; 153 154 static long region_add(struct list_head *head, long f, long t) 155 { 156 struct file_region *rg, *nrg, *trg; 157 158 /* Locate the region we are either in or before. */ 159 list_for_each_entry(rg, head, link) 160 if (f <= rg->to) 161 break; 162 163 /* Round our left edge to the current segment if it encloses us. */ 164 if (f > rg->from) 165 f = rg->from; 166 167 /* Check for and consume any regions we now overlap with. */ 168 nrg = rg; 169 list_for_each_entry_safe(rg, trg, rg->link.prev, link) { 170 if (&rg->link == head) 171 break; 172 if (rg->from > t) 173 break; 174 175 /* If this area reaches higher then extend our area to 176 * include it completely. If this is not the first area 177 * which we intend to reuse, free it. */ 178 if (rg->to > t) 179 t = rg->to; 180 if (rg != nrg) { 181 list_del(&rg->link); 182 kfree(rg); 183 } 184 } 185 nrg->from = f; 186 nrg->to = t; 187 return 0; 188 } 189 190 static long region_chg(struct list_head *head, long f, long t) 191 { 192 struct file_region *rg, *nrg; 193 long chg = 0; 194 195 /* Locate the region we are before or in. */ 196 list_for_each_entry(rg, head, link) 197 if (f <= rg->to) 198 break; 199 200 /* If we are below the current region then a new region is required. 201 * Subtle, allocate a new region at the position but make it zero 202 * size such that we can guarantee to record the reservation. */ 203 if (&rg->link == head || t < rg->from) { 204 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL); 205 if (!nrg) 206 return -ENOMEM; 207 nrg->from = f; 208 nrg->to = f; 209 INIT_LIST_HEAD(&nrg->link); 210 list_add(&nrg->link, rg->link.prev); 211 212 return t - f; 213 } 214 215 /* Round our left edge to the current segment if it encloses us. */ 216 if (f > rg->from) 217 f = rg->from; 218 chg = t - f; 219 220 /* Check for and consume any regions we now overlap with. */ 221 list_for_each_entry(rg, rg->link.prev, link) { 222 if (&rg->link == head) 223 break; 224 if (rg->from > t) 225 return chg; 226 227 /* We overlap with this area, if it extends further than 228 * us then we must extend ourselves. Account for its 229 * existing reservation. */ 230 if (rg->to > t) { 231 chg += rg->to - t; 232 t = rg->to; 233 } 234 chg -= rg->to - rg->from; 235 } 236 return chg; 237 } 238 239 static long region_truncate(struct list_head *head, long end) 240 { 241 struct file_region *rg, *trg; 242 long chg = 0; 243 244 /* Locate the region we are either in or before. */ 245 list_for_each_entry(rg, head, link) 246 if (end <= rg->to) 247 break; 248 if (&rg->link == head) 249 return 0; 250 251 /* If we are in the middle of a region then adjust it. */ 252 if (end > rg->from) { 253 chg = rg->to - end; 254 rg->to = end; 255 rg = list_entry(rg->link.next, typeof(*rg), link); 256 } 257 258 /* Drop any remaining regions. 
*/ 259 list_for_each_entry_safe(rg, trg, rg->link.prev, link) { 260 if (&rg->link == head) 261 break; 262 chg += rg->to - rg->from; 263 list_del(&rg->link); 264 kfree(rg); 265 } 266 return chg; 267 } 268 269 static long region_count(struct list_head *head, long f, long t) 270 { 271 struct file_region *rg; 272 long chg = 0; 273 274 /* Locate each segment we overlap with, and count that overlap. */ 275 list_for_each_entry(rg, head, link) { 276 long seg_from; 277 long seg_to; 278 279 if (rg->to <= f) 280 continue; 281 if (rg->from >= t) 282 break; 283 284 seg_from = max(rg->from, f); 285 seg_to = min(rg->to, t); 286 287 chg += seg_to - seg_from; 288 } 289 290 return chg; 291 } 292 293 /* 294 * Convert the address within this vma to the page offset within 295 * the mapping, in pagecache page units; huge pages here. 296 */ 297 static pgoff_t vma_hugecache_offset(struct hstate *h, 298 struct vm_area_struct *vma, unsigned long address) 299 { 300 return ((address - vma->vm_start) >> huge_page_shift(h)) + 301 (vma->vm_pgoff >> huge_page_order(h)); 302 } 303 304 pgoff_t linear_hugepage_index(struct vm_area_struct *vma, 305 unsigned long address) 306 { 307 return vma_hugecache_offset(hstate_vma(vma), vma, address); 308 } 309 310 /* 311 * Return the size of the pages allocated when backing a VMA. In the majority 312 * cases this will be same size as used by the page table entries. 313 */ 314 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma) 315 { 316 struct hstate *hstate; 317 318 if (!is_vm_hugetlb_page(vma)) 319 return PAGE_SIZE; 320 321 hstate = hstate_vma(vma); 322 323 return 1UL << huge_page_shift(hstate); 324 } 325 EXPORT_SYMBOL_GPL(vma_kernel_pagesize); 326 327 /* 328 * Return the page size being used by the MMU to back a VMA. In the majority 329 * of cases, the page size used by the kernel matches the MMU size. On 330 * architectures where it differs, an architecture-specific version of this 331 * function is required. 332 */ 333 #ifndef vma_mmu_pagesize 334 unsigned long vma_mmu_pagesize(struct vm_area_struct *vma) 335 { 336 return vma_kernel_pagesize(vma); 337 } 338 #endif 339 340 /* 341 * Flags for MAP_PRIVATE reservations. These are stored in the bottom 342 * bits of the reservation map pointer, which are always clear due to 343 * alignment. 344 */ 345 #define HPAGE_RESV_OWNER (1UL << 0) 346 #define HPAGE_RESV_UNMAPPED (1UL << 1) 347 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED) 348 349 /* 350 * These helpers are used to track how many pages are reserved for 351 * faults in a MAP_PRIVATE mapping. Only the process that called mmap() 352 * is guaranteed to have their future faults succeed. 353 * 354 * With the exception of reset_vma_resv_huge_pages() which is called at fork(), 355 * the reserve counters are updated with the hugetlb_lock held. It is safe 356 * to reset the VMA at fork() time as it is not in use yet and there is no 357 * chance of the global counters getting corrupted as a result of the values. 358 * 359 * The private mapping reservation is represented in a subtly different 360 * manner to a shared mapping. A shared mapping has a region map associated 361 * with the underlying file, this region map represents the backing file 362 * pages which have ever had a reservation assigned which this persists even 363 * after the page is instantiated. 
A private mapping has a region map 364 * associated with the original mmap which is attached to all VMAs which 365 * reference it, this region map represents those offsets which have consumed 366 * reservation ie. where pages have been instantiated. 367 */ 368 static unsigned long get_vma_private_data(struct vm_area_struct *vma) 369 { 370 return (unsigned long)vma->vm_private_data; 371 } 372 373 static void set_vma_private_data(struct vm_area_struct *vma, 374 unsigned long value) 375 { 376 vma->vm_private_data = (void *)value; 377 } 378 379 struct resv_map { 380 struct kref refs; 381 struct list_head regions; 382 }; 383 384 static struct resv_map *resv_map_alloc(void) 385 { 386 struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL); 387 if (!resv_map) 388 return NULL; 389 390 kref_init(&resv_map->refs); 391 INIT_LIST_HEAD(&resv_map->regions); 392 393 return resv_map; 394 } 395 396 static void resv_map_release(struct kref *ref) 397 { 398 struct resv_map *resv_map = container_of(ref, struct resv_map, refs); 399 400 /* Clear out any active regions before we release the map. */ 401 region_truncate(&resv_map->regions, 0); 402 kfree(resv_map); 403 } 404 405 static struct resv_map *vma_resv_map(struct vm_area_struct *vma) 406 { 407 VM_BUG_ON(!is_vm_hugetlb_page(vma)); 408 if (!(vma->vm_flags & VM_MAYSHARE)) 409 return (struct resv_map *)(get_vma_private_data(vma) & 410 ~HPAGE_RESV_MASK); 411 return NULL; 412 } 413 414 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map) 415 { 416 VM_BUG_ON(!is_vm_hugetlb_page(vma)); 417 VM_BUG_ON(vma->vm_flags & VM_MAYSHARE); 418 419 set_vma_private_data(vma, (get_vma_private_data(vma) & 420 HPAGE_RESV_MASK) | (unsigned long)map); 421 } 422 423 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags) 424 { 425 VM_BUG_ON(!is_vm_hugetlb_page(vma)); 426 VM_BUG_ON(vma->vm_flags & VM_MAYSHARE); 427 428 set_vma_private_data(vma, get_vma_private_data(vma) | flags); 429 } 430 431 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag) 432 { 433 VM_BUG_ON(!is_vm_hugetlb_page(vma)); 434 435 return (get_vma_private_data(vma) & flag) != 0; 436 } 437 438 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */ 439 void reset_vma_resv_huge_pages(struct vm_area_struct *vma) 440 { 441 VM_BUG_ON(!is_vm_hugetlb_page(vma)); 442 if (!(vma->vm_flags & VM_MAYSHARE)) 443 vma->vm_private_data = (void *)0; 444 } 445 446 /* Returns true if the VMA has associated reserve pages */ 447 static int vma_has_reserves(struct vm_area_struct *vma, long chg) 448 { 449 if (vma->vm_flags & VM_NORESERVE) { 450 /* 451 * This address is already reserved by other process(chg == 0), 452 * so, we should decrement reserved count. Without decrementing, 453 * reserve count remains after releasing inode, because this 454 * allocated page will go into page cache and is regarded as 455 * coming from reserved pool in releasing step. Currently, we 456 * don't have any other solution to deal with this situation 457 * properly, so add work-around here. 458 */ 459 if (vma->vm_flags & VM_MAYSHARE && chg == 0) 460 return 1; 461 else 462 return 0; 463 } 464 465 /* Shared mappings always use reserves */ 466 if (vma->vm_flags & VM_MAYSHARE) 467 return 1; 468 469 /* 470 * Only the process that called mmap() has reserves for 471 * private mappings. 
472 */ 473 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) 474 return 1; 475 476 return 0; 477 } 478 479 static void enqueue_huge_page(struct hstate *h, struct page *page) 480 { 481 int nid = page_to_nid(page); 482 list_move(&page->lru, &h->hugepage_freelists[nid]); 483 h->free_huge_pages++; 484 h->free_huge_pages_node[nid]++; 485 } 486 487 static struct page *dequeue_huge_page_node(struct hstate *h, int nid) 488 { 489 struct page *page; 490 491 list_for_each_entry(page, &h->hugepage_freelists[nid], lru) 492 if (!is_migrate_isolate_page(page)) 493 break; 494 /* 495 * if 'non-isolated free hugepage' not found on the list, 496 * the allocation fails. 497 */ 498 if (&h->hugepage_freelists[nid] == &page->lru) 499 return NULL; 500 list_move(&page->lru, &h->hugepage_activelist); 501 set_page_refcounted(page); 502 h->free_huge_pages--; 503 h->free_huge_pages_node[nid]--; 504 return page; 505 } 506 507 /* Movability of hugepages depends on migration support. */ 508 static inline gfp_t htlb_alloc_mask(struct hstate *h) 509 { 510 if (hugepages_treat_as_movable || hugepage_migration_support(h)) 511 return GFP_HIGHUSER_MOVABLE; 512 else 513 return GFP_HIGHUSER; 514 } 515 516 static struct page *dequeue_huge_page_vma(struct hstate *h, 517 struct vm_area_struct *vma, 518 unsigned long address, int avoid_reserve, 519 long chg) 520 { 521 struct page *page = NULL; 522 struct mempolicy *mpol; 523 nodemask_t *nodemask; 524 struct zonelist *zonelist; 525 struct zone *zone; 526 struct zoneref *z; 527 unsigned int cpuset_mems_cookie; 528 529 /* 530 * A child process with MAP_PRIVATE mappings created by their parent 531 * have no page reserves. This check ensures that reservations are 532 * not "stolen". The child may still get SIGKILLed 533 */ 534 if (!vma_has_reserves(vma, chg) && 535 h->free_huge_pages - h->resv_huge_pages == 0) 536 goto err; 537 538 /* If reserves cannot be used, ensure enough pages are in the pool */ 539 if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0) 540 goto err; 541 542 retry_cpuset: 543 cpuset_mems_cookie = get_mems_allowed(); 544 zonelist = huge_zonelist(vma, address, 545 htlb_alloc_mask(h), &mpol, &nodemask); 546 547 for_each_zone_zonelist_nodemask(zone, z, zonelist, 548 MAX_NR_ZONES - 1, nodemask) { 549 if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask(h))) { 550 page = dequeue_huge_page_node(h, zone_to_nid(zone)); 551 if (page) { 552 if (avoid_reserve) 553 break; 554 if (!vma_has_reserves(vma, chg)) 555 break; 556 557 SetPagePrivate(page); 558 h->resv_huge_pages--; 559 break; 560 } 561 } 562 } 563 564 mpol_cond_put(mpol); 565 if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page)) 566 goto retry_cpuset; 567 return page; 568 569 err: 570 return NULL; 571 } 572 573 static void update_and_free_page(struct hstate *h, struct page *page) 574 { 575 int i; 576 577 VM_BUG_ON(h->order >= MAX_ORDER); 578 579 h->nr_huge_pages--; 580 h->nr_huge_pages_node[page_to_nid(page)]--; 581 for (i = 0; i < pages_per_huge_page(h); i++) { 582 page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 583 1 << PG_referenced | 1 << PG_dirty | 584 1 << PG_active | 1 << PG_reserved | 585 1 << PG_private | 1 << PG_writeback); 586 } 587 VM_BUG_ON(hugetlb_cgroup_from_page(page)); 588 set_compound_page_dtor(page, NULL); 589 set_page_refcounted(page); 590 arch_release_hugepage(page); 591 __free_pages(page, huge_page_order(h)); 592 } 593 594 struct hstate *size_to_hstate(unsigned long size) 595 { 596 struct hstate *h; 597 598 for_each_hstate(h) { 599 if (huge_page_size(h) == size) 600 return h; 601 
	}
	return NULL;
}

static void free_huge_page(struct page *page)
{
	/*
	 * Can't pass hstate in here because it is called from the
	 * compound page destructor.
	 */
	struct hstate *h = page_hstate(page);
	int nid = page_to_nid(page);
	struct hugepage_subpool *spool =
		(struct hugepage_subpool *)page_private(page);
	bool restore_reserve;

	set_page_private(page, 0);
	page->mapping = NULL;
	BUG_ON(page_count(page));
	BUG_ON(page_mapcount(page));
	restore_reserve = PagePrivate(page);
	ClearPagePrivate(page);

	spin_lock(&hugetlb_lock);
	hugetlb_cgroup_uncharge_page(hstate_index(h),
				     pages_per_huge_page(h), page);
	if (restore_reserve)
		h->resv_huge_pages++;

	if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
		/* remove the page from active list */
		list_del(&page->lru);
		update_and_free_page(h, page);
		h->surplus_huge_pages--;
		h->surplus_huge_pages_node[nid]--;
	} else {
		arch_clear_hugepage_flags(page);
		enqueue_huge_page(h, page);
	}
	spin_unlock(&hugetlb_lock);
	hugepage_subpool_put_pages(spool, 1);
}

static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
{
	INIT_LIST_HEAD(&page->lru);
	set_compound_page_dtor(page, free_huge_page);
	spin_lock(&hugetlb_lock);
	set_hugetlb_cgroup(page, NULL);
	h->nr_huge_pages++;
	h->nr_huge_pages_node[nid]++;
	spin_unlock(&hugetlb_lock);
	put_page(page); /* free it into the hugepage allocator */
}

static void prep_compound_gigantic_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p = page + 1;

	/* we rely on prep_new_huge_page to set the destructor */
	set_compound_order(page, order);
	__SetPageHead(page);
	__ClearPageReserved(page);
	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
		__SetPageTail(p);
		/*
		 * For gigantic hugepages allocated through bootmem at
		 * boot, it's safer to be consistent with the not-gigantic
		 * hugepages and clear the PG_reserved bit from all tail pages
		 * too. Otherwise drivers using get_user_pages() to access
		 * tail pages may get the reference counting wrong if they see
		 * PG_reserved set on a tail page (despite the head page not
		 * having PG_reserved set). Enforcing this consistency between
		 * head and tail pages allows drivers to optimize away a check
		 * on the head page when they need to know if put_page() is
		 * needed after get_user_pages().
		 */
		__ClearPageReserved(p);
		set_page_count(p, 0);
		p->first_page = page;
	}
}

/*
 * PageHuge() only returns true for hugetlbfs pages, but not for normal or
 * transparent huge pages. See the PageTransHuge() documentation for more
 * details.
 */
int PageHuge(struct page *page)
{
	compound_page_dtor *dtor;

	if (!PageCompound(page))
		return 0;

	page = compound_head(page);
	dtor = get_compound_page_dtor(page);

	return dtor == free_huge_page;
}
EXPORT_SYMBOL_GPL(PageHuge);

/*
 * PageHeadHuge() only returns true for hugetlbfs head pages, but not for
 * normal or transparent huge pages.
708 */ 709 int PageHeadHuge(struct page *page_head) 710 { 711 compound_page_dtor *dtor; 712 713 if (!PageHead(page_head)) 714 return 0; 715 716 dtor = get_compound_page_dtor(page_head); 717 718 return dtor == free_huge_page; 719 } 720 EXPORT_SYMBOL_GPL(PageHeadHuge); 721 722 pgoff_t __basepage_index(struct page *page) 723 { 724 struct page *page_head = compound_head(page); 725 pgoff_t index = page_index(page_head); 726 unsigned long compound_idx; 727 728 if (!PageHuge(page_head)) 729 return page_index(page); 730 731 if (compound_order(page_head) >= MAX_ORDER) 732 compound_idx = page_to_pfn(page) - page_to_pfn(page_head); 733 else 734 compound_idx = page - page_head; 735 736 return (index << compound_order(page_head)) + compound_idx; 737 } 738 739 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid) 740 { 741 struct page *page; 742 743 if (h->order >= MAX_ORDER) 744 return NULL; 745 746 page = alloc_pages_exact_node(nid, 747 htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE| 748 __GFP_REPEAT|__GFP_NOWARN, 749 huge_page_order(h)); 750 if (page) { 751 if (arch_prepare_hugepage(page)) { 752 __free_pages(page, huge_page_order(h)); 753 return NULL; 754 } 755 prep_new_huge_page(h, page, nid); 756 } 757 758 return page; 759 } 760 761 /* 762 * common helper functions for hstate_next_node_to_{alloc|free}. 763 * We may have allocated or freed a huge page based on a different 764 * nodes_allowed previously, so h->next_node_to_{alloc|free} might 765 * be outside of *nodes_allowed. Ensure that we use an allowed 766 * node for alloc or free. 767 */ 768 static int next_node_allowed(int nid, nodemask_t *nodes_allowed) 769 { 770 nid = next_node(nid, *nodes_allowed); 771 if (nid == MAX_NUMNODES) 772 nid = first_node(*nodes_allowed); 773 VM_BUG_ON(nid >= MAX_NUMNODES); 774 775 return nid; 776 } 777 778 static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed) 779 { 780 if (!node_isset(nid, *nodes_allowed)) 781 nid = next_node_allowed(nid, nodes_allowed); 782 return nid; 783 } 784 785 /* 786 * returns the previously saved node ["this node"] from which to 787 * allocate a persistent huge page for the pool and advance the 788 * next node from which to allocate, handling wrap at end of node 789 * mask. 790 */ 791 static int hstate_next_node_to_alloc(struct hstate *h, 792 nodemask_t *nodes_allowed) 793 { 794 int nid; 795 796 VM_BUG_ON(!nodes_allowed); 797 798 nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed); 799 h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed); 800 801 return nid; 802 } 803 804 /* 805 * helper for free_pool_huge_page() - return the previously saved 806 * node ["this node"] from which to free a huge page. Advance the 807 * next node id whether or not we find a free huge page to free so 808 * that the next attempt to free addresses the next node. 
809 */ 810 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed) 811 { 812 int nid; 813 814 VM_BUG_ON(!nodes_allowed); 815 816 nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed); 817 h->next_nid_to_free = next_node_allowed(nid, nodes_allowed); 818 819 return nid; 820 } 821 822 #define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask) \ 823 for (nr_nodes = nodes_weight(*mask); \ 824 nr_nodes > 0 && \ 825 ((node = hstate_next_node_to_alloc(hs, mask)) || 1); \ 826 nr_nodes--) 827 828 #define for_each_node_mask_to_free(hs, nr_nodes, node, mask) \ 829 for (nr_nodes = nodes_weight(*mask); \ 830 nr_nodes > 0 && \ 831 ((node = hstate_next_node_to_free(hs, mask)) || 1); \ 832 nr_nodes--) 833 834 static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed) 835 { 836 struct page *page; 837 int nr_nodes, node; 838 int ret = 0; 839 840 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) { 841 page = alloc_fresh_huge_page_node(h, node); 842 if (page) { 843 ret = 1; 844 break; 845 } 846 } 847 848 if (ret) 849 count_vm_event(HTLB_BUDDY_PGALLOC); 850 else 851 count_vm_event(HTLB_BUDDY_PGALLOC_FAIL); 852 853 return ret; 854 } 855 856 /* 857 * Free huge page from pool from next node to free. 858 * Attempt to keep persistent huge pages more or less 859 * balanced over allowed nodes. 860 * Called with hugetlb_lock locked. 861 */ 862 static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed, 863 bool acct_surplus) 864 { 865 int nr_nodes, node; 866 int ret = 0; 867 868 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { 869 /* 870 * If we're returning unused surplus pages, only examine 871 * nodes with surplus pages. 872 */ 873 if ((!acct_surplus || h->surplus_huge_pages_node[node]) && 874 !list_empty(&h->hugepage_freelists[node])) { 875 struct page *page = 876 list_entry(h->hugepage_freelists[node].next, 877 struct page, lru); 878 list_del(&page->lru); 879 h->free_huge_pages--; 880 h->free_huge_pages_node[node]--; 881 if (acct_surplus) { 882 h->surplus_huge_pages--; 883 h->surplus_huge_pages_node[node]--; 884 } 885 update_and_free_page(h, page); 886 ret = 1; 887 break; 888 } 889 } 890 891 return ret; 892 } 893 894 /* 895 * Dissolve a given free hugepage into free buddy pages. This function does 896 * nothing for in-use (including surplus) hugepages. 897 */ 898 static void dissolve_free_huge_page(struct page *page) 899 { 900 spin_lock(&hugetlb_lock); 901 if (PageHuge(page) && !page_count(page)) { 902 struct hstate *h = page_hstate(page); 903 int nid = page_to_nid(page); 904 list_del(&page->lru); 905 h->free_huge_pages--; 906 h->free_huge_pages_node[nid]--; 907 update_and_free_page(h, page); 908 } 909 spin_unlock(&hugetlb_lock); 910 } 911 912 /* 913 * Dissolve free hugepages in a given pfn range. Used by memory hotplug to 914 * make specified memory blocks removable from the system. 915 * Note that start_pfn should aligned with (minimum) hugepage size. 
916 */ 917 void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn) 918 { 919 unsigned int order = 8 * sizeof(void *); 920 unsigned long pfn; 921 struct hstate *h; 922 923 /* Set scan step to minimum hugepage size */ 924 for_each_hstate(h) 925 if (order > huge_page_order(h)) 926 order = huge_page_order(h); 927 VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << order)); 928 for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order) 929 dissolve_free_huge_page(pfn_to_page(pfn)); 930 } 931 932 static struct page *alloc_buddy_huge_page(struct hstate *h, int nid) 933 { 934 struct page *page; 935 unsigned int r_nid; 936 937 if (h->order >= MAX_ORDER) 938 return NULL; 939 940 /* 941 * Assume we will successfully allocate the surplus page to 942 * prevent racing processes from causing the surplus to exceed 943 * overcommit 944 * 945 * This however introduces a different race, where a process B 946 * tries to grow the static hugepage pool while alloc_pages() is 947 * called by process A. B will only examine the per-node 948 * counters in determining if surplus huge pages can be 949 * converted to normal huge pages in adjust_pool_surplus(). A 950 * won't be able to increment the per-node counter, until the 951 * lock is dropped by B, but B doesn't drop hugetlb_lock until 952 * no more huge pages can be converted from surplus to normal 953 * state (and doesn't try to convert again). Thus, we have a 954 * case where a surplus huge page exists, the pool is grown, and 955 * the surplus huge page still exists after, even though it 956 * should just have been converted to a normal huge page. This 957 * does not leak memory, though, as the hugepage will be freed 958 * once it is out of use. It also does not allow the counters to 959 * go out of whack in adjust_pool_surplus() as we don't modify 960 * the node values until we've gotten the hugepage and only the 961 * per-node value is checked there. 962 */ 963 spin_lock(&hugetlb_lock); 964 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) { 965 spin_unlock(&hugetlb_lock); 966 return NULL; 967 } else { 968 h->nr_huge_pages++; 969 h->surplus_huge_pages++; 970 } 971 spin_unlock(&hugetlb_lock); 972 973 if (nid == NUMA_NO_NODE) 974 page = alloc_pages(htlb_alloc_mask(h)|__GFP_COMP| 975 __GFP_REPEAT|__GFP_NOWARN, 976 huge_page_order(h)); 977 else 978 page = alloc_pages_exact_node(nid, 979 htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE| 980 __GFP_REPEAT|__GFP_NOWARN, huge_page_order(h)); 981 982 if (page && arch_prepare_hugepage(page)) { 983 __free_pages(page, huge_page_order(h)); 984 page = NULL; 985 } 986 987 spin_lock(&hugetlb_lock); 988 if (page) { 989 INIT_LIST_HEAD(&page->lru); 990 r_nid = page_to_nid(page); 991 set_compound_page_dtor(page, free_huge_page); 992 set_hugetlb_cgroup(page, NULL); 993 /* 994 * We incremented the global counters already 995 */ 996 h->nr_huge_pages_node[r_nid]++; 997 h->surplus_huge_pages_node[r_nid]++; 998 __count_vm_event(HTLB_BUDDY_PGALLOC); 999 } else { 1000 h->nr_huge_pages--; 1001 h->surplus_huge_pages--; 1002 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL); 1003 } 1004 spin_unlock(&hugetlb_lock); 1005 1006 return page; 1007 } 1008 1009 /* 1010 * This allocation function is useful in the context where vma is irrelevant. 1011 * E.g. soft-offlining uses this function because it only cares physical 1012 * address of error page. 
1013 */ 1014 struct page *alloc_huge_page_node(struct hstate *h, int nid) 1015 { 1016 struct page *page = NULL; 1017 1018 spin_lock(&hugetlb_lock); 1019 if (h->free_huge_pages - h->resv_huge_pages > 0) 1020 page = dequeue_huge_page_node(h, nid); 1021 spin_unlock(&hugetlb_lock); 1022 1023 if (!page) 1024 page = alloc_buddy_huge_page(h, nid); 1025 1026 return page; 1027 } 1028 1029 /* 1030 * Increase the hugetlb pool such that it can accommodate a reservation 1031 * of size 'delta'. 1032 */ 1033 static int gather_surplus_pages(struct hstate *h, int delta) 1034 { 1035 struct list_head surplus_list; 1036 struct page *page, *tmp; 1037 int ret, i; 1038 int needed, allocated; 1039 bool alloc_ok = true; 1040 1041 needed = (h->resv_huge_pages + delta) - h->free_huge_pages; 1042 if (needed <= 0) { 1043 h->resv_huge_pages += delta; 1044 return 0; 1045 } 1046 1047 allocated = 0; 1048 INIT_LIST_HEAD(&surplus_list); 1049 1050 ret = -ENOMEM; 1051 retry: 1052 spin_unlock(&hugetlb_lock); 1053 for (i = 0; i < needed; i++) { 1054 page = alloc_buddy_huge_page(h, NUMA_NO_NODE); 1055 if (!page) { 1056 alloc_ok = false; 1057 break; 1058 } 1059 list_add(&page->lru, &surplus_list); 1060 } 1061 allocated += i; 1062 1063 /* 1064 * After retaking hugetlb_lock, we need to recalculate 'needed' 1065 * because either resv_huge_pages or free_huge_pages may have changed. 1066 */ 1067 spin_lock(&hugetlb_lock); 1068 needed = (h->resv_huge_pages + delta) - 1069 (h->free_huge_pages + allocated); 1070 if (needed > 0) { 1071 if (alloc_ok) 1072 goto retry; 1073 /* 1074 * We were not able to allocate enough pages to 1075 * satisfy the entire reservation so we free what 1076 * we've allocated so far. 1077 */ 1078 goto free; 1079 } 1080 /* 1081 * The surplus_list now contains _at_least_ the number of extra pages 1082 * needed to accommodate the reservation. Add the appropriate number 1083 * of pages to the hugetlb pool and free the extras back to the buddy 1084 * allocator. Commit the entire reservation here to prevent another 1085 * process from stealing the pages as they are added to the pool but 1086 * before they are reserved. 1087 */ 1088 needed += allocated; 1089 h->resv_huge_pages += delta; 1090 ret = 0; 1091 1092 /* Free the needed pages to the hugetlb pool */ 1093 list_for_each_entry_safe(page, tmp, &surplus_list, lru) { 1094 if ((--needed) < 0) 1095 break; 1096 /* 1097 * This page is now managed by the hugetlb allocator and has 1098 * no users -- drop the buddy allocator's reference. 1099 */ 1100 put_page_testzero(page); 1101 VM_BUG_ON(page_count(page)); 1102 enqueue_huge_page(h, page); 1103 } 1104 free: 1105 spin_unlock(&hugetlb_lock); 1106 1107 /* Free unnecessary surplus pages to the buddy allocator */ 1108 list_for_each_entry_safe(page, tmp, &surplus_list, lru) 1109 put_page(page); 1110 spin_lock(&hugetlb_lock); 1111 1112 return ret; 1113 } 1114 1115 /* 1116 * When releasing a hugetlb pool reservation, any surplus pages that were 1117 * allocated to satisfy the reservation must be explicitly freed if they were 1118 * never used. 1119 * Called with hugetlb_lock held. 
 */
static void return_unused_surplus_pages(struct hstate *h,
					unsigned long unused_resv_pages)
{
	unsigned long nr_pages;

	/* Uncommit the reservation */
	h->resv_huge_pages -= unused_resv_pages;

	/* Cannot return gigantic pages currently */
	if (h->order >= MAX_ORDER)
		return;

	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);

	/*
	 * We want to release as many surplus pages as possible, spread
	 * evenly across all nodes with memory. Iterate across these nodes
	 * until we can no longer free unreserved surplus pages. This occurs
	 * when the nodes with surplus pages have no free pages.
	 * free_pool_huge_page() will balance the freed pages across the
	 * on-line nodes with memory and will handle the hstate accounting.
	 */
	while (nr_pages--) {
		if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
			break;
	}
}

/*
 * Determine if the huge page at addr within the vma has an associated
 * reservation. Where it does not we will need to logically increase
 * reservation and actually increase subpool usage before an allocation
 * can occur. Where any new reservation would be required the
 * reservation change is prepared, but not committed. Once the page
 * has been allocated from the subpool and instantiated the change should
 * be committed via vma_commit_reservation. No action is required on
 * failure.
 */
static long vma_needs_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;

	if (vma->vm_flags & VM_MAYSHARE) {
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		return region_chg(&inode->i_mapping->private_list,
							idx, idx + 1);

	} else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		return 1;

	} else {
		long err;
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		struct resv_map *resv = vma_resv_map(vma);

		err = region_chg(&resv->regions, idx, idx + 1);
		if (err < 0)
			return err;
		return 0;
	}
}

static void vma_commit_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;

	if (vma->vm_flags & VM_MAYSHARE) {
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		region_add(&inode->i_mapping->private_list, idx, idx + 1);

	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		struct resv_map *resv = vma_resv_map(vma);

		/* Mark this page used in the map. */
		region_add(&resv->regions, idx, idx + 1);
	}
}
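/*
 * Allocation path summary for alloc_huge_page() below: a fault first asks
 * vma_needs_reservation() whether a page was reserved for this offset. If
 * not, the subpool and the hugetlb cgroup are charged up front, a page is
 * taken from the pre-allocated pool via dequeue_huge_page_vma() or allocated
 * as a surplus page with alloc_buddy_huge_page(), and the reservation is
 * committed only once the page is in hand, via vma_commit_reservation().
 */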
static struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr, int avoid_reserve)
{
	struct hugepage_subpool *spool = subpool_vma(vma);
	struct hstate *h = hstate_vma(vma);
	struct page *page;
	long chg;
	int ret, idx;
	struct hugetlb_cgroup *h_cg;

	idx = hstate_index(h);
	/*
	 * Processes that did not create the mapping will have no
	 * reserves and will not have accounted against subpool
	 * limit. Check that the subpool limit can be made before
	 * satisfying the allocation. MAP_NORESERVE mappings may also
	 * need pages and subpool limit allocated if no reserve
	 * mapping overlaps.
	 */
	chg = vma_needs_reservation(h, vma, addr);
	if (chg < 0)
		return ERR_PTR(-ENOMEM);
	if (chg || avoid_reserve)
		if (hugepage_subpool_get_pages(spool, 1))
			return ERR_PTR(-ENOSPC);

	ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
	if (ret) {
		if (chg || avoid_reserve)
			hugepage_subpool_put_pages(spool, 1);
		return ERR_PTR(-ENOSPC);
	}
	spin_lock(&hugetlb_lock);
	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, chg);
	if (!page) {
		spin_unlock(&hugetlb_lock);
		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
		if (!page) {
			hugetlb_cgroup_uncharge_cgroup(idx,
						       pages_per_huge_page(h),
						       h_cg);
			if (chg || avoid_reserve)
				hugepage_subpool_put_pages(spool, 1);
			return ERR_PTR(-ENOSPC);
		}
		spin_lock(&hugetlb_lock);
		list_move(&page->lru, &h->hugepage_activelist);
		/* Fall through */
	}
	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
	spin_unlock(&hugetlb_lock);

	set_page_private(page, (unsigned long)spool);

	vma_commit_reservation(h, vma, addr);
	return page;
}

/*
 * alloc_huge_page()'s wrapper which simply returns the page if allocation
 * succeeds, otherwise NULL. This function is called from new_vma_page(),
 * where no ERR_VALUE is expected to be returned.
 */
struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve)
{
	struct page *page = alloc_huge_page(vma, addr, avoid_reserve);
	if (IS_ERR(page))
		page = NULL;
	return page;
}

int __weak alloc_bootmem_huge_page(struct hstate *h)
{
	struct huge_bootmem_page *m;
	int nr_nodes, node;

	for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
		void *addr;

		addr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
				huge_page_size(h), huge_page_size(h), 0);

		if (addr) {
			/*
			 * Use the beginning of the huge page to store the
			 * huge_bootmem_page struct (until gather_bootmem
			 * puts them into the mem_map).
1291 */ 1292 m = addr; 1293 goto found; 1294 } 1295 } 1296 return 0; 1297 1298 found: 1299 BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1)); 1300 /* Put them into a private list first because mem_map is not up yet */ 1301 list_add(&m->list, &huge_boot_pages); 1302 m->hstate = h; 1303 return 1; 1304 } 1305 1306 static void prep_compound_huge_page(struct page *page, int order) 1307 { 1308 if (unlikely(order > (MAX_ORDER - 1))) 1309 prep_compound_gigantic_page(page, order); 1310 else 1311 prep_compound_page(page, order); 1312 } 1313 1314 /* Put bootmem huge pages into the standard lists after mem_map is up */ 1315 static void __init gather_bootmem_prealloc(void) 1316 { 1317 struct huge_bootmem_page *m; 1318 1319 list_for_each_entry(m, &huge_boot_pages, list) { 1320 struct hstate *h = m->hstate; 1321 struct page *page; 1322 1323 #ifdef CONFIG_HIGHMEM 1324 page = pfn_to_page(m->phys >> PAGE_SHIFT); 1325 free_bootmem_late((unsigned long)m, 1326 sizeof(struct huge_bootmem_page)); 1327 #else 1328 page = virt_to_page(m); 1329 #endif 1330 WARN_ON(page_count(page) != 1); 1331 prep_compound_huge_page(page, h->order); 1332 WARN_ON(PageReserved(page)); 1333 prep_new_huge_page(h, page, page_to_nid(page)); 1334 /* 1335 * If we had gigantic hugepages allocated at boot time, we need 1336 * to restore the 'stolen' pages to totalram_pages in order to 1337 * fix confusing memory reports from free(1) and another 1338 * side-effects, like CommitLimit going negative. 1339 */ 1340 if (h->order > (MAX_ORDER - 1)) 1341 adjust_managed_page_count(page, 1 << h->order); 1342 } 1343 } 1344 1345 static void __init hugetlb_hstate_alloc_pages(struct hstate *h) 1346 { 1347 unsigned long i; 1348 1349 for (i = 0; i < h->max_huge_pages; ++i) { 1350 if (h->order >= MAX_ORDER) { 1351 if (!alloc_bootmem_huge_page(h)) 1352 break; 1353 } else if (!alloc_fresh_huge_page(h, 1354 &node_states[N_MEMORY])) 1355 break; 1356 } 1357 h->max_huge_pages = i; 1358 } 1359 1360 static void __init hugetlb_init_hstates(void) 1361 { 1362 struct hstate *h; 1363 1364 for_each_hstate(h) { 1365 /* oversize hugepages were init'ed in early boot */ 1366 if (h->order < MAX_ORDER) 1367 hugetlb_hstate_alloc_pages(h); 1368 } 1369 } 1370 1371 static char * __init memfmt(char *buf, unsigned long n) 1372 { 1373 if (n >= (1UL << 30)) 1374 sprintf(buf, "%lu GB", n >> 30); 1375 else if (n >= (1UL << 20)) 1376 sprintf(buf, "%lu MB", n >> 20); 1377 else 1378 sprintf(buf, "%lu KB", n >> 10); 1379 return buf; 1380 } 1381 1382 static void __init report_hugepages(void) 1383 { 1384 struct hstate *h; 1385 1386 for_each_hstate(h) { 1387 char buf[32]; 1388 pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n", 1389 memfmt(buf, huge_page_size(h)), 1390 h->free_huge_pages); 1391 } 1392 } 1393 1394 #ifdef CONFIG_HIGHMEM 1395 static void try_to_free_low(struct hstate *h, unsigned long count, 1396 nodemask_t *nodes_allowed) 1397 { 1398 int i; 1399 1400 if (h->order >= MAX_ORDER) 1401 return; 1402 1403 for_each_node_mask(i, *nodes_allowed) { 1404 struct page *page, *next; 1405 struct list_head *freel = &h->hugepage_freelists[i]; 1406 list_for_each_entry_safe(page, next, freel, lru) { 1407 if (count >= h->nr_huge_pages) 1408 return; 1409 if (PageHighMem(page)) 1410 continue; 1411 list_del(&page->lru); 1412 update_and_free_page(h, page); 1413 h->free_huge_pages--; 1414 h->free_huge_pages_node[page_to_nid(page)]--; 1415 } 1416 } 1417 } 1418 #else 1419 static inline void try_to_free_low(struct hstate *h, unsigned long count, 1420 nodemask_t 
*nodes_allowed) 1421 { 1422 } 1423 #endif 1424 1425 /* 1426 * Increment or decrement surplus_huge_pages. Keep node-specific counters 1427 * balanced by operating on them in a round-robin fashion. 1428 * Returns 1 if an adjustment was made. 1429 */ 1430 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed, 1431 int delta) 1432 { 1433 int nr_nodes, node; 1434 1435 VM_BUG_ON(delta != -1 && delta != 1); 1436 1437 if (delta < 0) { 1438 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) { 1439 if (h->surplus_huge_pages_node[node]) 1440 goto found; 1441 } 1442 } else { 1443 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { 1444 if (h->surplus_huge_pages_node[node] < 1445 h->nr_huge_pages_node[node]) 1446 goto found; 1447 } 1448 } 1449 return 0; 1450 1451 found: 1452 h->surplus_huge_pages += delta; 1453 h->surplus_huge_pages_node[node] += delta; 1454 return 1; 1455 } 1456 1457 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages) 1458 static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count, 1459 nodemask_t *nodes_allowed) 1460 { 1461 unsigned long min_count, ret; 1462 1463 if (h->order >= MAX_ORDER) 1464 return h->max_huge_pages; 1465 1466 /* 1467 * Increase the pool size 1468 * First take pages out of surplus state. Then make up the 1469 * remaining difference by allocating fresh huge pages. 1470 * 1471 * We might race with alloc_buddy_huge_page() here and be unable 1472 * to convert a surplus huge page to a normal huge page. That is 1473 * not critical, though, it just means the overall size of the 1474 * pool might be one hugepage larger than it needs to be, but 1475 * within all the constraints specified by the sysctls. 1476 */ 1477 spin_lock(&hugetlb_lock); 1478 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) { 1479 if (!adjust_pool_surplus(h, nodes_allowed, -1)) 1480 break; 1481 } 1482 1483 while (count > persistent_huge_pages(h)) { 1484 /* 1485 * If this allocation races such that we no longer need the 1486 * page, free_huge_page will handle it by freeing the page 1487 * and reducing the surplus. 1488 */ 1489 spin_unlock(&hugetlb_lock); 1490 ret = alloc_fresh_huge_page(h, nodes_allowed); 1491 spin_lock(&hugetlb_lock); 1492 if (!ret) 1493 goto out; 1494 1495 /* Bail for signals. Probably ctrl-c from user */ 1496 if (signal_pending(current)) 1497 goto out; 1498 } 1499 1500 /* 1501 * Decrease the pool size 1502 * First return free pages to the buddy allocator (being careful 1503 * to keep enough around to satisfy reservations). Then place 1504 * pages into surplus state as needed so the pool will shrink 1505 * to the desired size as pages become free. 1506 * 1507 * By placing pages into the surplus state independent of the 1508 * overcommit value, we are allowing the surplus pool size to 1509 * exceed overcommit. There are few sane options here. Since 1510 * alloc_buddy_huge_page() is checking the global counter, 1511 * though, we'll note that we're not allowed to exceed surplus 1512 * and won't grow the pool anywhere else. Not until one of the 1513 * sysctls are changed, or the surplus pages go out of use. 
1514 */ 1515 min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages; 1516 min_count = max(count, min_count); 1517 try_to_free_low(h, min_count, nodes_allowed); 1518 while (min_count < persistent_huge_pages(h)) { 1519 if (!free_pool_huge_page(h, nodes_allowed, 0)) 1520 break; 1521 } 1522 while (count < persistent_huge_pages(h)) { 1523 if (!adjust_pool_surplus(h, nodes_allowed, 1)) 1524 break; 1525 } 1526 out: 1527 ret = persistent_huge_pages(h); 1528 spin_unlock(&hugetlb_lock); 1529 return ret; 1530 } 1531 1532 #define HSTATE_ATTR_RO(_name) \ 1533 static struct kobj_attribute _name##_attr = __ATTR_RO(_name) 1534 1535 #define HSTATE_ATTR(_name) \ 1536 static struct kobj_attribute _name##_attr = \ 1537 __ATTR(_name, 0644, _name##_show, _name##_store) 1538 1539 static struct kobject *hugepages_kobj; 1540 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; 1541 1542 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp); 1543 1544 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp) 1545 { 1546 int i; 1547 1548 for (i = 0; i < HUGE_MAX_HSTATE; i++) 1549 if (hstate_kobjs[i] == kobj) { 1550 if (nidp) 1551 *nidp = NUMA_NO_NODE; 1552 return &hstates[i]; 1553 } 1554 1555 return kobj_to_node_hstate(kobj, nidp); 1556 } 1557 1558 static ssize_t nr_hugepages_show_common(struct kobject *kobj, 1559 struct kobj_attribute *attr, char *buf) 1560 { 1561 struct hstate *h; 1562 unsigned long nr_huge_pages; 1563 int nid; 1564 1565 h = kobj_to_hstate(kobj, &nid); 1566 if (nid == NUMA_NO_NODE) 1567 nr_huge_pages = h->nr_huge_pages; 1568 else 1569 nr_huge_pages = h->nr_huge_pages_node[nid]; 1570 1571 return sprintf(buf, "%lu\n", nr_huge_pages); 1572 } 1573 1574 static ssize_t nr_hugepages_store_common(bool obey_mempolicy, 1575 struct kobject *kobj, struct kobj_attribute *attr, 1576 const char *buf, size_t len) 1577 { 1578 int err; 1579 int nid; 1580 unsigned long count; 1581 struct hstate *h; 1582 NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY); 1583 1584 err = kstrtoul(buf, 10, &count); 1585 if (err) 1586 goto out; 1587 1588 h = kobj_to_hstate(kobj, &nid); 1589 if (h->order >= MAX_ORDER) { 1590 err = -EINVAL; 1591 goto out; 1592 } 1593 1594 if (nid == NUMA_NO_NODE) { 1595 /* 1596 * global hstate attribute 1597 */ 1598 if (!(obey_mempolicy && 1599 init_nodemask_of_mempolicy(nodes_allowed))) { 1600 NODEMASK_FREE(nodes_allowed); 1601 nodes_allowed = &node_states[N_MEMORY]; 1602 } 1603 } else if (nodes_allowed) { 1604 /* 1605 * per node hstate attribute: adjust count to global, 1606 * but restrict alloc/free to the specified node. 
1607 */ 1608 count += h->nr_huge_pages - h->nr_huge_pages_node[nid]; 1609 init_nodemask_of_node(nodes_allowed, nid); 1610 } else 1611 nodes_allowed = &node_states[N_MEMORY]; 1612 1613 h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed); 1614 1615 if (nodes_allowed != &node_states[N_MEMORY]) 1616 NODEMASK_FREE(nodes_allowed); 1617 1618 return len; 1619 out: 1620 NODEMASK_FREE(nodes_allowed); 1621 return err; 1622 } 1623 1624 static ssize_t nr_hugepages_show(struct kobject *kobj, 1625 struct kobj_attribute *attr, char *buf) 1626 { 1627 return nr_hugepages_show_common(kobj, attr, buf); 1628 } 1629 1630 static ssize_t nr_hugepages_store(struct kobject *kobj, 1631 struct kobj_attribute *attr, const char *buf, size_t len) 1632 { 1633 return nr_hugepages_store_common(false, kobj, attr, buf, len); 1634 } 1635 HSTATE_ATTR(nr_hugepages); 1636 1637 #ifdef CONFIG_NUMA 1638 1639 /* 1640 * hstate attribute for optionally mempolicy-based constraint on persistent 1641 * huge page alloc/free. 1642 */ 1643 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj, 1644 struct kobj_attribute *attr, char *buf) 1645 { 1646 return nr_hugepages_show_common(kobj, attr, buf); 1647 } 1648 1649 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj, 1650 struct kobj_attribute *attr, const char *buf, size_t len) 1651 { 1652 return nr_hugepages_store_common(true, kobj, attr, buf, len); 1653 } 1654 HSTATE_ATTR(nr_hugepages_mempolicy); 1655 #endif 1656 1657 1658 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj, 1659 struct kobj_attribute *attr, char *buf) 1660 { 1661 struct hstate *h = kobj_to_hstate(kobj, NULL); 1662 return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages); 1663 } 1664 1665 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj, 1666 struct kobj_attribute *attr, const char *buf, size_t count) 1667 { 1668 int err; 1669 unsigned long input; 1670 struct hstate *h = kobj_to_hstate(kobj, NULL); 1671 1672 if (h->order >= MAX_ORDER) 1673 return -EINVAL; 1674 1675 err = kstrtoul(buf, 10, &input); 1676 if (err) 1677 return err; 1678 1679 spin_lock(&hugetlb_lock); 1680 h->nr_overcommit_huge_pages = input; 1681 spin_unlock(&hugetlb_lock); 1682 1683 return count; 1684 } 1685 HSTATE_ATTR(nr_overcommit_hugepages); 1686 1687 static ssize_t free_hugepages_show(struct kobject *kobj, 1688 struct kobj_attribute *attr, char *buf) 1689 { 1690 struct hstate *h; 1691 unsigned long free_huge_pages; 1692 int nid; 1693 1694 h = kobj_to_hstate(kobj, &nid); 1695 if (nid == NUMA_NO_NODE) 1696 free_huge_pages = h->free_huge_pages; 1697 else 1698 free_huge_pages = h->free_huge_pages_node[nid]; 1699 1700 return sprintf(buf, "%lu\n", free_huge_pages); 1701 } 1702 HSTATE_ATTR_RO(free_hugepages); 1703 1704 static ssize_t resv_hugepages_show(struct kobject *kobj, 1705 struct kobj_attribute *attr, char *buf) 1706 { 1707 struct hstate *h = kobj_to_hstate(kobj, NULL); 1708 return sprintf(buf, "%lu\n", h->resv_huge_pages); 1709 } 1710 HSTATE_ATTR_RO(resv_hugepages); 1711 1712 static ssize_t surplus_hugepages_show(struct kobject *kobj, 1713 struct kobj_attribute *attr, char *buf) 1714 { 1715 struct hstate *h; 1716 unsigned long surplus_huge_pages; 1717 int nid; 1718 1719 h = kobj_to_hstate(kobj, &nid); 1720 if (nid == NUMA_NO_NODE) 1721 surplus_huge_pages = h->surplus_huge_pages; 1722 else 1723 surplus_huge_pages = h->surplus_huge_pages_node[nid]; 1724 1725 return sprintf(buf, "%lu\n", surplus_huge_pages); 1726 } 1727 HSTATE_ATTR_RO(surplus_hugepages); 1728 1729 static struct attribute 
*hstate_attrs[] = { 1730 &nr_hugepages_attr.attr, 1731 &nr_overcommit_hugepages_attr.attr, 1732 &free_hugepages_attr.attr, 1733 &resv_hugepages_attr.attr, 1734 &surplus_hugepages_attr.attr, 1735 #ifdef CONFIG_NUMA 1736 &nr_hugepages_mempolicy_attr.attr, 1737 #endif 1738 NULL, 1739 }; 1740 1741 static struct attribute_group hstate_attr_group = { 1742 .attrs = hstate_attrs, 1743 }; 1744 1745 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent, 1746 struct kobject **hstate_kobjs, 1747 struct attribute_group *hstate_attr_group) 1748 { 1749 int retval; 1750 int hi = hstate_index(h); 1751 1752 hstate_kobjs[hi] = kobject_create_and_add(h->name, parent); 1753 if (!hstate_kobjs[hi]) 1754 return -ENOMEM; 1755 1756 retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group); 1757 if (retval) 1758 kobject_put(hstate_kobjs[hi]); 1759 1760 return retval; 1761 } 1762 1763 static void __init hugetlb_sysfs_init(void) 1764 { 1765 struct hstate *h; 1766 int err; 1767 1768 hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj); 1769 if (!hugepages_kobj) 1770 return; 1771 1772 for_each_hstate(h) { 1773 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj, 1774 hstate_kobjs, &hstate_attr_group); 1775 if (err) 1776 pr_err("Hugetlb: Unable to add hstate %s", h->name); 1777 } 1778 } 1779 1780 #ifdef CONFIG_NUMA 1781 1782 /* 1783 * node_hstate/s - associate per node hstate attributes, via their kobjects, 1784 * with node devices in node_devices[] using a parallel array. The array 1785 * index of a node device or _hstate == node id. 1786 * This is here to avoid any static dependency of the node device driver, in 1787 * the base kernel, on the hugetlb module. 1788 */ 1789 struct node_hstate { 1790 struct kobject *hugepages_kobj; 1791 struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; 1792 }; 1793 struct node_hstate node_hstates[MAX_NUMNODES]; 1794 1795 /* 1796 * A subset of global hstate attributes for node devices 1797 */ 1798 static struct attribute *per_node_hstate_attrs[] = { 1799 &nr_hugepages_attr.attr, 1800 &free_hugepages_attr.attr, 1801 &surplus_hugepages_attr.attr, 1802 NULL, 1803 }; 1804 1805 static struct attribute_group per_node_hstate_attr_group = { 1806 .attrs = per_node_hstate_attrs, 1807 }; 1808 1809 /* 1810 * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj. 1811 * Returns node id via non-NULL nidp. 1812 */ 1813 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp) 1814 { 1815 int nid; 1816 1817 for (nid = 0; nid < nr_node_ids; nid++) { 1818 struct node_hstate *nhs = &node_hstates[nid]; 1819 int i; 1820 for (i = 0; i < HUGE_MAX_HSTATE; i++) 1821 if (nhs->hstate_kobjs[i] == kobj) { 1822 if (nidp) 1823 *nidp = nid; 1824 return &hstates[i]; 1825 } 1826 } 1827 1828 BUG(); 1829 return NULL; 1830 } 1831 1832 /* 1833 * Unregister hstate attributes from a single node device. 1834 * No-op if no hstate attributes attached. 1835 */ 1836 static void hugetlb_unregister_node(struct node *node) 1837 { 1838 struct hstate *h; 1839 struct node_hstate *nhs = &node_hstates[node->dev.id]; 1840 1841 if (!nhs->hugepages_kobj) 1842 return; /* no hstate attributes */ 1843 1844 for_each_hstate(h) { 1845 int idx = hstate_index(h); 1846 if (nhs->hstate_kobjs[idx]) { 1847 kobject_put(nhs->hstate_kobjs[idx]); 1848 nhs->hstate_kobjs[idx] = NULL; 1849 } 1850 } 1851 1852 kobject_put(nhs->hugepages_kobj); 1853 nhs->hugepages_kobj = NULL; 1854 } 1855 1856 /* 1857 * hugetlb module exit: unregister hstate attributes from node devices 1858 * that have them. 
1859 */ 1860 static void hugetlb_unregister_all_nodes(void) 1861 { 1862 int nid; 1863 1864 /* 1865 * disable node device registrations. 1866 */ 1867 register_hugetlbfs_with_node(NULL, NULL); 1868 1869 /* 1870 * remove hstate attributes from any nodes that have them. 1871 */ 1872 for (nid = 0; nid < nr_node_ids; nid++) 1873 hugetlb_unregister_node(node_devices[nid]); 1874 } 1875 1876 /* 1877 * Register hstate attributes for a single node device. 1878 * No-op if attributes already registered. 1879 */ 1880 static void hugetlb_register_node(struct node *node) 1881 { 1882 struct hstate *h; 1883 struct node_hstate *nhs = &node_hstates[node->dev.id]; 1884 int err; 1885 1886 if (nhs->hugepages_kobj) 1887 return; /* already allocated */ 1888 1889 nhs->hugepages_kobj = kobject_create_and_add("hugepages", 1890 &node->dev.kobj); 1891 if (!nhs->hugepages_kobj) 1892 return; 1893 1894 for_each_hstate(h) { 1895 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj, 1896 nhs->hstate_kobjs, 1897 &per_node_hstate_attr_group); 1898 if (err) { 1899 pr_err("Hugetlb: Unable to add hstate %s for node %d\n", 1900 h->name, node->dev.id); 1901 hugetlb_unregister_node(node); 1902 break; 1903 } 1904 } 1905 } 1906 1907 /* 1908 * hugetlb init time: register hstate attributes for all registered node 1909 * devices of nodes that have memory. All on-line nodes should have 1910 * registered their associated device by this time. 1911 */ 1912 static void hugetlb_register_all_nodes(void) 1913 { 1914 int nid; 1915 1916 for_each_node_state(nid, N_MEMORY) { 1917 struct node *node = node_devices[nid]; 1918 if (node->dev.id == nid) 1919 hugetlb_register_node(node); 1920 } 1921 1922 /* 1923 * Let the node device driver know we're here so it can 1924 * [un]register hstate attributes on node hotplug. 1925 */ 1926 register_hugetlbfs_with_node(hugetlb_register_node, 1927 hugetlb_unregister_node); 1928 } 1929 #else /* !CONFIG_NUMA */ 1930 1931 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp) 1932 { 1933 BUG(); 1934 if (nidp) 1935 *nidp = -1; 1936 return NULL; 1937 } 1938 1939 static void hugetlb_unregister_all_nodes(void) { } 1940 1941 static void hugetlb_register_all_nodes(void) { } 1942 1943 #endif 1944 1945 static void __exit hugetlb_exit(void) 1946 { 1947 struct hstate *h; 1948 1949 hugetlb_unregister_all_nodes(); 1950 1951 for_each_hstate(h) { 1952 kobject_put(hstate_kobjs[hstate_index(h)]); 1953 } 1954 1955 kobject_put(hugepages_kobj); 1956 } 1957 module_exit(hugetlb_exit); 1958 1959 static int __init hugetlb_init(void) 1960 { 1961 /* Some platform decide whether they support huge pages at boot 1962 * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when 1963 * there is no such support 1964 */ 1965 if (HPAGE_SHIFT == 0) 1966 return 0; 1967 1968 if (!size_to_hstate(default_hstate_size)) { 1969 default_hstate_size = HPAGE_SIZE; 1970 if (!size_to_hstate(default_hstate_size)) 1971 hugetlb_add_hstate(HUGETLB_PAGE_ORDER); 1972 } 1973 default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size)); 1974 if (default_hstate_max_huge_pages) 1975 default_hstate.max_huge_pages = default_hstate_max_huge_pages; 1976 1977 hugetlb_init_hstates(); 1978 gather_bootmem_prealloc(); 1979 report_hugepages(); 1980 1981 hugetlb_sysfs_init(); 1982 hugetlb_register_all_nodes(); 1983 hugetlb_cgroup_file_init(); 1984 1985 return 0; 1986 } 1987 module_init(hugetlb_init); 1988 1989 /* Should be called on processing a hugepagesz=... 
option */ 1990 void __init hugetlb_add_hstate(unsigned order) 1991 { 1992 struct hstate *h; 1993 unsigned long i; 1994 1995 if (size_to_hstate(PAGE_SIZE << order)) { 1996 pr_warning("hugepagesz= specified twice, ignoring\n"); 1997 return; 1998 } 1999 BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE); 2000 BUG_ON(order == 0); 2001 h = &hstates[hugetlb_max_hstate++]; 2002 h->order = order; 2003 h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1); 2004 h->nr_huge_pages = 0; 2005 h->free_huge_pages = 0; 2006 for (i = 0; i < MAX_NUMNODES; ++i) 2007 INIT_LIST_HEAD(&h->hugepage_freelists[i]); 2008 INIT_LIST_HEAD(&h->hugepage_activelist); 2009 h->next_nid_to_alloc = first_node(node_states[N_MEMORY]); 2010 h->next_nid_to_free = first_node(node_states[N_MEMORY]); 2011 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB", 2012 huge_page_size(h)/1024); 2013 2014 parsed_hstate = h; 2015 } 2016 2017 static int __init hugetlb_nrpages_setup(char *s) 2018 { 2019 unsigned long *mhp; 2020 static unsigned long *last_mhp; 2021 2022 /* 2023 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet, 2024 * so this hugepages= parameter goes to the "default hstate". 2025 */ 2026 if (!hugetlb_max_hstate) 2027 mhp = &default_hstate_max_huge_pages; 2028 else 2029 mhp = &parsed_hstate->max_huge_pages; 2030 2031 if (mhp == last_mhp) { 2032 pr_warning("hugepages= specified twice without " 2033 "interleaving hugepagesz=, ignoring\n"); 2034 return 1; 2035 } 2036 2037 if (sscanf(s, "%lu", mhp) <= 0) 2038 *mhp = 0; 2039 2040 /* 2041 * Global state is always initialized later in hugetlb_init. 2042 * But we need to allocate >= MAX_ORDER hstates here early to still 2043 * use the bootmem allocator. 2044 */ 2045 if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER) 2046 hugetlb_hstate_alloc_pages(parsed_hstate); 2047 2048 last_mhp = mhp; 2049 2050 return 1; 2051 } 2052 __setup("hugepages=", hugetlb_nrpages_setup); 2053 2054 static int __init hugetlb_default_setup(char *s) 2055 { 2056 default_hstate_size = memparse(s, &s); 2057 return 1; 2058 } 2059 __setup("default_hugepagesz=", hugetlb_default_setup); 2060 2061 static unsigned int cpuset_mems_nr(unsigned int *array) 2062 { 2063 int node; 2064 unsigned int nr = 0; 2065 2066 for_each_node_mask(node, cpuset_current_mems_allowed) 2067 nr += array[node]; 2068 2069 return nr; 2070 } 2071 2072 #ifdef CONFIG_SYSCTL 2073 static int hugetlb_sysctl_handler_common(bool obey_mempolicy, 2074 struct ctl_table *table, int write, 2075 void __user *buffer, size_t *length, loff_t *ppos) 2076 { 2077 struct hstate *h = &default_hstate; 2078 unsigned long tmp; 2079 int ret; 2080 2081 tmp = h->max_huge_pages; 2082 2083 if (write && h->order >= MAX_ORDER) 2084 return -EINVAL; 2085 2086 table->data = &tmp; 2087 table->maxlen = sizeof(unsigned long); 2088 ret = proc_doulongvec_minmax(table, write, buffer, length, ppos); 2089 if (ret) 2090 goto out; 2091 2092 if (write) { 2093 NODEMASK_ALLOC(nodemask_t, nodes_allowed, 2094 GFP_KERNEL | __GFP_NORETRY); 2095 if (!(obey_mempolicy && 2096 init_nodemask_of_mempolicy(nodes_allowed))) { 2097 NODEMASK_FREE(nodes_allowed); 2098 nodes_allowed = &node_states[N_MEMORY]; 2099 } 2100 h->max_huge_pages = set_max_huge_pages(h, tmp, nodes_allowed); 2101 2102 if (nodes_allowed != &node_states[N_MEMORY]) 2103 NODEMASK_FREE(nodes_allowed); 2104 } 2105 out: 2106 return ret; 2107 } 2108 2109 int hugetlb_sysctl_handler(struct ctl_table *table, int write, 2110 void __user *buffer, size_t *length, loff_t *ppos) 2111 { 2112 2113 return 
hugetlb_sysctl_handler_common(false, table, write, 2114 buffer, length, ppos); 2115 } 2116 2117 #ifdef CONFIG_NUMA 2118 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write, 2119 void __user *buffer, size_t *length, loff_t *ppos) 2120 { 2121 return hugetlb_sysctl_handler_common(true, table, write, 2122 buffer, length, ppos); 2123 } 2124 #endif /* CONFIG_NUMA */ 2125 2126 int hugetlb_overcommit_handler(struct ctl_table *table, int write, 2127 void __user *buffer, 2128 size_t *length, loff_t *ppos) 2129 { 2130 struct hstate *h = &default_hstate; 2131 unsigned long tmp; 2132 int ret; 2133 2134 tmp = h->nr_overcommit_huge_pages; 2135 2136 if (write && h->order >= MAX_ORDER) 2137 return -EINVAL; 2138 2139 table->data = &tmp; 2140 table->maxlen = sizeof(unsigned long); 2141 ret = proc_doulongvec_minmax(table, write, buffer, length, ppos); 2142 if (ret) 2143 goto out; 2144 2145 if (write) { 2146 spin_lock(&hugetlb_lock); 2147 h->nr_overcommit_huge_pages = tmp; 2148 spin_unlock(&hugetlb_lock); 2149 } 2150 out: 2151 return ret; 2152 } 2153 2154 #endif /* CONFIG_SYSCTL */ 2155 2156 void hugetlb_report_meminfo(struct seq_file *m) 2157 { 2158 struct hstate *h = &default_hstate; 2159 seq_printf(m, 2160 "HugePages_Total: %5lu\n" 2161 "HugePages_Free: %5lu\n" 2162 "HugePages_Rsvd: %5lu\n" 2163 "HugePages_Surp: %5lu\n" 2164 "Hugepagesize: %8lu kB\n", 2165 h->nr_huge_pages, 2166 h->free_huge_pages, 2167 h->resv_huge_pages, 2168 h->surplus_huge_pages, 2169 1UL << (huge_page_order(h) + PAGE_SHIFT - 10)); 2170 } 2171 2172 int hugetlb_report_node_meminfo(int nid, char *buf) 2173 { 2174 struct hstate *h = &default_hstate; 2175 return sprintf(buf, 2176 "Node %d HugePages_Total: %5u\n" 2177 "Node %d HugePages_Free: %5u\n" 2178 "Node %d HugePages_Surp: %5u\n", 2179 nid, h->nr_huge_pages_node[nid], 2180 nid, h->free_huge_pages_node[nid], 2181 nid, h->surplus_huge_pages_node[nid]); 2182 } 2183 2184 void hugetlb_show_meminfo(void) 2185 { 2186 struct hstate *h; 2187 int nid; 2188 2189 for_each_node_state(nid, N_MEMORY) 2190 for_each_hstate(h) 2191 pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n", 2192 nid, 2193 h->nr_huge_pages_node[nid], 2194 h->free_huge_pages_node[nid], 2195 h->surplus_huge_pages_node[nid], 2196 1UL << (huge_page_order(h) + PAGE_SHIFT - 10)); 2197 } 2198 2199 /* Return the number pages of memory we physically have, in PAGE_SIZE units. */ 2200 unsigned long hugetlb_total_pages(void) 2201 { 2202 struct hstate *h; 2203 unsigned long nr_total_pages = 0; 2204 2205 for_each_hstate(h) 2206 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h); 2207 return nr_total_pages; 2208 } 2209 2210 static int hugetlb_acct_memory(struct hstate *h, long delta) 2211 { 2212 int ret = -ENOMEM; 2213 2214 spin_lock(&hugetlb_lock); 2215 /* 2216 * When cpuset is configured, it breaks the strict hugetlb page 2217 * reservation as the accounting is done on a global variable. Such 2218 * reservation is completely rubbish in the presence of cpuset because 2219 * the reservation is not checked against page availability for the 2220 * current cpuset. Application can still potentially OOM'ed by kernel 2221 * with lack of free htlb page in cpuset that the task is in. 2222 * Attempt to enforce strict accounting with cpuset is almost 2223 * impossible (or too ugly) because cpuset is too fluid that 2224 * task or memory node can be dynamically moved between cpusets. 2225 * 2226 * The change of semantics for shared hugetlb mapping with cpuset is 2227 * undesirable. 
However, in order to preserve some of the semantics,
2228 * we fall back to checking against the current free page availability as
2229 * a best attempt, hopefully minimizing the impact of the changed
2230 * cpuset semantics.
2231 */
2232 if (delta > 0) {
2233 if (gather_surplus_pages(h, delta) < 0)
2234 goto out;
2235
2236 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
2237 return_unused_surplus_pages(h, delta);
2238 goto out;
2239 }
2240 }
2241
2242 ret = 0;
2243 if (delta < 0)
2244 return_unused_surplus_pages(h, (unsigned long) -delta);
2245
2246 out:
2247 spin_unlock(&hugetlb_lock);
2248 return ret;
2249 }
2250
2251 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
2252 {
2253 struct resv_map *resv = vma_resv_map(vma);
2254
2255 /*
2256 * This new VMA should share its sibling's reservation map if present.
2257 * The VMA will only ever have a valid reservation map pointer when
2258 * it is being copied from another still-existing VMA. As that VMA
2259 * has a reference to the reservation map it cannot disappear until
2260 * after this open call completes. It is therefore safe to take a
2261 * new reference here without additional locking.
2262 */
2263 if (resv)
2264 kref_get(&resv->refs);
2265 }
2266
2267 static void resv_map_put(struct vm_area_struct *vma)
2268 {
2269 struct resv_map *resv = vma_resv_map(vma);
2270
2271 if (!resv)
2272 return;
2273 kref_put(&resv->refs, resv_map_release);
2274 }
2275
2276 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
2277 {
2278 struct hstate *h = hstate_vma(vma);
2279 struct resv_map *resv = vma_resv_map(vma);
2280 struct hugepage_subpool *spool = subpool_vma(vma);
2281 unsigned long reserve;
2282 unsigned long start;
2283 unsigned long end;
2284
2285 if (resv) {
2286 start = vma_hugecache_offset(h, vma, vma->vm_start);
2287 end = vma_hugecache_offset(h, vma, vma->vm_end);
2288
2289 reserve = (end - start) -
2290 region_count(&resv->regions, start, end);
2291
2292 resv_map_put(vma);
2293
2294 if (reserve) {
2295 hugetlb_acct_memory(h, -reserve);
2296 hugepage_subpool_put_pages(spool, reserve);
2297 }
2298 }
2299 }
2300
2301 /*
2302 * We cannot handle pagefaults against hugetlb pages at all. They cause
2303 * handle_mm_fault() to try to instantiate regular-sized pages in the
2304 * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get
2305 * this far.
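 * Huge page faults are routed through hugetlb_fault() instead, so the
 * ->fault handler below only has to BUG().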
2306 */ 2307 static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 2308 { 2309 BUG(); 2310 return 0; 2311 } 2312 2313 const struct vm_operations_struct hugetlb_vm_ops = { 2314 .fault = hugetlb_vm_op_fault, 2315 .open = hugetlb_vm_op_open, 2316 .close = hugetlb_vm_op_close, 2317 }; 2318 2319 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page, 2320 int writable) 2321 { 2322 pte_t entry; 2323 2324 if (writable) { 2325 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page, 2326 vma->vm_page_prot))); 2327 } else { 2328 entry = huge_pte_wrprotect(mk_huge_pte(page, 2329 vma->vm_page_prot)); 2330 } 2331 entry = pte_mkyoung(entry); 2332 entry = pte_mkhuge(entry); 2333 entry = arch_make_huge_pte(entry, vma, page, writable); 2334 2335 return entry; 2336 } 2337 2338 static void set_huge_ptep_writable(struct vm_area_struct *vma, 2339 unsigned long address, pte_t *ptep) 2340 { 2341 pte_t entry; 2342 2343 entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep))); 2344 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) 2345 update_mmu_cache(vma, address, ptep); 2346 } 2347 2348 2349 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, 2350 struct vm_area_struct *vma) 2351 { 2352 pte_t *src_pte, *dst_pte, entry; 2353 struct page *ptepage; 2354 unsigned long addr; 2355 int cow; 2356 struct hstate *h = hstate_vma(vma); 2357 unsigned long sz = huge_page_size(h); 2358 2359 cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; 2360 2361 for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) { 2362 spinlock_t *src_ptl, *dst_ptl; 2363 src_pte = huge_pte_offset(src, addr); 2364 if (!src_pte) 2365 continue; 2366 dst_pte = huge_pte_alloc(dst, addr, sz); 2367 if (!dst_pte) 2368 goto nomem; 2369 2370 /* If the pagetables are shared don't copy or take references */ 2371 if (dst_pte == src_pte) 2372 continue; 2373 2374 dst_ptl = huge_pte_lock(h, dst, dst_pte); 2375 src_ptl = huge_pte_lockptr(h, src, src_pte); 2376 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 2377 if (!huge_pte_none(huge_ptep_get(src_pte))) { 2378 if (cow) 2379 huge_ptep_set_wrprotect(src, addr, src_pte); 2380 entry = huge_ptep_get(src_pte); 2381 ptepage = pte_page(entry); 2382 get_page(ptepage); 2383 page_dup_rmap(ptepage); 2384 set_huge_pte_at(dst, addr, dst_pte, entry); 2385 } 2386 spin_unlock(src_ptl); 2387 spin_unlock(dst_ptl); 2388 } 2389 return 0; 2390 2391 nomem: 2392 return -ENOMEM; 2393 } 2394 2395 static int is_hugetlb_entry_migration(pte_t pte) 2396 { 2397 swp_entry_t swp; 2398 2399 if (huge_pte_none(pte) || pte_present(pte)) 2400 return 0; 2401 swp = pte_to_swp_entry(pte); 2402 if (non_swap_entry(swp) && is_migration_entry(swp)) 2403 return 1; 2404 else 2405 return 0; 2406 } 2407 2408 static int is_hugetlb_entry_hwpoisoned(pte_t pte) 2409 { 2410 swp_entry_t swp; 2411 2412 if (huge_pte_none(pte) || pte_present(pte)) 2413 return 0; 2414 swp = pte_to_swp_entry(pte); 2415 if (non_swap_entry(swp) && is_hwpoison_entry(swp)) 2416 return 1; 2417 else 2418 return 0; 2419 } 2420 2421 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, 2422 unsigned long start, unsigned long end, 2423 struct page *ref_page) 2424 { 2425 int force_flush = 0; 2426 struct mm_struct *mm = vma->vm_mm; 2427 unsigned long address; 2428 pte_t *ptep; 2429 pte_t pte; 2430 spinlock_t *ptl; 2431 struct page *page; 2432 struct hstate *h = hstate_vma(vma); 2433 unsigned long sz = huge_page_size(h); 2434 const unsigned long mmun_start = start; /* 
For mmu_notifiers */ 2435 const unsigned long mmun_end = end; /* For mmu_notifiers */ 2436 2437 WARN_ON(!is_vm_hugetlb_page(vma)); 2438 BUG_ON(start & ~huge_page_mask(h)); 2439 BUG_ON(end & ~huge_page_mask(h)); 2440 2441 tlb_start_vma(tlb, vma); 2442 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 2443 again: 2444 for (address = start; address < end; address += sz) { 2445 ptep = huge_pte_offset(mm, address); 2446 if (!ptep) 2447 continue; 2448 2449 ptl = huge_pte_lock(h, mm, ptep); 2450 if (huge_pmd_unshare(mm, &address, ptep)) 2451 goto unlock; 2452 2453 pte = huge_ptep_get(ptep); 2454 if (huge_pte_none(pte)) 2455 goto unlock; 2456 2457 /* 2458 * HWPoisoned hugepage is already unmapped and dropped reference 2459 */ 2460 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) { 2461 huge_pte_clear(mm, address, ptep); 2462 goto unlock; 2463 } 2464 2465 page = pte_page(pte); 2466 /* 2467 * If a reference page is supplied, it is because a specific 2468 * page is being unmapped, not a range. Ensure the page we 2469 * are about to unmap is the actual page of interest. 2470 */ 2471 if (ref_page) { 2472 if (page != ref_page) 2473 goto unlock; 2474 2475 /* 2476 * Mark the VMA as having unmapped its page so that 2477 * future faults in this VMA will fail rather than 2478 * looking like data was lost 2479 */ 2480 set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED); 2481 } 2482 2483 pte = huge_ptep_get_and_clear(mm, address, ptep); 2484 tlb_remove_tlb_entry(tlb, ptep, address); 2485 if (huge_pte_dirty(pte)) 2486 set_page_dirty(page); 2487 2488 page_remove_rmap(page); 2489 force_flush = !__tlb_remove_page(tlb, page); 2490 if (force_flush) { 2491 spin_unlock(ptl); 2492 break; 2493 } 2494 /* Bail out after unmapping reference page if supplied */ 2495 if (ref_page) { 2496 spin_unlock(ptl); 2497 break; 2498 } 2499 unlock: 2500 spin_unlock(ptl); 2501 } 2502 /* 2503 * mmu_gather ran out of room to batch pages, we break out of 2504 * the PTE lock to avoid doing the potential expensive TLB invalidate 2505 * and page-free while holding it. 2506 */ 2507 if (force_flush) { 2508 force_flush = 0; 2509 tlb_flush_mmu(tlb); 2510 if (address < end && !ref_page) 2511 goto again; 2512 } 2513 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 2514 tlb_end_vma(tlb, vma); 2515 } 2516 2517 void __unmap_hugepage_range_final(struct mmu_gather *tlb, 2518 struct vm_area_struct *vma, unsigned long start, 2519 unsigned long end, struct page *ref_page) 2520 { 2521 __unmap_hugepage_range(tlb, vma, start, end, ref_page); 2522 2523 /* 2524 * Clear this flag so that x86's huge_pmd_share page_table_shareable 2525 * test will fail on a vma being torn down, and not grab a page table 2526 * on its way out. We're lucky that the flag has such an appropriate 2527 * name, and can in fact be safely cleared here. We could clear it 2528 * before the __unmap_hugepage_range above, but all that's necessary 2529 * is to clear it before releasing the i_mmap_mutex. This works 2530 * because in the context this is called, the VMA is about to be 2531 * destroyed and the i_mmap_mutex is held. 
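 * (vma_shareable() and page_table_shareable() below both look at vm_flags,
 * so a vma with VM_MAYSHARE cleared can no longer be picked for pmd sharing.)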
2532 */
2533 vma->vm_flags &= ~VM_MAYSHARE;
2534 }
2535
2536 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2537 unsigned long end, struct page *ref_page)
2538 {
2539 struct mm_struct *mm;
2540 struct mmu_gather tlb;
2541
2542 mm = vma->vm_mm;
2543
2544 tlb_gather_mmu(&tlb, mm, start, end);
2545 __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
2546 tlb_finish_mmu(&tlb, start, end);
2547 }
2548
2549 /*
2550 * This is called when the original mapper fails to COW a MAP_PRIVATE
2551 * mapping for which it owns the reserve page. The intention is to unmap the page
2552 * from other VMAs and let the children be SIGKILLed if they are faulting the
2553 * same region.
2554 */
2555 static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
2556 struct page *page, unsigned long address)
2557 {
2558 struct hstate *h = hstate_vma(vma);
2559 struct vm_area_struct *iter_vma;
2560 struct address_space *mapping;
2561 pgoff_t pgoff;
2562
2563 /*
2564 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
2565 * from the page cache lookup, which is in HPAGE_SIZE units.
2566 */
2567 address = address & huge_page_mask(h);
2568 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
2569 vma->vm_pgoff;
2570 mapping = file_inode(vma->vm_file)->i_mapping;
2571
2572 /*
2573 * Take the mapping lock for the duration of the table walk. As
2574 * this mapping should be shared between all the VMAs,
2575 * __unmap_hugepage_range() is called as the lock is already held.
2576 */
2577 mutex_lock(&mapping->i_mmap_mutex);
2578 vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
2579 /* Do not unmap the current VMA */
2580 if (iter_vma == vma)
2581 continue;
2582
2583 /*
2584 * Unmap the page from other VMAs without their own reserves.
2585 * They get marked to be SIGKILLed if they fault in these
2586 * areas. This is because a future no-page fault on this VMA
2587 * could insert a zeroed page instead of the data existing
2588 * from the time of fork. This would look like data corruption.
2589 */
2590 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
2591 unmap_hugepage_range(iter_vma, address,
2592 address + huge_page_size(h), page);
2593 }
2594 mutex_unlock(&mapping->i_mmap_mutex);
2595
2596 return 1;
2597 }
2598
2599 /*
2600 * hugetlb_cow() should be called with the page lock of the original hugepage held.
2601 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
2602 * cannot race with other handlers or page migration.
2603 * Keep the pte_same checks anyway to make the transition away from the mutex easier.
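 * The caller passes in ptl locked; it is dropped around the page allocation
 * and copy below and re-taken before returning, so the caller always gets
 * it back held.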
2604 */ 2605 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma, 2606 unsigned long address, pte_t *ptep, pte_t pte, 2607 struct page *pagecache_page, spinlock_t *ptl) 2608 { 2609 struct hstate *h = hstate_vma(vma); 2610 struct page *old_page, *new_page; 2611 int outside_reserve = 0; 2612 unsigned long mmun_start; /* For mmu_notifiers */ 2613 unsigned long mmun_end; /* For mmu_notifiers */ 2614 2615 old_page = pte_page(pte); 2616 2617 retry_avoidcopy: 2618 /* If no-one else is actually using this page, avoid the copy 2619 * and just make the page writable */ 2620 if (page_mapcount(old_page) == 1 && PageAnon(old_page)) { 2621 page_move_anon_rmap(old_page, vma, address); 2622 set_huge_ptep_writable(vma, address, ptep); 2623 return 0; 2624 } 2625 2626 /* 2627 * If the process that created a MAP_PRIVATE mapping is about to 2628 * perform a COW due to a shared page count, attempt to satisfy 2629 * the allocation without using the existing reserves. The pagecache 2630 * page is used to determine if the reserve at this address was 2631 * consumed or not. If reserves were used, a partial faulted mapping 2632 * at the time of fork() could consume its reserves on COW instead 2633 * of the full address range. 2634 */ 2635 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && 2636 old_page != pagecache_page) 2637 outside_reserve = 1; 2638 2639 page_cache_get(old_page); 2640 2641 /* Drop page table lock as buddy allocator may be called */ 2642 spin_unlock(ptl); 2643 new_page = alloc_huge_page(vma, address, outside_reserve); 2644 2645 if (IS_ERR(new_page)) { 2646 long err = PTR_ERR(new_page); 2647 page_cache_release(old_page); 2648 2649 /* 2650 * If a process owning a MAP_PRIVATE mapping fails to COW, 2651 * it is due to references held by a child and an insufficient 2652 * huge page pool. To guarantee the original mappers 2653 * reliability, unmap the page from child processes. The child 2654 * may get SIGKILLed if it later faults. 2655 */ 2656 if (outside_reserve) { 2657 BUG_ON(huge_pte_none(pte)); 2658 if (unmap_ref_private(mm, vma, old_page, address)) { 2659 BUG_ON(huge_pte_none(pte)); 2660 spin_lock(ptl); 2661 ptep = huge_pte_offset(mm, address & huge_page_mask(h)); 2662 if (likely(pte_same(huge_ptep_get(ptep), pte))) 2663 goto retry_avoidcopy; 2664 /* 2665 * race occurs while re-acquiring page table 2666 * lock, and our job is done. 2667 */ 2668 return 0; 2669 } 2670 WARN_ON_ONCE(1); 2671 } 2672 2673 /* Caller expects lock to be held */ 2674 spin_lock(ptl); 2675 if (err == -ENOMEM) 2676 return VM_FAULT_OOM; 2677 else 2678 return VM_FAULT_SIGBUS; 2679 } 2680 2681 /* 2682 * When the original hugepage is shared one, it does not have 2683 * anon_vma prepared. 
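 * An anon_vma therefore has to be set up here before the copied page can be
 * mapped; if anon_vma_prepare() fails we give up with VM_FAULT_OOM.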
2684 */ 2685 if (unlikely(anon_vma_prepare(vma))) { 2686 page_cache_release(new_page); 2687 page_cache_release(old_page); 2688 /* Caller expects lock to be held */ 2689 spin_lock(ptl); 2690 return VM_FAULT_OOM; 2691 } 2692 2693 copy_user_huge_page(new_page, old_page, address, vma, 2694 pages_per_huge_page(h)); 2695 __SetPageUptodate(new_page); 2696 2697 mmun_start = address & huge_page_mask(h); 2698 mmun_end = mmun_start + huge_page_size(h); 2699 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 2700 /* 2701 * Retake the page table lock to check for racing updates 2702 * before the page tables are altered 2703 */ 2704 spin_lock(ptl); 2705 ptep = huge_pte_offset(mm, address & huge_page_mask(h)); 2706 if (likely(pte_same(huge_ptep_get(ptep), pte))) { 2707 ClearPagePrivate(new_page); 2708 2709 /* Break COW */ 2710 huge_ptep_clear_flush(vma, address, ptep); 2711 set_huge_pte_at(mm, address, ptep, 2712 make_huge_pte(vma, new_page, 1)); 2713 page_remove_rmap(old_page); 2714 hugepage_add_new_anon_rmap(new_page, vma, address); 2715 /* Make the old page be freed below */ 2716 new_page = old_page; 2717 } 2718 spin_unlock(ptl); 2719 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 2720 page_cache_release(new_page); 2721 page_cache_release(old_page); 2722 2723 /* Caller expects lock to be held */ 2724 spin_lock(ptl); 2725 return 0; 2726 } 2727 2728 /* Return the pagecache page at a given address within a VMA */ 2729 static struct page *hugetlbfs_pagecache_page(struct hstate *h, 2730 struct vm_area_struct *vma, unsigned long address) 2731 { 2732 struct address_space *mapping; 2733 pgoff_t idx; 2734 2735 mapping = vma->vm_file->f_mapping; 2736 idx = vma_hugecache_offset(h, vma, address); 2737 2738 return find_lock_page(mapping, idx); 2739 } 2740 2741 /* 2742 * Return whether there is a pagecache page to back given address within VMA. 2743 * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page. 2744 */ 2745 static bool hugetlbfs_pagecache_present(struct hstate *h, 2746 struct vm_area_struct *vma, unsigned long address) 2747 { 2748 struct address_space *mapping; 2749 pgoff_t idx; 2750 struct page *page; 2751 2752 mapping = vma->vm_file->f_mapping; 2753 idx = vma_hugecache_offset(h, vma, address); 2754 2755 page = find_get_page(mapping, idx); 2756 if (page) 2757 put_page(page); 2758 return page != NULL; 2759 } 2760 2761 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, 2762 unsigned long address, pte_t *ptep, unsigned int flags) 2763 { 2764 struct hstate *h = hstate_vma(vma); 2765 int ret = VM_FAULT_SIGBUS; 2766 int anon_rmap = 0; 2767 pgoff_t idx; 2768 unsigned long size; 2769 struct page *page; 2770 struct address_space *mapping; 2771 pte_t new_pte; 2772 spinlock_t *ptl; 2773 2774 /* 2775 * Currently, we are forced to kill the process in the event the 2776 * original mapper has unmapped pages from the child due to a failed 2777 * COW. Warn that such a situation has occurred as it may not be obvious 2778 */ 2779 if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) { 2780 pr_warning("PID %d killed due to inadequate hugepage pool\n", 2781 current->pid); 2782 return ret; 2783 } 2784 2785 mapping = vma->vm_file->f_mapping; 2786 idx = vma_hugecache_offset(h, vma, address); 2787 2788 /* 2789 * Use page lock to guard against racing truncation 2790 * before we get page_table_lock. 
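 * If the page is not in the page cache yet, a new huge page is allocated
 * and, for shared mappings, inserted into the cache; -EEXIST from
 * add_to_page_cache() means another task won the race, so we retry.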
2791 */ 2792 retry: 2793 page = find_lock_page(mapping, idx); 2794 if (!page) { 2795 size = i_size_read(mapping->host) >> huge_page_shift(h); 2796 if (idx >= size) 2797 goto out; 2798 page = alloc_huge_page(vma, address, 0); 2799 if (IS_ERR(page)) { 2800 ret = PTR_ERR(page); 2801 if (ret == -ENOMEM) 2802 ret = VM_FAULT_OOM; 2803 else 2804 ret = VM_FAULT_SIGBUS; 2805 goto out; 2806 } 2807 clear_huge_page(page, address, pages_per_huge_page(h)); 2808 __SetPageUptodate(page); 2809 2810 if (vma->vm_flags & VM_MAYSHARE) { 2811 int err; 2812 struct inode *inode = mapping->host; 2813 2814 err = add_to_page_cache(page, mapping, idx, GFP_KERNEL); 2815 if (err) { 2816 put_page(page); 2817 if (err == -EEXIST) 2818 goto retry; 2819 goto out; 2820 } 2821 ClearPagePrivate(page); 2822 2823 spin_lock(&inode->i_lock); 2824 inode->i_blocks += blocks_per_huge_page(h); 2825 spin_unlock(&inode->i_lock); 2826 } else { 2827 lock_page(page); 2828 if (unlikely(anon_vma_prepare(vma))) { 2829 ret = VM_FAULT_OOM; 2830 goto backout_unlocked; 2831 } 2832 anon_rmap = 1; 2833 } 2834 } else { 2835 /* 2836 * If memory error occurs between mmap() and fault, some process 2837 * don't have hwpoisoned swap entry for errored virtual address. 2838 * So we need to block hugepage fault by PG_hwpoison bit check. 2839 */ 2840 if (unlikely(PageHWPoison(page))) { 2841 ret = VM_FAULT_HWPOISON | 2842 VM_FAULT_SET_HINDEX(hstate_index(h)); 2843 goto backout_unlocked; 2844 } 2845 } 2846 2847 /* 2848 * If we are going to COW a private mapping later, we examine the 2849 * pending reservations for this page now. This will ensure that 2850 * any allocations necessary to record that reservation occur outside 2851 * the spinlock. 2852 */ 2853 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) 2854 if (vma_needs_reservation(h, vma, address) < 0) { 2855 ret = VM_FAULT_OOM; 2856 goto backout_unlocked; 2857 } 2858 2859 ptl = huge_pte_lockptr(h, mm, ptep); 2860 spin_lock(ptl); 2861 size = i_size_read(mapping->host) >> huge_page_shift(h); 2862 if (idx >= size) 2863 goto backout; 2864 2865 ret = 0; 2866 if (!huge_pte_none(huge_ptep_get(ptep))) 2867 goto backout; 2868 2869 if (anon_rmap) { 2870 ClearPagePrivate(page); 2871 hugepage_add_new_anon_rmap(page, vma, address); 2872 } 2873 else 2874 page_dup_rmap(page); 2875 new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE) 2876 && (vma->vm_flags & VM_SHARED))); 2877 set_huge_pte_at(mm, address, ptep, new_pte); 2878 2879 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { 2880 /* Optimization, do the COW without a second fault */ 2881 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl); 2882 } 2883 2884 spin_unlock(ptl); 2885 unlock_page(page); 2886 out: 2887 return ret; 2888 2889 backout: 2890 spin_unlock(ptl); 2891 backout_unlocked: 2892 unlock_page(page); 2893 put_page(page); 2894 goto out; 2895 } 2896 2897 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, 2898 unsigned long address, unsigned int flags) 2899 { 2900 pte_t *ptep; 2901 pte_t entry; 2902 spinlock_t *ptl; 2903 int ret; 2904 struct page *page = NULL; 2905 struct page *pagecache_page = NULL; 2906 static DEFINE_MUTEX(hugetlb_instantiation_mutex); 2907 struct hstate *h = hstate_vma(vma); 2908 2909 address &= huge_page_mask(h); 2910 2911 ptep = huge_pte_offset(mm, address); 2912 if (ptep) { 2913 entry = huge_ptep_get(ptep); 2914 if (unlikely(is_hugetlb_entry_migration(entry))) { 2915 migration_entry_wait_huge(vma, mm, ptep); 2916 return 0; 2917 } else if 
(unlikely(is_hugetlb_entry_hwpoisoned(entry))) 2918 return VM_FAULT_HWPOISON_LARGE | 2919 VM_FAULT_SET_HINDEX(hstate_index(h)); 2920 } 2921 2922 ptep = huge_pte_alloc(mm, address, huge_page_size(h)); 2923 if (!ptep) 2924 return VM_FAULT_OOM; 2925 2926 /* 2927 * Serialize hugepage allocation and instantiation, so that we don't 2928 * get spurious allocation failures if two CPUs race to instantiate 2929 * the same page in the page cache. 2930 */ 2931 mutex_lock(&hugetlb_instantiation_mutex); 2932 entry = huge_ptep_get(ptep); 2933 if (huge_pte_none(entry)) { 2934 ret = hugetlb_no_page(mm, vma, address, ptep, flags); 2935 goto out_mutex; 2936 } 2937 2938 ret = 0; 2939 2940 /* 2941 * If we are going to COW the mapping later, we examine the pending 2942 * reservations for this page now. This will ensure that any 2943 * allocations necessary to record that reservation occur outside the 2944 * spinlock. For private mappings, we also lookup the pagecache 2945 * page now as it is used to determine if a reservation has been 2946 * consumed. 2947 */ 2948 if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) { 2949 if (vma_needs_reservation(h, vma, address) < 0) { 2950 ret = VM_FAULT_OOM; 2951 goto out_mutex; 2952 } 2953 2954 if (!(vma->vm_flags & VM_MAYSHARE)) 2955 pagecache_page = hugetlbfs_pagecache_page(h, 2956 vma, address); 2957 } 2958 2959 /* 2960 * hugetlb_cow() requires page locks of pte_page(entry) and 2961 * pagecache_page, so here we need take the former one 2962 * when page != pagecache_page or !pagecache_page. 2963 * Note that locking order is always pagecache_page -> page, 2964 * so no worry about deadlock. 2965 */ 2966 page = pte_page(entry); 2967 get_page(page); 2968 if (page != pagecache_page) 2969 lock_page(page); 2970 2971 ptl = huge_pte_lockptr(h, mm, ptep); 2972 spin_lock(ptl); 2973 /* Check for a racing update before calling hugetlb_cow */ 2974 if (unlikely(!pte_same(entry, huge_ptep_get(ptep)))) 2975 goto out_ptl; 2976 2977 2978 if (flags & FAULT_FLAG_WRITE) { 2979 if (!huge_pte_write(entry)) { 2980 ret = hugetlb_cow(mm, vma, address, ptep, entry, 2981 pagecache_page, ptl); 2982 goto out_ptl; 2983 } 2984 entry = huge_pte_mkdirty(entry); 2985 } 2986 entry = pte_mkyoung(entry); 2987 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 2988 flags & FAULT_FLAG_WRITE)) 2989 update_mmu_cache(vma, address, ptep); 2990 2991 out_ptl: 2992 spin_unlock(ptl); 2993 2994 if (pagecache_page) { 2995 unlock_page(pagecache_page); 2996 put_page(pagecache_page); 2997 } 2998 if (page != pagecache_page) 2999 unlock_page(page); 3000 put_page(page); 3001 3002 out_mutex: 3003 mutex_unlock(&hugetlb_instantiation_mutex); 3004 3005 return ret; 3006 } 3007 3008 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, 3009 struct page **pages, struct vm_area_struct **vmas, 3010 unsigned long *position, unsigned long *nr_pages, 3011 long i, unsigned int flags) 3012 { 3013 unsigned long pfn_offset; 3014 unsigned long vaddr = *position; 3015 unsigned long remainder = *nr_pages; 3016 struct hstate *h = hstate_vma(vma); 3017 3018 while (vaddr < vma->vm_end && remainder) { 3019 pte_t *pte; 3020 spinlock_t *ptl = NULL; 3021 int absent; 3022 struct page *page; 3023 3024 /* 3025 * Some archs (sparc64, sh*) have multiple pte_ts to 3026 * each hugepage. We have to make sure we get the 3027 * first, for the page indexing below to work. 3028 * 3029 * Note that page table lock is not held when pte is null. 
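 * (The lookup below masks vaddr with huge_page_mask() so that we always
 * start from the first pte of the huge page.)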
3030 */
3031 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
3032 if (pte)
3033 ptl = huge_pte_lock(h, mm, pte);
3034 absent = !pte || huge_pte_none(huge_ptep_get(pte));
3035
3036 /*
3037 * When coredumping, it suits get_dump_page if we just return
3038 * an error where there's an empty slot with no huge pagecache
3039 * to back it. This way, we avoid allocating a hugepage, and
3040 * the sparse dumpfile avoids allocating disk blocks, but its
3041 * huge holes still show up with zeroes where they need to be.
3042 */
3043 if (absent && (flags & FOLL_DUMP) &&
3044 !hugetlbfs_pagecache_present(h, vma, vaddr)) {
3045 if (pte)
3046 spin_unlock(ptl);
3047 remainder = 0;
3048 break;
3049 }
3050
3051 /*
3052 * We need to call hugetlb_fault both for hugepages under migration
3053 * (in which case hugetlb_fault waits for the migration) and for
3054 * hwpoisoned hugepages (in which case we need to prevent the
3055 * caller from accessing them). To do this, we use
3056 * is_swap_pte here instead of is_hugetlb_entry_migration and
3057 * is_hugetlb_entry_hwpoisoned, because it covers
3058 * both cases, and because we can't follow correct pages
3059 * directly from any kind of swap entry.
3060 */
3061 if (absent || is_swap_pte(huge_ptep_get(pte)) ||
3062 ((flags & FOLL_WRITE) &&
3063 !huge_pte_write(huge_ptep_get(pte)))) {
3064 int ret;
3065
3066 if (pte)
3067 spin_unlock(ptl);
3068 ret = hugetlb_fault(mm, vma, vaddr,
3069 (flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
3070 if (!(ret & VM_FAULT_ERROR))
3071 continue;
3072
3073 remainder = 0;
3074 break;
3075 }
3076
3077 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
3078 page = pte_page(huge_ptep_get(pte));
3079 same_page:
3080 if (pages) {
3081 pages[i] = mem_map_offset(page, pfn_offset);
3082 get_page(pages[i]);
3083 }
3084
3085 if (vmas)
3086 vmas[i] = vma;
3087
3088 vaddr += PAGE_SIZE;
3089 ++pfn_offset;
3090 --remainder;
3091 ++i;
3092 if (vaddr < vma->vm_end && remainder &&
3093 pfn_offset < pages_per_huge_page(h)) {
3094 /*
3095 * We use pfn_offset to avoid touching the pageframes
3096 * of this compound page.
3097 */
3098 goto same_page;
3099 }
3100 spin_unlock(ptl);
3101 }
3102 *nr_pages = remainder;
3103 *position = vaddr;
3104
3105 return i ?
i : -EFAULT; 3106 } 3107 3108 unsigned long hugetlb_change_protection(struct vm_area_struct *vma, 3109 unsigned long address, unsigned long end, pgprot_t newprot) 3110 { 3111 struct mm_struct *mm = vma->vm_mm; 3112 unsigned long start = address; 3113 pte_t *ptep; 3114 pte_t pte; 3115 struct hstate *h = hstate_vma(vma); 3116 unsigned long pages = 0; 3117 3118 BUG_ON(address >= end); 3119 flush_cache_range(vma, address, end); 3120 3121 mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex); 3122 for (; address < end; address += huge_page_size(h)) { 3123 spinlock_t *ptl; 3124 ptep = huge_pte_offset(mm, address); 3125 if (!ptep) 3126 continue; 3127 ptl = huge_pte_lock(h, mm, ptep); 3128 if (huge_pmd_unshare(mm, &address, ptep)) { 3129 pages++; 3130 spin_unlock(ptl); 3131 continue; 3132 } 3133 if (!huge_pte_none(huge_ptep_get(ptep))) { 3134 pte = huge_ptep_get_and_clear(mm, address, ptep); 3135 pte = pte_mkhuge(huge_pte_modify(pte, newprot)); 3136 pte = arch_make_huge_pte(pte, vma, NULL, 0); 3137 set_huge_pte_at(mm, address, ptep, pte); 3138 pages++; 3139 } 3140 spin_unlock(ptl); 3141 } 3142 /* 3143 * Must flush TLB before releasing i_mmap_mutex: x86's huge_pmd_unshare 3144 * may have cleared our pud entry and done put_page on the page table: 3145 * once we release i_mmap_mutex, another task can do the final put_page 3146 * and that page table be reused and filled with junk. 3147 */ 3148 flush_tlb_range(vma, start, end); 3149 mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex); 3150 3151 return pages << h->order; 3152 } 3153 3154 int hugetlb_reserve_pages(struct inode *inode, 3155 long from, long to, 3156 struct vm_area_struct *vma, 3157 vm_flags_t vm_flags) 3158 { 3159 long ret, chg; 3160 struct hstate *h = hstate_inode(inode); 3161 struct hugepage_subpool *spool = subpool_inode(inode); 3162 3163 /* 3164 * Only apply hugepage reservation if asked. At fault time, an 3165 * attempt will be made for VM_NORESERVE to allocate a page 3166 * without using reserves 3167 */ 3168 if (vm_flags & VM_NORESERVE) 3169 return 0; 3170 3171 /* 3172 * Shared mappings base their reservation on the number of pages that 3173 * are already allocated on behalf of the file. Private mappings need 3174 * to reserve the full area even if read-only as mprotect() may be 3175 * called to make the mapping read-write. Assume !vma is a shm mapping 3176 */ 3177 if (!vma || vma->vm_flags & VM_MAYSHARE) 3178 chg = region_chg(&inode->i_mapping->private_list, from, to); 3179 else { 3180 struct resv_map *resv_map = resv_map_alloc(); 3181 if (!resv_map) 3182 return -ENOMEM; 3183 3184 chg = to - from; 3185 3186 set_vma_resv_map(vma, resv_map); 3187 set_vma_resv_flags(vma, HPAGE_RESV_OWNER); 3188 } 3189 3190 if (chg < 0) { 3191 ret = chg; 3192 goto out_err; 3193 } 3194 3195 /* There must be enough pages in the subpool for the mapping */ 3196 if (hugepage_subpool_get_pages(spool, chg)) { 3197 ret = -ENOSPC; 3198 goto out_err; 3199 } 3200 3201 /* 3202 * Check enough hugepages are available for the reservation. 3203 * Hand the pages back to the subpool if there are not 3204 */ 3205 ret = hugetlb_acct_memory(h, chg); 3206 if (ret < 0) { 3207 hugepage_subpool_put_pages(spool, chg); 3208 goto out_err; 3209 } 3210 3211 /* 3212 * Account for the reservations made. Shared mappings record regions 3213 * that have reservations as they are shared by multiple VMAs. 3214 * When the last VMA disappears, the region map says how much 3215 * the reservation was and the page cache tells how much of 3216 * the reservation was consumed. 
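 * (For shared mappings these regions live on inode->i_mapping->private_list,
 * via region_chg()/region_add() above and below.)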
Private mappings are per-VMA and 3217 * only the consumed reservations are tracked. When the VMA 3218 * disappears, the original reservation is the VMA size and the 3219 * consumed reservations are stored in the map. Hence, nothing 3220 * else has to be done for private mappings here 3221 */ 3222 if (!vma || vma->vm_flags & VM_MAYSHARE) 3223 region_add(&inode->i_mapping->private_list, from, to); 3224 return 0; 3225 out_err: 3226 if (vma) 3227 resv_map_put(vma); 3228 return ret; 3229 } 3230 3231 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed) 3232 { 3233 struct hstate *h = hstate_inode(inode); 3234 long chg = region_truncate(&inode->i_mapping->private_list, offset); 3235 struct hugepage_subpool *spool = subpool_inode(inode); 3236 3237 spin_lock(&inode->i_lock); 3238 inode->i_blocks -= (blocks_per_huge_page(h) * freed); 3239 spin_unlock(&inode->i_lock); 3240 3241 hugepage_subpool_put_pages(spool, (chg - freed)); 3242 hugetlb_acct_memory(h, -(chg - freed)); 3243 } 3244 3245 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE 3246 static unsigned long page_table_shareable(struct vm_area_struct *svma, 3247 struct vm_area_struct *vma, 3248 unsigned long addr, pgoff_t idx) 3249 { 3250 unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) + 3251 svma->vm_start; 3252 unsigned long sbase = saddr & PUD_MASK; 3253 unsigned long s_end = sbase + PUD_SIZE; 3254 3255 /* Allow segments to share if only one is marked locked */ 3256 unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED; 3257 unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED; 3258 3259 /* 3260 * match the virtual addresses, permission and the alignment of the 3261 * page table page. 3262 */ 3263 if (pmd_index(addr) != pmd_index(saddr) || 3264 vm_flags != svm_flags || 3265 sbase < svma->vm_start || svma->vm_end < s_end) 3266 return 0; 3267 3268 return saddr; 3269 } 3270 3271 static int vma_shareable(struct vm_area_struct *vma, unsigned long addr) 3272 { 3273 unsigned long base = addr & PUD_MASK; 3274 unsigned long end = base + PUD_SIZE; 3275 3276 /* 3277 * check on proper vm_flags and page table alignment 3278 */ 3279 if (vma->vm_flags & VM_MAYSHARE && 3280 vma->vm_start <= base && end <= vma->vm_end) 3281 return 1; 3282 return 0; 3283 } 3284 3285 /* 3286 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc() 3287 * and returns the corresponding pte. While this is not necessary for the 3288 * !shared pmd case because we can allocate the pmd later as well, it makes the 3289 * code much cleaner. pmd allocation is essential for the shared case because 3290 * pud has to be populated inside the same i_mmap_mutex section - otherwise 3291 * racing tasks could either miss the sharing (see huge_pte_offset) or select a 3292 * bad pmd for sharing. 
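 * The interval tree walk and the pud_populate() below are therefore done
 * under i_mmap_mutex.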
3293 */ 3294 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) 3295 { 3296 struct vm_area_struct *vma = find_vma(mm, addr); 3297 struct address_space *mapping = vma->vm_file->f_mapping; 3298 pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + 3299 vma->vm_pgoff; 3300 struct vm_area_struct *svma; 3301 unsigned long saddr; 3302 pte_t *spte = NULL; 3303 pte_t *pte; 3304 spinlock_t *ptl; 3305 3306 if (!vma_shareable(vma, addr)) 3307 return (pte_t *)pmd_alloc(mm, pud, addr); 3308 3309 mutex_lock(&mapping->i_mmap_mutex); 3310 vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) { 3311 if (svma == vma) 3312 continue; 3313 3314 saddr = page_table_shareable(svma, vma, addr, idx); 3315 if (saddr) { 3316 spte = huge_pte_offset(svma->vm_mm, saddr); 3317 if (spte) { 3318 get_page(virt_to_page(spte)); 3319 break; 3320 } 3321 } 3322 } 3323 3324 if (!spte) 3325 goto out; 3326 3327 ptl = huge_pte_lockptr(hstate_vma(vma), mm, spte); 3328 spin_lock(ptl); 3329 if (pud_none(*pud)) 3330 pud_populate(mm, pud, 3331 (pmd_t *)((unsigned long)spte & PAGE_MASK)); 3332 else 3333 put_page(virt_to_page(spte)); 3334 spin_unlock(ptl); 3335 out: 3336 pte = (pte_t *)pmd_alloc(mm, pud, addr); 3337 mutex_unlock(&mapping->i_mmap_mutex); 3338 return pte; 3339 } 3340 3341 /* 3342 * unmap huge page backed by shared pte. 3343 * 3344 * Hugetlb pte page is ref counted at the time of mapping. If pte is shared 3345 * indicated by page_count > 1, unmap is achieved by clearing pud and 3346 * decrementing the ref count. If count == 1, the pte page is not shared. 3347 * 3348 * called with page table lock held. 3349 * 3350 * returns: 1 successfully unmapped a shared pte page 3351 * 0 the underlying pte page is not shared, or it is the last user 3352 */ 3353 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) 3354 { 3355 pgd_t *pgd = pgd_offset(mm, *addr); 3356 pud_t *pud = pud_offset(pgd, *addr); 3357 3358 BUG_ON(page_count(virt_to_page(ptep)) == 0); 3359 if (page_count(virt_to_page(ptep)) == 1) 3360 return 0; 3361 3362 pud_clear(pud); 3363 put_page(virt_to_page(ptep)); 3364 *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE; 3365 return 1; 3366 } 3367 #define want_pmd_share() (1) 3368 #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */ 3369 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) 3370 { 3371 return NULL; 3372 } 3373 #define want_pmd_share() (0) 3374 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */ 3375 3376 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB 3377 pte_t *huge_pte_alloc(struct mm_struct *mm, 3378 unsigned long addr, unsigned long sz) 3379 { 3380 pgd_t *pgd; 3381 pud_t *pud; 3382 pte_t *pte = NULL; 3383 3384 pgd = pgd_offset(mm, addr); 3385 pud = pud_alloc(mm, pgd, addr); 3386 if (pud) { 3387 if (sz == PUD_SIZE) { 3388 pte = (pte_t *)pud; 3389 } else { 3390 BUG_ON(sz != PMD_SIZE); 3391 if (want_pmd_share() && pud_none(*pud)) 3392 pte = huge_pmd_share(mm, addr, pud); 3393 else 3394 pte = (pte_t *)pmd_alloc(mm, pud, addr); 3395 } 3396 } 3397 BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte)); 3398 3399 return pte; 3400 } 3401 3402 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) 3403 { 3404 pgd_t *pgd; 3405 pud_t *pud; 3406 pmd_t *pmd = NULL; 3407 3408 pgd = pgd_offset(mm, addr); 3409 if (pgd_present(*pgd)) { 3410 pud = pud_offset(pgd, addr); 3411 if (pud_present(*pud)) { 3412 if (pud_huge(*pud)) 3413 return (pte_t *)pud; 3414 pmd = pmd_offset(pud, addr); 3415 } 3416 } 3417 return (pte_t *) pmd; 3418 } 3419 3420 struct page * 
3421 follow_huge_pmd(struct mm_struct *mm, unsigned long address, 3422 pmd_t *pmd, int write) 3423 { 3424 struct page *page; 3425 3426 page = pte_page(*(pte_t *)pmd); 3427 if (page) 3428 page += ((address & ~PMD_MASK) >> PAGE_SHIFT); 3429 return page; 3430 } 3431 3432 struct page * 3433 follow_huge_pud(struct mm_struct *mm, unsigned long address, 3434 pud_t *pud, int write) 3435 { 3436 struct page *page; 3437 3438 page = pte_page(*(pte_t *)pud); 3439 if (page) 3440 page += ((address & ~PUD_MASK) >> PAGE_SHIFT); 3441 return page; 3442 } 3443 3444 #else /* !CONFIG_ARCH_WANT_GENERAL_HUGETLB */ 3445 3446 /* Can be overriden by architectures */ 3447 __attribute__((weak)) struct page * 3448 follow_huge_pud(struct mm_struct *mm, unsigned long address, 3449 pud_t *pud, int write) 3450 { 3451 BUG(); 3452 return NULL; 3453 } 3454 3455 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */ 3456 3457 #ifdef CONFIG_MEMORY_FAILURE 3458 3459 /* Should be called in hugetlb_lock */ 3460 static int is_hugepage_on_freelist(struct page *hpage) 3461 { 3462 struct page *page; 3463 struct page *tmp; 3464 struct hstate *h = page_hstate(hpage); 3465 int nid = page_to_nid(hpage); 3466 3467 list_for_each_entry_safe(page, tmp, &h->hugepage_freelists[nid], lru) 3468 if (page == hpage) 3469 return 1; 3470 return 0; 3471 } 3472 3473 /* 3474 * This function is called from memory failure code. 3475 * Assume the caller holds page lock of the head page. 3476 */ 3477 int dequeue_hwpoisoned_huge_page(struct page *hpage) 3478 { 3479 struct hstate *h = page_hstate(hpage); 3480 int nid = page_to_nid(hpage); 3481 int ret = -EBUSY; 3482 3483 spin_lock(&hugetlb_lock); 3484 if (is_hugepage_on_freelist(hpage)) { 3485 /* 3486 * Hwpoisoned hugepage isn't linked to activelist or freelist, 3487 * but dangling hpage->lru can trigger list-debug warnings 3488 * (this happens when we call unpoison_memory() on it), 3489 * so let it point to itself with list_del_init(). 3490 */ 3491 list_del_init(&hpage->lru); 3492 set_page_refcounted(hpage); 3493 h->free_huge_pages--; 3494 h->free_huge_pages_node[nid]--; 3495 ret = 0; 3496 } 3497 spin_unlock(&hugetlb_lock); 3498 return ret; 3499 } 3500 #endif 3501 3502 bool isolate_huge_page(struct page *page, struct list_head *list) 3503 { 3504 VM_BUG_ON(!PageHead(page)); 3505 if (!get_page_unless_zero(page)) 3506 return false; 3507 spin_lock(&hugetlb_lock); 3508 list_move_tail(&page->lru, list); 3509 spin_unlock(&hugetlb_lock); 3510 return true; 3511 } 3512 3513 void putback_active_hugepage(struct page *page) 3514 { 3515 VM_BUG_ON(!PageHead(page)); 3516 spin_lock(&hugetlb_lock); 3517 list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist); 3518 spin_unlock(&hugetlb_lock); 3519 put_page(page); 3520 } 3521 3522 bool is_hugepage_active(struct page *page) 3523 { 3524 VM_BUG_ON(!PageHuge(page)); 3525 /* 3526 * This function can be called for a tail page because the caller, 3527 * scan_movable_pages, scans through a given pfn-range which typically 3528 * covers one memory block. In systems using gigantic hugepage (1GB 3529 * for x86_64,) a hugepage is larger than a memory block, and we don't 3530 * support migrating such large hugepages for now, so return false 3531 * when called for tail pages. 3532 */ 3533 if (PageTail(page)) 3534 return false; 3535 /* 3536 * Refcount of a hwpoisoned hugepages is 1, but they are not active, 3537 * so we should return false for them. 3538 */ 3539 if (unlikely(PageHWPoison(page))) 3540 return false; 3541 return page_count(page) > 0; 3542 } 3543
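
/*
 * Rough usage sketch of the two helpers above (hedged, not an actual
 * in-tree caller; "pagelist" and the migration step are hypothetical and
 * error handling is omitted):
 *
 *	LIST_HEAD(pagelist);
 *
 *	if (PageHuge(page) &&
 *	    isolate_huge_page(compound_head(page), &pagelist)) {
 *		// ... hand "pagelist" to the migration code, or undo:
 *		putback_active_hugepage(compound_head(page));
 *	}
 *
 * isolate_huge_page() takes a reference and moves the page (normally on the
 * hstate's active list) onto "pagelist"; putback_active_hugepage() moves it
 * back to the active list and drops that reference.
 */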