/*
 * Generic hugetlb support.
 * (C) Nadia Yvette Chambers, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/page-isolation.h>
#include <linux/jhash.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>

#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/node.h>
#include "internal.h"

int hugepages_treat_as_movable;

int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];
/*
 * Minimum page order among possible hugepage sizes, set to a proper value
 * at boot time.
 */
static unsigned int minimum_order __read_mostly = UINT_MAX;

__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static unsigned long __initdata default_hstate_size;
static bool __initdata parsed_valid_hugepagesz = true;

/*
 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
 * free_huge_pages, and surplus_huge_pages.
 */
DEFINE_SPINLOCK(hugetlb_lock);

/*
 * Serializes faults on the same logical page.  This is used to
 * prevent spurious OOMs when the hugepage pool is fully utilized.
 */
static int num_fault_mutexes;
struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;

/* Forward declaration */
static int hugetlb_acct_memory(struct hstate *h, long delta);

static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
{
	bool free = (spool->count == 0) && (spool->used_hpages == 0);

	spin_unlock(&spool->lock);

	/* If no pages are used, and no other handles to the subpool
	 * remain, give up any reservations based on minimum size and
	 * free the subpool */
	if (free) {
		if (spool->min_hpages != -1)
			hugetlb_acct_memory(spool->hstate,
						-spool->min_hpages);
		kfree(spool);
	}
}

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages)
{
	struct hugepage_subpool *spool;

	spool = kzalloc(sizeof(*spool), GFP_KERNEL);
	if (!spool)
		return NULL;

	spin_lock_init(&spool->lock);
	spool->count = 1;
	spool->max_hpages = max_hpages;
	spool->hstate = h;
	spool->min_hpages = min_hpages;

	if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
		kfree(spool);
		return NULL;
	}
	spool->rsv_hpages = min_hpages;

	return spool;
}

void hugepage_put_subpool(struct hugepage_subpool *spool)
{
	spin_lock(&spool->lock);
	BUG_ON(!spool->count);
	spool->count--;
	unlock_or_release_subpool(spool);
}
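
/*
 * Illustrative example (not from the original source): a hugetlbfs mount
 * with a minimum size of four huge pages would create its subpool via
 * hugepage_new_subpool(h, -1, 4), reserving the four pages up front through
 * hugetlb_acct_memory(); the final hugepage_put_subpool() gives those
 * reservations back and frees the subpool.
 */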

/*
 * Subpool accounting for allocating and reserving pages.
 * Return -ENOMEM if there are not enough resources to satisfy
 * the request.  Otherwise, return the number of pages by which the
 * global pools must be adjusted (upward).  The returned value may
 * only be different than the passed value (delta) in the case where
 * a subpool minimum size must be maintained.
 */
static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
				       long delta)
{
	long ret = delta;

	if (!spool)
		return ret;

	spin_lock(&spool->lock);

	if (spool->max_hpages != -1) {		/* maximum size accounting */
		if ((spool->used_hpages + delta) <= spool->max_hpages)
			spool->used_hpages += delta;
		else {
			ret = -ENOMEM;
			goto unlock_ret;
		}
	}

	/* minimum size accounting */
	if (spool->min_hpages != -1 && spool->rsv_hpages) {
		if (delta > spool->rsv_hpages) {
			/*
			 * Asking for more reserves than those already taken on
			 * behalf of subpool.  Return difference.
			 */
			ret = delta - spool->rsv_hpages;
			spool->rsv_hpages = 0;
		} else {
			ret = 0;	/* reserves already accounted for */
			spool->rsv_hpages -= delta;
		}
	}

unlock_ret:
	spin_unlock(&spool->lock);
	return ret;
}

/*
 * Subpool accounting for freeing and unreserving pages.
 * Return the number of global page reservations that must be dropped.
 * The return value may only be different than the passed value (delta)
 * in the case where a subpool minimum size must be maintained.
 */
static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
				       long delta)
{
	long ret = delta;

	if (!spool)
		return delta;

	spin_lock(&spool->lock);

	if (spool->max_hpages != -1)		/* maximum size accounting */
		spool->used_hpages -= delta;

	/* minimum size accounting */
	if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
		if (spool->rsv_hpages + delta <= spool->min_hpages)
			ret = 0;
		else
			ret = spool->rsv_hpages + delta - spool->min_hpages;

		spool->rsv_hpages += delta;
		if (spool->rsv_hpages > spool->min_hpages)
			spool->rsv_hpages = spool->min_hpages;
	}

	/*
	 * If hugetlbfs_put_super couldn't free spool due to an outstanding
	 * quota reference, free it now.
	 */
	unlock_or_release_subpool(spool);

	return ret;
}

static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{
	return HUGETLBFS_SB(inode->i_sb)->spool;
}

static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{
	return subpool_inode(file_inode(vma->vm_file));
}
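
/*
 * Illustrative example (not from the original source): with min_hpages == 4
 * and rsv_hpages == 4, hugepage_subpool_get_pages(spool, 6) consumes all
 * four subpool reserves and returns 2, the amount by which the global pool
 * must still be adjusted.  A matching hugepage_subpool_put_pages(spool, 6)
 * refills rsv_hpages up to min_hpages and returns the excess of 2.
 */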

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 * across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock.  The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map.  The from and to elements are huge page
 * indices into the associated mapping.  from indicates the starting index
 * of the region.  to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping.  It is important to note that the to element
 * represents the first element past the end of the region.  This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
};

/*
 * Add the huge page range represented by [f, t) to the reserve
 * map.  In the normal case, existing regions will be expanded
 * to accommodate the specified range.  Sufficient regions should
 * exist for expansion due to the previous call to region_chg
 * with the same range.  However, it is possible that region_del
 * could have been called after region_chg and modified the map
 * in such a way that no region exists to be expanded.  In this
 * case, pull a region descriptor from the cache associated with
 * the map and use that for the new range.
 *
 * Return the number of new huge pages added to the map.  This
 * number is greater than or equal to zero.
 */
static long region_add(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *nrg, *trg;
	long add = 0;

	spin_lock(&resv->lock);
	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/*
	 * If no region exists which can be expanded to include the
	 * specified range, the list must have been modified by an
	 * interleaving call to region_del().  Pull a region descriptor
	 * from the cache and use it for this range.
	 */
	if (&rg->link == head || t < rg->from) {
		VM_BUG_ON(resv->region_cache_count <= 0);

		resv->region_cache_count--;
		nrg = list_first_entry(&resv->region_cache, struct file_region,
					link);
		list_del(&nrg->link);

		nrg->from = f;
		nrg->to = t;
		list_add(&nrg->link, rg->link.prev);

		add += t - f;
		goto out_locked;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;

	/* Check for and consume any regions we now overlap with. */
	nrg = rg;
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			break;

		/* If this area reaches higher then extend our area to
		 * include it completely.  If this is not the first area
		 * which we intend to reuse, free it. */
		if (rg->to > t)
			t = rg->to;
		if (rg != nrg) {
			/* Decrement return value by the deleted range.
			 * Another range will span this area so that by
			 * end of routine add will be >= zero
			 */
			add -= (rg->to - rg->from);
			list_del(&rg->link);
			kfree(rg);
		}
	}

	add += (nrg->from - f);		/* Added to beginning of region */
	nrg->from = f;
	add += t - nrg->to;		/* Added to end of region */
	nrg->to = t;

out_locked:
	resv->adds_in_progress--;
	spin_unlock(&resv->lock);
	VM_BUG_ON(add < 0);
	return add;
}
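
/*
 * Illustrative example (not from the original source): if the map holds a
 * single region [0, 4), region_add(resv, 2, 6) expands it to [0, 6) and
 * returns 2, the number of huge pages newly represented by the map.
 */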

/*
 * Examine the existing reserve map and determine how many
 * huge pages in the specified range [f, t) are NOT currently
 * represented.  This routine is called before a subsequent
 * call to region_add that will actually modify the reserve
 * map to add the specified range [f, t).  region_chg does
 * not change the number of huge pages represented by the
 * map.  However, if the existing regions in the map can not
 * be expanded to represent the new range, a new file_region
 * structure is added to the map as a placeholder.  This is
 * so that the subsequent region_add call will have all the
 * regions it needs and will not fail.
 *
 * Upon entry, region_chg will also examine the cache of region descriptors
 * associated with the map.  If there are not enough descriptors cached, one
 * will be allocated for the in progress add operation.
 *
 * Returns the number of huge pages that need to be added to the existing
 * reservation map for the range [f, t).  This number is greater than or
 * equal to zero.  -ENOMEM is returned if a new file_region structure or
 * cache entry is needed and can not be allocated.
 */
static long region_chg(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *nrg = NULL;
	long chg = 0;

retry:
	spin_lock(&resv->lock);
retry_locked:
	resv->adds_in_progress++;

	/*
	 * Check for sufficient descriptors in the cache to accommodate
	 * the number of in progress add operations.
	 */
	if (resv->adds_in_progress > resv->region_cache_count) {
		struct file_region *trg;

		VM_BUG_ON(resv->adds_in_progress - resv->region_cache_count > 1);
		/* Must drop lock to allocate a new descriptor. */
		resv->adds_in_progress--;
		spin_unlock(&resv->lock);

		trg = kmalloc(sizeof(*trg), GFP_KERNEL);
		if (!trg) {
			kfree(nrg);
			return -ENOMEM;
		}

		spin_lock(&resv->lock);
		list_add(&trg->link, &resv->region_cache);
		resv->region_cache_count++;
		goto retry_locked;
	}

	/* Locate the region we are before or in. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* If we are below the current region then a new region is required.
	 * Subtle, allocate a new region at the position but make it zero
	 * size such that we can guarantee to record the reservation. */
	if (&rg->link == head || t < rg->from) {
		if (!nrg) {
			resv->adds_in_progress--;
			spin_unlock(&resv->lock);
			nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
			if (!nrg)
				return -ENOMEM;

			nrg->from = f;
			nrg->to = f;
			INIT_LIST_HEAD(&nrg->link);
			goto retry;
		}

		list_add(&nrg->link, rg->link.prev);
		chg = t - f;
		goto out_nrg;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;
	chg = t - f;

	/* Check for and consume any regions we now overlap with. */
	list_for_each_entry(rg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			goto out;

		/* We overlap with this area, if it extends further than
		 * us then we must extend ourselves.  Account for its
		 * existing reservation. */
		if (rg->to > t) {
			chg += rg->to - t;
			t = rg->to;
		}
		chg -= rg->to - rg->from;
	}

out:
	spin_unlock(&resv->lock);
	/* We already know we raced and no longer need the new region */
	kfree(nrg);
	return chg;
out_nrg:
	spin_unlock(&resv->lock);
	return chg;
}
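
/*
 * Illustrative example (not from the original source): with the map holding
 * [0, 4), region_chg(resv, 2, 6) returns 2 because only pages 4 and 5 are
 * not yet represented; the matching region_add(resv, 2, 6) then performs
 * the expansion to [0, 6).
 */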

/*
 * Abort the in progress add operation.  The adds_in_progress field
 * of the resv_map keeps track of the operations in progress between
 * calls to region_chg and region_add.  Operations are sometimes
 * aborted after the call to region_chg.  In such cases, region_abort
 * is called to decrement the adds_in_progress counter.
 *
 * NOTE: The range arguments [f, t) are not needed or used in this
 * routine.  They are kept to make reading the calling code easier as
 * arguments will match the associated region_chg call.
 */
static void region_abort(struct resv_map *resv, long f, long t)
{
	spin_lock(&resv->lock);
	VM_BUG_ON(!resv->region_cache_count);
	resv->adds_in_progress--;
	spin_unlock(&resv->lock);
}

/*
 * Delete the specified range [f, t) from the reserve map.  If the
 * t parameter is LONG_MAX, this indicates that ALL regions after f
 * should be deleted.  Locate the regions which intersect [f, t)
 * and either trim, delete or split the existing regions.
 *
 * Returns the number of huge pages deleted from the reserve map.
 * In the normal case, the return value is zero or more.  In the
 * case where a region must be split, a new region descriptor must
 * be allocated.  If the allocation fails, -ENOMEM will be returned.
 * NOTE: If the parameter t == LONG_MAX, then we will never split
 * a region and possibly return -ENOMEM.  Callers specifying
 * t == LONG_MAX do not need to check for -ENOMEM error.
 */
static long region_del(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *trg;
	struct file_region *nrg = NULL;
	long del = 0;

retry:
	spin_lock(&resv->lock);
	list_for_each_entry_safe(rg, trg, head, link) {
		/*
		 * Skip regions before the range to be deleted.  file_region
		 * ranges are normally of the form [from, to).  However, there
		 * may be a "placeholder" entry in the map which is of the form
		 * (from, to) with from == to.  Check for placeholder entries
		 * at the beginning of the range to be deleted.
		 */
		if (rg->to <= f && (rg->to != rg->from || rg->to != f))
			continue;

		if (rg->from >= t)
			break;

		if (f > rg->from && t < rg->to) { /* Must split region */
			/*
			 * Check for an entry in the cache before dropping
			 * lock and attempting allocation.
			 */
			if (!nrg &&
			    resv->region_cache_count > resv->adds_in_progress) {
				nrg = list_first_entry(&resv->region_cache,
							struct file_region,
							link);
				list_del(&nrg->link);
				resv->region_cache_count--;
			}

			if (!nrg) {
				spin_unlock(&resv->lock);
				nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
				if (!nrg)
					return -ENOMEM;
				goto retry;
			}

			del += t - f;

			/* New entry for end of split region */
			nrg->from = t;
			nrg->to = rg->to;
			INIT_LIST_HEAD(&nrg->link);

			/* Original entry is trimmed */
			rg->to = f;

			list_add(&nrg->link, &rg->link);
			nrg = NULL;
			break;
		}

		if (f <= rg->from && t >= rg->to) { /* Remove entire region */
			del += rg->to - rg->from;
			list_del(&rg->link);
			kfree(rg);
			continue;
		}

		if (f <= rg->from) {	/* Trim beginning of region */
			del += t - rg->from;
			rg->from = t;
		} else {		/* Trim end of region */
			del += rg->to - f;
			rg->to = f;
		}
	}

	spin_unlock(&resv->lock);
	kfree(nrg);
	return del;
}
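
/*
 * Illustrative example (not from the original source): with the map holding
 * [0, 8), punching a hole over pages 2 and 3 calls region_del(resv, 2, 4),
 * which splits the region into [0, 2) and [4, 8) and returns 2.
 */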

/*
 * A rare out of memory error was encountered which prevented removal of
 * the reserve map region for a page.  The huge page itself was freed
 * and removed from the page cache.  This routine will adjust the subpool
 * usage count, and the global reserve count if needed.  By incrementing
 * these counts, the reserve map entry which could not be deleted will
 * appear as a "reserved" entry instead of simply dangling with incorrect
 * counts.
 */
void hugetlb_fix_reserve_counts(struct inode *inode)
{
	struct hugepage_subpool *spool = subpool_inode(inode);
	long rsv_adjust;

	rsv_adjust = hugepage_subpool_get_pages(spool, 1);
	if (rsv_adjust) {
		struct hstate *h = hstate_inode(inode);

		hugetlb_acct_memory(h, 1);
	}
}

/*
 * Count and return the number of huge pages in the reserve map
 * that intersect with the range [f, t).
 */
static long region_count(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg;
	long chg = 0;

	spin_lock(&resv->lock);
	/* Locate each segment we overlap with, and count that overlap. */
	list_for_each_entry(rg, head, link) {
		long seg_from;
		long seg_to;

		if (rg->to <= f)
			continue;
		if (rg->from >= t)
			break;

		seg_from = max(rg->from, f);
		seg_to = min(rg->to, t);

		chg += seg_to - seg_from;
	}
	spin_unlock(&resv->lock);

	return chg;
}

/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	return ((address - vma->vm_start) >> huge_page_shift(h)) +
			(vma->vm_pgoff >> huge_page_order(h));
}

pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
			unsigned long address)
{
	return vma_hugecache_offset(hstate_vma(vma), vma, address);
}
EXPORT_SYMBOL_GPL(linear_hugepage_index);

/*
 * Return the size of the pages allocated when backing a VMA.  In the majority
 * of cases this will be the same size as used by the page table entries.
 */
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	struct hstate *hstate;

	if (!is_vm_hugetlb_page(vma))
		return PAGE_SIZE;

	hstate = hstate_vma(vma);

	return 1UL << huge_page_shift(hstate);
}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);

/*
 * Return the page size being used by the MMU to back a VMA.  In the majority
 * of cases, the page size used by the kernel matches the MMU size.  On
 * architectures where it differs, an architecture-specific version of this
 * function is required.
 */
#ifndef vma_mmu_pagesize
unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return vma_kernel_pagesize(vma);
}
#endif

/*
 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
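
/*
 * Illustrative example (not from the original source): for a MAP_PRIVATE
 * mapping, vm_private_data may hold a resv_map pointer with HPAGE_RESV_OWNER
 * set in bit 0; masking with ~HPAGE_RESV_MASK recovers the pointer, as
 * vma_resv_map() below does.
 */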

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping.  Only the process that called mmap()
 * is guaranteed to have their future faults succeed.
 *
 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held.  It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping.  A shared mapping has a region map associated
 * with the underlying file; this region map represents the backing file
 * pages which have ever had a reservation assigned, and this persists even
 * after the page is instantiated.  A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it; this region map represents those offsets which have consumed
 * a reservation, i.e. where pages have been instantiated.
 */
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
	return (unsigned long)vma->vm_private_data;
}

static void set_vma_private_data(struct vm_area_struct *vma,
							unsigned long value)
{
	vma->vm_private_data = (void *)value;
}

struct resv_map *resv_map_alloc(void)
{
	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
	struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);

	if (!resv_map || !rg) {
		kfree(resv_map);
		kfree(rg);
		return NULL;
	}

	kref_init(&resv_map->refs);
	spin_lock_init(&resv_map->lock);
	INIT_LIST_HEAD(&resv_map->regions);

	resv_map->adds_in_progress = 0;

	INIT_LIST_HEAD(&resv_map->region_cache);
	list_add(&rg->link, &resv_map->region_cache);
	resv_map->region_cache_count = 1;

	return resv_map;
}

void resv_map_release(struct kref *ref)
{
	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
	struct list_head *head = &resv_map->region_cache;
	struct file_region *rg, *trg;

	/* Clear out any active regions before we release the map. */
	region_del(resv_map, 0, LONG_MAX);

	/* ... and any entries left in the cache */
	list_for_each_entry_safe(rg, trg, head, link) {
		list_del(&rg->link);
		kfree(rg);
	}

	VM_BUG_ON(resv_map->adds_in_progress);

	kfree(resv_map);
}

static inline struct resv_map *inode_resv_map(struct inode *inode)
{
	return inode->i_mapping->private_data;
}

static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	if (vma->vm_flags & VM_MAYSHARE) {
		struct address_space *mapping = vma->vm_file->f_mapping;
		struct inode *inode = mapping->host;

		return inode_resv_map(inode);

	} else {
		return (struct resv_map *)(get_vma_private_data(vma) &
							~HPAGE_RESV_MASK);
	}
}

static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, (get_vma_private_data(vma) &
				HPAGE_RESV_MASK) | (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);

	return (get_vma_private_data(vma) & flag) != 0;
}

/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	if (!(vma->vm_flags & VM_MAYSHARE))
		vma->vm_private_data = (void *)0;
}
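
/*
 * Illustrative example (not from the original source): after fork(), the
 * child's copy of a MAP_PRIVATE hugetlb VMA has had vm_private_data cleared
 * by reset_vma_resv_huge_pages(), so HPAGE_RESV_OWNER is not set and
 * vma_has_reserves() below returns false for the child; its faults must be
 * satisfied from the unreserved free pool.
 */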

/* Returns true if the VMA has associated reserve pages */
static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
{
	if (vma->vm_flags & VM_NORESERVE) {
		/*
		 * This address is already reserved by another process
		 * (chg == 0), so we should decrement the reserved count.
		 * Without decrementing, the reserve count remains after
		 * releasing the inode, because this allocated page will go
		 * into the page cache and is regarded as coming from the
		 * reserved pool in the releasing step.  Currently, we
		 * don't have any other solution to deal with this situation
		 * properly, so add a work-around here.
		 */
		if (vma->vm_flags & VM_MAYSHARE && chg == 0)
			return true;
		else
			return false;
	}

	/* Shared mappings always use reserves */
	if (vma->vm_flags & VM_MAYSHARE) {
		/*
		 * We know VM_NORESERVE is not set.  Therefore, there SHOULD
		 * be a region map for all pages.  The only situation where
		 * there is no region map is if a hole was punched via
		 * fallocate.  In this case, there really are no reserves to
		 * use.  This situation is indicated if chg != 0.
		 */
		if (chg)
			return false;
		else
			return true;
	}

	/*
	 * Only the process that called mmap() has reserves for
	 * private mappings.
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		/*
		 * Like the shared case above, a hole punch or truncate
		 * could have been performed on the private mapping.
		 * Examine the value of chg to determine if reserves
		 * actually exist or were previously consumed.
		 * Very Subtle - The value of chg comes from a previous
		 * call to vma_needs_reserves().  The reserve map for
		 * private mappings has different (opposite) semantics
		 * than that of shared mappings.  vma_needs_reserves()
		 * has already taken this difference in semantics into
		 * account.  Therefore, the meaning of chg is the same
		 * as in the shared case above.  Code could easily be
		 * combined, but keeping it separate draws attention to
		 * subtle differences.
		 */
		if (chg)
			return false;
		else
			return true;
	}

	return false;
}

static void enqueue_huge_page(struct hstate *h, struct page *page)
{
	int nid = page_to_nid(page);
	list_move(&page->lru, &h->hugepage_freelists[nid]);
	h->free_huge_pages++;
	h->free_huge_pages_node[nid]++;
}

static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
{
	struct page *page;

	list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
		if (!is_migrate_isolate_page(page))
			break;
	/*
	 * If a 'non-isolated free hugepage' is not found on the list,
	 * the allocation fails.
	 */
	if (&h->hugepage_freelists[nid] == &page->lru)
		return NULL;
	list_move(&page->lru, &h->hugepage_activelist);
	set_page_refcounted(page);
	h->free_huge_pages--;
	h->free_huge_pages_node[nid]--;
	return page;
}
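
/*
 * Illustrative example (not from the original source): dequeueing a page on
 * node 1 moves it from hugepage_freelists[1] to hugepage_activelist and
 * decrements free_huge_pages and free_huge_pages_node[1]; freeing the page
 * later reverses both counters via enqueue_huge_page().
 */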

/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	if (hugepages_treat_as_movable || hugepage_migration_supported(h))
		return GFP_HIGHUSER_MOVABLE;
	else
		return GFP_HIGHUSER;
}

static struct page *dequeue_huge_page_vma(struct hstate *h,
				struct vm_area_struct *vma,
				unsigned long address, int avoid_reserve,
				long chg)
{
	struct page *page = NULL;
	struct mempolicy *mpol;
	nodemask_t *nodemask;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;
	unsigned int cpuset_mems_cookie;

	/*
	 * A child process with MAP_PRIVATE mappings created by its parent
	 * has no page reserves.  This check ensures that reservations are
	 * not "stolen".  The child may still get SIGKILLed.
	 */
	if (!vma_has_reserves(vma, chg) &&
			h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

	/* If reserves cannot be used, ensure enough pages are in the pool */
	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

retry_cpuset:
	cpuset_mems_cookie = read_mems_allowed_begin();
	zonelist = huge_zonelist(vma, address,
					htlb_alloc_mask(h), &mpol, &nodemask);

	for_each_zone_zonelist_nodemask(zone, z, zonelist,
						MAX_NR_ZONES - 1, nodemask) {
		if (cpuset_zone_allowed(zone, htlb_alloc_mask(h))) {
			page = dequeue_huge_page_node(h, zone_to_nid(zone));
			if (page) {
				if (avoid_reserve)
					break;
				if (!vma_has_reserves(vma, chg))
					break;

				SetPagePrivate(page);
				h->resv_huge_pages--;
				break;
			}
		}
	}

	mpol_cond_put(mpol);
	if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
		goto retry_cpuset;
	return page;

err:
	return NULL;
}

/*
 * common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 * be outside of *nodes_allowed.  Ensure that we use an allowed
 * node for alloc or free.
 */
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	nid = next_node_in(nid, *nodes_allowed);
	VM_BUG_ON(nid >= MAX_NUMNODES);

	return nid;
}

static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	if (!node_isset(nid, *nodes_allowed))
		nid = next_node_allowed(nid, nodes_allowed);
	return nid;
}

/*
 * returns the previously saved node ["this node"] from which to
 * allocate a persistent huge page for the pool and advance the
 * next node from which to allocate, handling wrap at end of node
 * mask.
 */
static int hstate_next_node_to_alloc(struct hstate *h,
					nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);

	return nid;
}
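
/*
 * Illustrative example (not from the original source): with nodes_allowed =
 * {0, 2} and next_nid_to_alloc == 1, hstate_next_node_to_alloc() returns 2
 * and advances next_nid_to_alloc to 0, wrapping around the node mask.
 */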

/*
 * helper for free_pool_huge_page() - return the previously saved
 * node ["this node"] from which to free a huge page.  Advance the
 * next node id whether or not we find a free huge page to free so
 * that the next attempt to free addresses the next node.
 */
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);

	return nid;
}

#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_alloc(hs, mask)) || 1);	\
		nr_nodes--)

#define for_each_node_mask_to_free(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
		nr_nodes--)

#if defined(CONFIG_ARCH_HAS_GIGANTIC_PAGE) && \
	((defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || \
	defined(CONFIG_CMA))
static void destroy_compound_gigantic_page(struct page *page,
					unsigned int order)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p = page + 1;

	atomic_set(compound_mapcount_ptr(page), 0);
	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
		clear_compound_head(p);
		set_page_refcounted(p);
	}

	set_compound_order(page, 0);
	__ClearPageHead(page);
}

static void free_gigantic_page(struct page *page, unsigned int order)
{
	free_contig_range(page_to_pfn(page), 1 << order);
}

static int __alloc_gigantic_page(unsigned long start_pfn,
				unsigned long nr_pages)
{
	unsigned long end_pfn = start_pfn + nr_pages;
	return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
}

static bool pfn_range_valid_gigantic(struct zone *z,
			unsigned long start_pfn, unsigned long nr_pages)
{
	unsigned long i, end_pfn = start_pfn + nr_pages;
	struct page *page;

	for (i = start_pfn; i < end_pfn; i++) {
		if (!pfn_valid(i))
			return false;

		page = pfn_to_page(i);

		if (page_zone(page) != z)
			return false;

		if (PageReserved(page))
			return false;

		if (page_count(page) > 0)
			return false;

		if (PageHuge(page))
			return false;
	}

	return true;
}

static bool zone_spans_last_pfn(const struct zone *zone,
			unsigned long start_pfn, unsigned long nr_pages)
{
	unsigned long last_pfn = start_pfn + nr_pages - 1;
	return zone_spans_pfn(zone, last_pfn);
}
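
/*
 * Illustrative example (not from the original source): for a 1 GB gigantic
 * page with 4 KB base pages (order 18, so nr_pages == 262144),
 * alloc_gigantic_page() below scans each zone in 262144-pfn aligned steps
 * for a range that pfn_range_valid_gigantic() accepts.
 */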

static struct page *alloc_gigantic_page(int nid, unsigned int order)
{
	unsigned long nr_pages = 1 << order;
	unsigned long ret, pfn, flags;
	struct zone *z;

	z = NODE_DATA(nid)->node_zones;
	for (; z - NODE_DATA(nid)->node_zones < MAX_NR_ZONES; z++) {
		spin_lock_irqsave(&z->lock, flags);

		pfn = ALIGN(z->zone_start_pfn, nr_pages);
		while (zone_spans_last_pfn(z, pfn, nr_pages)) {
			if (pfn_range_valid_gigantic(z, pfn, nr_pages)) {
				/*
				 * We release the zone lock here because
				 * alloc_contig_range() will also lock the zone
				 * at some point.  If there's an allocation
				 * spinning on this lock, it may win the race
				 * and cause alloc_contig_range() to fail...
				 */
				spin_unlock_irqrestore(&z->lock, flags);
				ret = __alloc_gigantic_page(pfn, nr_pages);
				if (!ret)
					return pfn_to_page(pfn);
				spin_lock_irqsave(&z->lock, flags);
			}
			pfn += nr_pages;
		}

		spin_unlock_irqrestore(&z->lock, flags);
	}

	return NULL;
}

static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
static void prep_compound_gigantic_page(struct page *page, unsigned int order);

static struct page *alloc_fresh_gigantic_page_node(struct hstate *h, int nid)
{
	struct page *page;

	page = alloc_gigantic_page(nid, huge_page_order(h));
	if (page) {
		prep_compound_gigantic_page(page, huge_page_order(h));
		prep_new_huge_page(h, page, nid);
	}

	return page;
}

static int alloc_fresh_gigantic_page(struct hstate *h,
				nodemask_t *nodes_allowed)
{
	struct page *page = NULL;
	int nr_nodes, node;

	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
		page = alloc_fresh_gigantic_page_node(h, node);
		if (page)
			return 1;
	}

	return 0;
}

static inline bool gigantic_page_supported(void) { return true; }
#else
static inline bool gigantic_page_supported(void) { return false; }
static inline void free_gigantic_page(struct page *page, unsigned int order) { }
static inline void destroy_compound_gigantic_page(struct page *page,
						unsigned int order) { }
static inline int alloc_fresh_gigantic_page(struct hstate *h,
					nodemask_t *nodes_allowed) { return 0; }
#endif

static void update_and_free_page(struct hstate *h, struct page *page)
{
	int i;

	if (hstate_is_gigantic(h) && !gigantic_page_supported())
		return;

	h->nr_huge_pages--;
	h->nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < pages_per_huge_page(h); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
				1 << PG_referenced | 1 << PG_dirty |
				1 << PG_active | 1 << PG_private |
				1 << PG_writeback);
	}
	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
	set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
	set_page_refcounted(page);
	if (hstate_is_gigantic(h)) {
		destroy_compound_gigantic_page(page, huge_page_order(h));
		free_gigantic_page(page, huge_page_order(h));
	} else {
		__free_pages(page, huge_page_order(h));
	}
}

struct hstate *size_to_hstate(unsigned long size)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (huge_page_size(h) == size)
			return h;
	}
	return NULL;
}
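
/*
 * Illustrative example (not from the original source): on x86-64,
 * size_to_hstate(2UL * 1024 * 1024) returns the 2 MB hstate if one has been
 * set up, and NULL otherwise.
 */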

/*
 * Test to determine whether the hugepage is "active/in-use" (i.e. being linked
 * to hstate->hugepage_activelist.)
 *
 * This function can be called for tail pages, but never returns true for them.
 */
bool page_huge_active(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return PageHead(page) && PagePrivate(&page[1]);
}

/* never called for tail page */
static void set_page_huge_active(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
	SetPagePrivate(&page[1]);
}

static void clear_page_huge_active(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
	ClearPagePrivate(&page[1]);
}

void free_huge_page(struct page *page)
{
	/*
	 * Can't pass hstate in here because it is called from the
	 * compound page destructor.
	 */
	struct hstate *h = page_hstate(page);
	int nid = page_to_nid(page);
	struct hugepage_subpool *spool =
		(struct hugepage_subpool *)page_private(page);
	bool restore_reserve;

	set_page_private(page, 0);
	page->mapping = NULL;
	VM_BUG_ON_PAGE(page_count(page), page);
	VM_BUG_ON_PAGE(page_mapcount(page), page);
	restore_reserve = PagePrivate(page);
	ClearPagePrivate(page);

	/*
	 * A return code of zero implies that the subpool will be under its
	 * minimum size if the reservation is not restored after the page
	 * is freed.  Therefore, force the restore_reserve operation.
	 */
	if (hugepage_subpool_put_pages(spool, 1) == 0)
		restore_reserve = true;

	spin_lock(&hugetlb_lock);
	clear_page_huge_active(page);
	hugetlb_cgroup_uncharge_page(hstate_index(h),
				     pages_per_huge_page(h), page);
	if (restore_reserve)
		h->resv_huge_pages++;

	if (h->surplus_huge_pages_node[nid]) {
		/* remove the page from active list */
		list_del(&page->lru);
		update_and_free_page(h, page);
		h->surplus_huge_pages--;
		h->surplus_huge_pages_node[nid]--;
	} else {
		arch_clear_hugepage_flags(page);
		enqueue_huge_page(h, page);
	}
	spin_unlock(&hugetlb_lock);
}

static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
{
	INIT_LIST_HEAD(&page->lru);
	set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
	spin_lock(&hugetlb_lock);
	set_hugetlb_cgroup(page, NULL);
	h->nr_huge_pages++;
	h->nr_huge_pages_node[nid]++;
	spin_unlock(&hugetlb_lock);
	put_page(page); /* free it into the hugepage allocator */
}
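
/*
 * Illustrative note (not from the original source): because
 * prep_new_huge_page() sets HUGETLB_PAGE_DTOR, the final put_page() on a
 * hugetlb page reaches free_huge_page() above through the compound page
 * destructor; the put_page() at the end of prep_new_huge_page() is what
 * first places a fresh page on the free lists.
 */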

static void prep_compound_gigantic_page(struct page *page, unsigned int order)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p = page + 1;

	/* we rely on prep_new_huge_page to set the destructor */
	set_compound_order(page, order);
	__ClearPageReserved(page);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
		/*
		 * For gigantic hugepages allocated through bootmem at
		 * boot, it's safer to be consistent with the not-gigantic
		 * hugepages and clear the PG_reserved bit from all tail pages
		 * too.  Otherwise drivers using get_user_pages() to access
		 * tail pages may get the reference counting wrong if they see
		 * PG_reserved set on a tail page (despite the head page not
		 * having PG_reserved set).  Enforcing this consistency between
		 * head and tail pages allows drivers to optimize away a check
		 * on the head page when they need to know if put_page() is
		 * needed after get_user_pages().
		 */
		__ClearPageReserved(p);
		set_page_count(p, 0);
		set_compound_head(p, page);
	}
	atomic_set(compound_mapcount_ptr(page), -1);
}

/*
 * PageHuge() only returns true for hugetlbfs pages, but not for normal or
 * transparent huge pages.  See the PageTransHuge() documentation for more
 * details.
 */
int PageHuge(struct page *page)
{
	if (!PageCompound(page))
		return 0;

	page = compound_head(page);
	return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
}
EXPORT_SYMBOL_GPL(PageHuge);

/*
 * PageHeadHuge() only returns true for hugetlbfs head page, but not for
 * normal or transparent huge pages.
 */
int PageHeadHuge(struct page *page_head)
{
	if (!PageHead(page_head))
		return 0;

	return get_compound_page_dtor(page_head) == free_huge_page;
}

pgoff_t __basepage_index(struct page *page)
{
	struct page *page_head = compound_head(page);
	pgoff_t index = page_index(page_head);
	unsigned long compound_idx;

	if (!PageHuge(page_head))
		return page_index(page);

	if (compound_order(page_head) >= MAX_ORDER)
		compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
	else
		compound_idx = page - page_head;

	return (index << compound_order(page_head)) + compound_idx;
}

static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
{
	struct page *page;

	page = __alloc_pages_node(nid,
		htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
						__GFP_REPEAT|__GFP_NOWARN,
		huge_page_order(h));
	if (page) {
		prep_new_huge_page(h, page, nid);
	}

	return page;
}

static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
{
	struct page *page;
	int nr_nodes, node;
	int ret = 0;

	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
		page = alloc_fresh_huge_page_node(h, node);
		if (page) {
			ret = 1;
			break;
		}
	}

	if (ret)
		count_vm_event(HTLB_BUDDY_PGALLOC);
	else
		count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);

	return ret;
}

/*
 * Free huge page from pool from next node to free.
 * Attempt to keep persistent huge pages more or less
 * balanced over allowed nodes.
 * Called with hugetlb_lock locked.
 */
static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
							 bool acct_surplus)
{
	int nr_nodes, node;
	int ret = 0;

	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
		/*
		 * If we're returning unused surplus pages, only examine
		 * nodes with surplus pages.
		 */
		if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
		    !list_empty(&h->hugepage_freelists[node])) {
			struct page *page =
				list_entry(h->hugepage_freelists[node].next,
					  struct page, lru);
			list_del(&page->lru);
			h->free_huge_pages--;
			h->free_huge_pages_node[node]--;
			if (acct_surplus) {
				h->surplus_huge_pages--;
				h->surplus_huge_pages_node[node]--;
			}
			update_and_free_page(h, page);
			ret = 1;
			break;
		}
	}

	return ret;
}
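
/*
 * Illustrative example (not from the original source): shrinking the pool
 * with four allowed nodes calls free_pool_huge_page() repeatedly; the
 * for_each_node_mask_to_free() walk frees one page from node 0, then 1,
 * 2, 3, and wraps, keeping the per-node counts balanced.
 */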

/*
 * Dissolve a given free hugepage into free buddy pages.  This function does
 * nothing for in-use (including surplus) hugepages.  Returns -EBUSY if the
 * number of free hugepages would be reduced below the number of reserved
 * hugepages.
 */
static int dissolve_free_huge_page(struct page *page)
{
	int rc = 0;

	spin_lock(&hugetlb_lock);
	if (PageHuge(page) && !page_count(page)) {
		struct page *head = compound_head(page);
		struct hstate *h = page_hstate(head);
		int nid = page_to_nid(head);
		if (h->free_huge_pages - h->resv_huge_pages == 0) {
			rc = -EBUSY;
			goto out;
		}
		list_del(&head->lru);
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
		h->max_huge_pages--;
		update_and_free_page(h, head);
	}
out:
	spin_unlock(&hugetlb_lock);
	return rc;
}

/*
 * Dissolve free hugepages in a given pfn range.  Used by memory hotplug to
 * make specified memory blocks removable from the system.
 * Note that this will dissolve a free gigantic hugepage completely, if any
 * part of it lies within the given range.
 * Also note that if dissolve_free_huge_page() returns with an error, all
 * free hugepages that were dissolved before that error are lost.
 */
int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct page *page;
	int rc = 0;

	if (!hugepages_supported())
		return rc;

	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
		page = pfn_to_page(pfn);
		if (PageHuge(page) && !page_count(page)) {
			rc = dissolve_free_huge_page(page);
			if (rc)
				break;
		}
	}

	return rc;
}
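
/*
 * Illustrative example (not from the original source): when offlining a
 * memory block, memory hotplug calls dissolve_free_huge_pages() over the
 * block's pfn range; free hugepages in the range are handed back to the
 * buddy allocator, in-use ones are skipped, and -EBUSY is reported if
 * dissolving would dip into the reserved pool.
 */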

/*
 * There are 3 ways this can get called:
 * 1. With vma+addr: we use the VMA's memory policy
 * 2. With !vma, but nid=NUMA_NO_NODE:  We try to allocate a huge
 *    page from any node, and let the buddy allocator itself figure
 *    it out.
 * 3. With !vma, but nid!=NUMA_NO_NODE.  We allocate a huge page
 *    strictly from 'nid'
 */
static struct page *__hugetlb_alloc_buddy_huge_page(struct hstate *h,
		struct vm_area_struct *vma, unsigned long addr, int nid)
{
	int order = huge_page_order(h);
	gfp_t gfp = htlb_alloc_mask(h)|__GFP_COMP|__GFP_REPEAT|__GFP_NOWARN;
	unsigned int cpuset_mems_cookie;

	/*
	 * We need a VMA to get a memory policy.  If we do not
	 * have one, we use the 'nid' argument.
	 *
	 * The mempolicy stuff below has some non-inlined bits
	 * and calls ->vm_ops.  That makes it hard to optimize at
	 * compile-time, even when NUMA is off and it does
	 * nothing.  This helps the compiler optimize it out.
	 */
	if (!IS_ENABLED(CONFIG_NUMA) || !vma) {
		/*
		 * If a specific node is requested, make sure to
		 * get memory from there, but only when a node
		 * is explicitly specified.
		 */
		if (nid != NUMA_NO_NODE)
			gfp |= __GFP_THISNODE;
		/*
		 * Make sure to call something that can handle
		 * nid=NUMA_NO_NODE
		 */
		return alloc_pages_node(nid, gfp, order);
	}

	/*
	 * OK, so we have a VMA.  Fetch the mempolicy and try to
	 * allocate a huge page with it.  We will only reach this
	 * when CONFIG_NUMA=y.
	 */
	do {
		struct page *page;
		struct mempolicy *mpol;
		struct zonelist *zl;
		nodemask_t *nodemask;

		cpuset_mems_cookie = read_mems_allowed_begin();
		zl = huge_zonelist(vma, addr, gfp, &mpol, &nodemask);
		mpol_cond_put(mpol);
		page = __alloc_pages_nodemask(gfp, order, zl, nodemask);
		if (page)
			return page;
	} while (read_mems_allowed_retry(cpuset_mems_cookie));

	return NULL;
}
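
/*
 * Illustrative note (not from the original source): the fault path supplies
 * vma and addr with nid == NUMA_NO_NODE (mode 1 above), while callers such
 * as alloc_huge_page_node() take the nid-only path through
 * __alloc_buddy_huge_page_no_mpol() with vma == NULL and addr == -1.
 */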

/*
 * There are two ways to allocate a huge page:
 * 1. When you have a VMA and an address (like a fault)
 * 2. When you have no VMA (like when setting /proc/.../nr_hugepages)
 *
 * 'vma' and 'addr' are only for (1).  'nid' is always NUMA_NO_NODE in
 * this case which signifies that the allocation should be done with
 * respect for the VMA's memory policy.
 *
 * For (2), we ignore 'vma' and 'addr' and use 'nid' exclusively.  This
 * implies that memory policies will not be taken into account.
 */
static struct page *__alloc_buddy_huge_page(struct hstate *h,
		struct vm_area_struct *vma, unsigned long addr, int nid)
{
	struct page *page;
	unsigned int r_nid;

	if (hstate_is_gigantic(h))
		return NULL;

	/*
	 * Make sure that anyone specifying 'nid' is not also specifying a VMA.
	 * This makes sure the caller is picking _one_ of the modes with which
	 * we can call this function, not both.
	 */
	if (vma || (addr != -1)) {
		VM_WARN_ON_ONCE(addr == -1);
		VM_WARN_ON_ONCE(nid != NUMA_NO_NODE);
	}
	/*
	 * Assume we will successfully allocate the surplus page to
	 * prevent racing processes from causing the surplus to exceed
	 * overcommit
	 *
	 * This however introduces a different race, where a process B
	 * tries to grow the static hugepage pool while alloc_pages() is
	 * called by process A.  B will only examine the per-node
	 * counters in determining if surplus huge pages can be
	 * converted to normal huge pages in adjust_pool_surplus().  A
	 * won't be able to increment the per-node counter, until the
	 * lock is dropped by B, but B doesn't drop hugetlb_lock until
	 * no more huge pages can be converted from surplus to normal
	 * state (and doesn't try to convert again).  Thus, we have a
	 * case where a surplus huge page exists, the pool is grown, and
	 * the surplus huge page still exists after, even though it
	 * should just have been converted to a normal huge page.  This
	 * does not leak memory, though, as the hugepage will be freed
	 * once it is out of use.  It also does not allow the counters to
	 * go out of whack in adjust_pool_surplus() as we don't modify
	 * the node values until we've gotten the hugepage and only the
	 * per-node value is checked there.
	 */
	spin_lock(&hugetlb_lock);
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
		spin_unlock(&hugetlb_lock);
		return NULL;
	} else {
		h->nr_huge_pages++;
		h->surplus_huge_pages++;
	}
	spin_unlock(&hugetlb_lock);

	page = __hugetlb_alloc_buddy_huge_page(h, vma, addr, nid);

	spin_lock(&hugetlb_lock);
	if (page) {
		INIT_LIST_HEAD(&page->lru);
		r_nid = page_to_nid(page);
		set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
		set_hugetlb_cgroup(page, NULL);
		/*
		 * We incremented the global counters already
		 */
		h->nr_huge_pages_node[r_nid]++;
		h->surplus_huge_pages_node[r_nid]++;
		__count_vm_event(HTLB_BUDDY_PGALLOC);
	} else {
		h->nr_huge_pages--;
		h->surplus_huge_pages--;
		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
	}
	spin_unlock(&hugetlb_lock);

	return page;
}

/*
 * Allocate a huge page from 'nid'.  Note, 'nid' may be
 * NUMA_NO_NODE, which means that it may be allocated
 * anywhere.
 */
static
struct page *__alloc_buddy_huge_page_no_mpol(struct hstate *h, int nid)
{
	unsigned long addr = -1;

	return __alloc_buddy_huge_page(h, NULL, addr, nid);
}

/*
 * Use the VMA's mpolicy to allocate a huge page from the buddy.
 */
static
struct page *__alloc_buddy_huge_page_with_mpol(struct hstate *h,
		struct vm_area_struct *vma, unsigned long addr)
{
	return __alloc_buddy_huge_page(h, vma, addr, NUMA_NO_NODE);
}

/*
 * This allocation function is useful in the context where vma is irrelevant.
 * E.g. soft-offlining uses this function because it only cares about the
 * physical address of the error page.
 */
struct page *alloc_huge_page_node(struct hstate *h, int nid)
{
	struct page *page = NULL;

	spin_lock(&hugetlb_lock);
	if (h->free_huge_pages - h->resv_huge_pages > 0)
		page = dequeue_huge_page_node(h, nid);
	spin_unlock(&hugetlb_lock);

	if (!page)
		page = __alloc_buddy_huge_page_no_mpol(h, nid);

	return page;
}
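
/*
 * Illustrative example (not from the original source): if a reservation of
 * delta == 5 pages arrives while resv_huge_pages == 0 and free_huge_pages
 * == 2, gather_surplus_pages() below computes needed == 3 and must obtain
 * three surplus pages from the buddy allocator before committing the full
 * reservation.
 */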

/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.
 */
static int gather_surplus_pages(struct hstate *h, int delta)
{
	struct list_head surplus_list;
	struct page *page, *tmp;
	int ret, i;
	int needed, allocated;
	bool alloc_ok = true;

	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
	if (needed <= 0) {
		h->resv_huge_pages += delta;
		return 0;
	}

	allocated = 0;
	INIT_LIST_HEAD(&surplus_list);

	ret = -ENOMEM;
retry:
	spin_unlock(&hugetlb_lock);
	for (i = 0; i < needed; i++) {
		page = __alloc_buddy_huge_page_no_mpol(h, NUMA_NO_NODE);
		if (!page) {
			alloc_ok = false;
			break;
		}
		list_add(&page->lru, &surplus_list);
	}
	allocated += i;

	/*
	 * After retaking hugetlb_lock, we need to recalculate 'needed'
	 * because either resv_huge_pages or free_huge_pages may have changed.
	 */
	spin_lock(&hugetlb_lock);
	needed = (h->resv_huge_pages + delta) -
			(h->free_huge_pages + allocated);
	if (needed > 0) {
		if (alloc_ok)
			goto retry;
		/*
		 * We were not able to allocate enough pages to
		 * satisfy the entire reservation so we free what
		 * we've allocated so far.
		 */
		goto free;
	}
	/*
	 * The surplus_list now contains _at_least_ the number of extra pages
	 * needed to accommodate the reservation.  Add the appropriate number
	 * of pages to the hugetlb pool and free the extras back to the buddy
	 * allocator.  Commit the entire reservation here to prevent another
	 * process from stealing the pages as they are added to the pool but
	 * before they are reserved.
	 */
	needed += allocated;
	h->resv_huge_pages += delta;
	ret = 0;

	/* Free the needed pages to the hugetlb pool */
	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
		if ((--needed) < 0)
			break;
		/*
		 * This page is now managed by the hugetlb allocator and has
		 * no users -- drop the buddy allocator's reference.
		 */
		put_page_testzero(page);
		VM_BUG_ON_PAGE(page_count(page), page);
		enqueue_huge_page(h, page);
	}
free:
	spin_unlock(&hugetlb_lock);

	/* Free unnecessary surplus pages to the buddy allocator */
	list_for_each_entry_safe(page, tmp, &surplus_list, lru)
		put_page(page);
	spin_lock(&hugetlb_lock);

	return ret;
}

/*
 * This routine has two main purposes:
 * 1) Decrement the reservation count (resv_huge_pages) by the value passed
 *    in unused_resv_pages.  This corresponds to the prior adjustments made
 *    to the associated reservation map.
 * 2) Free any unused surplus pages that may have been allocated to satisfy
 *    the reservation.  As many as unused_resv_pages may be freed.
 *
 * Called with hugetlb_lock held.  However, the lock could be dropped (and
 * reacquired) during calls to cond_resched_lock.  Whenever dropping the lock,
 * we must make sure nobody else can claim pages we are in the process of
 * freeing.  Do this by ensuring resv_huge_pages is always greater than the
 * number of huge pages we plan to free when dropping the lock.
 */
static void return_unused_surplus_pages(struct hstate *h,
					unsigned long unused_resv_pages)
{
	unsigned long nr_pages;

	/* Cannot return gigantic pages currently */
	if (hstate_is_gigantic(h))
		goto out;

	/*
	 * Part (or even all) of the reservation could have been backed
	 * by pre-allocated pages.  Only free surplus pages.
	 */
	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);

	/*
	 * We want to release as many surplus pages as possible, spread
	 * evenly across all nodes with memory.  Iterate across these nodes
	 * until we can no longer free unreserved surplus pages.  This occurs
	 * when the nodes with surplus pages have no free pages.
	 * free_pool_huge_page() will balance the freed pages across the
	 * on-line nodes with memory and will handle the hstate accounting.
	 *
	 * Note that we decrement resv_huge_pages as we free the pages.  If
	 * we drop the lock, resv_huge_pages will still be sufficiently large
	 * to cover subsequent pages we may free.
	 */
	while (nr_pages--) {
		h->resv_huge_pages--;
		unused_resv_pages--;
		if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
			goto out;
		cond_resched_lock(&hugetlb_lock);
	}

out:
	/* Fully uncommit the reservation */
	h->resv_huge_pages -= unused_resv_pages;
}
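
/*
 * Illustrative example (not from the original source): if 5 reserved pages
 * go unused while only 2 surplus pages exist, return_unused_surplus_pages()
 * frees the 2 surplus pages in its loop (decrementing resv_huge_pages twice)
 * and then uncommits the remaining 3 reservations at the out label, for a
 * total resv_huge_pages adjustment of 5.
 */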
1833 * 1834 * vma_needs_reservation is called to determine if the huge page at addr 1835 * within the vma has an associated reservation. If a reservation is 1836 * needed, the value 1 is returned. The caller is then responsible for 1837 * managing the global reservation and subpool usage counts. After 1838 * the huge page has been allocated, vma_commit_reservation is called 1839 * to add the page to the reservation map. If the page allocation fails, 1840 * the reservation must be ended instead of committed. vma_end_reservation 1841 * is called in such cases. 1842 * 1843 * In the normal case, vma_commit_reservation returns the same value 1844 * as the preceding vma_needs_reservation call. The only time this 1845 * is not the case is if a reserve map was changed between calls. It 1846 * is the responsibility of the caller to notice the difference and 1847 * take appropriate action. 1848 * 1849 * vma_add_reservation is used in error paths where a reservation must 1850 * be restored when a newly allocated huge page must be freed. It is 1851 * to be called after calling vma_needs_reservation to determine if a 1852 * reservation exists. 1853 */ 1854 enum vma_resv_mode { 1855 VMA_NEEDS_RESV, 1856 VMA_COMMIT_RESV, 1857 VMA_END_RESV, 1858 VMA_ADD_RESV, 1859 }; 1860 static long __vma_reservation_common(struct hstate *h, 1861 struct vm_area_struct *vma, unsigned long addr, 1862 enum vma_resv_mode mode) 1863 { 1864 struct resv_map *resv; 1865 pgoff_t idx; 1866 long ret; 1867 1868 resv = vma_resv_map(vma); 1869 if (!resv) 1870 return 1; 1871 1872 idx = vma_hugecache_offset(h, vma, addr); 1873 switch (mode) { 1874 case VMA_NEEDS_RESV: 1875 ret = region_chg(resv, idx, idx + 1); 1876 break; 1877 case VMA_COMMIT_RESV: 1878 ret = region_add(resv, idx, idx + 1); 1879 break; 1880 case VMA_END_RESV: 1881 region_abort(resv, idx, idx + 1); 1882 ret = 0; 1883 break; 1884 case VMA_ADD_RESV: 1885 if (vma->vm_flags & VM_MAYSHARE) 1886 ret = region_add(resv, idx, idx + 1); 1887 else { 1888 region_abort(resv, idx, idx + 1); 1889 ret = region_del(resv, idx, idx + 1); 1890 } 1891 break; 1892 default: 1893 BUG(); 1894 } 1895 1896 if (vma->vm_flags & VM_MAYSHARE) 1897 return ret; 1898 else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) { 1899 /* 1900 * In most cases, reserves always exist for private mappings. 1901 * However, a file associated with the mapping could have been 1902 * hole punched or truncated after reserves were consumed, and 1903 * a subsequent fault on such a range will not use reserves. 1904 * Subtle - The reserve map for private mappings has the 1905 * opposite meaning from that of shared mappings. If NO 1906 * entry is in the reserve map, it means a reservation exists. 1907 * If an entry exists in the reserve map, it means the 1908 * reservation has already been consumed. As a result, the 1909 * return value of this routine is the opposite of the 1910 * value returned from reserve map manipulation routines above. 1911 */ 1912 if (ret) 1913 return 0; 1914 else 1915 return 1; 1916 } 1917 else 1918 return ret < 0 ?
ret : 0; 1919 } 1920 1921 static long vma_needs_reservation(struct hstate *h, 1922 struct vm_area_struct *vma, unsigned long addr) 1923 { 1924 return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV); 1925 } 1926 1927 static long vma_commit_reservation(struct hstate *h, 1928 struct vm_area_struct *vma, unsigned long addr) 1929 { 1930 return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV); 1931 } 1932 1933 static void vma_end_reservation(struct hstate *h, 1934 struct vm_area_struct *vma, unsigned long addr) 1935 { 1936 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV); 1937 } 1938 1939 static long vma_add_reservation(struct hstate *h, 1940 struct vm_area_struct *vma, unsigned long addr) 1941 { 1942 return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV); 1943 } 1944 1945 /* 1946 * This routine is called to restore a reservation on error paths. In the 1947 * specific error paths, a huge page was allocated (via alloc_huge_page) 1948 * and is about to be freed. If a reservation for the page existed, 1949 * alloc_huge_page would have consumed the reservation and set PagePrivate 1950 * in the newly allocated page. When the page is freed via free_huge_page, 1951 * the global reservation count will be incremented if PagePrivate is set. 1952 * However, free_huge_page can not adjust the reserve map. Adjust the 1953 * reserve map here to be consistent with global reserve count adjustments 1954 * to be made by free_huge_page. 1955 */ 1956 static void restore_reserve_on_error(struct hstate *h, 1957 struct vm_area_struct *vma, unsigned long address, 1958 struct page *page) 1959 { 1960 if (unlikely(PagePrivate(page))) { 1961 long rc = vma_needs_reservation(h, vma, address); 1962 1963 if (unlikely(rc < 0)) { 1964 /* 1965 * Rare out of memory condition in reserve map 1966 * manipulation. Clear PagePrivate so that 1967 * global reserve count will not be incremented 1968 * by free_huge_page. This will make it appear 1969 * as though the reservation for this page was 1970 * consumed. This may prevent the task from 1971 * faulting in the page at a later time. This 1972 * is better than inconsistent global huge page 1973 * accounting of reserve counts. 1974 */ 1975 ClearPagePrivate(page); 1976 } else if (rc) { 1977 rc = vma_add_reservation(h, vma, address); 1978 if (unlikely(rc < 0)) 1979 /* 1980 * See above comment about rare out of 1981 * memory condition. 1982 */ 1983 ClearPagePrivate(page); 1984 } else 1985 vma_end_reservation(h, vma, address); 1986 } 1987 } 1988 1989 struct page *alloc_huge_page(struct vm_area_struct *vma, 1990 unsigned long addr, int avoid_reserve) 1991 { 1992 struct hugepage_subpool *spool = subpool_vma(vma); 1993 struct hstate *h = hstate_vma(vma); 1994 struct page *page; 1995 long map_chg, map_commit; 1996 long gbl_chg; 1997 int ret, idx; 1998 struct hugetlb_cgroup *h_cg; 1999 2000 idx = hstate_index(h); 2001 /* 2002 * Examine the region/reserve map to determine if the process 2003 * has a reservation for the page to be allocated. A return 2004 * code of zero indicates a reservation exists (no change). 2005 */ 2006 map_chg = gbl_chg = vma_needs_reservation(h, vma, addr); 2007 if (map_chg < 0) 2008 return ERR_PTR(-ENOMEM); 2009 2010 /* 2011 * Processes that did not create the mapping will have no 2012 * reserves as indicated by the region/reserve map. Check 2013 * that the allocation will not exceed the subpool limit. 2014 * Allocations for MAP_NORESERVE mappings also need to be 2015 * checked against any subpool limit. 
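 *
 * To illustrate with assumed values: map_chg == 0 means an entry for
 * this page already exists in the reserve map, so nothing extra is
 * charged here; map_chg == 1 means no reservation exists and one page
 * must be charged against the subpool (and possibly the global pool)
 * before the allocation may proceed.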
2016 */ 2017 if (map_chg || avoid_reserve) { 2018 gbl_chg = hugepage_subpool_get_pages(spool, 1); 2019 if (gbl_chg < 0) { 2020 vma_end_reservation(h, vma, addr); 2021 return ERR_PTR(-ENOSPC); 2022 } 2023 2024 /* 2025 * Even though there was no reservation in the region/reserve 2026 * map, there could be reservations associated with the 2027 * subpool that can be used. This would be indicated if the 2028 * return value of hugepage_subpool_get_pages() is zero. 2029 * However, if avoid_reserve is specified we still avoid even 2030 * the subpool reservations. 2031 */ 2032 if (avoid_reserve) 2033 gbl_chg = 1; 2034 } 2035 2036 ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg); 2037 if (ret) 2038 goto out_subpool_put; 2039 2040 spin_lock(&hugetlb_lock); 2041 /* 2042 * gbl_chg is passed to indicate whether or not a page must be taken 2043 * from the global free pool (global change). gbl_chg == 0 indicates 2044 * a reservation exists for the allocation. 2045 */ 2046 page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg); 2047 if (!page) { 2048 spin_unlock(&hugetlb_lock); 2049 page = __alloc_buddy_huge_page_with_mpol(h, vma, addr); 2050 if (!page) 2051 goto out_uncharge_cgroup; 2052 if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) { 2053 SetPagePrivate(page); 2054 h->resv_huge_pages--; 2055 } 2056 spin_lock(&hugetlb_lock); 2057 list_move(&page->lru, &h->hugepage_activelist); 2058 /* Fall through */ 2059 } 2060 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page); 2061 spin_unlock(&hugetlb_lock); 2062 2063 set_page_private(page, (unsigned long)spool); 2064 2065 map_commit = vma_commit_reservation(h, vma, addr); 2066 if (unlikely(map_chg > map_commit)) { 2067 /* 2068 * The page was added to the reservation map between 2069 * vma_needs_reservation and vma_commit_reservation. 2070 * This indicates a race with hugetlb_reserve_pages. 2071 * Adjust for the subpool count incremented above AND 2072 * in hugetlb_reserve_pages for the same page. Also, 2073 * the reservation count added in hugetlb_reserve_pages 2074 * no longer applies. 2075 */ 2076 long rsv_adjust; 2077 2078 rsv_adjust = hugepage_subpool_put_pages(spool, 1); 2079 hugetlb_acct_memory(h, -rsv_adjust); 2080 } 2081 return page; 2082 2083 out_uncharge_cgroup: 2084 hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg); 2085 out_subpool_put: 2086 if (map_chg || avoid_reserve) 2087 hugepage_subpool_put_pages(spool, 1); 2088 vma_end_reservation(h, vma, addr); 2089 return ERR_PTR(-ENOSPC); 2090 } 2091 2092 /* 2093 * alloc_huge_page()'s wrapper which simply returns the page if allocation 2094 * succeeds, otherwise NULL. This function is called from new_vma_page(), 2095 * where no ERR_PTR value is expected to be returned.
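 *
 * Illustrative use (a sketch with a hypothetical caller, not a
 * verbatim excerpt):
 *
 *	page = alloc_huge_page_noerr(vma, addr, 0);
 *	if (!page)
 *		... handle as an ordinary allocation failure ...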
2096 */ 2097 struct page *alloc_huge_page_noerr(struct vm_area_struct *vma, 2098 unsigned long addr, int avoid_reserve) 2099 { 2100 struct page *page = alloc_huge_page(vma, addr, avoid_reserve); 2101 if (IS_ERR(page)) 2102 page = NULL; 2103 return page; 2104 } 2105 2106 int __weak alloc_bootmem_huge_page(struct hstate *h) 2107 { 2108 struct huge_bootmem_page *m; 2109 int nr_nodes, node; 2110 2111 for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) { 2112 void *addr; 2113 2114 addr = memblock_virt_alloc_try_nid_nopanic( 2115 huge_page_size(h), huge_page_size(h), 2116 0, BOOTMEM_ALLOC_ACCESSIBLE, node); 2117 if (addr) { 2118 /* 2119 * Use the beginning of the huge page to store the 2120 * huge_bootmem_page struct (until gather_bootmem 2121 * puts them into the mem_map). 2122 */ 2123 m = addr; 2124 goto found; 2125 } 2126 } 2127 return 0; 2128 2129 found: 2130 BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h))); 2131 /* Put them into a private list first because mem_map is not up yet */ 2132 list_add(&m->list, &huge_boot_pages); 2133 m->hstate = h; 2134 return 1; 2135 } 2136 2137 static void __init prep_compound_huge_page(struct page *page, 2138 unsigned int order) 2139 { 2140 if (unlikely(order > (MAX_ORDER - 1))) 2141 prep_compound_gigantic_page(page, order); 2142 else 2143 prep_compound_page(page, order); 2144 } 2145 2146 /* Put bootmem huge pages into the standard lists after mem_map is up */ 2147 static void __init gather_bootmem_prealloc(void) 2148 { 2149 struct huge_bootmem_page *m; 2150 2151 list_for_each_entry(m, &huge_boot_pages, list) { 2152 struct hstate *h = m->hstate; 2153 struct page *page; 2154 2155 #ifdef CONFIG_HIGHMEM 2156 page = pfn_to_page(m->phys >> PAGE_SHIFT); 2157 memblock_free_late(__pa(m), 2158 sizeof(struct huge_bootmem_page)); 2159 #else 2160 page = virt_to_page(m); 2161 #endif 2162 WARN_ON(page_count(page) != 1); 2163 prep_compound_huge_page(page, h->order); 2164 WARN_ON(PageReserved(page)); 2165 prep_new_huge_page(h, page, page_to_nid(page)); 2166 /* 2167 * If we had gigantic hugepages allocated at boot time, we need 2168 * to restore the 'stolen' pages to totalram_pages in order to 2169 * fix confusing memory reports from free(1) and other 2170 * side effects, like CommitLimit going negative.
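 *
 * For example (sizes assumed: 1 GB gigantic pages on a 4 KB base-page
 * system), each boot-allocated gigantic page hides 1 << h->order ==
 * 262144 base pages; adjust_managed_page_count(page, 1 << h->order)
 * below returns them to totalram_pages.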
2171 */ 2172 if (hstate_is_gigantic(h)) 2173 adjust_managed_page_count(page, 1 << h->order); 2174 } 2175 } 2176 2177 static void __init hugetlb_hstate_alloc_pages(struct hstate *h) 2178 { 2179 unsigned long i; 2180 2181 for (i = 0; i < h->max_huge_pages; ++i) { 2182 if (hstate_is_gigantic(h)) { 2183 if (!alloc_bootmem_huge_page(h)) 2184 break; 2185 } else if (!alloc_fresh_huge_page(h, 2186 &node_states[N_MEMORY])) 2187 break; 2188 } 2189 h->max_huge_pages = i; 2190 } 2191 2192 static void __init hugetlb_init_hstates(void) 2193 { 2194 struct hstate *h; 2195 2196 for_each_hstate(h) { 2197 if (minimum_order > huge_page_order(h)) 2198 minimum_order = huge_page_order(h); 2199 2200 /* oversize hugepages were init'ed in early boot */ 2201 if (!hstate_is_gigantic(h)) 2202 hugetlb_hstate_alloc_pages(h); 2203 } 2204 VM_BUG_ON(minimum_order == UINT_MAX); 2205 } 2206 2207 static char * __init memfmt(char *buf, unsigned long n) 2208 { 2209 if (n >= (1UL << 30)) 2210 sprintf(buf, "%lu GB", n >> 30); 2211 else if (n >= (1UL << 20)) 2212 sprintf(buf, "%lu MB", n >> 20); 2213 else 2214 sprintf(buf, "%lu KB", n >> 10); 2215 return buf; 2216 } 2217 2218 static void __init report_hugepages(void) 2219 { 2220 struct hstate *h; 2221 2222 for_each_hstate(h) { 2223 char buf[32]; 2224 pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n", 2225 memfmt(buf, huge_page_size(h)), 2226 h->free_huge_pages); 2227 } 2228 } 2229 2230 #ifdef CONFIG_HIGHMEM 2231 static void try_to_free_low(struct hstate *h, unsigned long count, 2232 nodemask_t *nodes_allowed) 2233 { 2234 int i; 2235 2236 if (hstate_is_gigantic(h)) 2237 return; 2238 2239 for_each_node_mask(i, *nodes_allowed) { 2240 struct page *page, *next; 2241 struct list_head *freel = &h->hugepage_freelists[i]; 2242 list_for_each_entry_safe(page, next, freel, lru) { 2243 if (count >= h->nr_huge_pages) 2244 return; 2245 if (PageHighMem(page)) 2246 continue; 2247 list_del(&page->lru); 2248 update_and_free_page(h, page); 2249 h->free_huge_pages--; 2250 h->free_huge_pages_node[page_to_nid(page)]--; 2251 } 2252 } 2253 } 2254 #else 2255 static inline void try_to_free_low(struct hstate *h, unsigned long count, 2256 nodemask_t *nodes_allowed) 2257 { 2258 } 2259 #endif 2260 2261 /* 2262 * Increment or decrement surplus_huge_pages. Keep node-specific counters 2263 * balanced by operating on them in a round-robin fashion. 2264 * Returns 1 if an adjustment was made. 2265 */ 2266 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed, 2267 int delta) 2268 { 2269 int nr_nodes, node; 2270 2271 VM_BUG_ON(delta != -1 && delta != 1); 2272 2273 if (delta < 0) { 2274 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) { 2275 if (h->surplus_huge_pages_node[node]) 2276 goto found; 2277 } 2278 } else { 2279 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { 2280 if (h->surplus_huge_pages_node[node] < 2281 h->nr_huge_pages_node[node]) 2282 goto found; 2283 } 2284 } 2285 return 0; 2286 2287 found: 2288 h->surplus_huge_pages += delta; 2289 h->surplus_huge_pages_node[node] += delta; 2290 return 1; 2291 } 2292 2293 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages) 2294 static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count, 2295 nodemask_t *nodes_allowed) 2296 { 2297 unsigned long min_count, ret; 2298 2299 if (hstate_is_gigantic(h) && !gigantic_page_supported()) 2300 return h->max_huge_pages; 2301 2302 /* 2303 * Increase the pool size 2304 * First take pages out of surplus state. 
Then make up the 2305 * remaining difference by allocating fresh huge pages. 2306 * 2307 * We might race with __alloc_buddy_huge_page() here and be unable 2308 * to convert a surplus huge page to a normal huge page. That is 2309 * not critical, though; it just means the overall size of the 2310 * pool might be one hugepage larger than it needs to be, but 2311 * within all the constraints specified by the sysctls. 2312 */ 2313 spin_lock(&hugetlb_lock); 2314 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) { 2315 if (!adjust_pool_surplus(h, nodes_allowed, -1)) 2316 break; 2317 } 2318 2319 while (count > persistent_huge_pages(h)) { 2320 /* 2321 * If this allocation races such that we no longer need the 2322 * page, free_huge_page will handle it by freeing the page 2323 * and reducing the surplus. 2324 */ 2325 spin_unlock(&hugetlb_lock); 2326 2327 /* yield cpu to avoid soft lockup */ 2328 cond_resched(); 2329 2330 if (hstate_is_gigantic(h)) 2331 ret = alloc_fresh_gigantic_page(h, nodes_allowed); 2332 else 2333 ret = alloc_fresh_huge_page(h, nodes_allowed); 2334 spin_lock(&hugetlb_lock); 2335 if (!ret) 2336 goto out; 2337 2338 /* Bail for signals. Probably ctrl-c from user */ 2339 if (signal_pending(current)) 2340 goto out; 2341 } 2342 2343 /* 2344 * Decrease the pool size 2345 * First return free pages to the buddy allocator (being careful 2346 * to keep enough around to satisfy reservations). Then place 2347 * pages into surplus state as needed so the pool will shrink 2348 * to the desired size as pages become free. 2349 * 2350 * By placing pages into the surplus state independent of the 2351 * overcommit value, we are allowing the surplus pool size to 2352 * exceed overcommit. There are few sane options here. Since 2353 * __alloc_buddy_huge_page() is checking the global counter, 2354 * though, we'll note that we're not allowed to exceed surplus 2355 * and won't grow the pool anywhere else. Not until one of the 2356 * sysctls is changed, or the surplus pages go out of use.
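 *
 * Worked example with assumed values: if resv_huge_pages == 10,
 * nr_huge_pages == 50 and free_huge_pages == 30, then 20 pages are in
 * use and min_count = 10 + 50 - 30 = 30, i.e. the pool may shrink to
 * 30 pages and still cover the 20 in-use pages plus the 10 reserved
 * ones.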
2357 */ 2358 min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages; 2359 min_count = max(count, min_count); 2360 try_to_free_low(h, min_count, nodes_allowed); 2361 while (min_count < persistent_huge_pages(h)) { 2362 if (!free_pool_huge_page(h, nodes_allowed, 0)) 2363 break; 2364 cond_resched_lock(&hugetlb_lock); 2365 } 2366 while (count < persistent_huge_pages(h)) { 2367 if (!adjust_pool_surplus(h, nodes_allowed, 1)) 2368 break; 2369 } 2370 out: 2371 ret = persistent_huge_pages(h); 2372 spin_unlock(&hugetlb_lock); 2373 return ret; 2374 } 2375 2376 #define HSTATE_ATTR_RO(_name) \ 2377 static struct kobj_attribute _name##_attr = __ATTR_RO(_name) 2378 2379 #define HSTATE_ATTR(_name) \ 2380 static struct kobj_attribute _name##_attr = \ 2381 __ATTR(_name, 0644, _name##_show, _name##_store) 2382 2383 static struct kobject *hugepages_kobj; 2384 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; 2385 2386 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp); 2387 2388 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp) 2389 { 2390 int i; 2391 2392 for (i = 0; i < HUGE_MAX_HSTATE; i++) 2393 if (hstate_kobjs[i] == kobj) { 2394 if (nidp) 2395 *nidp = NUMA_NO_NODE; 2396 return &hstates[i]; 2397 } 2398 2399 return kobj_to_node_hstate(kobj, nidp); 2400 } 2401 2402 static ssize_t nr_hugepages_show_common(struct kobject *kobj, 2403 struct kobj_attribute *attr, char *buf) 2404 { 2405 struct hstate *h; 2406 unsigned long nr_huge_pages; 2407 int nid; 2408 2409 h = kobj_to_hstate(kobj, &nid); 2410 if (nid == NUMA_NO_NODE) 2411 nr_huge_pages = h->nr_huge_pages; 2412 else 2413 nr_huge_pages = h->nr_huge_pages_node[nid]; 2414 2415 return sprintf(buf, "%lu\n", nr_huge_pages); 2416 } 2417 2418 static ssize_t __nr_hugepages_store_common(bool obey_mempolicy, 2419 struct hstate *h, int nid, 2420 unsigned long count, size_t len) 2421 { 2422 int err; 2423 NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY); 2424 2425 if (hstate_is_gigantic(h) && !gigantic_page_supported()) { 2426 err = -EINVAL; 2427 goto out; 2428 } 2429 2430 if (nid == NUMA_NO_NODE) { 2431 /* 2432 * global hstate attribute 2433 */ 2434 if (!(obey_mempolicy && 2435 init_nodemask_of_mempolicy(nodes_allowed))) { 2436 NODEMASK_FREE(nodes_allowed); 2437 nodes_allowed = &node_states[N_MEMORY]; 2438 } 2439 } else if (nodes_allowed) { 2440 /* 2441 * per node hstate attribute: adjust count to global, 2442 * but restrict alloc/free to the specified node. 
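 *
 * Worked example with assumed values: writing 8 to node 1's
 * nr_hugepages when nr_huge_pages == 20 and nr_huge_pages_node[1] == 5
 * gives count = 8 + (20 - 5) = 23, so node 1 grows from 5 to 8 pages
 * while every other node keeps its current allocation.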
2443 */ 2444 count += h->nr_huge_pages - h->nr_huge_pages_node[nid]; 2445 init_nodemask_of_node(nodes_allowed, nid); 2446 } else 2447 nodes_allowed = &node_states[N_MEMORY]; 2448 2449 h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed); 2450 2451 if (nodes_allowed != &node_states[N_MEMORY]) 2452 NODEMASK_FREE(nodes_allowed); 2453 2454 return len; 2455 out: 2456 NODEMASK_FREE(nodes_allowed); 2457 return err; 2458 } 2459 2460 static ssize_t nr_hugepages_store_common(bool obey_mempolicy, 2461 struct kobject *kobj, const char *buf, 2462 size_t len) 2463 { 2464 struct hstate *h; 2465 unsigned long count; 2466 int nid; 2467 int err; 2468 2469 err = kstrtoul(buf, 10, &count); 2470 if (err) 2471 return err; 2472 2473 h = kobj_to_hstate(kobj, &nid); 2474 return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len); 2475 } 2476 2477 static ssize_t nr_hugepages_show(struct kobject *kobj, 2478 struct kobj_attribute *attr, char *buf) 2479 { 2480 return nr_hugepages_show_common(kobj, attr, buf); 2481 } 2482 2483 static ssize_t nr_hugepages_store(struct kobject *kobj, 2484 struct kobj_attribute *attr, const char *buf, size_t len) 2485 { 2486 return nr_hugepages_store_common(false, kobj, buf, len); 2487 } 2488 HSTATE_ATTR(nr_hugepages); 2489 2490 #ifdef CONFIG_NUMA 2491 2492 /* 2493 * hstate attribute for optionally mempolicy-based constraint on persistent 2494 * huge page alloc/free. 2495 */ 2496 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj, 2497 struct kobj_attribute *attr, char *buf) 2498 { 2499 return nr_hugepages_show_common(kobj, attr, buf); 2500 } 2501 2502 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj, 2503 struct kobj_attribute *attr, const char *buf, size_t len) 2504 { 2505 return nr_hugepages_store_common(true, kobj, buf, len); 2506 } 2507 HSTATE_ATTR(nr_hugepages_mempolicy); 2508 #endif 2509 2510 2511 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj, 2512 struct kobj_attribute *attr, char *buf) 2513 { 2514 struct hstate *h = kobj_to_hstate(kobj, NULL); 2515 return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages); 2516 } 2517 2518 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj, 2519 struct kobj_attribute *attr, const char *buf, size_t count) 2520 { 2521 int err; 2522 unsigned long input; 2523 struct hstate *h = kobj_to_hstate(kobj, NULL); 2524 2525 if (hstate_is_gigantic(h)) 2526 return -EINVAL; 2527 2528 err = kstrtoul(buf, 10, &input); 2529 if (err) 2530 return err; 2531 2532 spin_lock(&hugetlb_lock); 2533 h->nr_overcommit_huge_pages = input; 2534 spin_unlock(&hugetlb_lock); 2535 2536 return count; 2537 } 2538 HSTATE_ATTR(nr_overcommit_hugepages); 2539 2540 static ssize_t free_hugepages_show(struct kobject *kobj, 2541 struct kobj_attribute *attr, char *buf) 2542 { 2543 struct hstate *h; 2544 unsigned long free_huge_pages; 2545 int nid; 2546 2547 h = kobj_to_hstate(kobj, &nid); 2548 if (nid == NUMA_NO_NODE) 2549 free_huge_pages = h->free_huge_pages; 2550 else 2551 free_huge_pages = h->free_huge_pages_node[nid]; 2552 2553 return sprintf(buf, "%lu\n", free_huge_pages); 2554 } 2555 HSTATE_ATTR_RO(free_hugepages); 2556 2557 static ssize_t resv_hugepages_show(struct kobject *kobj, 2558 struct kobj_attribute *attr, char *buf) 2559 { 2560 struct hstate *h = kobj_to_hstate(kobj, NULL); 2561 return sprintf(buf, "%lu\n", h->resv_huge_pages); 2562 } 2563 HSTATE_ATTR_RO(resv_hugepages); 2564 2565 static ssize_t surplus_hugepages_show(struct kobject *kobj, 2566 struct kobj_attribute *attr, char *buf) 2567 { 
2568 struct hstate *h; 2569 unsigned long surplus_huge_pages; 2570 int nid; 2571 2572 h = kobj_to_hstate(kobj, &nid); 2573 if (nid == NUMA_NO_NODE) 2574 surplus_huge_pages = h->surplus_huge_pages; 2575 else 2576 surplus_huge_pages = h->surplus_huge_pages_node[nid]; 2577 2578 return sprintf(buf, "%lu\n", surplus_huge_pages); 2579 } 2580 HSTATE_ATTR_RO(surplus_hugepages); 2581 2582 static struct attribute *hstate_attrs[] = { 2583 &nr_hugepages_attr.attr, 2584 &nr_overcommit_hugepages_attr.attr, 2585 &free_hugepages_attr.attr, 2586 &resv_hugepages_attr.attr, 2587 &surplus_hugepages_attr.attr, 2588 #ifdef CONFIG_NUMA 2589 &nr_hugepages_mempolicy_attr.attr, 2590 #endif 2591 NULL, 2592 }; 2593 2594 static struct attribute_group hstate_attr_group = { 2595 .attrs = hstate_attrs, 2596 }; 2597 2598 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent, 2599 struct kobject **hstate_kobjs, 2600 struct attribute_group *hstate_attr_group) 2601 { 2602 int retval; 2603 int hi = hstate_index(h); 2604 2605 hstate_kobjs[hi] = kobject_create_and_add(h->name, parent); 2606 if (!hstate_kobjs[hi]) 2607 return -ENOMEM; 2608 2609 retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group); 2610 if (retval) 2611 kobject_put(hstate_kobjs[hi]); 2612 2613 return retval; 2614 } 2615 2616 static void __init hugetlb_sysfs_init(void) 2617 { 2618 struct hstate *h; 2619 int err; 2620 2621 hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj); 2622 if (!hugepages_kobj) 2623 return; 2624 2625 for_each_hstate(h) { 2626 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj, 2627 hstate_kobjs, &hstate_attr_group); 2628 if (err) 2629 pr_err("Hugetlb: Unable to add hstate %s", h->name); 2630 } 2631 } 2632 2633 #ifdef CONFIG_NUMA 2634 2635 /* 2636 * node_hstate/s - associate per node hstate attributes, via their kobjects, 2637 * with node devices in node_devices[] using a parallel array. The array 2638 * index of a node device or _hstate == node id. 2639 * This is here to avoid any static dependency of the node device driver, in 2640 * the base kernel, on the hugetlb module. 2641 */ 2642 struct node_hstate { 2643 struct kobject *hugepages_kobj; 2644 struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; 2645 }; 2646 static struct node_hstate node_hstates[MAX_NUMNODES]; 2647 2648 /* 2649 * A subset of global hstate attributes for node devices 2650 */ 2651 static struct attribute *per_node_hstate_attrs[] = { 2652 &nr_hugepages_attr.attr, 2653 &free_hugepages_attr.attr, 2654 &surplus_hugepages_attr.attr, 2655 NULL, 2656 }; 2657 2658 static struct attribute_group per_node_hstate_attr_group = { 2659 .attrs = per_node_hstate_attrs, 2660 }; 2661 2662 /* 2663 * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj. 2664 * Returns node id via non-NULL nidp. 2665 */ 2666 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp) 2667 { 2668 int nid; 2669 2670 for (nid = 0; nid < nr_node_ids; nid++) { 2671 struct node_hstate *nhs = &node_hstates[nid]; 2672 int i; 2673 for (i = 0; i < HUGE_MAX_HSTATE; i++) 2674 if (nhs->hstate_kobjs[i] == kobj) { 2675 if (nidp) 2676 *nidp = nid; 2677 return &hstates[i]; 2678 } 2679 } 2680 2681 BUG(); 2682 return NULL; 2683 } 2684 2685 /* 2686 * Unregister hstate attributes from a single node device. 2687 * No-op if no hstate attributes attached. 
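 *
 * (For orientation: the files removed here are the per-node sysfs
 * attributes such as
 * /sys/devices/system/node/nodeN/hugepages/hugepages-2048kB/nr_hugepages;
 * the exact set is given by per_node_hstate_attrs above, and the
 * hugepages-2048kB name is just one example of an hstate name built
 * by hugetlb_add_hstate().)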
2688 */ 2689 static void hugetlb_unregister_node(struct node *node) 2690 { 2691 struct hstate *h; 2692 struct node_hstate *nhs = &node_hstates[node->dev.id]; 2693 2694 if (!nhs->hugepages_kobj) 2695 return; /* no hstate attributes */ 2696 2697 for_each_hstate(h) { 2698 int idx = hstate_index(h); 2699 if (nhs->hstate_kobjs[idx]) { 2700 kobject_put(nhs->hstate_kobjs[idx]); 2701 nhs->hstate_kobjs[idx] = NULL; 2702 } 2703 } 2704 2705 kobject_put(nhs->hugepages_kobj); 2706 nhs->hugepages_kobj = NULL; 2707 } 2708 2709 2710 /* 2711 * Register hstate attributes for a single node device. 2712 * No-op if attributes already registered. 2713 */ 2714 static void hugetlb_register_node(struct node *node) 2715 { 2716 struct hstate *h; 2717 struct node_hstate *nhs = &node_hstates[node->dev.id]; 2718 int err; 2719 2720 if (nhs->hugepages_kobj) 2721 return; /* already allocated */ 2722 2723 nhs->hugepages_kobj = kobject_create_and_add("hugepages", 2724 &node->dev.kobj); 2725 if (!nhs->hugepages_kobj) 2726 return; 2727 2728 for_each_hstate(h) { 2729 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj, 2730 nhs->hstate_kobjs, 2731 &per_node_hstate_attr_group); 2732 if (err) { 2733 pr_err("Hugetlb: Unable to add hstate %s for node %d\n", 2734 h->name, node->dev.id); 2735 hugetlb_unregister_node(node); 2736 break; 2737 } 2738 } 2739 } 2740 2741 /* 2742 * hugetlb init time: register hstate attributes for all registered node 2743 * devices of nodes that have memory. All on-line nodes should have 2744 * registered their associated device by this time. 2745 */ 2746 static void __init hugetlb_register_all_nodes(void) 2747 { 2748 int nid; 2749 2750 for_each_node_state(nid, N_MEMORY) { 2751 struct node *node = node_devices[nid]; 2752 if (node->dev.id == nid) 2753 hugetlb_register_node(node); 2754 } 2755 2756 /* 2757 * Let the node device driver know we're here so it can 2758 * [un]register hstate attributes on node hotplug. 2759 */ 2760 register_hugetlbfs_with_node(hugetlb_register_node, 2761 hugetlb_unregister_node); 2762 } 2763 #else /* !CONFIG_NUMA */ 2764 2765 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp) 2766 { 2767 BUG(); 2768 if (nidp) 2769 *nidp = -1; 2770 return NULL; 2771 } 2772 2773 static void hugetlb_register_all_nodes(void) { } 2774 2775 #endif 2776 2777 static int __init hugetlb_init(void) 2778 { 2779 int i; 2780 2781 if (!hugepages_supported()) 2782 return 0; 2783 2784 if (!size_to_hstate(default_hstate_size)) { 2785 default_hstate_size = HPAGE_SIZE; 2786 if (!size_to_hstate(default_hstate_size)) 2787 hugetlb_add_hstate(HUGETLB_PAGE_ORDER); 2788 } 2789 default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size)); 2790 if (default_hstate_max_huge_pages) { 2791 if (!default_hstate.max_huge_pages) 2792 default_hstate.max_huge_pages = default_hstate_max_huge_pages; 2793 } 2794 2795 hugetlb_init_hstates(); 2796 gather_bootmem_prealloc(); 2797 report_hugepages(); 2798 2799 hugetlb_sysfs_init(); 2800 hugetlb_register_all_nodes(); 2801 hugetlb_cgroup_file_init(); 2802 2803 #ifdef CONFIG_SMP 2804 num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus()); 2805 #else 2806 num_fault_mutexes = 1; 2807 #endif 2808 hugetlb_fault_mutex_table = 2809 kmalloc(sizeof(struct mutex) * num_fault_mutexes, GFP_KERNEL); 2810 BUG_ON(!hugetlb_fault_mutex_table); 2811 2812 for (i = 0; i < num_fault_mutexes; i++) 2813 mutex_init(&hugetlb_fault_mutex_table[i]); 2814 return 0; 2815 } 2816 subsys_initcall(hugetlb_init); 2817 2818 /* Should be called on processing a hugepagesz=... 
option */ 2819 void __init hugetlb_bad_size(void) 2820 { 2821 parsed_valid_hugepagesz = false; 2822 } 2823 2824 void __init hugetlb_add_hstate(unsigned int order) 2825 { 2826 struct hstate *h; 2827 unsigned long i; 2828 2829 if (size_to_hstate(PAGE_SIZE << order)) { 2830 pr_warn("hugepagesz= specified twice, ignoring\n"); 2831 return; 2832 } 2833 BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE); 2834 BUG_ON(order == 0); 2835 h = &hstates[hugetlb_max_hstate++]; 2836 h->order = order; 2837 h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1); 2838 h->nr_huge_pages = 0; 2839 h->free_huge_pages = 0; 2840 for (i = 0; i < MAX_NUMNODES; ++i) 2841 INIT_LIST_HEAD(&h->hugepage_freelists[i]); 2842 INIT_LIST_HEAD(&h->hugepage_activelist); 2843 h->next_nid_to_alloc = first_memory_node; 2844 h->next_nid_to_free = first_memory_node; 2845 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB", 2846 huge_page_size(h)/1024); 2847 2848 parsed_hstate = h; 2849 } 2850 2851 static int __init hugetlb_nrpages_setup(char *s) 2852 { 2853 unsigned long *mhp; 2854 static unsigned long *last_mhp; 2855 2856 if (!parsed_valid_hugepagesz) { 2857 pr_warn("hugepages = %s preceded by " 2858 "an unsupported hugepagesz, ignoring\n", s); 2859 parsed_valid_hugepagesz = true; 2860 return 1; 2861 } 2862 /* 2863 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet, 2864 * so this hugepages= parameter goes to the "default hstate". 2865 */ 2866 else if (!hugetlb_max_hstate) 2867 mhp = &default_hstate_max_huge_pages; 2868 else 2869 mhp = &parsed_hstate->max_huge_pages; 2870 2871 if (mhp == last_mhp) { 2872 pr_warn("hugepages= specified twice without interleaving hugepagesz=, ignoring\n"); 2873 return 1; 2874 } 2875 2876 if (sscanf(s, "%lu", mhp) <= 0) 2877 *mhp = 0; 2878 2879 /* 2880 * Global state is always initialized later in hugetlb_init. 2881 * But we need to allocate >= MAX_ORDER hstates here early to still 2882 * use the bootmem allocator. 
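 *
 * Example (an illustrative command line, not the only valid form):
 *
 *	default_hugepagesz=1G hugepagesz=1G hugepages=4 hugepagesz=2M hugepages=512
 *
 * On x86 the 1G hstate has order >= MAX_ORDER, so its four pages are
 * carved out of bootmem right here, while the 2M pages are allocated
 * later, from hugetlb_init_hstates().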
2883 */ 2884 if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER) 2885 hugetlb_hstate_alloc_pages(parsed_hstate); 2886 2887 last_mhp = mhp; 2888 2889 return 1; 2890 } 2891 __setup("hugepages=", hugetlb_nrpages_setup); 2892 2893 static int __init hugetlb_default_setup(char *s) 2894 { 2895 default_hstate_size = memparse(s, &s); 2896 return 1; 2897 } 2898 __setup("default_hugepagesz=", hugetlb_default_setup); 2899 2900 static unsigned int cpuset_mems_nr(unsigned int *array) 2901 { 2902 int node; 2903 unsigned int nr = 0; 2904 2905 for_each_node_mask(node, cpuset_current_mems_allowed) 2906 nr += array[node]; 2907 2908 return nr; 2909 } 2910 2911 #ifdef CONFIG_SYSCTL 2912 static int hugetlb_sysctl_handler_common(bool obey_mempolicy, 2913 struct ctl_table *table, int write, 2914 void __user *buffer, size_t *length, loff_t *ppos) 2915 { 2916 struct hstate *h = &default_hstate; 2917 unsigned long tmp = h->max_huge_pages; 2918 int ret; 2919 2920 if (!hugepages_supported()) 2921 return -EOPNOTSUPP; 2922 2923 table->data = &tmp; 2924 table->maxlen = sizeof(unsigned long); 2925 ret = proc_doulongvec_minmax(table, write, buffer, length, ppos); 2926 if (ret) 2927 goto out; 2928 2929 if (write) 2930 ret = __nr_hugepages_store_common(obey_mempolicy, h, 2931 NUMA_NO_NODE, tmp, *length); 2932 out: 2933 return ret; 2934 } 2935 2936 int hugetlb_sysctl_handler(struct ctl_table *table, int write, 2937 void __user *buffer, size_t *length, loff_t *ppos) 2938 { 2939 2940 return hugetlb_sysctl_handler_common(false, table, write, 2941 buffer, length, ppos); 2942 } 2943 2944 #ifdef CONFIG_NUMA 2945 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write, 2946 void __user *buffer, size_t *length, loff_t *ppos) 2947 { 2948 return hugetlb_sysctl_handler_common(true, table, write, 2949 buffer, length, ppos); 2950 } 2951 #endif /* CONFIG_NUMA */ 2952 2953 int hugetlb_overcommit_handler(struct ctl_table *table, int write, 2954 void __user *buffer, 2955 size_t *length, loff_t *ppos) 2956 { 2957 struct hstate *h = &default_hstate; 2958 unsigned long tmp; 2959 int ret; 2960 2961 if (!hugepages_supported()) 2962 return -EOPNOTSUPP; 2963 2964 tmp = h->nr_overcommit_huge_pages; 2965 2966 if (write && hstate_is_gigantic(h)) 2967 return -EINVAL; 2968 2969 table->data = &tmp; 2970 table->maxlen = sizeof(unsigned long); 2971 ret = proc_doulongvec_minmax(table, write, buffer, length, ppos); 2972 if (ret) 2973 goto out; 2974 2975 if (write) { 2976 spin_lock(&hugetlb_lock); 2977 h->nr_overcommit_huge_pages = tmp; 2978 spin_unlock(&hugetlb_lock); 2979 } 2980 out: 2981 return ret; 2982 } 2983 2984 #endif /* CONFIG_SYSCTL */ 2985 2986 void hugetlb_report_meminfo(struct seq_file *m) 2987 { 2988 struct hstate *h = &default_hstate; 2989 if (!hugepages_supported()) 2990 return; 2991 seq_printf(m, 2992 "HugePages_Total: %5lu\n" 2993 "HugePages_Free: %5lu\n" 2994 "HugePages_Rsvd: %5lu\n" 2995 "HugePages_Surp: %5lu\n" 2996 "Hugepagesize: %8lu kB\n", 2997 h->nr_huge_pages, 2998 h->free_huge_pages, 2999 h->resv_huge_pages, 3000 h->surplus_huge_pages, 3001 1UL << (huge_page_order(h) + PAGE_SHIFT - 10)); 3002 } 3003 3004 int hugetlb_report_node_meminfo(int nid, char *buf) 3005 { 3006 struct hstate *h = &default_hstate; 3007 if (!hugepages_supported()) 3008 return 0; 3009 return sprintf(buf, 3010 "Node %d HugePages_Total: %5u\n" 3011 "Node %d HugePages_Free: %5u\n" 3012 "Node %d HugePages_Surp: %5u\n", 3013 nid, h->nr_huge_pages_node[nid], 3014 nid, h->free_huge_pages_node[nid], 3015 nid, h->surplus_huge_pages_node[nid]); 3016 
} 3017 3018 void hugetlb_show_meminfo(void) 3019 { 3020 struct hstate *h; 3021 int nid; 3022 3023 if (!hugepages_supported()) 3024 return; 3025 3026 for_each_node_state(nid, N_MEMORY) 3027 for_each_hstate(h) 3028 pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n", 3029 nid, 3030 h->nr_huge_pages_node[nid], 3031 h->free_huge_pages_node[nid], 3032 h->surplus_huge_pages_node[nid], 3033 1UL << (huge_page_order(h) + PAGE_SHIFT - 10)); 3034 } 3035 3036 void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm) 3037 { 3038 seq_printf(m, "HugetlbPages:\t%8lu kB\n", 3039 atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10)); 3040 } 3041 3042 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */ 3043 unsigned long hugetlb_total_pages(void) 3044 { 3045 struct hstate *h; 3046 unsigned long nr_total_pages = 0; 3047 3048 for_each_hstate(h) 3049 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h); 3050 return nr_total_pages; 3051 } 3052 3053 static int hugetlb_acct_memory(struct hstate *h, long delta) 3054 { 3055 int ret = -ENOMEM; 3056 3057 spin_lock(&hugetlb_lock); 3058 /* 3059 * When cpuset is configured, it breaks the strict hugetlb page 3060 * reservation as the accounting is done on a global variable. Such 3061 * reservation is completely rubbish in the presence of cpuset because 3062 * the reservation is not checked against page availability for the 3063 * current cpuset. An application can still potentially be OOM'ed by the 3064 * kernel due to lack of free hugetlb pages in the cpuset that the task is in. 3065 * Attempting to enforce strict accounting with cpuset is almost 3066 * impossible (or too ugly) because cpuset is so fluid that 3067 * a task or memory node can be dynamically moved between cpusets. 3068 * 3069 * The change of semantics for shared hugetlb mapping with cpuset is 3070 * undesirable. However, in order to preserve some of the semantics, 3071 * we fall back to check against current free page availability as 3072 * a best attempt and hopefully to minimize the impact of changing 3073 * semantics that cpuset has. 3074 */ 3075 if (delta > 0) { 3076 if (gather_surplus_pages(h, delta) < 0) 3077 goto out; 3078 3079 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) { 3080 return_unused_surplus_pages(h, delta); 3081 goto out; 3082 } 3083 } 3084 3085 ret = 0; 3086 if (delta < 0) 3087 return_unused_surplus_pages(h, (unsigned long) -delta); 3088 3089 out: 3090 spin_unlock(&hugetlb_lock); 3091 return ret; 3092 } 3093 3094 static void hugetlb_vm_op_open(struct vm_area_struct *vma) 3095 { 3096 struct resv_map *resv = vma_resv_map(vma); 3097 3098 /* 3099 * This new VMA should share its sibling's reservation map if present. 3100 * The VMA will only ever have a valid reservation map pointer where 3101 * it is being copied for another still existing VMA. As that VMA 3102 * has a reference to the reservation map it cannot disappear until 3103 * after this open call completes. It is therefore safe to take a 3104 * new reference here without additional locking.
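 *
 * (For example, fork() duplicates the parent's VMAs and invokes this
 * open callback while the parent VMA, and therefore its reference on
 * the reservation map, is still alive.)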
3105 */ 3106 if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) 3107 kref_get(&resv->refs); 3108 } 3109 3110 static void hugetlb_vm_op_close(struct vm_area_struct *vma) 3111 { 3112 struct hstate *h = hstate_vma(vma); 3113 struct resv_map *resv = vma_resv_map(vma); 3114 struct hugepage_subpool *spool = subpool_vma(vma); 3115 unsigned long reserve, start, end; 3116 long gbl_reserve; 3117 3118 if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER)) 3119 return; 3120 3121 start = vma_hugecache_offset(h, vma, vma->vm_start); 3122 end = vma_hugecache_offset(h, vma, vma->vm_end); 3123 3124 reserve = (end - start) - region_count(resv, start, end); 3125 3126 kref_put(&resv->refs, resv_map_release); 3127 3128 if (reserve) { 3129 /* 3130 * Decrement reserve counts. The global reserve count may be 3131 * adjusted if the subpool has a minimum size. 3132 */ 3133 gbl_reserve = hugepage_subpool_put_pages(spool, reserve); 3134 hugetlb_acct_memory(h, -gbl_reserve); 3135 } 3136 } 3137 3138 /* 3139 * We cannot handle pagefaults against hugetlb pages at all. They cause 3140 * handle_mm_fault() to try to instantiate regular-sized pages in the 3141 * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get 3142 * this far. 3143 */ 3144 static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 3145 { 3146 BUG(); 3147 return 0; 3148 } 3149 3150 const struct vm_operations_struct hugetlb_vm_ops = { 3151 .fault = hugetlb_vm_op_fault, 3152 .open = hugetlb_vm_op_open, 3153 .close = hugetlb_vm_op_close, 3154 }; 3155 3156 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page, 3157 int writable) 3158 { 3159 pte_t entry; 3160 3161 if (writable) { 3162 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page, 3163 vma->vm_page_prot))); 3164 } else { 3165 entry = huge_pte_wrprotect(mk_huge_pte(page, 3166 vma->vm_page_prot)); 3167 } 3168 entry = pte_mkyoung(entry); 3169 entry = pte_mkhuge(entry); 3170 entry = arch_make_huge_pte(entry, vma, page, writable); 3171 3172 return entry; 3173 } 3174 3175 static void set_huge_ptep_writable(struct vm_area_struct *vma, 3176 unsigned long address, pte_t *ptep) 3177 { 3178 pte_t entry; 3179 3180 entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep))); 3181 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) 3182 update_mmu_cache(vma, address, ptep); 3183 } 3184 3185 static int is_hugetlb_entry_migration(pte_t pte) 3186 { 3187 swp_entry_t swp; 3188 3189 if (huge_pte_none(pte) || pte_present(pte)) 3190 return 0; 3191 swp = pte_to_swp_entry(pte); 3192 if (non_swap_entry(swp) && is_migration_entry(swp)) 3193 return 1; 3194 else 3195 return 0; 3196 } 3197 3198 static int is_hugetlb_entry_hwpoisoned(pte_t pte) 3199 { 3200 swp_entry_t swp; 3201 3202 if (huge_pte_none(pte) || pte_present(pte)) 3203 return 0; 3204 swp = pte_to_swp_entry(pte); 3205 if (non_swap_entry(swp) && is_hwpoison_entry(swp)) 3206 return 1; 3207 else 3208 return 0; 3209 } 3210 3211 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, 3212 struct vm_area_struct *vma) 3213 { 3214 pte_t *src_pte, *dst_pte, entry; 3215 struct page *ptepage; 3216 unsigned long addr; 3217 int cow; 3218 struct hstate *h = hstate_vma(vma); 3219 unsigned long sz = huge_page_size(h); 3220 unsigned long mmun_start; /* For mmu_notifiers */ 3221 unsigned long mmun_end; /* For mmu_notifiers */ 3222 int ret = 0; 3223 3224 cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; 3225 3226 mmun_start = vma->vm_start; 3227 mmun_end = vma->vm_end; 3228 if
(cow) 3229 mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end); 3230 3231 for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) { 3232 spinlock_t *src_ptl, *dst_ptl; 3233 src_pte = huge_pte_offset(src, addr); 3234 if (!src_pte) 3235 continue; 3236 dst_pte = huge_pte_alloc(dst, addr, sz); 3237 if (!dst_pte) { 3238 ret = -ENOMEM; 3239 break; 3240 } 3241 3242 /* If the pagetables are shared, don't copy or take references */ 3243 if (dst_pte == src_pte) 3244 continue; 3245 3246 dst_ptl = huge_pte_lock(h, dst, dst_pte); 3247 src_ptl = huge_pte_lockptr(h, src, src_pte); 3248 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 3249 entry = huge_ptep_get(src_pte); 3250 if (huge_pte_none(entry)) { /* skip none entry */ 3251 ; 3252 } else if (unlikely(is_hugetlb_entry_migration(entry) || 3253 is_hugetlb_entry_hwpoisoned(entry))) { 3254 swp_entry_t swp_entry = pte_to_swp_entry(entry); 3255 3256 if (is_write_migration_entry(swp_entry) && cow) { 3257 /* 3258 * COW mappings require pages in both 3259 * parent and child to be set to read. 3260 */ 3261 make_migration_entry_read(&swp_entry); 3262 entry = swp_entry_to_pte(swp_entry); 3263 set_huge_pte_at(src, addr, src_pte, entry); 3264 } 3265 set_huge_pte_at(dst, addr, dst_pte, entry); 3266 } else { 3267 if (cow) { 3268 huge_ptep_set_wrprotect(src, addr, src_pte); 3269 mmu_notifier_invalidate_range(src, mmun_start, 3270 mmun_end); 3271 } 3272 entry = huge_ptep_get(src_pte); 3273 ptepage = pte_page(entry); 3274 get_page(ptepage); 3275 page_dup_rmap(ptepage, true); 3276 set_huge_pte_at(dst, addr, dst_pte, entry); 3277 hugetlb_count_add(pages_per_huge_page(h), dst); 3278 } 3279 spin_unlock(src_ptl); 3280 spin_unlock(dst_ptl); 3281 } 3282 3283 if (cow) 3284 mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end); 3285 3286 return ret; 3287 } 3288 3289 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, 3290 unsigned long start, unsigned long end, 3291 struct page *ref_page) 3292 { 3293 struct mm_struct *mm = vma->vm_mm; 3294 unsigned long address; 3295 pte_t *ptep; 3296 pte_t pte; 3297 spinlock_t *ptl; 3298 struct page *page; 3299 struct hstate *h = hstate_vma(vma); 3300 unsigned long sz = huge_page_size(h); 3301 const unsigned long mmun_start = start; /* For mmu_notifiers */ 3302 const unsigned long mmun_end = end; /* For mmu_notifiers */ 3303 3304 WARN_ON(!is_vm_hugetlb_page(vma)); 3305 BUG_ON(start & ~huge_page_mask(h)); 3306 BUG_ON(end & ~huge_page_mask(h)); 3307 3308 /* 3309 * This is a hugetlb vma; all the pte entries should point 3310 * to huge pages. 3311 */ 3312 tlb_remove_check_page_size_change(tlb, sz); 3313 tlb_start_vma(tlb, vma); 3314 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 3315 address = start; 3316 for (; address < end; address += sz) { 3317 ptep = huge_pte_offset(mm, address); 3318 if (!ptep) 3319 continue; 3320 3321 ptl = huge_pte_lock(h, mm, ptep); 3322 if (huge_pmd_unshare(mm, &address, ptep)) { 3323 spin_unlock(ptl); 3324 continue; 3325 } 3326 3327 pte = huge_ptep_get(ptep); 3328 if (huge_pte_none(pte)) { 3329 spin_unlock(ptl); 3330 continue; 3331 } 3332 3333 /* 3334 * A migrating or HWPoisoned hugepage is already 3335 * unmapped and its refcount dropped, so just clear the pte here. 3336 */ 3337 if (unlikely(!pte_present(pte))) { 3338 huge_pte_clear(mm, address, ptep); 3339 spin_unlock(ptl); 3340 continue; 3341 } 3342 3343 page = pte_page(pte); 3344 /* 3345 * If a reference page is supplied, it is because a specific 3346 * page is being unmapped, not a range.
Ensure the page we 3347 * are about to unmap is the actual page of interest. 3348 */ 3349 if (ref_page) { 3350 if (page != ref_page) { 3351 spin_unlock(ptl); 3352 continue; 3353 } 3354 /* 3355 * Mark the VMA as having unmapped its page so that 3356 * future faults in this VMA will fail rather than 3357 * looking like data was lost 3358 */ 3359 set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED); 3360 } 3361 3362 pte = huge_ptep_get_and_clear(mm, address, ptep); 3363 tlb_remove_huge_tlb_entry(h, tlb, ptep, address); 3364 if (huge_pte_dirty(pte)) 3365 set_page_dirty(page); 3366 3367 hugetlb_count_sub(pages_per_huge_page(h), mm); 3368 page_remove_rmap(page, true); 3369 3370 spin_unlock(ptl); 3371 tlb_remove_page_size(tlb, page, huge_page_size(h)); 3372 /* 3373 * Bail out after unmapping reference page if supplied 3374 */ 3375 if (ref_page) 3376 break; 3377 } 3378 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 3379 tlb_end_vma(tlb, vma); 3380 } 3381 3382 void __unmap_hugepage_range_final(struct mmu_gather *tlb, 3383 struct vm_area_struct *vma, unsigned long start, 3384 unsigned long end, struct page *ref_page) 3385 { 3386 __unmap_hugepage_range(tlb, vma, start, end, ref_page); 3387 3388 /* 3389 * Clear this flag so that x86's huge_pmd_share page_table_shareable 3390 * test will fail on a vma being torn down, and not grab a page table 3391 * on its way out. We're lucky that the flag has such an appropriate 3392 * name, and can in fact be safely cleared here. We could clear it 3393 * before the __unmap_hugepage_range above, but all that's necessary 3394 * is to clear it before releasing the i_mmap_rwsem. This works 3395 * because in the context this is called, the VMA is about to be 3396 * destroyed and the i_mmap_rwsem is held. 3397 */ 3398 vma->vm_flags &= ~VM_MAYSHARE; 3399 } 3400 3401 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, 3402 unsigned long end, struct page *ref_page) 3403 { 3404 struct mm_struct *mm; 3405 struct mmu_gather tlb; 3406 3407 mm = vma->vm_mm; 3408 3409 tlb_gather_mmu(&tlb, mm, start, end); 3410 __unmap_hugepage_range(&tlb, vma, start, end, ref_page); 3411 tlb_finish_mmu(&tlb, start, end); 3412 } 3413 3414 /* 3415 * This is called when the original mapper is failing to COW a MAP_PRIVATE 3416 * mapping it owns the reserve page for. The intention is to unmap the page 3417 * from other VMAs and let the children be SIGKILLed if they are faulting the 3418 * same region. 3419 */ 3420 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, 3421 struct page *page, unsigned long address) 3422 { 3423 struct hstate *h = hstate_vma(vma); 3424 struct vm_area_struct *iter_vma; 3425 struct address_space *mapping; 3426 pgoff_t pgoff; 3427 3428 /* 3429 * vm_pgoff is in PAGE_SIZE units, hence the different calculation 3430 * from page cache lookup which is in HPAGE_SIZE units. 3431 */ 3432 address = address & huge_page_mask(h); 3433 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + 3434 vma->vm_pgoff; 3435 mapping = vma->vm_file->f_mapping; 3436 3437 /* 3438 * Take the mapping lock for the duration of the table walk.
As 3439 * this mapping should be shared between all the VMAs, 3440 * __unmap_hugepage_range() is called while the lock is already held 3441 */ 3442 i_mmap_lock_write(mapping); 3443 vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) { 3444 /* Do not unmap the current VMA */ 3445 if (iter_vma == vma) 3446 continue; 3447 3448 /* 3449 * Shared VMAs have their own reserves and do not affect 3450 * MAP_PRIVATE accounting but it is possible that a shared 3451 * VMA is using the same page so check and skip such VMAs. 3452 */ 3453 if (iter_vma->vm_flags & VM_MAYSHARE) 3454 continue; 3455 3456 /* 3457 * Unmap the page from other VMAs without their own reserves. 3458 * They get marked to be SIGKILLed if they fault in these 3459 * areas. This is because a future no-page fault on this VMA 3460 * could insert a zeroed page instead of the data existing 3461 * from the time of fork. This would look like data corruption 3462 */ 3463 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER)) 3464 unmap_hugepage_range(iter_vma, address, 3465 address + huge_page_size(h), page); 3466 } 3467 i_mmap_unlock_write(mapping); 3468 } 3469 3470 /* 3471 * Hugetlb_cow() should be called with page lock of the original hugepage held. 3472 * Called with hugetlb_instantiation_mutex held and pte_page locked so we 3473 * cannot race with other handlers or page migration. 3474 * Keep the pte_same checks anyway to make transition from the mutex easier. 3475 */ 3476 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma, 3477 unsigned long address, pte_t *ptep, 3478 struct page *pagecache_page, spinlock_t *ptl) 3479 { 3480 pte_t pte; 3481 struct hstate *h = hstate_vma(vma); 3482 struct page *old_page, *new_page; 3483 int ret = 0, outside_reserve = 0; 3484 unsigned long mmun_start; /* For mmu_notifiers */ 3485 unsigned long mmun_end; /* For mmu_notifiers */ 3486 3487 pte = huge_ptep_get(ptep); 3488 old_page = pte_page(pte); 3489 3490 retry_avoidcopy: 3491 /* If no-one else is actually using this page, avoid the copy 3492 * and just make the page writable */ 3493 if (page_mapcount(old_page) == 1 && PageAnon(old_page)) { 3494 page_move_anon_rmap(old_page, vma); 3495 set_huge_ptep_writable(vma, address, ptep); 3496 return 0; 3497 } 3498 3499 /* 3500 * If the process that created a MAP_PRIVATE mapping is about to 3501 * perform a COW due to a shared page count, attempt to satisfy 3502 * the allocation without using the existing reserves. The pagecache 3503 * page is used to determine if the reserve at this address was 3504 * consumed or not. If reserves were used, a partial faulted mapping 3505 * at the time of fork() could consume its reserves on COW instead 3506 * of the full address range. 3507 */ 3508 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && 3509 old_page != pagecache_page) 3510 outside_reserve = 1; 3511 3512 get_page(old_page); 3513 3514 /* 3515 * Drop page table lock as buddy allocator may be called. It will 3516 * be acquired again before returning to the caller, as expected. 3517 */ 3518 spin_unlock(ptl); 3519 new_page = alloc_huge_page(vma, address, outside_reserve); 3520 3521 if (IS_ERR(new_page)) { 3522 /* 3523 * If a process owning a MAP_PRIVATE mapping fails to COW, 3524 * it is due to references held by a child and an insufficient 3525 * huge page pool. To guarantee the original mapper's 3526 * reliability, unmap the page from child processes. The child 3527 * may get SIGKILLed if it later faults.
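 *
 * (Assumed scenario, for illustration: the parent created the
 * MAP_PRIVATE mapping and owns the reserve, a forked child still maps
 * the old page and the pool is exhausted; unmap_ref_private() below
 * strips the page from the child so the parent's retry can succeed
 * using the freed page.)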
3528 */ 3529 if (outside_reserve) { 3530 put_page(old_page); 3531 BUG_ON(huge_pte_none(pte)); 3532 unmap_ref_private(mm, vma, old_page, address); 3533 BUG_ON(huge_pte_none(pte)); 3534 spin_lock(ptl); 3535 ptep = huge_pte_offset(mm, address & huge_page_mask(h)); 3536 if (likely(ptep && 3537 pte_same(huge_ptep_get(ptep), pte))) 3538 goto retry_avoidcopy; 3539 /* 3540 * A race occurred while re-acquiring the page table 3541 * lock, and our job is done. 3542 */ 3543 return 0; 3544 } 3545 3546 ret = (PTR_ERR(new_page) == -ENOMEM) ? 3547 VM_FAULT_OOM : VM_FAULT_SIGBUS; 3548 goto out_release_old; 3549 } 3550 3551 /* 3552 * When the original hugepage is a shared one, it does not have 3553 * an anon_vma prepared. 3554 */ 3555 if (unlikely(anon_vma_prepare(vma))) { 3556 ret = VM_FAULT_OOM; 3557 goto out_release_all; 3558 } 3559 3560 copy_user_huge_page(new_page, old_page, address, vma, 3561 pages_per_huge_page(h)); 3562 __SetPageUptodate(new_page); 3563 set_page_huge_active(new_page); 3564 3565 mmun_start = address & huge_page_mask(h); 3566 mmun_end = mmun_start + huge_page_size(h); 3567 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 3568 3569 /* 3570 * Retake the page table lock to check for racing updates 3571 * before the page tables are altered 3572 */ 3573 spin_lock(ptl); 3574 ptep = huge_pte_offset(mm, address & huge_page_mask(h)); 3575 if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) { 3576 ClearPagePrivate(new_page); 3577 3578 /* Break COW */ 3579 huge_ptep_clear_flush(vma, address, ptep); 3580 mmu_notifier_invalidate_range(mm, mmun_start, mmun_end); 3581 set_huge_pte_at(mm, address, ptep, 3582 make_huge_pte(vma, new_page, 1)); 3583 page_remove_rmap(old_page, true); 3584 hugepage_add_new_anon_rmap(new_page, vma, address); 3585 /* Make the old page be freed below */ 3586 new_page = old_page; 3587 } 3588 spin_unlock(ptl); 3589 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 3590 out_release_all: 3591 restore_reserve_on_error(h, vma, address, new_page); 3592 put_page(new_page); 3593 out_release_old: 3594 put_page(old_page); 3595 3596 spin_lock(ptl); /* Caller expects lock to be held */ 3597 return ret; 3598 } 3599 3600 /* Return the pagecache page at a given address within a VMA */ 3601 static struct page *hugetlbfs_pagecache_page(struct hstate *h, 3602 struct vm_area_struct *vma, unsigned long address) 3603 { 3604 struct address_space *mapping; 3605 pgoff_t idx; 3606 3607 mapping = vma->vm_file->f_mapping; 3608 idx = vma_hugecache_offset(h, vma, address); 3609 3610 return find_lock_page(mapping, idx); 3611 } 3612 3613 /* 3614 * Return whether there is a pagecache page to back given address within VMA. 3615 * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
3616 */ 3617 static bool hugetlbfs_pagecache_present(struct hstate *h, 3618 struct vm_area_struct *vma, unsigned long address) 3619 { 3620 struct address_space *mapping; 3621 pgoff_t idx; 3622 struct page *page; 3623 3624 mapping = vma->vm_file->f_mapping; 3625 idx = vma_hugecache_offset(h, vma, address); 3626 3627 page = find_get_page(mapping, idx); 3628 if (page) 3629 put_page(page); 3630 return page != NULL; 3631 } 3632 3633 int huge_add_to_page_cache(struct page *page, struct address_space *mapping, 3634 pgoff_t idx) 3635 { 3636 struct inode *inode = mapping->host; 3637 struct hstate *h = hstate_inode(inode); 3638 int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL); 3639 3640 if (err) 3641 return err; 3642 ClearPagePrivate(page); 3643 3644 spin_lock(&inode->i_lock); 3645 inode->i_blocks += blocks_per_huge_page(h); 3646 spin_unlock(&inode->i_lock); 3647 return 0; 3648 } 3649 3650 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, 3651 struct address_space *mapping, pgoff_t idx, 3652 unsigned long address, pte_t *ptep, unsigned int flags) 3653 { 3654 struct hstate *h = hstate_vma(vma); 3655 int ret = VM_FAULT_SIGBUS; 3656 int anon_rmap = 0; 3657 unsigned long size; 3658 struct page *page; 3659 pte_t new_pte; 3660 spinlock_t *ptl; 3661 3662 /* 3663 * Currently, we are forced to kill the process in the event the 3664 * original mapper has unmapped pages from the child due to a failed 3665 * COW. Warn that such a situation has occurred as it may not be obvious. 3666 */ 3667 if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) { 3668 pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n", 3669 current->pid); 3670 return ret; 3671 } 3672 3673 /* 3674 * Use page lock to guard against racing truncation 3675 * before we get page_table_lock. 3676 */ 3677 retry: 3678 page = find_lock_page(mapping, idx); 3679 if (!page) { 3680 size = i_size_read(mapping->host) >> huge_page_shift(h); 3681 if (idx >= size) 3682 goto out; 3683 page = alloc_huge_page(vma, address, 0); 3684 if (IS_ERR(page)) { 3685 ret = PTR_ERR(page); 3686 if (ret == -ENOMEM) 3687 ret = VM_FAULT_OOM; 3688 else 3689 ret = VM_FAULT_SIGBUS; 3690 goto out; 3691 } 3692 clear_huge_page(page, address, pages_per_huge_page(h)); 3693 __SetPageUptodate(page); 3694 set_page_huge_active(page); 3695 3696 if (vma->vm_flags & VM_MAYSHARE) { 3697 int err = huge_add_to_page_cache(page, mapping, idx); 3698 if (err) { 3699 put_page(page); 3700 if (err == -EEXIST) 3701 goto retry; 3702 goto out; 3703 } 3704 } else { 3705 lock_page(page); 3706 if (unlikely(anon_vma_prepare(vma))) { 3707 ret = VM_FAULT_OOM; 3708 goto backout_unlocked; 3709 } 3710 anon_rmap = 1; 3711 } 3712 } else { 3713 /* 3714 * If a memory error occurred between mmap() and fault, some 3715 * processes may not have a hwpoisoned swap entry for the errored 3716 * virtual address, so we need to block hugepage faults with the 3717 * PG_hwpoison bit check. 3718 */ 3719 if (unlikely(PageHWPoison(page))) { 3720 ret = VM_FAULT_HWPOISON | 3721 VM_FAULT_SET_HINDEX(hstate_index(h)); 3722 goto backout_unlocked; 3723 } 3724 } 3725 3726 /* 3727 * If we are going to COW a private mapping later, we examine the 3728 * pending reservations for this page now. This will ensure that 3729 * any allocations necessary to record that reservation occur outside 3730 * the spinlock.
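 *
 * (The reserve map manipulation behind vma_needs_reservation() may
 * need to allocate a region descriptor, which must not happen under
 * the page table spinlock taken below; see __vma_reservation_common()
 * and the region tracking comments earlier in this file.)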
static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
			   struct address_space *mapping, pgoff_t idx,
			   unsigned long address, pte_t *ptep, unsigned int flags)
{
	struct hstate *h = hstate_vma(vma);
	int ret = VM_FAULT_SIGBUS;
	int anon_rmap = 0;
	unsigned long size;
	struct page *page;
	pte_t new_pte;
	spinlock_t *ptl;

	/*
	 * Currently, we are forced to kill the process in the event the
	 * original mapper has unmapped pages from the child due to a failed
	 * COW.  Warn that such a situation has occurred, as it may not be
	 * obvious.
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
		pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
				    current->pid);
		return ret;
	}

	/*
	 * Use the page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */
retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
		size = i_size_read(mapping->host) >> huge_page_shift(h);
		if (idx >= size)
			goto out;
		page = alloc_huge_page(vma, address, 0);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			if (ret == -ENOMEM)
				ret = VM_FAULT_OOM;
			else
				ret = VM_FAULT_SIGBUS;
			goto out;
		}
		clear_huge_page(page, address, pages_per_huge_page(h));
		__SetPageUptodate(page);
		set_page_huge_active(page);

		if (vma->vm_flags & VM_MAYSHARE) {
			int err = huge_add_to_page_cache(page, mapping, idx);
			if (err) {
				put_page(page);
				if (err == -EEXIST)
					goto retry;
				goto out;
			}
		} else {
			lock_page(page);
			if (unlikely(anon_vma_prepare(vma))) {
				ret = VM_FAULT_OOM;
				goto backout_unlocked;
			}
			anon_rmap = 1;
		}
	} else {
		/*
		 * If a memory error occurs between mmap() and fault, some
		 * processes don't have a hwpoisoned swap entry for the
		 * errored virtual address, so we need to block the hugepage
		 * fault with the PG_hwpoison bit check.
		 */
		if (unlikely(PageHWPoison(page))) {
			ret = VM_FAULT_HWPOISON |
				VM_FAULT_SET_HINDEX(hstate_index(h));
			goto backout_unlocked;
		}
	}

	/*
	 * If we are going to COW a private mapping later, we examine the
	 * pending reservations for this page now.  This will ensure that
	 * any allocations necessary to record that reservation occur outside
	 * the spinlock.
	 */
	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
		if (vma_needs_reservation(h, vma, address) < 0) {
			ret = VM_FAULT_OOM;
			goto backout_unlocked;
		}
		/* Just decrements count, does not deallocate */
		vma_end_reservation(h, vma, address);
	}

	ptl = huge_pte_lock(h, mm, ptep);
	size = i_size_read(mapping->host) >> huge_page_shift(h);
	if (idx >= size)
		goto backout;

	ret = 0;
	if (!huge_pte_none(huge_ptep_get(ptep)))
		goto backout;

	if (anon_rmap) {
		ClearPagePrivate(page);
		hugepage_add_new_anon_rmap(page, vma, address);
	} else
		page_dup_rmap(page, true);
	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, address, ptep, new_pte);

	hugetlb_count_add(pages_per_huge_page(h), mm);
	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
		/* Optimization, do the COW without a second fault */
		ret = hugetlb_cow(mm, vma, address, ptep, page, ptl);
	}

	spin_unlock(ptl);
	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(ptl);
backout_unlocked:
	unlock_page(page);
	restore_reserve_on_error(h, vma, address, page);
	put_page(page);
	goto out;
}

#ifdef CONFIG_SMP
u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
			     struct vm_area_struct *vma,
			     struct address_space *mapping,
			     pgoff_t idx, unsigned long address)
{
	unsigned long key[2];
	u32 hash;

	if (vma->vm_flags & VM_SHARED) {
		key[0] = (unsigned long) mapping;
		key[1] = idx;
	} else {
		key[0] = (unsigned long) mm;
		key[1] = address >> huge_page_shift(h);
	}

	hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);

	return hash & (num_fault_mutexes - 1);
}
#else
/*
 * For uniprocessor systems we always use a single mutex, so just
 * return 0 and avoid the hashing overhead.
 */
u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
			     struct vm_area_struct *vma,
			     struct address_space *mapping,
			     pgoff_t idx, unsigned long address)
{
	return 0;
}
#endif
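/*
 * Caller-side sketch (hypothetical wrappers, for illustration only):
 * the table size is assumed to be a power of two, which is what makes
 * "hash & (num_fault_mutexes - 1)" above a valid bucket reduction.
 * Every path that takes a fault mutex must release the same slot, as
 * hugetlb_fault() does below:
 */
static inline u32 hugetlb_fault_mutex_lock(struct hstate *h,
		struct mm_struct *mm, struct vm_area_struct *vma,
		struct address_space *mapping, pgoff_t idx,
		unsigned long address)
{
	u32 hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx,
					    address);

	mutex_lock(&hugetlb_fault_mutex_table[hash]);
	return hash;
}

static inline void hugetlb_fault_mutex_unlock(u32 hash)
{
	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
}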
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
		  unsigned long address, unsigned int flags)
{
	pte_t *ptep, entry;
	spinlock_t *ptl;
	int ret;
	u32 hash;
	pgoff_t idx;
	struct page *page = NULL;
	struct page *pagecache_page = NULL;
	struct hstate *h = hstate_vma(vma);
	struct address_space *mapping;
	int need_wait_lock = 0;

	address &= huge_page_mask(h);

	ptep = huge_pte_offset(mm, address);
	if (ptep) {
		entry = huge_ptep_get(ptep);
		if (unlikely(is_hugetlb_entry_migration(entry))) {
			migration_entry_wait_huge(vma, mm, ptep);
			return 0;
		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
			return VM_FAULT_HWPOISON_LARGE |
				VM_FAULT_SET_HINDEX(hstate_index(h));
	} else {
		ptep = huge_pte_alloc(mm, address, huge_page_size(h));
		if (!ptep)
			return VM_FAULT_OOM;
	}

	mapping = vma->vm_file->f_mapping;
	idx = vma_hugecache_offset(h, vma, address);

	/*
	 * Serialize hugepage allocation and instantiation, so that we don't
	 * get spurious allocation failures if two CPUs race to instantiate
	 * the same page in the page cache.
	 */
	hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
	mutex_lock(&hugetlb_fault_mutex_table[hash]);

	entry = huge_ptep_get(ptep);
	if (huge_pte_none(entry)) {
		ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
		goto out_mutex;
	}

	ret = 0;

	/*
	 * entry could be a migration/hwpoison entry at this point, so this
	 * check prevents the kernel from going below assuming that we have
	 * an active hugepage in the page cache.  This goto expects a second
	 * page fault, whose is_hugetlb_entry_(migration|hwpoisoned) check
	 * will properly handle it.
	 */
	if (!pte_present(entry))
		goto out_mutex;

	/*
	 * If we are going to COW the mapping later, we examine the pending
	 * reservations for this page now.  This will ensure that any
	 * allocations necessary to record that reservation occur outside the
	 * spinlock.  For private mappings, we also look up the pagecache
	 * page now, as it is used to determine if a reservation has been
	 * consumed.
	 */
	if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
		if (vma_needs_reservation(h, vma, address) < 0) {
			ret = VM_FAULT_OOM;
			goto out_mutex;
		}
		/* Just decrements count, does not deallocate */
		vma_end_reservation(h, vma, address);

		if (!(vma->vm_flags & VM_MAYSHARE))
			pagecache_page = hugetlbfs_pagecache_page(h,
								vma, address);
	}

	ptl = huge_pte_lock(h, mm, ptep);

	/* Check for a racing update before calling hugetlb_cow */
	if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
		goto out_ptl;

	/*
	 * hugetlb_cow() requires the page locks of pte_page(entry) and
	 * pagecache_page, so here we need to take the former when
	 * page != pagecache_page or when there is no pagecache_page.
	 */
	page = pte_page(entry);
	if (page != pagecache_page)
		if (!trylock_page(page)) {
			need_wait_lock = 1;
			goto out_ptl;
		}

	get_page(page);

	if (flags & FAULT_FLAG_WRITE) {
		if (!huge_pte_write(entry)) {
			ret = hugetlb_cow(mm, vma, address, ptep,
					  pagecache_page, ptl);
			goto out_put_page;
		}
		entry = huge_pte_mkdirty(entry);
	}
	entry = pte_mkyoung(entry);
	if (huge_ptep_set_access_flags(vma, address, ptep, entry,
				       flags & FAULT_FLAG_WRITE))
		update_mmu_cache(vma, address, ptep);
out_put_page:
	if (page != pagecache_page)
		unlock_page(page);
	put_page(page);
out_ptl:
	spin_unlock(ptl);

	if (pagecache_page) {
		unlock_page(pagecache_page);
		put_page(pagecache_page);
	}
out_mutex:
	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
	/*
	 * Generally it's safe to hold a refcount while waiting on a page
	 * lock, but here we just wait to defer the next page fault and
	 * avoid a busy loop.  The page is not used between being unlocked
	 * and the current page fault returning, so we are safe from
	 * accessing a freed page even though we wait here without taking
	 * a refcount.
	 */
	if (need_wait_lock)
		wait_on_page_locked(page);
	return ret;
}
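/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * file): hugetlb_fault() is reached from the generic fault path with
 * the caller's fault flags.  Passing FAULT_FLAG_WRITE against a
 * read-only PTE in a private mapping is what drives the hugetlb_cow()
 * path above:
 */
static inline int hugetlb_fault_for_write(struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long address)
{
	return hugetlb_fault(mm, vma, address, FAULT_FLAG_WRITE);
}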
long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
			 struct page **pages, struct vm_area_struct **vmas,
			 unsigned long *position, unsigned long *nr_pages,
			 long i, unsigned int flags)
{
	unsigned long pfn_offset;
	unsigned long vaddr = *position;
	unsigned long remainder = *nr_pages;
	struct hstate *h = hstate_vma(vma);

	while (vaddr < vma->vm_end && remainder) {
		pte_t *pte;
		spinlock_t *ptl = NULL;
		int absent;
		struct page *page;

		/*
		 * If we have a pending SIGKILL, don't keep faulting pages and
		 * potentially allocating memory.
		 */
		if (unlikely(fatal_signal_pending(current))) {
			remainder = 0;
			break;
		}

		/*
		 * Some archs (sparc64, sh*) have multiple pte_t entries for
		 * each hugepage.  We have to make sure we get the first,
		 * for the page indexing below to work.
		 *
		 * Note that the page table lock is not held when pte is null.
		 */
		pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
		if (pte)
			ptl = huge_pte_lock(h, mm, pte);
		absent = !pte || huge_pte_none(huge_ptep_get(pte));

		/*
		 * When coredumping, it suits get_dump_page if we just return
		 * an error where there's an empty slot with no huge pagecache
		 * to back it.  This way, we avoid allocating a hugepage, and
		 * the sparse dumpfile avoids allocating disk blocks, but its
		 * huge holes still show up with zeroes where they need to be.
		 */
		if (absent && (flags & FOLL_DUMP) &&
		    !hugetlbfs_pagecache_present(h, vma, vaddr)) {
			if (pte)
				spin_unlock(ptl);
			remainder = 0;
			break;
		}

		/*
		 * We need to call hugetlb_fault() both for hugepages under
		 * migration (in which case hugetlb_fault() waits for the
		 * migration) and for hwpoisoned hugepages (in which case we
		 * need to prevent the caller from accessing them).  To do
		 * this, we use is_swap_pte here instead of
		 * is_hugetlb_entry_migration and is_hugetlb_entry_hwpoisoned,
		 * because it simply covers both cases, and because we can't
		 * follow correct pages directly from any kind of swap entry.
		 */
		if (absent || is_swap_pte(huge_ptep_get(pte)) ||
		    ((flags & FOLL_WRITE) &&
		      !huge_pte_write(huge_ptep_get(pte)))) {
			int ret;

			if (pte)
				spin_unlock(ptl);
			ret = hugetlb_fault(mm, vma, vaddr,
				(flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
			if (!(ret & VM_FAULT_ERROR))
				continue;

			remainder = 0;
			break;
		}

		pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
		page = pte_page(huge_ptep_get(pte));
same_page:
		if (pages) {
			pages[i] = mem_map_offset(page, pfn_offset);
			get_page(pages[i]);
		}

		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		++pfn_offset;
		--remainder;
		++i;
		if (vaddr < vma->vm_end && remainder &&
		    pfn_offset < pages_per_huge_page(h)) {
			/*
			 * We use pfn_offset to avoid touching the pageframes
			 * of this compound page.
			 */
			goto same_page;
		}
		spin_unlock(ptl);
	}
	*nr_pages = remainder;
	*position = vaddr;

	return i ? i : -EFAULT;
}
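/*
 * Usage sketch (hypothetical caller, for illustration only): *position
 * and *nr_pages are in/out parameters that are advanced past whatever
 * was taken, and the return value follows the __get_user_pages()
 * convention of "pages taken so far, or -EFAULT if none".
 */
static inline long pin_hugetlb_range(struct mm_struct *mm,
		struct vm_area_struct *vma, struct page **pages,
		unsigned long start, unsigned long nr_pages)
{
	return follow_hugetlb_page(mm, vma, pages, NULL,
				   &start, &nr_pages, 0, 0);
}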
#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
/*
 * Architectures with special requirements for evicting hugetlb-backing
 * TLB entries can implement this themselves.
 */
#define flush_hugetlb_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
#endif

unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long start = address;
	pte_t *ptep;
	pte_t pte;
	struct hstate *h = hstate_vma(vma);
	unsigned long pages = 0;

	BUG_ON(address >= end);
	flush_cache_range(vma, address, end);

	mmu_notifier_invalidate_range_start(mm, start, end);
	i_mmap_lock_write(vma->vm_file->f_mapping);
	for (; address < end; address += huge_page_size(h)) {
		spinlock_t *ptl;
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;
		ptl = huge_pte_lock(h, mm, ptep);
		if (huge_pmd_unshare(mm, &address, ptep)) {
			pages++;
			spin_unlock(ptl);
			continue;
		}
		pte = huge_ptep_get(ptep);
		if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
			spin_unlock(ptl);
			continue;
		}
		if (unlikely(is_hugetlb_entry_migration(pte))) {
			swp_entry_t entry = pte_to_swp_entry(pte);

			if (is_write_migration_entry(entry)) {
				pte_t newpte;

				make_migration_entry_read(&entry);
				newpte = swp_entry_to_pte(entry);
				set_huge_pte_at(mm, address, ptep, newpte);
				pages++;
			}
			spin_unlock(ptl);
			continue;
		}
		if (!huge_pte_none(pte)) {
			pte = huge_ptep_get_and_clear(mm, address, ptep);
			pte = pte_mkhuge(huge_pte_modify(pte, newprot));
			pte = arch_make_huge_pte(pte, vma, NULL, 0);
			set_huge_pte_at(mm, address, ptep, pte);
			pages++;
		}
		spin_unlock(ptl);
	}
	/*
	 * Must flush the TLB before releasing i_mmap_rwsem: x86's
	 * huge_pmd_unshare may have cleared our pud entry and done put_page
	 * on the page table: once we release i_mmap_rwsem, another task can
	 * do the final put_page and that page table could then be reused
	 * and filled with junk.
	 */
	flush_hugetlb_tlb_range(vma, start, end);
	mmu_notifier_invalidate_range(mm, start, end);
	i_mmap_unlock_write(vma->vm_file->f_mapping);
	mmu_notifier_invalidate_range_end(mm, start, end);

	return pages << h->order;
}
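/*
 * Caller-side sketch (hypothetical helper, for illustration): note that
 * "pages << h->order" above means the return value is in base-page
 * units, e.g. 512 per 2MB huge page on x86, which matches what the
 * generic mprotect accounting expects.
 */
static inline unsigned long hugetlb_mprotect_whole_vma(
		struct vm_area_struct *vma, pgprot_t newprot)
{
	return hugetlb_change_protection(vma, vma->vm_start, vma->vm_end,
					 newprot);
}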
int hugetlb_reserve_pages(struct inode *inode,
			  long from, long to,
			  struct vm_area_struct *vma,
			  vm_flags_t vm_flags)
{
	long ret, chg;
	struct hstate *h = hstate_inode(inode);
	struct hugepage_subpool *spool = subpool_inode(inode);
	struct resv_map *resv_map;
	long gbl_reserve;

	/*
	 * Only apply the hugepage reservation if asked.  At fault time, an
	 * attempt will be made for VM_NORESERVE to allocate a page without
	 * using reserves.
	 */
	if (vm_flags & VM_NORESERVE)
		return 0;

	/*
	 * Shared mappings base their reservation on the number of pages that
	 * are already allocated on behalf of the file.  Private mappings need
	 * to reserve the full area even if read-only, as mprotect() may be
	 * called to make the mapping read-write.  Assume !vma is a shm
	 * mapping.
	 */
	if (!vma || vma->vm_flags & VM_MAYSHARE) {
		resv_map = inode_resv_map(inode);

		chg = region_chg(resv_map, from, to);

	} else {
		resv_map = resv_map_alloc();
		if (!resv_map)
			return -ENOMEM;

		chg = to - from;

		set_vma_resv_map(vma, resv_map);
		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
	}

	if (chg < 0) {
		ret = chg;
		goto out_err;
	}

	/*
	 * There must be enough pages in the subpool for the mapping.  If
	 * the subpool has a minimum size, there may be some global
	 * reservations already in place (gbl_reserve).
	 */
	gbl_reserve = hugepage_subpool_get_pages(spool, chg);
	if (gbl_reserve < 0) {
		ret = -ENOSPC;
		goto out_err;
	}

	/*
	 * Check that enough hugepages are available for the reservation.
	 * Hand the pages back to the subpool if there are not enough.
	 */
	ret = hugetlb_acct_memory(h, gbl_reserve);
	if (ret < 0) {
		/* put back original number of pages, chg */
		(void)hugepage_subpool_put_pages(spool, chg);
		goto out_err;
	}

	/*
	 * Account for the reservations made.  Shared mappings record regions
	 * that have reservations as they are shared by multiple VMAs.
	 * When the last VMA disappears, the region map says how much
	 * the reservation was and the page cache tells how much of
	 * the reservation was consumed.  Private mappings are per-VMA and
	 * only the consumed reservations are tracked.  When the VMA
	 * disappears, the original reservation is the VMA size and the
	 * consumed reservations are stored in the map.  Hence, nothing
	 * else has to be done for private mappings here.
	 */
	if (!vma || vma->vm_flags & VM_MAYSHARE) {
		long add = region_add(resv_map, from, to);

		if (unlikely(chg > add)) {
			/*
			 * pages in this range were added to the reserve
			 * map between region_chg and region_add.  This
			 * indicates a race with alloc_huge_page.  Adjust
			 * the subpool and reserve counts modified above
			 * based on the difference.
			 */
			long rsv_adjust;

			rsv_adjust = hugepage_subpool_put_pages(spool,
								chg - add);
			hugetlb_acct_memory(h, -rsv_adjust);
		}
	}
	return 0;
out_err:
	if (!vma || vma->vm_flags & VM_MAYSHARE)
		region_abort(resv_map, from, to);
	if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
		kref_put(&resv_map->refs, resv_map_release);
	return ret;
}
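/*
 * Worked example (illustrative, with a hypothetical wrapper): suppose
 * region_chg() reported chg == 4 but a racing alloc_huge_page() updated
 * the reserve map before region_add() ran, so add == 3; the surplus
 * rsv_adjust == 1 is then returned to the subpool and global counts
 * above.  A hugetlbfs-style caller reserving a whole file would pair
 * this with hugetlb_unreserve_pages() below:
 */
static inline int hugetlb_reserve_whole_file(struct inode *inode, loff_t len)
{
	struct hstate *h = hstate_inode(inode);

	/* from/to are huge-page indices, not byte offsets. */
	return hugetlb_reserve_pages(inode, 0, len >> huge_page_shift(h),
				     NULL, 0);
}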
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
			     long freed)
{
	struct hstate *h = hstate_inode(inode);
	struct resv_map *resv_map = inode_resv_map(inode);
	long chg = 0;
	struct hugepage_subpool *spool = subpool_inode(inode);
	long gbl_reserve;

	if (resv_map) {
		chg = region_del(resv_map, start, end);
		/*
		 * region_del() can fail in the rare case where a region
		 * must be split and another region descriptor cannot be
		 * allocated.  If end == LONG_MAX, it will not fail.
		 */
		if (chg < 0)
			return chg;
	}

	spin_lock(&inode->i_lock);
	inode->i_blocks -= (blocks_per_huge_page(h) * freed);
	spin_unlock(&inode->i_lock);

	/*
	 * If the subpool has a minimum size, the number of global
	 * reservations to be released may be adjusted.
	 */
	gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
	hugetlb_acct_memory(h, -gbl_reserve);

	return 0;
}

#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
static unsigned long page_table_shareable(struct vm_area_struct *svma,
				struct vm_area_struct *vma,
				unsigned long addr, pgoff_t idx)
{
	unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
				svma->vm_start;
	unsigned long sbase = saddr & PUD_MASK;
	unsigned long s_end = sbase + PUD_SIZE;

	/* Allow segments to share even if only one is marked locked */
	unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
	unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK;

	/*
	 * Match the virtual addresses, permissions and the alignment of the
	 * page table page.
	 */
	if (pmd_index(addr) != pmd_index(saddr) ||
	    vm_flags != svm_flags ||
	    sbase < svma->vm_start || svma->vm_end < s_end)
		return 0;

	return saddr;
}

static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
{
	unsigned long base = addr & PUD_MASK;
	unsigned long end = base + PUD_SIZE;

	/*
	 * Check for proper vm_flags and page table alignment.
	 */
	if (vma->vm_flags & VM_MAYSHARE &&
	    vma->vm_start <= base && end <= vma->vm_end)
		return true;
	return false;
}

/*
 * Search for a shareable pmd page for hugetlb.  In any case it calls
 * pmd_alloc() and returns the corresponding pte.  While this is not
 * necessary for the !shared pmd case, because we can allocate the pmd
 * later as well, it makes the code much cleaner.  The pmd allocation is
 * essential for the shared case, because the pud has to be populated
 * inside the same i_mmap_rwsem section - otherwise racing tasks could
 * either miss the sharing (see huge_pte_offset) or select a bad pmd
 * for sharing.
 */
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
{
	struct vm_area_struct *vma = find_vma(mm, addr);
	struct address_space *mapping = vma->vm_file->f_mapping;
	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
			vma->vm_pgoff;
	struct vm_area_struct *svma;
	unsigned long saddr;
	pte_t *spte = NULL;
	pte_t *pte;
	spinlock_t *ptl;

	if (!vma_shareable(vma, addr))
		return (pte_t *)pmd_alloc(mm, pud, addr);

	i_mmap_lock_write(mapping);
	vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
		if (svma == vma)
			continue;

		saddr = page_table_shareable(svma, vma, addr, idx);
		if (saddr) {
			spte = huge_pte_offset(svma->vm_mm, saddr);
			if (spte) {
				get_page(virt_to_page(spte));
				break;
			}
		}
	}

	if (!spte)
		goto out;

	ptl = huge_pte_lock(hstate_vma(vma), mm, spte);
	if (pud_none(*pud)) {
		pud_populate(mm, pud,
				(pmd_t *)((unsigned long)spte & PAGE_MASK));
		mm_inc_nr_pmds(mm);
	} else {
		put_page(virt_to_page(spte));
	}
	spin_unlock(ptl);
out:
	pte = (pte_t *)pmd_alloc(mm, pud, addr);
	i_mmap_unlock_write(mapping);
	return pte;
}
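/*
 * Illustrative sketch (hypothetical helper, not in the original file):
 * page_table_shareable() above translates the faulting file index back
 * into the candidate VMA's address space; sharing is only possible when
 * that address lands in the same PUD-aligned slot with the same
 * permissions.  The address derivation on its own is:
 */
static inline unsigned long pud_share_candidate_addr(
		struct vm_area_struct *svma, pgoff_t idx)
{
	return ((idx - svma->vm_pgoff) << PAGE_SHIFT) + svma->vm_start;
}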
/*
 * Unmap a huge page backed by a shared pte.
 *
 * The hugetlb pte page is refcounted at the time of mapping.  If the pte
 * is shared, indicated by page_count > 1, unmapping is achieved by
 * clearing the pud and decrementing the refcount.  If count == 1, the
 * pte page is not shared.
 *
 * Called with the page table lock held.
 *
 * returns: 1 successfully unmapped a shared pte page
 *	    0 the underlying pte page is not shared, or it is the last user
 */
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	pgd_t *pgd = pgd_offset(mm, *addr);
	pud_t *pud = pud_offset(pgd, *addr);

	BUG_ON(page_count(virt_to_page(ptep)) == 0);
	if (page_count(virt_to_page(ptep)) == 1)
		return 0;

	pud_clear(pud);
	put_page(virt_to_page(ptep));
	mm_dec_nr_pmds(mm);
	*addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
	return 1;
}
#define want_pmd_share()	(1)
#else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
{
	return NULL;
}

int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}
#define want_pmd_share()	(0)
#endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */

#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		if (sz == PUD_SIZE) {
			pte = (pte_t *)pud;
		} else {
			BUG_ON(sz != PMD_SIZE);
			if (want_pmd_share() && pud_none(*pud))
				pte = huge_pmd_share(mm, addr, pud);
			else
				pte = (pte_t *)pmd_alloc(mm, pud, addr);
		}
	}
	BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte));

	return pte;
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd_present(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (pud_present(*pud)) {
			if (pud_huge(*pud))
				return (pte_t *)pud;
			pmd = pmd_offset(pud, addr);
		}
	}
	return (pte_t *) pmd;
}

#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
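/*
 * Usage sketch (hypothetical helper, for illustration): huge_pte_offset()
 * only walks existing page tables and may return NULL, while
 * huge_pte_alloc() populates missing levels.  A lookup that must never
 * allocate page-table pages therefore looks like:
 */
static inline pte_t *huge_pte_lookup(struct mm_struct *mm,
				     unsigned long addr, struct hstate *h)
{
	/* May return NULL; the caller must be prepared for that. */
	return huge_pte_offset(mm, addr & huge_page_mask(h));
}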
/*
 * These functions can be overridden if your architecture needs its own
 * behavior.
 */
struct page * __weak
follow_huge_addr(struct mm_struct *mm, unsigned long address,
		 int write)
{
	return ERR_PTR(-EINVAL);
}

struct page * __weak
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int flags)
{
	struct page *page = NULL;
	spinlock_t *ptl;
retry:
	ptl = pmd_lockptr(mm, pmd);
	spin_lock(ptl);
	/*
	 * Make sure that the address range covered by this pmd is not
	 * unmapped by other threads.
	 */
	if (!pmd_huge(*pmd))
		goto out;
	if (pmd_present(*pmd)) {
		page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
		if (flags & FOLL_GET)
			get_page(page);
	} else {
		if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) {
			spin_unlock(ptl);
			__migration_entry_wait(mm, (pte_t *)pmd, ptl);
			goto retry;
		}
		/*
		 * A hwpoisoned entry is treated as no_page_table in
		 * follow_page_mask().
		 */
	}
out:
	spin_unlock(ptl);
	return page;
}

struct page * __weak
follow_huge_pud(struct mm_struct *mm, unsigned long address,
		pud_t *pud, int flags)
{
	if (flags & FOLL_GET)
		return NULL;

	return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
}

#ifdef CONFIG_MEMORY_FAILURE

/*
 * This function is called from the memory failure code.
 */
int dequeue_hwpoisoned_huge_page(struct page *hpage)
{
	struct hstate *h = page_hstate(hpage);
	int nid = page_to_nid(hpage);
	int ret = -EBUSY;

	spin_lock(&hugetlb_lock);
	/*
	 * Just checking !page_huge_active is not enough, because that could
	 * be an isolated/hwpoisoned hugepage (which has a >0 refcount).
	 */
	if (!page_huge_active(hpage) && !page_count(hpage)) {
		/*
		 * A hwpoisoned hugepage isn't linked to the activelist or
		 * freelist, but a dangling hpage->lru can trigger list-debug
		 * warnings (this happens when we call unpoison_memory() on
		 * it), so let it point to itself with list_del_init().
		 */
		list_del_init(&hpage->lru);
		set_page_refcounted(hpage);
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
		ret = 0;
	}
	spin_unlock(&hugetlb_lock);
	return ret;
}
#endif

bool isolate_huge_page(struct page *page, struct list_head *list)
{
	bool ret = true;

	VM_BUG_ON_PAGE(!PageHead(page), page);
	spin_lock(&hugetlb_lock);
	if (!page_huge_active(page) || !get_page_unless_zero(page)) {
		ret = false;
		goto unlock;
	}
	clear_page_huge_active(page);
	list_move_tail(&page->lru, list);
unlock:
	spin_unlock(&hugetlb_lock);
	return ret;
}

void putback_active_hugepage(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHead(page), page);
	spin_lock(&hugetlb_lock);
	set_page_huge_active(page);
	list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
	spin_unlock(&hugetlb_lock);
	put_page(page);
}
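/*
 * Illustrative pairing (hypothetical helper, not part of the original
 * file): a migration path isolates huge pages onto a private list with
 * isolate_huge_page() and, if migration is abandoned, must put each one
 * back so it becomes visible to later isolation attempts:
 */
static inline void putback_isolated_hugepages(struct list_head *list)
{
	struct page *page, *next;

	/* putback_active_hugepage() unlinks the page from our list and
	 * drops the reference taken at isolation time. */
	list_for_each_entry_safe(page, next, list, lru)
		putback_active_hugepage(page);
}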