/*
 * Generic hugetlb support.
 * (C) Nadia Yvette Chambers, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/memblock.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/mmdebug.h>
#include <linux/sched/signal.h>
#include <linux/rmap.h>
#include <linux/string_helpers.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/jhash.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>

#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/node.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_owner.h>
#include "internal.h"

int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];
/*
 * Minimum page order among possible hugepage sizes, set to a proper value
 * at boot time.
 */
static unsigned int minimum_order __read_mostly = UINT_MAX;

__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static unsigned long __initdata default_hstate_size;
static bool __initdata parsed_valid_hugepagesz = true;

/*
 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
 * free_huge_pages, and surplus_huge_pages.
 */
DEFINE_SPINLOCK(hugetlb_lock);

/*
 * Serializes faults on the same logical page.  This is used to
 * prevent spurious OOMs when the hugepage pool is fully utilized.
 */
static int num_fault_mutexes;
struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;

/* Forward declaration */
static int hugetlb_acct_memory(struct hstate *h, long delta);

static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
{
	bool free = (spool->count == 0) && (spool->used_hpages == 0);

	spin_unlock(&spool->lock);

	/* If no pages are used, and no other handles to the subpool
	 * remain, give up any reservations based on minimum size and
	 * free the subpool */
	if (free) {
		if (spool->min_hpages != -1)
			hugetlb_acct_memory(spool->hstate,
						-spool->min_hpages);
		kfree(spool);
	}
}

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages)
{
	struct hugepage_subpool *spool;

	spool = kzalloc(sizeof(*spool), GFP_KERNEL);
	if (!spool)
		return NULL;

	spin_lock_init(&spool->lock);
	spool->count = 1;
	spool->max_hpages = max_hpages;
	spool->hstate = h;
	spool->min_hpages = min_hpages;

	if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
		kfree(spool);
		return NULL;
	}
	spool->rsv_hpages = min_hpages;

	return spool;
}

void hugepage_put_subpool(struct hugepage_subpool *spool)
{
	spin_lock(&spool->lock);
	BUG_ON(!spool->count);
	spool->count--;
	unlock_or_release_subpool(spool);
}

/*
 * Subpool accounting for allocating and reserving pages.
 * Return -ENOMEM if there are not enough resources to satisfy the
 * request.  Otherwise, return the number of pages by which the
 * global pools must be adjusted (upward).  The returned value may
 * only be different than the passed value (delta) in the case where
 * a subpool minimum size must be maintained.
 */
static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
				       long delta)
{
	long ret = delta;

	if (!spool)
		return ret;

	spin_lock(&spool->lock);

	if (spool->max_hpages != -1) {		/* maximum size accounting */
		if ((spool->used_hpages + delta) <= spool->max_hpages)
			spool->used_hpages += delta;
		else {
			ret = -ENOMEM;
			goto unlock_ret;
		}
	}

	/* minimum size accounting */
	if (spool->min_hpages != -1 && spool->rsv_hpages) {
		if (delta > spool->rsv_hpages) {
			/*
			 * Asking for more reserves than those already taken on
			 * behalf of subpool.  Return difference.
			 */
			ret = delta - spool->rsv_hpages;
			spool->rsv_hpages = 0;
		} else {
			ret = 0;	/* reserves already accounted for */
			spool->rsv_hpages -= delta;
		}
	}

unlock_ret:
	spin_unlock(&spool->lock);
	return ret;
}

/*
 * Subpool accounting for freeing and unreserving pages.
 * Return the number of global page reservations that must be dropped.
 * The return value may only be different than the passed value (delta)
 * in the case where a subpool minimum size must be maintained.
 */
static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
				       long delta)
{
	long ret = delta;

	if (!spool)
		return delta;

	spin_lock(&spool->lock);

	if (spool->max_hpages != -1)		/* maximum size accounting */
		spool->used_hpages -= delta;

	/* minimum size accounting */
	if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
		if (spool->rsv_hpages + delta <= spool->min_hpages)
			ret = 0;
		else
			ret = spool->rsv_hpages + delta - spool->min_hpages;

		spool->rsv_hpages += delta;
		if (spool->rsv_hpages > spool->min_hpages)
			spool->rsv_hpages = spool->min_hpages;
	}

	/*
	 * If hugetlbfs_put_super couldn't free spool due to an outstanding
	 * quota reference, free it now.
	 */
	unlock_or_release_subpool(spool);

	return ret;
}

static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{
	return HUGETLBFS_SB(inode->i_sb)->spool;
}

static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{
	return subpool_inode(file_inode(vma->vm_file));
}
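/*
 * Illustrative example of the subpool accounting above (hypothetical
 * numbers, not taken from any particular caller): consider a subpool
 * created with max_hpages = 10 and min_hpages = 4.  hugepage_new_subpool()
 * charges 4 pages to the global reserve and sets rsv_hpages = 4.  A later
 * hugepage_subpool_get_pages(spool, 6) consumes those 4 outstanding
 * reserves and returns 2, the number of pages the caller must still
 * charge to the global pools.  A matching hugepage_subpool_put_pages(spool, 6)
 * refills rsv_hpages back up to min_hpages and returns 2, the number of
 * global reservations the caller should drop.
 */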
/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 *                    across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock.  The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map.  The from and to elements are huge page
 * indices into the associated mapping.  from indicates the starting index
 * of the region.  to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping.  It is important to note that the to element
 * represents the first element past the end of the region.  This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
};

/*
 * Add the huge page range represented by [f, t) to the reserve
 * map.  In the normal case, existing regions will be expanded
 * to accommodate the specified range.  Sufficient regions should
 * exist for expansion due to the previous call to region_chg
 * with the same range.  However, it is possible that region_del
 * could have been called after region_chg and modified the map
 * in such a way that no region exists to be expanded.  In this
 * case, pull a region descriptor from the cache associated with
 * the map and use that for the new range.
 *
 * Return the number of new huge pages added to the map.  This
 * number is greater than or equal to zero.
 */
static long region_add(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *nrg, *trg;
	long add = 0;

	spin_lock(&resv->lock);
	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/*
	 * If no region exists which can be expanded to include the
	 * specified range, the list must have been modified by an
	 * interleaving call to region_del().  Pull a region descriptor
	 * from the cache and use it for this range.
	 */
	if (&rg->link == head || t < rg->from) {
		VM_BUG_ON(resv->region_cache_count <= 0);

		resv->region_cache_count--;
		nrg = list_first_entry(&resv->region_cache, struct file_region,
					link);
		list_del(&nrg->link);

		nrg->from = f;
		nrg->to = t;
		list_add(&nrg->link, rg->link.prev);

		add += t - f;
		goto out_locked;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;

	/* Check for and consume any regions we now overlap with. */
	nrg = rg;
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			break;

		/* If this area reaches higher then extend our area to
		 * include it completely.  If this is not the first area
		 * which we intend to reuse, free it. */
		if (rg->to > t)
			t = rg->to;
		if (rg != nrg) {
			/* Decrement return value by the deleted range.
			 * Another range will span this area so that by
			 * end of routine add will be >= zero
			 */
			add -= (rg->to - rg->from);
			list_del(&rg->link);
			kfree(rg);
		}
	}

	add += (nrg->from - f);		/* Added to beginning of region */
	nrg->from = f;
	add += t - nrg->to;		/* Added to end of region */
	nrg->to = t;

out_locked:
	resv->adds_in_progress--;
	spin_unlock(&resv->lock);
	VM_BUG_ON(add < 0);
	return add;
}
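/*
 * Worked example of the reserve map arithmetic (hypothetical indices,
 * ignoring the region_chg()/region_abort() bookkeeping): starting from an
 * empty map, region_add(resv, 0, 4) records [0, 4) and returns 4.  A later
 * region_add(resv, 2, 6) expands that region to [0, 6) and returns 2, since
 * only indices 4 and 5 are new.  region_count() (defined later in this
 * file) over [3, 10) then reports 3, the overlap with [3, 6).
 */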
/*
 * Examine the existing reserve map and determine how many
 * huge pages in the specified range [f, t) are NOT currently
 * represented.  This routine is called before a subsequent
 * call to region_add that will actually modify the reserve
 * map to add the specified range [f, t).  region_chg does
 * not change the number of huge pages represented by the
 * map.  However, if the existing regions in the map can not
 * be expanded to represent the new range, a new file_region
 * structure is added to the map as a placeholder.  This is
 * so that the subsequent region_add call will have all the
 * regions it needs and will not fail.
 *
 * Upon entry, region_chg will also examine the cache of region descriptors
 * associated with the map.  If there are not enough descriptors cached, one
 * will be allocated for the in progress add operation.
 *
 * Returns the number of huge pages that need to be added to the existing
 * reservation map for the range [f, t).  This number is greater or equal to
 * zero.  -ENOMEM is returned if a new file_region structure or cache entry
 * is needed and can not be allocated.
 */
static long region_chg(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *nrg = NULL;
	long chg = 0;

retry:
	spin_lock(&resv->lock);
retry_locked:
	resv->adds_in_progress++;

	/*
	 * Check for sufficient descriptors in the cache to accommodate
	 * the number of in progress add operations.
	 */
	if (resv->adds_in_progress > resv->region_cache_count) {
		struct file_region *trg;

		VM_BUG_ON(resv->adds_in_progress - resv->region_cache_count > 1);
		/* Must drop lock to allocate a new descriptor. */
		resv->adds_in_progress--;
		spin_unlock(&resv->lock);

		trg = kmalloc(sizeof(*trg), GFP_KERNEL);
		if (!trg) {
			kfree(nrg);
			return -ENOMEM;
		}

		spin_lock(&resv->lock);
		list_add(&trg->link, &resv->region_cache);
		resv->region_cache_count++;
		goto retry_locked;
	}

	/* Locate the region we are before or in. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* If we are below the current region then a new region is required.
	 * Subtle, allocate a new region at the position but make it zero
	 * size such that we can guarantee to record the reservation. */
	if (&rg->link == head || t < rg->from) {
		if (!nrg) {
			resv->adds_in_progress--;
			spin_unlock(&resv->lock);
			nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
			if (!nrg)
				return -ENOMEM;

			nrg->from = f;
			nrg->to = f;
			INIT_LIST_HEAD(&nrg->link);
			goto retry;
		}

		list_add(&nrg->link, rg->link.prev);
		chg = t - f;
		goto out_nrg;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;
	chg = t - f;

	/* Check for and consume any regions we now overlap with. */
	list_for_each_entry(rg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			goto out;

		/* We overlap with this area, if it extends further than
		 * us then we must extend ourselves.  Account for its
		 * existing reservation. */
		if (rg->to > t) {
			chg += rg->to - t;
			t = rg->to;
		}
		chg -= rg->to - rg->from;
	}

out:
	spin_unlock(&resv->lock);
	/* We already know we raced and no longer need the new region */
	kfree(nrg);
	return chg;
out_nrg:
	spin_unlock(&resv->lock);
	return chg;
}

/*
 * Abort the in progress add operation.  The adds_in_progress field
 * of the resv_map keeps track of the operations in progress between
 * calls to region_chg and region_add.  Operations are sometimes
 * aborted after the call to region_chg.  In such cases, region_abort
 * is called to decrement the adds_in_progress counter.
 *
 * NOTE: The range arguments [f, t) are not needed or used in this
 * routine.  They are kept to make reading the calling code easier as
 * arguments will match the associated region_chg call.
 */
static void region_abort(struct resv_map *resv, long f, long t)
{
	spin_lock(&resv->lock);
	VM_BUG_ON(!resv->region_cache_count);
	resv->adds_in_progress--;
	spin_unlock(&resv->lock);
}

/*
 * Delete the specified range [f, t) from the reserve map.  If the
 * t parameter is LONG_MAX, this indicates that ALL regions after f
 * should be deleted.  Locate the regions which intersect [f, t)
 * and either trim, delete or split the existing regions.
 *
 * Returns the number of huge pages deleted from the reserve map.
 * In the normal case, the return value is zero or more.  In the
 * case where a region must be split, a new region descriptor must
 * be allocated.  If the allocation fails, -ENOMEM will be returned.
 * NOTE: If the parameter t == LONG_MAX, then we will never split
 * a region and possibly return -ENOMEM.  Callers specifying
 * t == LONG_MAX do not need to check for -ENOMEM error.
 */
static long region_del(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *trg;
	struct file_region *nrg = NULL;
	long del = 0;

retry:
	spin_lock(&resv->lock);
	list_for_each_entry_safe(rg, trg, head, link) {
		/*
		 * Skip regions before the range to be deleted.  file_region
		 * ranges are normally of the form [from, to).  However, there
		 * may be a "placeholder" entry in the map which is of the form
		 * (from, to) with from == to.  Check for placeholder entries
		 * at the beginning of the range to be deleted.
		 */
		if (rg->to <= f && (rg->to != rg->from || rg->to != f))
			continue;

		if (rg->from >= t)
			break;

		if (f > rg->from && t < rg->to) { /* Must split region */
			/*
			 * Check for an entry in the cache before dropping
			 * lock and attempting allocation.
			 */
			if (!nrg &&
			    resv->region_cache_count > resv->adds_in_progress) {
				nrg = list_first_entry(&resv->region_cache,
							struct file_region,
							link);
				list_del(&nrg->link);
				resv->region_cache_count--;
			}

			if (!nrg) {
				spin_unlock(&resv->lock);
				nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
				if (!nrg)
					return -ENOMEM;
				goto retry;
			}

			del += t - f;

			/* New entry for end of split region */
			nrg->from = t;
			nrg->to = rg->to;
			INIT_LIST_HEAD(&nrg->link);

			/* Original entry is trimmed */
			rg->to = f;

			list_add(&nrg->link, &rg->link);
			nrg = NULL;
			break;
		}

		if (f <= rg->from && t >= rg->to) { /* Remove entire region */
			del += rg->to - rg->from;
			list_del(&rg->link);
			kfree(rg);
			continue;
		}

		if (f <= rg->from) {	/* Trim beginning of region */
			del += t - rg->from;
			rg->from = t;
		} else {		/* Trim end of region */
			del += rg->to - f;
			rg->to = f;
		}
	}

	spin_unlock(&resv->lock);
	kfree(nrg);
	return del;
}
/*
 * A rare out of memory error was encountered which prevented removal of
 * the reserve map region for a page.  The huge page itself was freed
 * and removed from the page cache.  This routine will adjust the subpool
 * usage count, and the global reserve count if needed.  By incrementing
 * these counts, the reserve map entry which could not be deleted will
 * appear as a "reserved" entry instead of simply dangling with incorrect
 * counts.
 */
void hugetlb_fix_reserve_counts(struct inode *inode)
{
	struct hugepage_subpool *spool = subpool_inode(inode);
	long rsv_adjust;

	rsv_adjust = hugepage_subpool_get_pages(spool, 1);
	if (rsv_adjust) {
		struct hstate *h = hstate_inode(inode);

		hugetlb_acct_memory(h, 1);
	}
}

/*
 * Count and return the number of huge pages in the reserve map
 * that intersect with the range [f, t).
 */
static long region_count(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg;
	long chg = 0;

	spin_lock(&resv->lock);
	/* Locate each segment we overlap with, and count that overlap. */
	list_for_each_entry(rg, head, link) {
		long seg_from;
		long seg_to;

		if (rg->to <= f)
			continue;
		if (rg->from >= t)
			break;

		seg_from = max(rg->from, f);
		seg_to = min(rg->to, t);

		chg += seg_to - seg_from;
	}
	spin_unlock(&resv->lock);

	return chg;
}

/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	return ((address - vma->vm_start) >> huge_page_shift(h)) +
			(vma->vm_pgoff >> huge_page_order(h));
}

pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address)
{
	return vma_hugecache_offset(hstate_vma(vma), vma, address);
}
EXPORT_SYMBOL_GPL(linear_hugepage_index);

/*
 * Return the size of the pages allocated when backing a VMA.  In the majority
 * of cases this will be the same size as used by the page table entries.
 */
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	if (vma->vm_ops && vma->vm_ops->pagesize)
		return vma->vm_ops->pagesize(vma);
	return PAGE_SIZE;
}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);

/*
 * Return the page size being used by the MMU to back a VMA.  In the majority
 * of cases, the page size used by the kernel matches the MMU size.  On
 * architectures where it differs, an architecture-specific 'strong'
 * version of this symbol is required.
 */
__weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return vma_kernel_pagesize(vma);
}
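/*
 * Worked example for vma_hugecache_offset() above (illustrative numbers
 * only): with 2 MB huge pages (huge_page_shift() == 21), a VMA starting at
 * 0x40000000 with vm_pgoff == 0, and address == 0x40400000, the result is
 * (0x400000 >> 21) + 0 == 2, i.e. the third huge page of the backing file.
 */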
/*
 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping.  Only the process that called mmap()
 * is guaranteed to have their future faults succeed.
 *
 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held.  It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping.  A shared mapping has a region map associated
 * with the underlying file, this region map represents the backing file
 * pages which have ever had a reservation assigned which persists even
 * after the page is instantiated.  A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it, this region map represents those offsets which have consumed
 * reservation, i.e. where pages have been instantiated.
 */
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
	return (unsigned long)vma->vm_private_data;
}

static void set_vma_private_data(struct vm_area_struct *vma,
							unsigned long value)
{
	vma->vm_private_data = (void *)value;
}
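/*
 * For a MAP_PRIVATE mapping, vm_private_data therefore holds the resv_map
 * pointer with the HPAGE_RESV_* flags folded into its low bits; the pointer
 * comes from kmalloc() and is sufficiently aligned that those bits are
 * otherwise zero.  For example, a mapping that owns its reserves stores
 * (unsigned long)resv_map | HPAGE_RESV_OWNER, and vma_resv_map() below
 * recovers the pointer by masking with ~HPAGE_RESV_MASK.
 */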
struct resv_map *resv_map_alloc(void)
{
	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
	struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);

	if (!resv_map || !rg) {
		kfree(resv_map);
		kfree(rg);
		return NULL;
	}

	kref_init(&resv_map->refs);
	spin_lock_init(&resv_map->lock);
	INIT_LIST_HEAD(&resv_map->regions);

	resv_map->adds_in_progress = 0;

	INIT_LIST_HEAD(&resv_map->region_cache);
	list_add(&rg->link, &resv_map->region_cache);
	resv_map->region_cache_count = 1;

	return resv_map;
}

void resv_map_release(struct kref *ref)
{
	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
	struct list_head *head = &resv_map->region_cache;
	struct file_region *rg, *trg;

	/* Clear out any active regions before we release the map. */
	region_del(resv_map, 0, LONG_MAX);

	/* ... and any entries left in the cache */
	list_for_each_entry_safe(rg, trg, head, link) {
		list_del(&rg->link);
		kfree(rg);
	}

	VM_BUG_ON(resv_map->adds_in_progress);

	kfree(resv_map);
}

static inline struct resv_map *inode_resv_map(struct inode *inode)
{
	return inode->i_mapping->private_data;
}

static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	if (vma->vm_flags & VM_MAYSHARE) {
		struct address_space *mapping = vma->vm_file->f_mapping;
		struct inode *inode = mapping->host;

		return inode_resv_map(inode);

	} else {
		return (struct resv_map *)(get_vma_private_data(vma) &
							~HPAGE_RESV_MASK);
	}
}

static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, (get_vma_private_data(vma) &
				HPAGE_RESV_MASK) | (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);

	return (get_vma_private_data(vma) & flag) != 0;
}

/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	if (!(vma->vm_flags & VM_MAYSHARE))
		vma->vm_private_data = (void *)0;
}
/* Returns true if the VMA has associated reserve pages */
static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
{
	if (vma->vm_flags & VM_NORESERVE) {
		/*
		 * This address is already reserved by other process(chg == 0),
		 * so, we should decrement reserved count.  Without decrementing,
		 * reserve count remains after releasing inode, because this
		 * allocated page will go into page cache and is regarded as
		 * coming from reserved pool in releasing step.  Currently, we
		 * don't have any other solution to deal with this situation
		 * properly, so add work-around here.
		 */
		if (vma->vm_flags & VM_MAYSHARE && chg == 0)
			return true;
		else
			return false;
	}

	/* Shared mappings always use reserves */
	if (vma->vm_flags & VM_MAYSHARE) {
		/*
		 * We know VM_NORESERVE is not set.  Therefore, there SHOULD
		 * be a region map for all pages.  The only situation where
		 * there is no region map is if a hole was punched via
		 * fallocate.  In this case, there really are no reserves to
		 * use.  This situation is indicated if chg != 0.
		 */
		if (chg)
			return false;
		else
			return true;
	}

	/*
	 * Only the process that called mmap() has reserves for
	 * private mappings.
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		/*
		 * Like the shared case above, a hole punch or truncate
		 * could have been performed on the private mapping.
		 * Examine the value of chg to determine if reserves
		 * actually exist or were previously consumed.
		 * Very Subtle - The value of chg comes from a previous
		 * call to vma_needs_reserves().  The reserve map for
		 * private mappings has different (opposite) semantics
		 * than that of shared mappings.  vma_needs_reserves()
		 * has already taken this difference in semantics into
		 * account.  Therefore, the meaning of chg is the same
		 * as in the shared case above.  Code could easily be
		 * combined, but keeping it separate draws attention to
		 * subtle differences.
		 */
		if (chg)
			return false;
		else
			return true;
	}

	return false;
}

static void enqueue_huge_page(struct hstate *h, struct page *page)
{
	int nid = page_to_nid(page);
	list_move(&page->lru, &h->hugepage_freelists[nid]);
	h->free_huge_pages++;
	h->free_huge_pages_node[nid]++;
}

static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
{
	struct page *page;

	list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
		if (!PageHWPoison(page))
			break;
	/*
	 * if 'non-isolated free hugepage' not found on the list,
	 * the allocation fails.
	 */
	if (&h->hugepage_freelists[nid] == &page->lru)
		return NULL;
	list_move(&page->lru, &h->hugepage_activelist);
	set_page_refcounted(page);
	h->free_huge_pages--;
	h->free_huge_pages_node[nid]--;
	return page;
}

static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,
		nodemask_t *nmask)
{
	unsigned int cpuset_mems_cookie;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;
	int node = -1;

	zonelist = node_zonelist(nid, gfp_mask);

retry_cpuset:
	cpuset_mems_cookie = read_mems_allowed_begin();
	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
		struct page *page;

		if (!cpuset_zone_allowed(zone, gfp_mask))
			continue;
		/*
		 * no need to ask again on the same node.  Pool is node rather than
		 * zone aware
		 */
		if (zone_to_nid(zone) == node)
			continue;
		node = zone_to_nid(zone);

		page = dequeue_huge_page_node_exact(h, node);
		if (page)
			return page;
	}
	if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
		goto retry_cpuset;

	return NULL;
}

/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	if (hugepage_migration_supported(h))
		return GFP_HIGHUSER_MOVABLE;
	else
		return GFP_HIGHUSER;
}

static struct page *dequeue_huge_page_vma(struct hstate *h,
				struct vm_area_struct *vma,
				unsigned long address, int avoid_reserve,
				long chg)
{
	struct page *page;
	struct mempolicy *mpol;
	gfp_t gfp_mask;
	nodemask_t *nodemask;
	int nid;

	/*
	 * A child process with MAP_PRIVATE mappings created by their parent
	 * has no page reserves.  This check ensures that reservations are
	 * not "stolen".  The child may still get SIGKILLed
	 */
	if (!vma_has_reserves(vma, chg) &&
			h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

	/* If reserves cannot be used, ensure enough pages are in the pool */
	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

	gfp_mask = htlb_alloc_mask(h);
	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
	page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
	if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
		SetPagePrivate(page);
		h->resv_huge_pages--;
	}

	mpol_cond_put(mpol);
	return page;

err:
	return NULL;
}

/*
 * common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 * be outside of *nodes_allowed.  Ensure that we use an allowed
 * node for alloc or free.
 */
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	nid = next_node_in(nid, *nodes_allowed);
	VM_BUG_ON(nid >= MAX_NUMNODES);

	return nid;
}

static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	if (!node_isset(nid, *nodes_allowed))
		nid = next_node_allowed(nid, nodes_allowed);
	return nid;
}

/*
 * returns the previously saved node ["this node"] from which to
 * allocate a persistent huge page for the pool and advance the
 * next node from which to allocate, handling wrap at end of node
 * mask.
 */
static int hstate_next_node_to_alloc(struct hstate *h,
					nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);

	return nid;
}
/*
 * helper for free_pool_huge_page() - return the previously saved
 * node ["this node"] from which to free a huge page.  Advance the
 * next node id whether or not we find a free huge page to free so
 * that the next attempt to free addresses the next node.
 */
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);

	return nid;
}

#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_alloc(hs, mask)) || 1);	\
		nr_nodes--)

#define for_each_node_mask_to_free(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
		nr_nodes--)

#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
static void destroy_compound_gigantic_page(struct page *page,
					unsigned int order)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p = page + 1;

	atomic_set(compound_mapcount_ptr(page), 0);
	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
		clear_compound_head(p);
		set_page_refcounted(p);
	}

	set_compound_order(page, 0);
	__ClearPageHead(page);
}

static void free_gigantic_page(struct page *page, unsigned int order)
{
	free_contig_range(page_to_pfn(page), 1 << order);
}

static int __alloc_gigantic_page(unsigned long start_pfn,
				unsigned long nr_pages, gfp_t gfp_mask)
{
	unsigned long end_pfn = start_pfn + nr_pages;
	return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
				  gfp_mask);
}

static bool pfn_range_valid_gigantic(struct zone *z,
			unsigned long start_pfn, unsigned long nr_pages)
{
	unsigned long i, end_pfn = start_pfn + nr_pages;
	struct page *page;

	for (i = start_pfn; i < end_pfn; i++) {
		if (!pfn_valid(i))
			return false;

		page = pfn_to_page(i);

		if (page_zone(page) != z)
			return false;

		if (PageReserved(page))
			return false;

		if (page_count(page) > 0)
			return false;

		if (PageHuge(page))
			return false;
	}

	return true;
}

static bool zone_spans_last_pfn(const struct zone *zone,
			unsigned long start_pfn, unsigned long nr_pages)
{
	unsigned long last_pfn = start_pfn + nr_pages - 1;
	return zone_spans_pfn(zone, last_pfn);
}

static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
		int nid, nodemask_t *nodemask)
{
	unsigned int order = huge_page_order(h);
	unsigned long nr_pages = 1 << order;
	unsigned long ret, pfn, flags;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;

	zonelist = node_zonelist(nid, gfp_mask);
	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nodemask) {
		spin_lock_irqsave(&zone->lock, flags);

		pfn = ALIGN(zone->zone_start_pfn, nr_pages);
		while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
			if (pfn_range_valid_gigantic(zone, pfn, nr_pages)) {
				/*
				 * We release the zone lock here because
				 * alloc_contig_range() will also lock the zone
				 * at some point.  If there's an allocation
				 * spinning on this lock, it may win the race
				 * and cause alloc_contig_range() to fail...
				 */
				spin_unlock_irqrestore(&zone->lock, flags);
				ret = __alloc_gigantic_page(pfn, nr_pages, gfp_mask);
				if (!ret)
					return pfn_to_page(pfn);
				spin_lock_irqsave(&zone->lock, flags);
			}
			pfn += nr_pages;
		}

		spin_unlock_irqrestore(&zone->lock, flags);
	}

	return NULL;
}

static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
static void prep_compound_gigantic_page(struct page *page, unsigned int order);

#else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
static inline bool gigantic_page_supported(void) { return false; }
static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
		int nid, nodemask_t *nodemask) { return NULL; }
static inline void free_gigantic_page(struct page *page, unsigned int order) { }
static inline void destroy_compound_gigantic_page(struct page *page,
						unsigned int order) { }
#endif

static void update_and_free_page(struct hstate *h, struct page *page)
{
	int i;

	if (hstate_is_gigantic(h) && !gigantic_page_supported())
		return;

	h->nr_huge_pages--;
	h->nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < pages_per_huge_page(h); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
				1 << PG_referenced | 1 << PG_dirty |
				1 << PG_active | 1 << PG_private |
				1 << PG_writeback);
	}
	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
	set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
	set_page_refcounted(page);
	if (hstate_is_gigantic(h)) {
		destroy_compound_gigantic_page(page, huge_page_order(h));
		free_gigantic_page(page, huge_page_order(h));
	} else {
		__free_pages(page, huge_page_order(h));
	}
}

struct hstate *size_to_hstate(unsigned long size)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (huge_page_size(h) == size)
			return h;
	}
	return NULL;
}

/*
 * Test to determine whether the hugepage is "active/in-use" (i.e. being linked
 * to hstate->hugepage_activelist.)
 *
 * This function can be called for tail pages, but never returns true for them.
 */
bool page_huge_active(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return PageHead(page) && PagePrivate(&page[1]);
}

/* never called for tail page */
static void set_page_huge_active(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
	SetPagePrivate(&page[1]);
}

static void clear_page_huge_active(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
	ClearPagePrivate(&page[1]);
}

/*
 * Internal hugetlb specific page flag.  Do not use outside of the hugetlb
 * code
 */
static inline bool PageHugeTemporary(struct page *page)
{
	if (!PageHuge(page))
		return false;

	return (unsigned long)page[2].mapping == -1U;
}

static inline void SetPageHugeTemporary(struct page *page)
{
	page[2].mapping = (void *)-1U;
}

static inline void ClearPageHugeTemporary(struct page *page)
{
	page[2].mapping = NULL;
}
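/*
 * The "temporary" state is encoded by storing -1 in page[2].mapping of the
 * compound page, a field that is otherwise unused for hugetlb pages.  A page
 * marked this way (for example, one allocated for migration) is released
 * back to the buddy allocator by free_huge_page() below instead of being
 * returned to the hugetlb pool.
 */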
void free_huge_page(struct page *page)
{
	/*
	 * Can't pass hstate in here because it is called from the
	 * compound page destructor.
	 */
	struct hstate *h = page_hstate(page);
	int nid = page_to_nid(page);
	struct hugepage_subpool *spool =
		(struct hugepage_subpool *)page_private(page);
	bool restore_reserve;

	set_page_private(page, 0);
	page->mapping = NULL;
	VM_BUG_ON_PAGE(page_count(page), page);
	VM_BUG_ON_PAGE(page_mapcount(page), page);
	restore_reserve = PagePrivate(page);
	ClearPagePrivate(page);

	/*
	 * A return code of zero implies that the subpool will be under its
	 * minimum size if the reservation is not restored after page is free.
	 * Therefore, force restore_reserve operation.
	 */
	if (hugepage_subpool_put_pages(spool, 1) == 0)
		restore_reserve = true;

	spin_lock(&hugetlb_lock);
	clear_page_huge_active(page);
	hugetlb_cgroup_uncharge_page(hstate_index(h),
				     pages_per_huge_page(h), page);
	if (restore_reserve)
		h->resv_huge_pages++;

	if (PageHugeTemporary(page)) {
		list_del(&page->lru);
		ClearPageHugeTemporary(page);
		update_and_free_page(h, page);
	} else if (h->surplus_huge_pages_node[nid]) {
		/* remove the page from active list */
		list_del(&page->lru);
		update_and_free_page(h, page);
		h->surplus_huge_pages--;
		h->surplus_huge_pages_node[nid]--;
	} else {
		arch_clear_hugepage_flags(page);
		enqueue_huge_page(h, page);
	}
	spin_unlock(&hugetlb_lock);
}

static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
{
	INIT_LIST_HEAD(&page->lru);
	set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
	spin_lock(&hugetlb_lock);
	set_hugetlb_cgroup(page, NULL);
	h->nr_huge_pages++;
	h->nr_huge_pages_node[nid]++;
	spin_unlock(&hugetlb_lock);
}

static void prep_compound_gigantic_page(struct page *page, unsigned int order)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p = page + 1;

	/* we rely on prep_new_huge_page to set the destructor */
	set_compound_order(page, order);
	__ClearPageReserved(page);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
		/*
		 * For gigantic hugepages allocated through bootmem at
		 * boot, it's safer to be consistent with the not-gigantic
		 * hugepages and clear the PG_reserved bit from all tail pages
		 * too.  Otherwise drivers using get_user_pages() to access tail
		 * pages may get the reference counting wrong if they see
		 * PG_reserved set on a tail page (despite the head page not
		 * having PG_reserved set).  Enforcing this consistency between
		 * head and tail pages allows drivers to optimize away a check
		 * on the head page when they need to know if put_page() is
		 * needed after get_user_pages().
		 */
		__ClearPageReserved(p);
		set_page_count(p, 0);
		set_compound_head(p, page);
	}
	atomic_set(compound_mapcount_ptr(page), -1);
}

/*
 * PageHuge() only returns true for hugetlbfs pages, but not for normal or
 * transparent huge pages.  See the PageTransHuge() documentation for more
 * details.
 */
int PageHuge(struct page *page)
{
	if (!PageCompound(page))
		return 0;

	page = compound_head(page);
	return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
}
EXPORT_SYMBOL_GPL(PageHuge);

/*
 * PageHeadHuge() only returns true for hugetlbfs head page, but not for
 * normal or transparent huge pages.
 */
int PageHeadHuge(struct page *page_head)
{
	if (!PageHead(page_head))
		return 0;

	return get_compound_page_dtor(page_head) == free_huge_page;
}

pgoff_t __basepage_index(struct page *page)
{
	struct page *page_head = compound_head(page);
	pgoff_t index = page_index(page_head);
	unsigned long compound_idx;

	if (!PageHuge(page_head))
		return page_index(page);

	if (compound_order(page_head) >= MAX_ORDER)
		compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
	else
		compound_idx = page - page_head;

	return (index << compound_order(page_head)) + compound_idx;
}

static struct page *alloc_buddy_huge_page(struct hstate *h,
		gfp_t gfp_mask, int nid, nodemask_t *nmask)
{
	int order = huge_page_order(h);
	struct page *page;

	gfp_mask |= __GFP_COMP|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();
	page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
	if (page)
		__count_vm_event(HTLB_BUDDY_PGALLOC);
	else
		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);

	return page;
}

/*
 * Common helper to allocate a fresh hugetlb page.  All specific allocators
 * should use this function to get new hugetlb pages
 */
static struct page *alloc_fresh_huge_page(struct hstate *h,
		gfp_t gfp_mask, int nid, nodemask_t *nmask)
{
	struct page *page;

	if (hstate_is_gigantic(h))
		page = alloc_gigantic_page(h, gfp_mask, nid, nmask);
	else
		page = alloc_buddy_huge_page(h, gfp_mask,
				nid, nmask);
	if (!page)
		return NULL;

	if (hstate_is_gigantic(h))
		prep_compound_gigantic_page(page, huge_page_order(h));
	prep_new_huge_page(h, page, page_to_nid(page));

	return page;
}

/*
 * Allocates a fresh page to the hugetlb allocator pool in the node interleaved
 * manner.
 */
static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
{
	struct page *page;
	int nr_nodes, node;
	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;

	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
		page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed);
		if (page)
			break;
	}

	if (!page)
		return 0;

	put_page(page); /* free it into the hugepage allocator */

	return 1;
}
/*
 * Free huge page from pool from next node to free.
 * Attempt to keep persistent huge pages more or less
 * balanced over allowed nodes.
 * Called with hugetlb_lock locked.
 */
static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
							 bool acct_surplus)
{
	int nr_nodes, node;
	int ret = 0;

	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
		/*
		 * If we're returning unused surplus pages, only examine
		 * nodes with surplus pages.
		 */
		if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
		    !list_empty(&h->hugepage_freelists[node])) {
			struct page *page =
				list_entry(h->hugepage_freelists[node].next,
					  struct page, lru);
			list_del(&page->lru);
			h->free_huge_pages--;
			h->free_huge_pages_node[node]--;
			if (acct_surplus) {
				h->surplus_huge_pages--;
				h->surplus_huge_pages_node[node]--;
			}
			update_and_free_page(h, page);
			ret = 1;
			break;
		}
	}

	return ret;
}

/*
 * Dissolve a given free hugepage into free buddy pages.  This function does
 * nothing for in-use (including surplus) hugepages.  Returns -EBUSY if the
 * dissolution fails because a given page is not a free hugepage, or because
 * free hugepages are fully reserved.
 */
int dissolve_free_huge_page(struct page *page)
{
	int rc = -EBUSY;

	spin_lock(&hugetlb_lock);
	if (PageHuge(page) && !page_count(page)) {
		struct page *head = compound_head(page);
		struct hstate *h = page_hstate(head);
		int nid = page_to_nid(head);
		if (h->free_huge_pages - h->resv_huge_pages == 0)
			goto out;
		/*
		 * Move PageHWPoison flag from head page to the raw error page,
		 * which makes any subpages rather than the error page reusable.
		 */
		if (PageHWPoison(head) && page != head) {
			SetPageHWPoison(page);
			ClearPageHWPoison(head);
		}
		list_del(&head->lru);
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
		h->max_huge_pages--;
		update_and_free_page(h, head);
		rc = 0;
	}
out:
	spin_unlock(&hugetlb_lock);
	return rc;
}

/*
 * Dissolve free hugepages in a given pfn range.  Used by memory hotplug to
 * make specified memory blocks removable from the system.
 * Note that this will dissolve a free gigantic hugepage completely, if any
 * part of it lies within the given range.
 * Also note that if dissolve_free_huge_page() returns with an error, all
 * free hugepages that were dissolved before that error are lost.
 */
int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct page *page;
	int rc = 0;

	if (!hugepages_supported())
		return rc;

	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
		page = pfn_to_page(pfn);
		if (PageHuge(page) && !page_count(page)) {
			rc = dissolve_free_huge_page(page);
			if (rc)
				break;
		}
	}

	return rc;
}

/*
 * Allocates a fresh surplus page from the page allocator.
 */
static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
		int nid, nodemask_t *nmask)
{
	struct page *page = NULL;

	if (hstate_is_gigantic(h))
		return NULL;

	spin_lock(&hugetlb_lock);
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
		goto out_unlock;
	spin_unlock(&hugetlb_lock);

	page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask);
	if (!page)
		return NULL;

	spin_lock(&hugetlb_lock);
	/*
	 * We could have raced with the pool size change.
	 * Double check that and simply deallocate the new page
	 * if we would end up overcommitting the surpluses.  Abuse
	 * the temporary page to work around the nasty free_huge_page
	 * codeflow
	 */
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
		SetPageHugeTemporary(page);
		put_page(page);
		page = NULL;
	} else {
		h->surplus_huge_pages++;
		h->surplus_huge_pages_node[page_to_nid(page)]++;
	}

out_unlock:
	spin_unlock(&hugetlb_lock);

	return page;
}

static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
		int nid, nodemask_t *nmask)
{
	struct page *page;

	if (hstate_is_gigantic(h))
		return NULL;

	page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask);
	if (!page)
		return NULL;

	/*
	 * We do not account these pages as surplus because they are only
	 * temporary and will be released properly on the last reference
	 */
	SetPageHugeTemporary(page);

	return page;
}

/*
 * Use the VMA's mpolicy to allocate a huge page from the buddy.
 */
static
struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	struct mempolicy *mpol;
	gfp_t gfp_mask = htlb_alloc_mask(h);
	int nid;
	nodemask_t *nodemask;

	nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
	page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask);
	mpol_cond_put(mpol);

	return page;
}

/* page migration callback function */
struct page *alloc_huge_page_node(struct hstate *h, int nid)
{
	gfp_t gfp_mask = htlb_alloc_mask(h);
	struct page *page = NULL;

	if (nid != NUMA_NO_NODE)
		gfp_mask |= __GFP_THISNODE;

	spin_lock(&hugetlb_lock);
	if (h->free_huge_pages - h->resv_huge_pages > 0)
		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, NULL);
	spin_unlock(&hugetlb_lock);

	if (!page)
		page = alloc_migrate_huge_page(h, gfp_mask, nid, NULL);

	return page;
}

/* page migration callback function */
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
		nodemask_t *nmask)
{
	gfp_t gfp_mask = htlb_alloc_mask(h);

	spin_lock(&hugetlb_lock);
	if (h->free_huge_pages - h->resv_huge_pages > 0) {
		struct page *page;

		page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
		if (page) {
			spin_unlock(&hugetlb_lock);
			return page;
		}
	}
	spin_unlock(&hugetlb_lock);

	return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask);
}

/* mempolicy aware migration callback */
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
		unsigned long address)
{
	struct mempolicy *mpol;
	nodemask_t *nodemask;
	struct page *page;
	gfp_t gfp_mask;
	int node;

	gfp_mask = htlb_alloc_mask(h);
	node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
	page = alloc_huge_page_nodemask(h, node, nodemask);
	mpol_cond_put(mpol);

	return page;
}
/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.
 */
static int gather_surplus_pages(struct hstate *h, int delta)
{
	struct list_head surplus_list;
	struct page *page, *tmp;
	int ret, i;
	int needed, allocated;
	bool alloc_ok = true;

	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
	if (needed <= 0) {
		h->resv_huge_pages += delta;
		return 0;
	}

	allocated = 0;
	INIT_LIST_HEAD(&surplus_list);

	ret = -ENOMEM;
retry:
	spin_unlock(&hugetlb_lock);
	for (i = 0; i < needed; i++) {
		page = alloc_surplus_huge_page(h, htlb_alloc_mask(h),
				NUMA_NO_NODE, NULL);
		if (!page) {
			alloc_ok = false;
			break;
		}
		list_add(&page->lru, &surplus_list);
		cond_resched();
	}
	allocated += i;

	/*
	 * After retaking hugetlb_lock, we need to recalculate 'needed'
	 * because either resv_huge_pages or free_huge_pages may have changed.
	 */
	spin_lock(&hugetlb_lock);
	needed = (h->resv_huge_pages + delta) -
			(h->free_huge_pages + allocated);
	if (needed > 0) {
		if (alloc_ok)
			goto retry;
		/*
		 * We were not able to allocate enough pages to
		 * satisfy the entire reservation so we free what
		 * we've allocated so far.
		 */
		goto free;
	}
	/*
	 * The surplus_list now contains _at_least_ the number of extra pages
	 * needed to accommodate the reservation.  Add the appropriate number
	 * of pages to the hugetlb pool and free the extras back to the buddy
	 * allocator.  Commit the entire reservation here to prevent another
	 * process from stealing the pages as they are added to the pool but
	 * before they are reserved.
	 */
	needed += allocated;
	h->resv_huge_pages += delta;
	ret = 0;

	/* Free the needed pages to the hugetlb pool */
	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
		if ((--needed) < 0)
			break;
		/*
		 * This page is now managed by the hugetlb allocator and has
		 * no users -- drop the buddy allocator's reference.
		 */
		put_page_testzero(page);
		VM_BUG_ON_PAGE(page_count(page), page);
		enqueue_huge_page(h, page);
	}
free:
	spin_unlock(&hugetlb_lock);

	/* Free unnecessary surplus pages to the buddy allocator */
	list_for_each_entry_safe(page, tmp, &surplus_list, lru)
		put_page(page);
	spin_lock(&hugetlb_lock);

	return ret;
}

/*
 * This routine has two main purposes:
 * 1) Decrement the reservation count (resv_huge_pages) by the value passed
 *    in unused_resv_pages.  This corresponds to the prior adjustments made
 *    to the associated reservation map.
 * 2) Free any unused surplus pages that may have been allocated to satisfy
 *    the reservation.  As many as unused_resv_pages may be freed.
 *
 * Called with hugetlb_lock held.  However, the lock could be dropped (and
 * reacquired) during calls to cond_resched_lock.  Whenever dropping the lock,
 * we must make sure nobody else can claim pages we are in the process of
 * freeing.  Do this by ensuring resv_huge_pages is always greater than the
 * number of huge pages we plan to free when dropping the lock.
 */
static void return_unused_surplus_pages(struct hstate *h,
					unsigned long unused_resv_pages)
{
	unsigned long nr_pages;

	/* Cannot return gigantic pages currently */
	if (hstate_is_gigantic(h))
		goto out;

	/*
	 * Part (or even all) of the reservation could have been backed
	 * by pre-allocated pages.  Only free surplus pages.
	 */
	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);

	/*
	 * We want to release as many surplus pages as possible, spread
	 * evenly across all nodes with memory.  Iterate across these nodes
	 * until we can no longer free unreserved surplus pages.  This occurs
	 * when the nodes with surplus pages have no free pages.
	 * free_pool_huge_page() will balance the freed pages across the
	 * on-line nodes with memory and will handle the hstate accounting.
	 *
	 * Note that we decrement resv_huge_pages as we free the pages.  If
	 * we drop the lock, resv_huge_pages will still be sufficiently large
	 * to cover subsequent pages we may free.
	 */
	while (nr_pages--) {
		h->resv_huge_pages--;
		unused_resv_pages--;
		if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
			goto out;
		cond_resched_lock(&hugetlb_lock);
	}

out:
	/* Fully uncommit the reservation */
	h->resv_huge_pages -= unused_resv_pages;
}
1854 */ 1855 enum vma_resv_mode { 1856 VMA_NEEDS_RESV, 1857 VMA_COMMIT_RESV, 1858 VMA_END_RESV, 1859 VMA_ADD_RESV, 1860 }; 1861 static long __vma_reservation_common(struct hstate *h, 1862 struct vm_area_struct *vma, unsigned long addr, 1863 enum vma_resv_mode mode) 1864 { 1865 struct resv_map *resv; 1866 pgoff_t idx; 1867 long ret; 1868 1869 resv = vma_resv_map(vma); 1870 if (!resv) 1871 return 1; 1872 1873 idx = vma_hugecache_offset(h, vma, addr); 1874 switch (mode) { 1875 case VMA_NEEDS_RESV: 1876 ret = region_chg(resv, idx, idx + 1); 1877 break; 1878 case VMA_COMMIT_RESV: 1879 ret = region_add(resv, idx, idx + 1); 1880 break; 1881 case VMA_END_RESV: 1882 region_abort(resv, idx, idx + 1); 1883 ret = 0; 1884 break; 1885 case VMA_ADD_RESV: 1886 if (vma->vm_flags & VM_MAYSHARE) 1887 ret = region_add(resv, idx, idx + 1); 1888 else { 1889 region_abort(resv, idx, idx + 1); 1890 ret = region_del(resv, idx, idx + 1); 1891 } 1892 break; 1893 default: 1894 BUG(); 1895 } 1896 1897 if (vma->vm_flags & VM_MAYSHARE) 1898 return ret; 1899 else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) { 1900 /* 1901 * In most cases, reserves always exist for private mappings. 1902 * However, a file associated with mapping could have been 1903 * hole punched or truncated after reserves were consumed. 1904 * As subsequent fault on such a range will not use reserves. 1905 * Subtle - The reserve map for private mappings has the 1906 * opposite meaning than that of shared mappings. If NO 1907 * entry is in the reserve map, it means a reservation exists. 1908 * If an entry exists in the reserve map, it means the 1909 * reservation has already been consumed. As a result, the 1910 * return value of this routine is the opposite of the 1911 * value returned from reserve map manipulation routines above. 1912 */ 1913 if (ret) 1914 return 0; 1915 else 1916 return 1; 1917 } 1918 else 1919 return ret < 0 ? ret : 0; 1920 } 1921 1922 static long vma_needs_reservation(struct hstate *h, 1923 struct vm_area_struct *vma, unsigned long addr) 1924 { 1925 return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV); 1926 } 1927 1928 static long vma_commit_reservation(struct hstate *h, 1929 struct vm_area_struct *vma, unsigned long addr) 1930 { 1931 return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV); 1932 } 1933 1934 static void vma_end_reservation(struct hstate *h, 1935 struct vm_area_struct *vma, unsigned long addr) 1936 { 1937 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV); 1938 } 1939 1940 static long vma_add_reservation(struct hstate *h, 1941 struct vm_area_struct *vma, unsigned long addr) 1942 { 1943 return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV); 1944 } 1945 1946 /* 1947 * This routine is called to restore a reservation on error paths. In the 1948 * specific error paths, a huge page was allocated (via alloc_huge_page) 1949 * and is about to be freed. If a reservation for the page existed, 1950 * alloc_huge_page would have consumed the reservation and set PagePrivate 1951 * in the newly allocated page. When the page is freed via free_huge_page, 1952 * the global reservation count will be incremented if PagePrivate is set. 1953 * However, free_huge_page can not adjust the reserve map. Adjust the 1954 * reserve map here to be consistent with global reserve count adjustments 1955 * to be made by free_huge_page. 
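 *
 * In short, the cases handled below are: a failed reserve map lookup
 * (rc < 0) clears PagePrivate so free_huge_page will not bump the global
 * reserve count; a positive rc restores the reserve map entry via
 * vma_add_reservation; otherwise the pending map operation is simply
 * ended with vma_end_reservation.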
1956 */ 1957 static void restore_reserve_on_error(struct hstate *h, 1958 struct vm_area_struct *vma, unsigned long address, 1959 struct page *page) 1960 { 1961 if (unlikely(PagePrivate(page))) { 1962 long rc = vma_needs_reservation(h, vma, address); 1963 1964 if (unlikely(rc < 0)) { 1965 /* 1966 * Rare out of memory condition in reserve map 1967 * manipulation. Clear PagePrivate so that 1968 * global reserve count will not be incremented 1969 * by free_huge_page. This will make it appear 1970 * as though the reservation for this page was 1971 * consumed. This may prevent the task from 1972 * faulting in the page at a later time. This 1973 * is better than inconsistent global huge page 1974 * accounting of reserve counts. 1975 */ 1976 ClearPagePrivate(page); 1977 } else if (rc) { 1978 rc = vma_add_reservation(h, vma, address); 1979 if (unlikely(rc < 0)) 1980 /* 1981 * See above comment about rare out of 1982 * memory condition. 1983 */ 1984 ClearPagePrivate(page); 1985 } else 1986 vma_end_reservation(h, vma, address); 1987 } 1988 } 1989 1990 struct page *alloc_huge_page(struct vm_area_struct *vma, 1991 unsigned long addr, int avoid_reserve) 1992 { 1993 struct hugepage_subpool *spool = subpool_vma(vma); 1994 struct hstate *h = hstate_vma(vma); 1995 struct page *page; 1996 long map_chg, map_commit; 1997 long gbl_chg; 1998 int ret, idx; 1999 struct hugetlb_cgroup *h_cg; 2000 2001 idx = hstate_index(h); 2002 /* 2003 * Examine the region/reserve map to determine if the process 2004 * has a reservation for the page to be allocated. A return 2005 * code of zero indicates a reservation exists (no change). 2006 */ 2007 map_chg = gbl_chg = vma_needs_reservation(h, vma, addr); 2008 if (map_chg < 0) 2009 return ERR_PTR(-ENOMEM); 2010 2011 /* 2012 * Processes that did not create the mapping will have no 2013 * reserves as indicated by the region/reserve map. Check 2014 * that the allocation will not exceed the subpool limit. 2015 * Allocations for MAP_NORESERVE mappings also need to be 2016 * checked against any subpool limit. 2017 */ 2018 if (map_chg || avoid_reserve) { 2019 gbl_chg = hugepage_subpool_get_pages(spool, 1); 2020 if (gbl_chg < 0) { 2021 vma_end_reservation(h, vma, addr); 2022 return ERR_PTR(-ENOSPC); 2023 } 2024 2025 /* 2026 * Even though there was no reservation in the region/reserve 2027 * map, there could be reservations associated with the 2028 * subpool that can be used. This would be indicated if the 2029 * return value of hugepage_subpool_get_pages() is zero. 2030 * However, if avoid_reserve is specified we still avoid even 2031 * the subpool reservations. 2032 */ 2033 if (avoid_reserve) 2034 gbl_chg = 1; 2035 } 2036 2037 ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg); 2038 if (ret) 2039 goto out_subpool_put; 2040 2041 spin_lock(&hugetlb_lock); 2042 /* 2043 * glb_chg is passed to indicate whether or not a page must be taken 2044 * from the global free pool (global change). gbl_chg == 0 indicates 2045 * a reservation exists for the allocation. 
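 *
 * To recap the two values in play here: map_chg reflects the reserve map
 * (does this task hold a reservation for this page?), while gbl_chg
 * reflects the subpool/global accounting (must a page actually come out
 * of the global free pool?). They can differ when the subpool still holds
 * minimum-size reserves, as noted above.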
2046 */ 2047 page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg); 2048 if (!page) { 2049 spin_unlock(&hugetlb_lock); 2050 page = alloc_buddy_huge_page_with_mpol(h, vma, addr); 2051 if (!page) 2052 goto out_uncharge_cgroup; 2053 if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) { 2054 SetPagePrivate(page); 2055 h->resv_huge_pages--; 2056 } 2057 spin_lock(&hugetlb_lock); 2058 list_move(&page->lru, &h->hugepage_activelist); 2059 /* Fall through */ 2060 } 2061 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page); 2062 spin_unlock(&hugetlb_lock); 2063 2064 set_page_private(page, (unsigned long)spool); 2065 2066 map_commit = vma_commit_reservation(h, vma, addr); 2067 if (unlikely(map_chg > map_commit)) { 2068 /* 2069 * The page was added to the reservation map between 2070 * vma_needs_reservation and vma_commit_reservation. 2071 * This indicates a race with hugetlb_reserve_pages. 2072 * Adjust for the subpool count incremented above AND 2073 * in hugetlb_reserve_pages for the same page. Also, 2074 * the reservation count added in hugetlb_reserve_pages 2075 * no longer applies. 2076 */ 2077 long rsv_adjust; 2078 2079 rsv_adjust = hugepage_subpool_put_pages(spool, 1); 2080 hugetlb_acct_memory(h, -rsv_adjust); 2081 } 2082 return page; 2083 2084 out_uncharge_cgroup: 2085 hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg); 2086 out_subpool_put: 2087 if (map_chg || avoid_reserve) 2088 hugepage_subpool_put_pages(spool, 1); 2089 vma_end_reservation(h, vma, addr); 2090 return ERR_PTR(-ENOSPC); 2091 } 2092 2093 int alloc_bootmem_huge_page(struct hstate *h) 2094 __attribute__ ((weak, alias("__alloc_bootmem_huge_page"))); 2095 int __alloc_bootmem_huge_page(struct hstate *h) 2096 { 2097 struct huge_bootmem_page *m; 2098 int nr_nodes, node; 2099 2100 for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) { 2101 void *addr; 2102 2103 addr = memblock_alloc_try_nid_raw( 2104 huge_page_size(h), huge_page_size(h), 2105 0, MEMBLOCK_ALLOC_ACCESSIBLE, node); 2106 if (addr) { 2107 /* 2108 * Use the beginning of the huge page to store the 2109 * huge_bootmem_page struct (until gather_bootmem 2110 * puts them into the mem_map). 
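 * (The huge_bootmem_page struct carries the list linkage that
 * huge_boot_pages is threaded through; see gather_bootmem_prealloc()
 * below.)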
2111 */ 2112 m = addr; 2113 goto found; 2114 } 2115 } 2116 return 0; 2117 2118 found: 2119 BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h))); 2120 /* Put them into a private list first because mem_map is not up yet */ 2121 INIT_LIST_HEAD(&m->list); 2122 list_add(&m->list, &huge_boot_pages); 2123 m->hstate = h; 2124 return 1; 2125 } 2126 2127 static void __init prep_compound_huge_page(struct page *page, 2128 unsigned int order) 2129 { 2130 if (unlikely(order > (MAX_ORDER - 1))) 2131 prep_compound_gigantic_page(page, order); 2132 else 2133 prep_compound_page(page, order); 2134 } 2135 2136 /* Put bootmem huge pages into the standard lists after mem_map is up */ 2137 static void __init gather_bootmem_prealloc(void) 2138 { 2139 struct huge_bootmem_page *m; 2140 2141 list_for_each_entry(m, &huge_boot_pages, list) { 2142 struct page *page = virt_to_page(m); 2143 struct hstate *h = m->hstate; 2144 2145 WARN_ON(page_count(page) != 1); 2146 prep_compound_huge_page(page, h->order); 2147 WARN_ON(PageReserved(page)); 2148 prep_new_huge_page(h, page, page_to_nid(page)); 2149 put_page(page); /* free it into the hugepage allocator */ 2150 2151 /* 2152 * If we had gigantic hugepages allocated at boot time, we need 2153 * to restore the 'stolen' pages to totalram_pages in order to 2154 * fix confusing memory reports from free(1) and another 2155 * side-effects, like CommitLimit going negative. 2156 */ 2157 if (hstate_is_gigantic(h)) 2158 adjust_managed_page_count(page, 1 << h->order); 2159 cond_resched(); 2160 } 2161 } 2162 2163 static void __init hugetlb_hstate_alloc_pages(struct hstate *h) 2164 { 2165 unsigned long i; 2166 2167 for (i = 0; i < h->max_huge_pages; ++i) { 2168 if (hstate_is_gigantic(h)) { 2169 if (!alloc_bootmem_huge_page(h)) 2170 break; 2171 } else if (!alloc_pool_huge_page(h, 2172 &node_states[N_MEMORY])) 2173 break; 2174 cond_resched(); 2175 } 2176 if (i < h->max_huge_pages) { 2177 char buf[32]; 2178 2179 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); 2180 pr_warn("HugeTLB: allocating %lu of page size %s failed. 
Only allocated %lu hugepages.\n", 2181 h->max_huge_pages, buf, i); 2182 h->max_huge_pages = i; 2183 } 2184 } 2185 2186 static void __init hugetlb_init_hstates(void) 2187 { 2188 struct hstate *h; 2189 2190 for_each_hstate(h) { 2191 if (minimum_order > huge_page_order(h)) 2192 minimum_order = huge_page_order(h); 2193 2194 /* oversize hugepages were init'ed in early boot */ 2195 if (!hstate_is_gigantic(h)) 2196 hugetlb_hstate_alloc_pages(h); 2197 } 2198 VM_BUG_ON(minimum_order == UINT_MAX); 2199 } 2200 2201 static void __init report_hugepages(void) 2202 { 2203 struct hstate *h; 2204 2205 for_each_hstate(h) { 2206 char buf[32]; 2207 2208 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); 2209 pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n", 2210 buf, h->free_huge_pages); 2211 } 2212 } 2213 2214 #ifdef CONFIG_HIGHMEM 2215 static void try_to_free_low(struct hstate *h, unsigned long count, 2216 nodemask_t *nodes_allowed) 2217 { 2218 int i; 2219 2220 if (hstate_is_gigantic(h)) 2221 return; 2222 2223 for_each_node_mask(i, *nodes_allowed) { 2224 struct page *page, *next; 2225 struct list_head *freel = &h->hugepage_freelists[i]; 2226 list_for_each_entry_safe(page, next, freel, lru) { 2227 if (count >= h->nr_huge_pages) 2228 return; 2229 if (PageHighMem(page)) 2230 continue; 2231 list_del(&page->lru); 2232 update_and_free_page(h, page); 2233 h->free_huge_pages--; 2234 h->free_huge_pages_node[page_to_nid(page)]--; 2235 } 2236 } 2237 } 2238 #else 2239 static inline void try_to_free_low(struct hstate *h, unsigned long count, 2240 nodemask_t *nodes_allowed) 2241 { 2242 } 2243 #endif 2244 2245 /* 2246 * Increment or decrement surplus_huge_pages. Keep node-specific counters 2247 * balanced by operating on them in a round-robin fashion. 2248 * Returns 1 if an adjustment was made. 2249 */ 2250 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed, 2251 int delta) 2252 { 2253 int nr_nodes, node; 2254 2255 VM_BUG_ON(delta != -1 && delta != 1); 2256 2257 if (delta < 0) { 2258 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) { 2259 if (h->surplus_huge_pages_node[node]) 2260 goto found; 2261 } 2262 } else { 2263 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { 2264 if (h->surplus_huge_pages_node[node] < 2265 h->nr_huge_pages_node[node]) 2266 goto found; 2267 } 2268 } 2269 return 0; 2270 2271 found: 2272 h->surplus_huge_pages += delta; 2273 h->surplus_huge_pages_node[node] += delta; 2274 return 1; 2275 } 2276 2277 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages) 2278 static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count, 2279 nodemask_t *nodes_allowed) 2280 { 2281 unsigned long min_count, ret; 2282 2283 if (hstate_is_gigantic(h) && !gigantic_page_supported()) 2284 return h->max_huge_pages; 2285 2286 /* 2287 * Increase the pool size 2288 * First take pages out of surplus state. Then make up the 2289 * remaining difference by allocating fresh huge pages. 2290 * 2291 * We might race with alloc_surplus_huge_page() here and be unable 2292 * to convert a surplus huge page to a normal huge page. That is 2293 * not critical, though, it just means the overall size of the 2294 * pool might be one hugepage larger than it needs to be, but 2295 * within all the constraints specified by the sysctls. 
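 *
 * Illustrative walk-through (numbers are hypothetical): with
 * nr_huge_pages == 10 of which 2 are surplus (persistent_huge_pages() == 8),
 * a request for count == 15 first converts the two surplus pages into
 * persistent ones and then allocates five fresh huge pages via
 * alloc_pool_huge_page().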
2296 */ 2297 spin_lock(&hugetlb_lock); 2298 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) { 2299 if (!adjust_pool_surplus(h, nodes_allowed, -1)) 2300 break; 2301 } 2302 2303 while (count > persistent_huge_pages(h)) { 2304 /* 2305 * If this allocation races such that we no longer need the 2306 * page, free_huge_page will handle it by freeing the page 2307 * and reducing the surplus. 2308 */ 2309 spin_unlock(&hugetlb_lock); 2310 2311 /* yield cpu to avoid soft lockup */ 2312 cond_resched(); 2313 2314 ret = alloc_pool_huge_page(h, nodes_allowed); 2315 spin_lock(&hugetlb_lock); 2316 if (!ret) 2317 goto out; 2318 2319 /* Bail for signals. Probably ctrl-c from user */ 2320 if (signal_pending(current)) 2321 goto out; 2322 } 2323 2324 /* 2325 * Decrease the pool size 2326 * First return free pages to the buddy allocator (being careful 2327 * to keep enough around to satisfy reservations). Then place 2328 * pages into surplus state as needed so the pool will shrink 2329 * to the desired size as pages become free. 2330 * 2331 * By placing pages into the surplus state independent of the 2332 * overcommit value, we are allowing the surplus pool size to 2333 * exceed overcommit. There are few sane options here. Since 2334 * alloc_surplus_huge_page() is checking the global counter, 2335 * though, we'll note that we're not allowed to exceed surplus 2336 * and won't grow the pool anywhere else. Not until one of the 2337 * sysctls are changed, or the surplus pages go out of use. 2338 */ 2339 min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages; 2340 min_count = max(count, min_count); 2341 try_to_free_low(h, min_count, nodes_allowed); 2342 while (min_count < persistent_huge_pages(h)) { 2343 if (!free_pool_huge_page(h, nodes_allowed, 0)) 2344 break; 2345 cond_resched_lock(&hugetlb_lock); 2346 } 2347 while (count < persistent_huge_pages(h)) { 2348 if (!adjust_pool_surplus(h, nodes_allowed, 1)) 2349 break; 2350 } 2351 out: 2352 ret = persistent_huge_pages(h); 2353 spin_unlock(&hugetlb_lock); 2354 return ret; 2355 } 2356 2357 #define HSTATE_ATTR_RO(_name) \ 2358 static struct kobj_attribute _name##_attr = __ATTR_RO(_name) 2359 2360 #define HSTATE_ATTR(_name) \ 2361 static struct kobj_attribute _name##_attr = \ 2362 __ATTR(_name, 0644, _name##_show, _name##_store) 2363 2364 static struct kobject *hugepages_kobj; 2365 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; 2366 2367 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp); 2368 2369 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp) 2370 { 2371 int i; 2372 2373 for (i = 0; i < HUGE_MAX_HSTATE; i++) 2374 if (hstate_kobjs[i] == kobj) { 2375 if (nidp) 2376 *nidp = NUMA_NO_NODE; 2377 return &hstates[i]; 2378 } 2379 2380 return kobj_to_node_hstate(kobj, nidp); 2381 } 2382 2383 static ssize_t nr_hugepages_show_common(struct kobject *kobj, 2384 struct kobj_attribute *attr, char *buf) 2385 { 2386 struct hstate *h; 2387 unsigned long nr_huge_pages; 2388 int nid; 2389 2390 h = kobj_to_hstate(kobj, &nid); 2391 if (nid == NUMA_NO_NODE) 2392 nr_huge_pages = h->nr_huge_pages; 2393 else 2394 nr_huge_pages = h->nr_huge_pages_node[nid]; 2395 2396 return sprintf(buf, "%lu\n", nr_huge_pages); 2397 } 2398 2399 static ssize_t __nr_hugepages_store_common(bool obey_mempolicy, 2400 struct hstate *h, int nid, 2401 unsigned long count, size_t len) 2402 { 2403 int err; 2404 NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY); 2405 2406 if (hstate_is_gigantic(h) && 
!gigantic_page_supported()) { 2407 err = -EINVAL; 2408 goto out; 2409 } 2410 2411 if (nid == NUMA_NO_NODE) { 2412 /* 2413 * global hstate attribute 2414 */ 2415 if (!(obey_mempolicy && 2416 init_nodemask_of_mempolicy(nodes_allowed))) { 2417 NODEMASK_FREE(nodes_allowed); 2418 nodes_allowed = &node_states[N_MEMORY]; 2419 } 2420 } else if (nodes_allowed) { 2421 /* 2422 * per node hstate attribute: adjust count to global, 2423 * but restrict alloc/free to the specified node. 2424 */ 2425 count += h->nr_huge_pages - h->nr_huge_pages_node[nid]; 2426 init_nodemask_of_node(nodes_allowed, nid); 2427 } else 2428 nodes_allowed = &node_states[N_MEMORY]; 2429 2430 h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed); 2431 2432 if (nodes_allowed != &node_states[N_MEMORY]) 2433 NODEMASK_FREE(nodes_allowed); 2434 2435 return len; 2436 out: 2437 NODEMASK_FREE(nodes_allowed); 2438 return err; 2439 } 2440 2441 static ssize_t nr_hugepages_store_common(bool obey_mempolicy, 2442 struct kobject *kobj, const char *buf, 2443 size_t len) 2444 { 2445 struct hstate *h; 2446 unsigned long count; 2447 int nid; 2448 int err; 2449 2450 err = kstrtoul(buf, 10, &count); 2451 if (err) 2452 return err; 2453 2454 h = kobj_to_hstate(kobj, &nid); 2455 return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len); 2456 } 2457 2458 static ssize_t nr_hugepages_show(struct kobject *kobj, 2459 struct kobj_attribute *attr, char *buf) 2460 { 2461 return nr_hugepages_show_common(kobj, attr, buf); 2462 } 2463 2464 static ssize_t nr_hugepages_store(struct kobject *kobj, 2465 struct kobj_attribute *attr, const char *buf, size_t len) 2466 { 2467 return nr_hugepages_store_common(false, kobj, buf, len); 2468 } 2469 HSTATE_ATTR(nr_hugepages); 2470 2471 #ifdef CONFIG_NUMA 2472 2473 /* 2474 * hstate attribute for optionally mempolicy-based constraint on persistent 2475 * huge page alloc/free. 
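 *
 * For example (illustrative), writing to
 * /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages_mempolicy from a
 * task whose mempolicy restricts it to a set of nodes only adjusts the pool
 * on those nodes, whereas plain nr_hugepages ignores the writer's mempolicy.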
2476 */ 2477 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj, 2478 struct kobj_attribute *attr, char *buf) 2479 { 2480 return nr_hugepages_show_common(kobj, attr, buf); 2481 } 2482 2483 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj, 2484 struct kobj_attribute *attr, const char *buf, size_t len) 2485 { 2486 return nr_hugepages_store_common(true, kobj, buf, len); 2487 } 2488 HSTATE_ATTR(nr_hugepages_mempolicy); 2489 #endif 2490 2491 2492 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj, 2493 struct kobj_attribute *attr, char *buf) 2494 { 2495 struct hstate *h = kobj_to_hstate(kobj, NULL); 2496 return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages); 2497 } 2498 2499 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj, 2500 struct kobj_attribute *attr, const char *buf, size_t count) 2501 { 2502 int err; 2503 unsigned long input; 2504 struct hstate *h = kobj_to_hstate(kobj, NULL); 2505 2506 if (hstate_is_gigantic(h)) 2507 return -EINVAL; 2508 2509 err = kstrtoul(buf, 10, &input); 2510 if (err) 2511 return err; 2512 2513 spin_lock(&hugetlb_lock); 2514 h->nr_overcommit_huge_pages = input; 2515 spin_unlock(&hugetlb_lock); 2516 2517 return count; 2518 } 2519 HSTATE_ATTR(nr_overcommit_hugepages); 2520 2521 static ssize_t free_hugepages_show(struct kobject *kobj, 2522 struct kobj_attribute *attr, char *buf) 2523 { 2524 struct hstate *h; 2525 unsigned long free_huge_pages; 2526 int nid; 2527 2528 h = kobj_to_hstate(kobj, &nid); 2529 if (nid == NUMA_NO_NODE) 2530 free_huge_pages = h->free_huge_pages; 2531 else 2532 free_huge_pages = h->free_huge_pages_node[nid]; 2533 2534 return sprintf(buf, "%lu\n", free_huge_pages); 2535 } 2536 HSTATE_ATTR_RO(free_hugepages); 2537 2538 static ssize_t resv_hugepages_show(struct kobject *kobj, 2539 struct kobj_attribute *attr, char *buf) 2540 { 2541 struct hstate *h = kobj_to_hstate(kobj, NULL); 2542 return sprintf(buf, "%lu\n", h->resv_huge_pages); 2543 } 2544 HSTATE_ATTR_RO(resv_hugepages); 2545 2546 static ssize_t surplus_hugepages_show(struct kobject *kobj, 2547 struct kobj_attribute *attr, char *buf) 2548 { 2549 struct hstate *h; 2550 unsigned long surplus_huge_pages; 2551 int nid; 2552 2553 h = kobj_to_hstate(kobj, &nid); 2554 if (nid == NUMA_NO_NODE) 2555 surplus_huge_pages = h->surplus_huge_pages; 2556 else 2557 surplus_huge_pages = h->surplus_huge_pages_node[nid]; 2558 2559 return sprintf(buf, "%lu\n", surplus_huge_pages); 2560 } 2561 HSTATE_ATTR_RO(surplus_hugepages); 2562 2563 static struct attribute *hstate_attrs[] = { 2564 &nr_hugepages_attr.attr, 2565 &nr_overcommit_hugepages_attr.attr, 2566 &free_hugepages_attr.attr, 2567 &resv_hugepages_attr.attr, 2568 &surplus_hugepages_attr.attr, 2569 #ifdef CONFIG_NUMA 2570 &nr_hugepages_mempolicy_attr.attr, 2571 #endif 2572 NULL, 2573 }; 2574 2575 static const struct attribute_group hstate_attr_group = { 2576 .attrs = hstate_attrs, 2577 }; 2578 2579 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent, 2580 struct kobject **hstate_kobjs, 2581 const struct attribute_group *hstate_attr_group) 2582 { 2583 int retval; 2584 int hi = hstate_index(h); 2585 2586 hstate_kobjs[hi] = kobject_create_and_add(h->name, parent); 2587 if (!hstate_kobjs[hi]) 2588 return -ENOMEM; 2589 2590 retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group); 2591 if (retval) 2592 kobject_put(hstate_kobjs[hi]); 2593 2594 return retval; 2595 } 2596 2597 static void __init hugetlb_sysfs_init(void) 2598 { 2599 struct hstate *h; 2600 int err; 2601 2602 
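	/*
	 * Create /sys/kernel/mm/hugepages plus one subdirectory per hstate
	 * (named after h->name, e.g. "hugepages-2048kB"), each populated
	 * with hstate_attr_group.
	 */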
hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj); 2603 if (!hugepages_kobj) 2604 return; 2605 2606 for_each_hstate(h) { 2607 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj, 2608 hstate_kobjs, &hstate_attr_group); 2609 if (err) 2610 pr_err("Hugetlb: Unable to add hstate %s", h->name); 2611 } 2612 } 2613 2614 #ifdef CONFIG_NUMA 2615 2616 /* 2617 * node_hstate/s - associate per node hstate attributes, via their kobjects, 2618 * with node devices in node_devices[] using a parallel array. The array 2619 * index of a node device or _hstate == node id. 2620 * This is here to avoid any static dependency of the node device driver, in 2621 * the base kernel, on the hugetlb module. 2622 */ 2623 struct node_hstate { 2624 struct kobject *hugepages_kobj; 2625 struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; 2626 }; 2627 static struct node_hstate node_hstates[MAX_NUMNODES]; 2628 2629 /* 2630 * A subset of global hstate attributes for node devices 2631 */ 2632 static struct attribute *per_node_hstate_attrs[] = { 2633 &nr_hugepages_attr.attr, 2634 &free_hugepages_attr.attr, 2635 &surplus_hugepages_attr.attr, 2636 NULL, 2637 }; 2638 2639 static const struct attribute_group per_node_hstate_attr_group = { 2640 .attrs = per_node_hstate_attrs, 2641 }; 2642 2643 /* 2644 * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj. 2645 * Returns node id via non-NULL nidp. 2646 */ 2647 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp) 2648 { 2649 int nid; 2650 2651 for (nid = 0; nid < nr_node_ids; nid++) { 2652 struct node_hstate *nhs = &node_hstates[nid]; 2653 int i; 2654 for (i = 0; i < HUGE_MAX_HSTATE; i++) 2655 if (nhs->hstate_kobjs[i] == kobj) { 2656 if (nidp) 2657 *nidp = nid; 2658 return &hstates[i]; 2659 } 2660 } 2661 2662 BUG(); 2663 return NULL; 2664 } 2665 2666 /* 2667 * Unregister hstate attributes from a single node device. 2668 * No-op if no hstate attributes attached. 2669 */ 2670 static void hugetlb_unregister_node(struct node *node) 2671 { 2672 struct hstate *h; 2673 struct node_hstate *nhs = &node_hstates[node->dev.id]; 2674 2675 if (!nhs->hugepages_kobj) 2676 return; /* no hstate attributes */ 2677 2678 for_each_hstate(h) { 2679 int idx = hstate_index(h); 2680 if (nhs->hstate_kobjs[idx]) { 2681 kobject_put(nhs->hstate_kobjs[idx]); 2682 nhs->hstate_kobjs[idx] = NULL; 2683 } 2684 } 2685 2686 kobject_put(nhs->hugepages_kobj); 2687 nhs->hugepages_kobj = NULL; 2688 } 2689 2690 2691 /* 2692 * Register hstate attributes for a single node device. 2693 * No-op if attributes already registered. 2694 */ 2695 static void hugetlb_register_node(struct node *node) 2696 { 2697 struct hstate *h; 2698 struct node_hstate *nhs = &node_hstates[node->dev.id]; 2699 int err; 2700 2701 if (nhs->hugepages_kobj) 2702 return; /* already allocated */ 2703 2704 nhs->hugepages_kobj = kobject_create_and_add("hugepages", 2705 &node->dev.kobj); 2706 if (!nhs->hugepages_kobj) 2707 return; 2708 2709 for_each_hstate(h) { 2710 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj, 2711 nhs->hstate_kobjs, 2712 &per_node_hstate_attr_group); 2713 if (err) { 2714 pr_err("Hugetlb: Unable to add hstate %s for node %d\n", 2715 h->name, node->dev.id); 2716 hugetlb_unregister_node(node); 2717 break; 2718 } 2719 } 2720 } 2721 2722 /* 2723 * hugetlb init time: register hstate attributes for all registered node 2724 * devices of nodes that have memory. All on-line nodes should have 2725 * registered their associated device by this time. 
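 *
 * The resulting per node attributes appear under
 * /sys/devices/system/node/nodeN/hugepages/ (path given for illustration).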
2726 */ 2727 static void __init hugetlb_register_all_nodes(void) 2728 { 2729 int nid; 2730 2731 for_each_node_state(nid, N_MEMORY) { 2732 struct node *node = node_devices[nid]; 2733 if (node->dev.id == nid) 2734 hugetlb_register_node(node); 2735 } 2736 2737 /* 2738 * Let the node device driver know we're here so it can 2739 * [un]register hstate attributes on node hotplug. 2740 */ 2741 register_hugetlbfs_with_node(hugetlb_register_node, 2742 hugetlb_unregister_node); 2743 } 2744 #else /* !CONFIG_NUMA */ 2745 2746 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp) 2747 { 2748 BUG(); 2749 if (nidp) 2750 *nidp = -1; 2751 return NULL; 2752 } 2753 2754 static void hugetlb_register_all_nodes(void) { } 2755 2756 #endif 2757 2758 static int __init hugetlb_init(void) 2759 { 2760 int i; 2761 2762 if (!hugepages_supported()) 2763 return 0; 2764 2765 if (!size_to_hstate(default_hstate_size)) { 2766 if (default_hstate_size != 0) { 2767 pr_err("HugeTLB: unsupported default_hugepagesz %lu. Reverting to %lu\n", 2768 default_hstate_size, HPAGE_SIZE); 2769 } 2770 2771 default_hstate_size = HPAGE_SIZE; 2772 if (!size_to_hstate(default_hstate_size)) 2773 hugetlb_add_hstate(HUGETLB_PAGE_ORDER); 2774 } 2775 default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size)); 2776 if (default_hstate_max_huge_pages) { 2777 if (!default_hstate.max_huge_pages) 2778 default_hstate.max_huge_pages = default_hstate_max_huge_pages; 2779 } 2780 2781 hugetlb_init_hstates(); 2782 gather_bootmem_prealloc(); 2783 report_hugepages(); 2784 2785 hugetlb_sysfs_init(); 2786 hugetlb_register_all_nodes(); 2787 hugetlb_cgroup_file_init(); 2788 2789 #ifdef CONFIG_SMP 2790 num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus()); 2791 #else 2792 num_fault_mutexes = 1; 2793 #endif 2794 hugetlb_fault_mutex_table = 2795 kmalloc_array(num_fault_mutexes, sizeof(struct mutex), 2796 GFP_KERNEL); 2797 BUG_ON(!hugetlb_fault_mutex_table); 2798 2799 for (i = 0; i < num_fault_mutexes; i++) 2800 mutex_init(&hugetlb_fault_mutex_table[i]); 2801 return 0; 2802 } 2803 subsys_initcall(hugetlb_init); 2804 2805 /* Should be called on processing a hugepagesz=... 
option */ 2806 void __init hugetlb_bad_size(void) 2807 { 2808 parsed_valid_hugepagesz = false; 2809 } 2810 2811 void __init hugetlb_add_hstate(unsigned int order) 2812 { 2813 struct hstate *h; 2814 unsigned long i; 2815 2816 if (size_to_hstate(PAGE_SIZE << order)) { 2817 pr_warn("hugepagesz= specified twice, ignoring\n"); 2818 return; 2819 } 2820 BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE); 2821 BUG_ON(order == 0); 2822 h = &hstates[hugetlb_max_hstate++]; 2823 h->order = order; 2824 h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1); 2825 h->nr_huge_pages = 0; 2826 h->free_huge_pages = 0; 2827 for (i = 0; i < MAX_NUMNODES; ++i) 2828 INIT_LIST_HEAD(&h->hugepage_freelists[i]); 2829 INIT_LIST_HEAD(&h->hugepage_activelist); 2830 h->next_nid_to_alloc = first_memory_node; 2831 h->next_nid_to_free = first_memory_node; 2832 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB", 2833 huge_page_size(h)/1024); 2834 2835 parsed_hstate = h; 2836 } 2837 2838 static int __init hugetlb_nrpages_setup(char *s) 2839 { 2840 unsigned long *mhp; 2841 static unsigned long *last_mhp; 2842 2843 if (!parsed_valid_hugepagesz) { 2844 pr_warn("hugepages = %s preceded by " 2845 "an unsupported hugepagesz, ignoring\n", s); 2846 parsed_valid_hugepagesz = true; 2847 return 1; 2848 } 2849 /* 2850 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet, 2851 * so this hugepages= parameter goes to the "default hstate". 2852 */ 2853 else if (!hugetlb_max_hstate) 2854 mhp = &default_hstate_max_huge_pages; 2855 else 2856 mhp = &parsed_hstate->max_huge_pages; 2857 2858 if (mhp == last_mhp) { 2859 pr_warn("hugepages= specified twice without interleaving hugepagesz=, ignoring\n"); 2860 return 1; 2861 } 2862 2863 if (sscanf(s, "%lu", mhp) <= 0) 2864 *mhp = 0; 2865 2866 /* 2867 * Global state is always initialized later in hugetlb_init. 2868 * But we need to allocate >= MAX_ORDER hstates here early to still 2869 * use the bootmem allocator. 
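 *
 * For example (illustrative, x86-64 page sizes): "hugepagesz=1G hugepages=4
 * hugepagesz=2M hugepages=512" reserves the four 1GB pages from bootmem
 * right here, while the 512 2MB pages are allocated later from
 * hugetlb_init_hstates().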
2870 */ 2871 if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER) 2872 hugetlb_hstate_alloc_pages(parsed_hstate); 2873 2874 last_mhp = mhp; 2875 2876 return 1; 2877 } 2878 __setup("hugepages=", hugetlb_nrpages_setup); 2879 2880 static int __init hugetlb_default_setup(char *s) 2881 { 2882 default_hstate_size = memparse(s, &s); 2883 return 1; 2884 } 2885 __setup("default_hugepagesz=", hugetlb_default_setup); 2886 2887 static unsigned int cpuset_mems_nr(unsigned int *array) 2888 { 2889 int node; 2890 unsigned int nr = 0; 2891 2892 for_each_node_mask(node, cpuset_current_mems_allowed) 2893 nr += array[node]; 2894 2895 return nr; 2896 } 2897 2898 #ifdef CONFIG_SYSCTL 2899 static int hugetlb_sysctl_handler_common(bool obey_mempolicy, 2900 struct ctl_table *table, int write, 2901 void __user *buffer, size_t *length, loff_t *ppos) 2902 { 2903 struct hstate *h = &default_hstate; 2904 unsigned long tmp = h->max_huge_pages; 2905 int ret; 2906 2907 if (!hugepages_supported()) 2908 return -EOPNOTSUPP; 2909 2910 table->data = &tmp; 2911 table->maxlen = sizeof(unsigned long); 2912 ret = proc_doulongvec_minmax(table, write, buffer, length, ppos); 2913 if (ret) 2914 goto out; 2915 2916 if (write) 2917 ret = __nr_hugepages_store_common(obey_mempolicy, h, 2918 NUMA_NO_NODE, tmp, *length); 2919 out: 2920 return ret; 2921 } 2922 2923 int hugetlb_sysctl_handler(struct ctl_table *table, int write, 2924 void __user *buffer, size_t *length, loff_t *ppos) 2925 { 2926 2927 return hugetlb_sysctl_handler_common(false, table, write, 2928 buffer, length, ppos); 2929 } 2930 2931 #ifdef CONFIG_NUMA 2932 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write, 2933 void __user *buffer, size_t *length, loff_t *ppos) 2934 { 2935 return hugetlb_sysctl_handler_common(true, table, write, 2936 buffer, length, ppos); 2937 } 2938 #endif /* CONFIG_NUMA */ 2939 2940 int hugetlb_overcommit_handler(struct ctl_table *table, int write, 2941 void __user *buffer, 2942 size_t *length, loff_t *ppos) 2943 { 2944 struct hstate *h = &default_hstate; 2945 unsigned long tmp; 2946 int ret; 2947 2948 if (!hugepages_supported()) 2949 return -EOPNOTSUPP; 2950 2951 tmp = h->nr_overcommit_huge_pages; 2952 2953 if (write && hstate_is_gigantic(h)) 2954 return -EINVAL; 2955 2956 table->data = &tmp; 2957 table->maxlen = sizeof(unsigned long); 2958 ret = proc_doulongvec_minmax(table, write, buffer, length, ppos); 2959 if (ret) 2960 goto out; 2961 2962 if (write) { 2963 spin_lock(&hugetlb_lock); 2964 h->nr_overcommit_huge_pages = tmp; 2965 spin_unlock(&hugetlb_lock); 2966 } 2967 out: 2968 return ret; 2969 } 2970 2971 #endif /* CONFIG_SYSCTL */ 2972 2973 void hugetlb_report_meminfo(struct seq_file *m) 2974 { 2975 struct hstate *h; 2976 unsigned long total = 0; 2977 2978 if (!hugepages_supported()) 2979 return; 2980 2981 for_each_hstate(h) { 2982 unsigned long count = h->nr_huge_pages; 2983 2984 total += (PAGE_SIZE << huge_page_order(h)) * count; 2985 2986 if (h == &default_hstate) 2987 seq_printf(m, 2988 "HugePages_Total: %5lu\n" 2989 "HugePages_Free: %5lu\n" 2990 "HugePages_Rsvd: %5lu\n" 2991 "HugePages_Surp: %5lu\n" 2992 "Hugepagesize: %8lu kB\n", 2993 count, 2994 h->free_huge_pages, 2995 h->resv_huge_pages, 2996 h->surplus_huge_pages, 2997 (PAGE_SIZE << huge_page_order(h)) / 1024); 2998 } 2999 3000 seq_printf(m, "Hugetlb: %8lu kB\n", total / 1024); 3001 } 3002 3003 int hugetlb_report_node_meminfo(int nid, char *buf) 3004 { 3005 struct hstate *h = &default_hstate; 3006 if (!hugepages_supported()) 3007 return 0; 3008 return 
sprintf(buf, 3009 "Node %d HugePages_Total: %5u\n" 3010 "Node %d HugePages_Free: %5u\n" 3011 "Node %d HugePages_Surp: %5u\n", 3012 nid, h->nr_huge_pages_node[nid], 3013 nid, h->free_huge_pages_node[nid], 3014 nid, h->surplus_huge_pages_node[nid]); 3015 } 3016 3017 void hugetlb_show_meminfo(void) 3018 { 3019 struct hstate *h; 3020 int nid; 3021 3022 if (!hugepages_supported()) 3023 return; 3024 3025 for_each_node_state(nid, N_MEMORY) 3026 for_each_hstate(h) 3027 pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n", 3028 nid, 3029 h->nr_huge_pages_node[nid], 3030 h->free_huge_pages_node[nid], 3031 h->surplus_huge_pages_node[nid], 3032 1UL << (huge_page_order(h) + PAGE_SHIFT - 10)); 3033 } 3034 3035 void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm) 3036 { 3037 seq_printf(m, "HugetlbPages:\t%8lu kB\n", 3038 atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10)); 3039 } 3040 3041 /* Return the number pages of memory we physically have, in PAGE_SIZE units. */ 3042 unsigned long hugetlb_total_pages(void) 3043 { 3044 struct hstate *h; 3045 unsigned long nr_total_pages = 0; 3046 3047 for_each_hstate(h) 3048 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h); 3049 return nr_total_pages; 3050 } 3051 3052 static int hugetlb_acct_memory(struct hstate *h, long delta) 3053 { 3054 int ret = -ENOMEM; 3055 3056 spin_lock(&hugetlb_lock); 3057 /* 3058 * When cpuset is configured, it breaks the strict hugetlb page 3059 * reservation as the accounting is done on a global variable. Such 3060 * reservation is completely rubbish in the presence of cpuset because 3061 * the reservation is not checked against page availability for the 3062 * current cpuset. Application can still potentially OOM'ed by kernel 3063 * with lack of free htlb page in cpuset that the task is in. 3064 * Attempt to enforce strict accounting with cpuset is almost 3065 * impossible (or too ugly) because cpuset is too fluid that 3066 * task or memory node can be dynamically moved between cpusets. 3067 * 3068 * The change of semantics for shared hugetlb mapping with cpuset is 3069 * undesirable. However, in order to preserve some of the semantics, 3070 * we fall back to check against current free page availability as 3071 * a best attempt and hopefully to minimize the impact of changing 3072 * semantics that cpuset has. 3073 */ 3074 if (delta > 0) { 3075 if (gather_surplus_pages(h, delta) < 0) 3076 goto out; 3077 3078 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) { 3079 return_unused_surplus_pages(h, delta); 3080 goto out; 3081 } 3082 } 3083 3084 ret = 0; 3085 if (delta < 0) 3086 return_unused_surplus_pages(h, (unsigned long) -delta); 3087 3088 out: 3089 spin_unlock(&hugetlb_lock); 3090 return ret; 3091 } 3092 3093 static void hugetlb_vm_op_open(struct vm_area_struct *vma) 3094 { 3095 struct resv_map *resv = vma_resv_map(vma); 3096 3097 /* 3098 * This new VMA should share its siblings reservation map if present. 3099 * The VMA will only ever have a valid reservation map pointer where 3100 * it is being copied for another still existing VMA. As that VMA 3101 * has a reference to the reservation map it cannot disappear until 3102 * after this open call completes. It is therefore safe to take a 3103 * new reference here without additional locking. 
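 *
 * (Shared mappings never reach the kref_get below: their reservation map
 * lives in the inode rather than the VMA, and HPAGE_RESV_OWNER is only set
 * on private mappings that own a map.)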
3104 */ 3105 if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) 3106 kref_get(&resv->refs); 3107 } 3108 3109 static void hugetlb_vm_op_close(struct vm_area_struct *vma) 3110 { 3111 struct hstate *h = hstate_vma(vma); 3112 struct resv_map *resv = vma_resv_map(vma); 3113 struct hugepage_subpool *spool = subpool_vma(vma); 3114 unsigned long reserve, start, end; 3115 long gbl_reserve; 3116 3117 if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER)) 3118 return; 3119 3120 start = vma_hugecache_offset(h, vma, vma->vm_start); 3121 end = vma_hugecache_offset(h, vma, vma->vm_end); 3122 3123 reserve = (end - start) - region_count(resv, start, end); 3124 3125 kref_put(&resv->refs, resv_map_release); 3126 3127 if (reserve) { 3128 /* 3129 * Decrement reserve counts. The global reserve count may be 3130 * adjusted if the subpool has a minimum size. 3131 */ 3132 gbl_reserve = hugepage_subpool_put_pages(spool, reserve); 3133 hugetlb_acct_memory(h, -gbl_reserve); 3134 } 3135 } 3136 3137 static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr) 3138 { 3139 if (addr & ~(huge_page_mask(hstate_vma(vma)))) 3140 return -EINVAL; 3141 return 0; 3142 } 3143 3144 static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma) 3145 { 3146 struct hstate *hstate = hstate_vma(vma); 3147 3148 return 1UL << huge_page_shift(hstate); 3149 } 3150 3151 /* 3152 * We cannot handle pagefaults against hugetlb pages at all. They cause 3153 * handle_mm_fault() to try to instantiate regular-sized pages in the 3154 * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get 3155 * this far. 3156 */ 3157 static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf) 3158 { 3159 BUG(); 3160 return 0; 3161 } 3162 3163 /* 3164 * When a new function is introduced to vm_operations_struct and added 3165 * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops. 3166 * This is because under the System V memory model, mappings created via 3167 * shmget/shmat with "huge page" specified are backed by hugetlbfs files, and 3168 * their original vm_ops are overwritten with shm_vm_ops.
3169 */ 3170 const struct vm_operations_struct hugetlb_vm_ops = { 3171 .fault = hugetlb_vm_op_fault, 3172 .open = hugetlb_vm_op_open, 3173 .close = hugetlb_vm_op_close, 3174 .split = hugetlb_vm_op_split, 3175 .pagesize = hugetlb_vm_op_pagesize, 3176 }; 3177 3178 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page, 3179 int writable) 3180 { 3181 pte_t entry; 3182 3183 if (writable) { 3184 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page, 3185 vma->vm_page_prot))); 3186 } else { 3187 entry = huge_pte_wrprotect(mk_huge_pte(page, 3188 vma->vm_page_prot)); 3189 } 3190 entry = pte_mkyoung(entry); 3191 entry = pte_mkhuge(entry); 3192 entry = arch_make_huge_pte(entry, vma, page, writable); 3193 3194 return entry; 3195 } 3196 3197 static void set_huge_ptep_writable(struct vm_area_struct *vma, 3198 unsigned long address, pte_t *ptep) 3199 { 3200 pte_t entry; 3201 3202 entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep))); 3203 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) 3204 update_mmu_cache(vma, address, ptep); 3205 } 3206 3207 bool is_hugetlb_entry_migration(pte_t pte) 3208 { 3209 swp_entry_t swp; 3210 3211 if (huge_pte_none(pte) || pte_present(pte)) 3212 return false; 3213 swp = pte_to_swp_entry(pte); 3214 if (non_swap_entry(swp) && is_migration_entry(swp)) 3215 return true; 3216 else 3217 return false; 3218 } 3219 3220 static int is_hugetlb_entry_hwpoisoned(pte_t pte) 3221 { 3222 swp_entry_t swp; 3223 3224 if (huge_pte_none(pte) || pte_present(pte)) 3225 return 0; 3226 swp = pte_to_swp_entry(pte); 3227 if (non_swap_entry(swp) && is_hwpoison_entry(swp)) 3228 return 1; 3229 else 3230 return 0; 3231 } 3232 3233 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, 3234 struct vm_area_struct *vma) 3235 { 3236 pte_t *src_pte, *dst_pte, entry, dst_entry; 3237 struct page *ptepage; 3238 unsigned long addr; 3239 int cow; 3240 struct hstate *h = hstate_vma(vma); 3241 unsigned long sz = huge_page_size(h); 3242 unsigned long mmun_start; /* For mmu_notifiers */ 3243 unsigned long mmun_end; /* For mmu_notifiers */ 3244 int ret = 0; 3245 3246 cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; 3247 3248 mmun_start = vma->vm_start; 3249 mmun_end = vma->vm_end; 3250 if (cow) 3251 mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end); 3252 3253 for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) { 3254 spinlock_t *src_ptl, *dst_ptl; 3255 src_pte = huge_pte_offset(src, addr, sz); 3256 if (!src_pte) 3257 continue; 3258 dst_pte = huge_pte_alloc(dst, addr, sz); 3259 if (!dst_pte) { 3260 ret = -ENOMEM; 3261 break; 3262 } 3263 3264 /* 3265 * If the pagetables are shared don't copy or take references. 3266 * dst_pte == src_pte is the common case of src/dest sharing. 3267 * 3268 * However, src could have 'unshared' and dst shares with 3269 * another vma. If dst_pte !none, this implies sharing. 3270 * Check here before taking page table lock, and once again 3271 * after taking the lock below. 3272 */ 3273 dst_entry = huge_ptep_get(dst_pte); 3274 if ((dst_pte == src_pte) || !huge_pte_none(dst_entry)) 3275 continue; 3276 3277 dst_ptl = huge_pte_lock(h, dst, dst_pte); 3278 src_ptl = huge_pte_lockptr(h, src, src_pte); 3279 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 3280 entry = huge_ptep_get(src_pte); 3281 dst_entry = huge_ptep_get(dst_pte); 3282 if (huge_pte_none(entry) || !huge_pte_none(dst_entry)) { 3283 /* 3284 * Skip if src entry none. 
Also, skip in the 3285 * unlikely case dst entry !none as this implies 3286 * sharing with another vma. 3287 */ 3288 ; 3289 } else if (unlikely(is_hugetlb_entry_migration(entry) || 3290 is_hugetlb_entry_hwpoisoned(entry))) { 3291 swp_entry_t swp_entry = pte_to_swp_entry(entry); 3292 3293 if (is_write_migration_entry(swp_entry) && cow) { 3294 /* 3295 * COW mappings require pages in both 3296 * parent and child to be set to read. 3297 */ 3298 make_migration_entry_read(&swp_entry); 3299 entry = swp_entry_to_pte(swp_entry); 3300 set_huge_swap_pte_at(src, addr, src_pte, 3301 entry, sz); 3302 } 3303 set_huge_swap_pte_at(dst, addr, dst_pte, entry, sz); 3304 } else { 3305 if (cow) { 3306 /* 3307 * No need to notify as we are downgrading page 3308 * table protection not changing it to point 3309 * to a new page. 3310 * 3311 * See Documentation/vm/mmu_notifier.rst 3312 */ 3313 huge_ptep_set_wrprotect(src, addr, src_pte); 3314 } 3315 entry = huge_ptep_get(src_pte); 3316 ptepage = pte_page(entry); 3317 get_page(ptepage); 3318 page_dup_rmap(ptepage, true); 3319 set_huge_pte_at(dst, addr, dst_pte, entry); 3320 hugetlb_count_add(pages_per_huge_page(h), dst); 3321 } 3322 spin_unlock(src_ptl); 3323 spin_unlock(dst_ptl); 3324 } 3325 3326 if (cow) 3327 mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end); 3328 3329 return ret; 3330 } 3331 3332 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, 3333 unsigned long start, unsigned long end, 3334 struct page *ref_page) 3335 { 3336 struct mm_struct *mm = vma->vm_mm; 3337 unsigned long address; 3338 pte_t *ptep; 3339 pte_t pte; 3340 spinlock_t *ptl; 3341 struct page *page; 3342 struct hstate *h = hstate_vma(vma); 3343 unsigned long sz = huge_page_size(h); 3344 unsigned long mmun_start = start; /* For mmu_notifiers */ 3345 unsigned long mmun_end = end; /* For mmu_notifiers */ 3346 3347 WARN_ON(!is_vm_hugetlb_page(vma)); 3348 BUG_ON(start & ~huge_page_mask(h)); 3349 BUG_ON(end & ~huge_page_mask(h)); 3350 3351 /* 3352 * This is a hugetlb vma, all the pte entries should point 3353 * to huge page. 3354 */ 3355 tlb_remove_check_page_size_change(tlb, sz); 3356 tlb_start_vma(tlb, vma); 3357 3358 /* 3359 * If sharing possible, alert mmu notifiers of worst case. 3360 */ 3361 adjust_range_if_pmd_sharing_possible(vma, &mmun_start, &mmun_end); 3362 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 3363 address = start; 3364 for (; address < end; address += sz) { 3365 ptep = huge_pte_offset(mm, address, sz); 3366 if (!ptep) 3367 continue; 3368 3369 ptl = huge_pte_lock(h, mm, ptep); 3370 if (huge_pmd_unshare(mm, &address, ptep)) { 3371 spin_unlock(ptl); 3372 /* 3373 * We just unmapped a page of PMDs by clearing a PUD. 3374 * The caller's TLB flush range should cover this area. 3375 */ 3376 continue; 3377 } 3378 3379 pte = huge_ptep_get(ptep); 3380 if (huge_pte_none(pte)) { 3381 spin_unlock(ptl); 3382 continue; 3383 } 3384 3385 /* 3386 * Migrating hugepage or HWPoisoned hugepage is already 3387 * unmapped and its refcount is dropped, so just clear pte here. 3388 */ 3389 if (unlikely(!pte_present(pte))) { 3390 huge_pte_clear(mm, address, ptep, sz); 3391 spin_unlock(ptl); 3392 continue; 3393 } 3394 3395 page = pte_page(pte); 3396 /* 3397 * If a reference page is supplied, it is because a specific 3398 * page is being unmapped, not a range. Ensure the page we 3399 * are about to unmap is the actual page of interest. 
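 *
 * (In this file ref_page is supplied, for example, by unmap_ref_private()
 * below, which tears down a single page from the other private mappings
 * of the file.)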
3400 */ 3401 if (ref_page) { 3402 if (page != ref_page) { 3403 spin_unlock(ptl); 3404 continue; 3405 } 3406 /* 3407 * Mark the VMA as having unmapped its page so that 3408 * future faults in this VMA will fail rather than 3409 * looking like data was lost 3410 */ 3411 set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED); 3412 } 3413 3414 pte = huge_ptep_get_and_clear(mm, address, ptep); 3415 tlb_remove_huge_tlb_entry(h, tlb, ptep, address); 3416 if (huge_pte_dirty(pte)) 3417 set_page_dirty(page); 3418 3419 hugetlb_count_sub(pages_per_huge_page(h), mm); 3420 page_remove_rmap(page, true); 3421 3422 spin_unlock(ptl); 3423 tlb_remove_page_size(tlb, page, huge_page_size(h)); 3424 /* 3425 * Bail out after unmapping reference page if supplied 3426 */ 3427 if (ref_page) 3428 break; 3429 } 3430 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 3431 tlb_end_vma(tlb, vma); 3432 } 3433 3434 void __unmap_hugepage_range_final(struct mmu_gather *tlb, 3435 struct vm_area_struct *vma, unsigned long start, 3436 unsigned long end, struct page *ref_page) 3437 { 3438 __unmap_hugepage_range(tlb, vma, start, end, ref_page); 3439 3440 /* 3441 * Clear this flag so that x86's huge_pmd_share page_table_shareable 3442 * test will fail on a vma being torn down, and not grab a page table 3443 * on its way out. We're lucky that the flag has such an appropriate 3444 * name, and can in fact be safely cleared here. We could clear it 3445 * before the __unmap_hugepage_range above, but all that's necessary 3446 * is to clear it before releasing the i_mmap_rwsem. This works 3447 * because in the context this is called, the VMA is about to be 3448 * destroyed and the i_mmap_rwsem is held. 3449 */ 3450 vma->vm_flags &= ~VM_MAYSHARE; 3451 } 3452 3453 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, 3454 unsigned long end, struct page *ref_page) 3455 { 3456 struct mm_struct *mm; 3457 struct mmu_gather tlb; 3458 unsigned long tlb_start = start; 3459 unsigned long tlb_end = end; 3460 3461 /* 3462 * If shared PMDs were possibly used within this vma range, adjust 3463 * start/end for worst case tlb flushing. 3464 * Note that we can not be sure if PMDs are shared until we try to 3465 * unmap pages. However, we want to make sure TLB flushing covers 3466 * the largest possible range. 3467 */ 3468 adjust_range_if_pmd_sharing_possible(vma, &tlb_start, &tlb_end); 3469 3470 mm = vma->vm_mm; 3471 3472 tlb_gather_mmu(&tlb, mm, tlb_start, tlb_end); 3473 __unmap_hugepage_range(&tlb, vma, start, end, ref_page); 3474 tlb_finish_mmu(&tlb, tlb_start, tlb_end); 3475 } 3476 3477 /* 3478 * This is called when the original mapper is failing to COW a MAP_PRIVATE 3479 * mappping it owns the reserve page for. The intention is to unmap the page 3480 * from other VMAs and let the children be SIGKILLed if they are faulting the 3481 * same region. 3482 */ 3483 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, 3484 struct page *page, unsigned long address) 3485 { 3486 struct hstate *h = hstate_vma(vma); 3487 struct vm_area_struct *iter_vma; 3488 struct address_space *mapping; 3489 pgoff_t pgoff; 3490 3491 /* 3492 * vm_pgoff is in PAGE_SIZE units, hence the different calculation 3493 * from page cache lookup which is in HPAGE_SIZE units. 3494 */ 3495 address = address & huge_page_mask(h); 3496 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + 3497 vma->vm_pgoff; 3498 mapping = vma->vm_file->f_mapping; 3499 3500 /* 3501 * Take the mapping lock for the duration of the table walk. 
As 3502 * this mapping should be shared between all the VMAs, 3503 * __unmap_hugepage_range() is called as the lock is already held 3504 */ 3505 i_mmap_lock_write(mapping); 3506 vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) { 3507 /* Do not unmap the current VMA */ 3508 if (iter_vma == vma) 3509 continue; 3510 3511 /* 3512 * Shared VMAs have their own reserves and do not affect 3513 * MAP_PRIVATE accounting but it is possible that a shared 3514 * VMA is using the same page so check and skip such VMAs. 3515 */ 3516 if (iter_vma->vm_flags & VM_MAYSHARE) 3517 continue; 3518 3519 /* 3520 * Unmap the page from other VMAs without their own reserves. 3521 * They get marked to be SIGKILLed if they fault in these 3522 * areas. This is because a future no-page fault on this VMA 3523 * could insert a zeroed page instead of the data existing 3524 * from the time of fork. This would look like data corruption 3525 */ 3526 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER)) 3527 unmap_hugepage_range(iter_vma, address, 3528 address + huge_page_size(h), page); 3529 } 3530 i_mmap_unlock_write(mapping); 3531 } 3532 3533 /* 3534 * Hugetlb_cow() should be called with page lock of the original hugepage held. 3535 * Called with hugetlb_instantiation_mutex held and pte_page locked so we 3536 * cannot race with other handlers or page migration. 3537 * Keep the pte_same checks anyway to make transition from the mutex easier. 3538 */ 3539 static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma, 3540 unsigned long address, pte_t *ptep, 3541 struct page *pagecache_page, spinlock_t *ptl) 3542 { 3543 pte_t pte; 3544 struct hstate *h = hstate_vma(vma); 3545 struct page *old_page, *new_page; 3546 int outside_reserve = 0; 3547 vm_fault_t ret = 0; 3548 unsigned long mmun_start; /* For mmu_notifiers */ 3549 unsigned long mmun_end; /* For mmu_notifiers */ 3550 unsigned long haddr = address & huge_page_mask(h); 3551 3552 pte = huge_ptep_get(ptep); 3553 old_page = pte_page(pte); 3554 3555 retry_avoidcopy: 3556 /* If no-one else is actually using this page, avoid the copy 3557 * and just make the page writable */ 3558 if (page_mapcount(old_page) == 1 && PageAnon(old_page)) { 3559 page_move_anon_rmap(old_page, vma); 3560 set_huge_ptep_writable(vma, haddr, ptep); 3561 return 0; 3562 } 3563 3564 /* 3565 * If the process that created a MAP_PRIVATE mapping is about to 3566 * perform a COW due to a shared page count, attempt to satisfy 3567 * the allocation without using the existing reserves. The pagecache 3568 * page is used to determine if the reserve at this address was 3569 * consumed or not. If reserves were used, a partial faulted mapping 3570 * at the time of fork() could consume its reserves on COW instead 3571 * of the full address range. 3572 */ 3573 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && 3574 old_page != pagecache_page) 3575 outside_reserve = 1; 3576 3577 get_page(old_page); 3578 3579 /* 3580 * Drop page table lock as buddy allocator may be called. It will 3581 * be acquired again before returning to the caller, as expected. 3582 */ 3583 spin_unlock(ptl); 3584 new_page = alloc_huge_page(vma, haddr, outside_reserve); 3585 3586 if (IS_ERR(new_page)) { 3587 /* 3588 * If a process owning a MAP_PRIVATE mapping fails to COW, 3589 * it is due to references held by a child and an insufficient 3590 * huge page pool. To guarantee the original mappers 3591 * reliability, unmap the page from child processes. The child 3592 * may get SIGKILLed if it later faults. 
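 *
 * (unmap_ref_private() marks those VMAs with HPAGE_RESV_UNMAPPED via
 * __unmap_hugepage_range(), so a later fault in the child fails in
 * hugetlb_no_page() with the "killed due to inadequate hugepage pool"
 * warning.)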
3593 */ 3594 if (outside_reserve) { 3595 put_page(old_page); 3596 BUG_ON(huge_pte_none(pte)); 3597 unmap_ref_private(mm, vma, old_page, haddr); 3598 BUG_ON(huge_pte_none(pte)); 3599 spin_lock(ptl); 3600 ptep = huge_pte_offset(mm, haddr, huge_page_size(h)); 3601 if (likely(ptep && 3602 pte_same(huge_ptep_get(ptep), pte))) 3603 goto retry_avoidcopy; 3604 /* 3605 * race occurs while re-acquiring page table 3606 * lock, and our job is done. 3607 */ 3608 return 0; 3609 } 3610 3611 ret = vmf_error(PTR_ERR(new_page)); 3612 goto out_release_old; 3613 } 3614 3615 /* 3616 * When the original hugepage is shared one, it does not have 3617 * anon_vma prepared. 3618 */ 3619 if (unlikely(anon_vma_prepare(vma))) { 3620 ret = VM_FAULT_OOM; 3621 goto out_release_all; 3622 } 3623 3624 copy_user_huge_page(new_page, old_page, address, vma, 3625 pages_per_huge_page(h)); 3626 __SetPageUptodate(new_page); 3627 set_page_huge_active(new_page); 3628 3629 mmun_start = haddr; 3630 mmun_end = mmun_start + huge_page_size(h); 3631 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 3632 3633 /* 3634 * Retake the page table lock to check for racing updates 3635 * before the page tables are altered 3636 */ 3637 spin_lock(ptl); 3638 ptep = huge_pte_offset(mm, haddr, huge_page_size(h)); 3639 if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) { 3640 ClearPagePrivate(new_page); 3641 3642 /* Break COW */ 3643 huge_ptep_clear_flush(vma, haddr, ptep); 3644 mmu_notifier_invalidate_range(mm, mmun_start, mmun_end); 3645 set_huge_pte_at(mm, haddr, ptep, 3646 make_huge_pte(vma, new_page, 1)); 3647 page_remove_rmap(old_page, true); 3648 hugepage_add_new_anon_rmap(new_page, vma, haddr); 3649 /* Make the old page be freed below */ 3650 new_page = old_page; 3651 } 3652 spin_unlock(ptl); 3653 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 3654 out_release_all: 3655 restore_reserve_on_error(h, vma, haddr, new_page); 3656 put_page(new_page); 3657 out_release_old: 3658 put_page(old_page); 3659 3660 spin_lock(ptl); /* Caller expects lock to be held */ 3661 return ret; 3662 } 3663 3664 /* Return the pagecache page at a given address within a VMA */ 3665 static struct page *hugetlbfs_pagecache_page(struct hstate *h, 3666 struct vm_area_struct *vma, unsigned long address) 3667 { 3668 struct address_space *mapping; 3669 pgoff_t idx; 3670 3671 mapping = vma->vm_file->f_mapping; 3672 idx = vma_hugecache_offset(h, vma, address); 3673 3674 return find_lock_page(mapping, idx); 3675 } 3676 3677 /* 3678 * Return whether there is a pagecache page to back given address within VMA. 3679 * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page. 3680 */ 3681 static bool hugetlbfs_pagecache_present(struct hstate *h, 3682 struct vm_area_struct *vma, unsigned long address) 3683 { 3684 struct address_space *mapping; 3685 pgoff_t idx; 3686 struct page *page; 3687 3688 mapping = vma->vm_file->f_mapping; 3689 idx = vma_hugecache_offset(h, vma, address); 3690 3691 page = find_get_page(mapping, idx); 3692 if (page) 3693 put_page(page); 3694 return page != NULL; 3695 } 3696 3697 int huge_add_to_page_cache(struct page *page, struct address_space *mapping, 3698 pgoff_t idx) 3699 { 3700 struct inode *inode = mapping->host; 3701 struct hstate *h = hstate_inode(inode); 3702 int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL); 3703 3704 if (err) 3705 return err; 3706 ClearPagePrivate(page); 3707 3708 /* 3709 * set page dirty so that it will not be removed from cache/file 3710 * by non-hugetlbfs specific code paths. 
3711 	 */
3712 	set_page_dirty(page);
3713
3714 	spin_lock(&inode->i_lock);
3715 	inode->i_blocks += blocks_per_huge_page(h);
3716 	spin_unlock(&inode->i_lock);
3717 	return 0;
3718 }
3719
3720 static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
3721 			struct vm_area_struct *vma,
3722 			struct address_space *mapping, pgoff_t idx,
3723 			unsigned long address, pte_t *ptep, unsigned int flags)
3724 {
3725 	struct hstate *h = hstate_vma(vma);
3726 	vm_fault_t ret = VM_FAULT_SIGBUS;
3727 	int anon_rmap = 0;
3728 	unsigned long size;
3729 	struct page *page;
3730 	pte_t new_pte;
3731 	spinlock_t *ptl;
3732 	unsigned long haddr = address & huge_page_mask(h);
3733
3734 	/*
3735 	 * Currently, we are forced to kill the process in the event the
3736 	 * original mapper has unmapped pages from the child due to a failed
3737 	 * COW. Warn that such a situation has occurred as it may not be obvious.
3738 	 */
3739 	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
3740 		pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
3741 			   current->pid);
3742 		return ret;
3743 	}
3744
3745 	/*
3746 	 * Use page lock to guard against racing truncation
3747 	 * before we get page_table_lock.
3748 	 */
3749 retry:
3750 	page = find_lock_page(mapping, idx);
3751 	if (!page) {
3752 		size = i_size_read(mapping->host) >> huge_page_shift(h);
3753 		if (idx >= size)
3754 			goto out;
3755
3756 		/*
3757 		 * Check for page in userfault range
3758 		 */
3759 		if (userfaultfd_missing(vma)) {
3760 			u32 hash;
3761 			struct vm_fault vmf = {
3762 				.vma = vma,
3763 				.address = haddr,
3764 				.flags = flags,
3765 				/*
3766 				 * Hard to debug if it ends up being
3767 				 * used by a callee that assumes
3768 				 * something about the other
3769 				 * uninitialized fields... same as in
3770 				 * memory.c
3771 				 */
3772 			};
3773
3774 			/*
3775 			 * hugetlb_fault_mutex must be dropped before
3776 			 * handling userfault. Reacquire after handling
3777 			 * fault to make calling code simpler.
3778 			 */
3779 			hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping,
3780 							idx, haddr);
3781 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
3782 			ret = handle_userfault(&vmf, VM_UFFD_MISSING);
3783 			mutex_lock(&hugetlb_fault_mutex_table[hash]);
3784 			goto out;
3785 		}
3786
3787 		page = alloc_huge_page(vma, haddr, 0);
3788 		if (IS_ERR(page)) {
3789 			ret = vmf_error(PTR_ERR(page));
3790 			goto out;
3791 		}
3792 		clear_huge_page(page, address, pages_per_huge_page(h));
3793 		__SetPageUptodate(page);
3794 		set_page_huge_active(page);
3795
3796 		if (vma->vm_flags & VM_MAYSHARE) {
3797 			int err = huge_add_to_page_cache(page, mapping, idx);
3798 			if (err) {
3799 				put_page(page);
3800 				if (err == -EEXIST)
3801 					goto retry;
3802 				goto out;
3803 			}
3804 		} else {
3805 			lock_page(page);
3806 			if (unlikely(anon_vma_prepare(vma))) {
3807 				ret = VM_FAULT_OOM;
3808 				goto backout_unlocked;
3809 			}
3810 			anon_rmap = 1;
3811 		}
3812 	} else {
3813 		/*
3814 		 * If a memory error occurs between mmap() and the fault, some processes
3815 		 * won't have a hwpoisoned swap entry for the errored virtual address,
3816 		 * so we block the hugepage fault with the PG_hwpoison bit check.
3817 		 */
3818 		if (unlikely(PageHWPoison(page))) {
3819 			ret = VM_FAULT_HWPOISON |
3820 				VM_FAULT_SET_HINDEX(hstate_index(h));
3821 			goto backout_unlocked;
3822 		}
3823 	}
3824
3825 	/*
3826 	 * If we are going to COW a private mapping later, we examine the
3827 	 * pending reservations for this page now. This will ensure that
3828 	 * any allocations necessary to record that reservation occur outside
3829 	 * the spinlock.
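	 * vma_needs_reservation() may need to allocate a file_region entry
	 * with GFP_KERNEL, which must not happen under the page table lock
	 * taken just below.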
3830 	 */
3831 	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3832 		if (vma_needs_reservation(h, vma, haddr) < 0) {
3833 			ret = VM_FAULT_OOM;
3834 			goto backout_unlocked;
3835 		}
3836 		/* Just decrements count, does not deallocate */
3837 		vma_end_reservation(h, vma, haddr);
3838 	}
3839
3840 	ptl = huge_pte_lock(h, mm, ptep);
3841 	size = i_size_read(mapping->host) >> huge_page_shift(h);
3842 	if (idx >= size)
3843 		goto backout;
3844
3845 	ret = 0;
3846 	if (!huge_pte_none(huge_ptep_get(ptep)))
3847 		goto backout;
3848
3849 	if (anon_rmap) {
3850 		ClearPagePrivate(page);
3851 		hugepage_add_new_anon_rmap(page, vma, haddr);
3852 	} else
3853 		page_dup_rmap(page, true);
3854 	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
3855 				&& (vma->vm_flags & VM_SHARED)));
3856 	set_huge_pte_at(mm, haddr, ptep, new_pte);
3857
3858 	hugetlb_count_add(pages_per_huge_page(h), mm);
3859 	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3860 		/* Optimization, do the COW without a second fault */
3861 		ret = hugetlb_cow(mm, vma, address, ptep, page, ptl);
3862 	}
3863
3864 	spin_unlock(ptl);
3865 	unlock_page(page);
3866 out:
3867 	return ret;
3868
3869 backout:
3870 	spin_unlock(ptl);
3871 backout_unlocked:
3872 	unlock_page(page);
3873 	restore_reserve_on_error(h, vma, haddr, page);
3874 	put_page(page);
3875 	goto out;
3876 }
3877
3878 #ifdef CONFIG_SMP
3879 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3880 			    struct vm_area_struct *vma,
3881 			    struct address_space *mapping,
3882 			    pgoff_t idx, unsigned long address)
3883 {
3884 	unsigned long key[2];
3885 	u32 hash;
3886
3887 	if (vma->vm_flags & VM_SHARED) {
3888 		key[0] = (unsigned long) mapping;
3889 		key[1] = idx;
3890 	} else {
3891 		key[0] = (unsigned long) mm;
3892 		key[1] = address >> huge_page_shift(h);
3893 	}
3894
3895 	hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
3896
3897 	return hash & (num_fault_mutexes - 1);
3898 }
3899 #else
3900 /*
3901  * For uniprocessor systems we always use a single mutex, so just
3902  * return 0 and avoid the hashing overhead.
3903  */
3904 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3905 			    struct vm_area_struct *vma,
3906 			    struct address_space *mapping,
3907 			    pgoff_t idx, unsigned long address)
3908 {
3909 	return 0;
3910 }
3911 #endif
3912
3913 vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3914 			unsigned long address, unsigned int flags)
3915 {
3916 	pte_t *ptep, entry;
3917 	spinlock_t *ptl;
3918 	vm_fault_t ret;
3919 	u32 hash;
3920 	pgoff_t idx;
3921 	struct page *page = NULL;
3922 	struct page *pagecache_page = NULL;
3923 	struct hstate *h = hstate_vma(vma);
3924 	struct address_space *mapping;
3925 	int need_wait_lock = 0;
3926 	unsigned long haddr = address & huge_page_mask(h);
3927
3928 	ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
3929 	if (ptep) {
3930 		entry = huge_ptep_get(ptep);
3931 		if (unlikely(is_hugetlb_entry_migration(entry))) {
3932 			migration_entry_wait_huge(vma, mm, ptep);
3933 			return 0;
3934 		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
3935 			return VM_FAULT_HWPOISON_LARGE |
3936 				VM_FAULT_SET_HINDEX(hstate_index(h));
3937 	} else {
3938 		ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
3939 		if (!ptep)
3940 			return VM_FAULT_OOM;
3941 	}
3942
3943 	mapping = vma->vm_file->f_mapping;
3944 	idx = vma_hugecache_offset(h, vma, haddr);
3945
3946 	/*
3947 	 * Serialize hugepage allocation and instantiation, so that we don't
3948 	 * get spurious allocation failures if two CPUs race to instantiate
3949 	 * the same page in the page cache.
3950 	 */
3951 	hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, haddr);
3952 	mutex_lock(&hugetlb_fault_mutex_table[hash]);
3953
3954 	entry = huge_ptep_get(ptep);
3955 	if (huge_pte_none(entry)) {
3956 		ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
3957 		goto out_mutex;
3958 	}
3959
3960 	ret = 0;
3961
3962 	/*
3963 	 * entry could be a migration/hwpoison entry at this point, so this
3964 	 * check prevents the kernel from going below assuming that we have
3965 	 * an active hugepage in pagecache. This goto expects the 2nd page fault,
3966 	 * and is_hugetlb_entry_(migration|hwpoisoned) check will properly
3967 	 * handle it.
3968 	 */
3969 	if (!pte_present(entry))
3970 		goto out_mutex;
3971
3972 	/*
3973 	 * If we are going to COW the mapping later, we examine the pending
3974 	 * reservations for this page now. This will ensure that any
3975 	 * allocations necessary to record that reservation occur outside the
3976 	 * spinlock. For private mappings, we also lookup the pagecache
3977 	 * page now as it is used to determine if a reservation has been
3978 	 * consumed.
3979 	 */
3980 	if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
3981 		if (vma_needs_reservation(h, vma, haddr) < 0) {
3982 			ret = VM_FAULT_OOM;
3983 			goto out_mutex;
3984 		}
3985 		/* Just decrements count, does not deallocate */
3986 		vma_end_reservation(h, vma, haddr);
3987
3988 		if (!(vma->vm_flags & VM_MAYSHARE))
3989 			pagecache_page = hugetlbfs_pagecache_page(h,
3990 								vma, haddr);
3991 	}
3992
3993 	ptl = huge_pte_lock(h, mm, ptep);
3994
3995 	/* Check for a racing update before calling hugetlb_cow */
3996 	if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
3997 		goto out_ptl;
3998
3999 	/*
4000 	 * hugetlb_cow() requires page locks of pte_page(entry) and
4001 	 * pagecache_page, so here we need to take the former one
4002 	 * when page != pagecache_page or !pagecache_page.
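	 * (When pagecache_page exists it is already locked: it was returned
	 * by the find_lock_page() call in hugetlbfs_pagecache_page() above.)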
4003 	 */
4004 	page = pte_page(entry);
4005 	if (page != pagecache_page)
4006 		if (!trylock_page(page)) {
4007 			need_wait_lock = 1;
4008 			goto out_ptl;
4009 		}
4010
4011 	get_page(page);
4012
4013 	if (flags & FAULT_FLAG_WRITE) {
4014 		if (!huge_pte_write(entry)) {
4015 			ret = hugetlb_cow(mm, vma, address, ptep,
4016 					  pagecache_page, ptl);
4017 			goto out_put_page;
4018 		}
4019 		entry = huge_pte_mkdirty(entry);
4020 	}
4021 	entry = pte_mkyoung(entry);
4022 	if (huge_ptep_set_access_flags(vma, haddr, ptep, entry,
4023 						flags & FAULT_FLAG_WRITE))
4024 		update_mmu_cache(vma, haddr, ptep);
4025 out_put_page:
4026 	if (page != pagecache_page)
4027 		unlock_page(page);
4028 	put_page(page);
4029 out_ptl:
4030 	spin_unlock(ptl);
4031
4032 	if (pagecache_page) {
4033 		unlock_page(pagecache_page);
4034 		put_page(pagecache_page);
4035 	}
4036 out_mutex:
4037 	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
4038 	/*
4039 	 * Generally it is safe to hold a refcount while waiting for a page
4040 	 * lock. Here, however, we wait only to defer the next page fault and
4041 	 * avoid a busy loop; the page is not touched after it is unlocked and
4042 	 * before the current fault returns. So we are safe from accessing a
4043 	 * freed page even though we wait here without taking a refcount.
4044 	 */
4045 	if (need_wait_lock)
4046 		wait_on_page_locked(page);
4047 	return ret;
4048 }
4049
4050 /*
4051  * Used by userfaultfd UFFDIO_COPY. Based on mcopy_atomic_pte with
4052  * modifications for huge pages.
4053  */
4054 int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
4055 			    pte_t *dst_pte,
4056 			    struct vm_area_struct *dst_vma,
4057 			    unsigned long dst_addr,
4058 			    unsigned long src_addr,
4059 			    struct page **pagep)
4060 {
4061 	struct address_space *mapping;
4062 	pgoff_t idx;
4063 	unsigned long size;
4064 	int vm_shared = dst_vma->vm_flags & VM_SHARED;
4065 	struct hstate *h = hstate_vma(dst_vma);
4066 	pte_t _dst_pte;
4067 	spinlock_t *ptl;
4068 	int ret;
4069 	struct page *page;
4070
4071 	if (!*pagep) {
4072 		ret = -ENOMEM;
4073 		page = alloc_huge_page(dst_vma, dst_addr, 0);
4074 		if (IS_ERR(page))
4075 			goto out;
4076
4077 		ret = copy_huge_page_from_user(page,
4078 						(const void __user *) src_addr,
4079 						pages_per_huge_page(h), false);
4080
4081 		/* fallback to copy_from_user outside mmap_sem */
4082 		if (unlikely(ret)) {
4083 			ret = -EFAULT;
4084 			*pagep = page;
4085 			/* don't free the page */
4086 			goto out;
4087 		}
4088 	} else {
4089 		page = *pagep;
4090 		*pagep = NULL;
4091 	}
4092
4093 	/*
4094 	 * The memory barrier inside __SetPageUptodate makes sure that
4095 	 * preceding stores to the page contents become visible before
4096 	 * the set_pte_at() write.
4097 	 */
4098 	__SetPageUptodate(page);
4099 	set_page_huge_active(page);
4100
4101 	mapping = dst_vma->vm_file->f_mapping;
4102 	idx = vma_hugecache_offset(h, dst_vma, dst_addr);
4103
4104 	/*
4105 	 * If shared, add to page cache
4106 	 */
4107 	if (vm_shared) {
4108 		size = i_size_read(mapping->host) >> huge_page_shift(h);
4109 		ret = -EFAULT;
4110 		if (idx >= size)
4111 			goto out_release_nounlock;
4112
4113 		/*
4114 		 * Serialization between remove_inode_hugepages() and
4115 		 * huge_add_to_page_cache() below happens through the
4116 		 * hugetlb_fault_mutex_table that here must be held by
4117 		 * the caller.
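		 * (At the time of writing the only such caller is
		 * __mcopy_atomic_hugetlb() in mm/userfaultfd.c, which takes
		 * the mutex before calling in here.)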
4118 */ 4119 ret = huge_add_to_page_cache(page, mapping, idx); 4120 if (ret) 4121 goto out_release_nounlock; 4122 } 4123 4124 ptl = huge_pte_lockptr(h, dst_mm, dst_pte); 4125 spin_lock(ptl); 4126 4127 /* 4128 * Recheck the i_size after holding PT lock to make sure not 4129 * to leave any page mapped (as page_mapped()) beyond the end 4130 * of the i_size (remove_inode_hugepages() is strict about 4131 * enforcing that). If we bail out here, we'll also leave a 4132 * page in the radix tree in the vm_shared case beyond the end 4133 * of the i_size, but remove_inode_hugepages() will take care 4134 * of it as soon as we drop the hugetlb_fault_mutex_table. 4135 */ 4136 size = i_size_read(mapping->host) >> huge_page_shift(h); 4137 ret = -EFAULT; 4138 if (idx >= size) 4139 goto out_release_unlock; 4140 4141 ret = -EEXIST; 4142 if (!huge_pte_none(huge_ptep_get(dst_pte))) 4143 goto out_release_unlock; 4144 4145 if (vm_shared) { 4146 page_dup_rmap(page, true); 4147 } else { 4148 ClearPagePrivate(page); 4149 hugepage_add_new_anon_rmap(page, dst_vma, dst_addr); 4150 } 4151 4152 _dst_pte = make_huge_pte(dst_vma, page, dst_vma->vm_flags & VM_WRITE); 4153 if (dst_vma->vm_flags & VM_WRITE) 4154 _dst_pte = huge_pte_mkdirty(_dst_pte); 4155 _dst_pte = pte_mkyoung(_dst_pte); 4156 4157 set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte); 4158 4159 (void)huge_ptep_set_access_flags(dst_vma, dst_addr, dst_pte, _dst_pte, 4160 dst_vma->vm_flags & VM_WRITE); 4161 hugetlb_count_add(pages_per_huge_page(h), dst_mm); 4162 4163 /* No need to invalidate - it was non-present before */ 4164 update_mmu_cache(dst_vma, dst_addr, dst_pte); 4165 4166 spin_unlock(ptl); 4167 if (vm_shared) 4168 unlock_page(page); 4169 ret = 0; 4170 out: 4171 return ret; 4172 out_release_unlock: 4173 spin_unlock(ptl); 4174 if (vm_shared) 4175 unlock_page(page); 4176 out_release_nounlock: 4177 put_page(page); 4178 goto out; 4179 } 4180 4181 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, 4182 struct page **pages, struct vm_area_struct **vmas, 4183 unsigned long *position, unsigned long *nr_pages, 4184 long i, unsigned int flags, int *nonblocking) 4185 { 4186 unsigned long pfn_offset; 4187 unsigned long vaddr = *position; 4188 unsigned long remainder = *nr_pages; 4189 struct hstate *h = hstate_vma(vma); 4190 int err = -EFAULT; 4191 4192 while (vaddr < vma->vm_end && remainder) { 4193 pte_t *pte; 4194 spinlock_t *ptl = NULL; 4195 int absent; 4196 struct page *page; 4197 4198 /* 4199 * If we have a pending SIGKILL, don't keep faulting pages and 4200 * potentially allocating memory. 4201 */ 4202 if (unlikely(fatal_signal_pending(current))) { 4203 remainder = 0; 4204 break; 4205 } 4206 4207 /* 4208 * Some archs (sparc64, sh*) have multiple pte_ts to 4209 * each hugepage. We have to make sure we get the 4210 * first, for the page indexing below to work. 4211 * 4212 * Note that page table lock is not held when pte is null. 4213 */ 4214 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h), 4215 huge_page_size(h)); 4216 if (pte) 4217 ptl = huge_pte_lock(h, mm, pte); 4218 absent = !pte || huge_pte_none(huge_ptep_get(pte)); 4219 4220 /* 4221 * When coredumping, it suits get_dump_page if we just return 4222 * an error where there's an empty slot with no huge pagecache 4223 * to back it. This way, we avoid allocating a hugepage, and 4224 * the sparse dumpfile avoids allocating disk blocks, but its 4225 * huge holes still show up with zeroes where they need to be. 
4226 		 */
4227 		if (absent && (flags & FOLL_DUMP) &&
4228 		    !hugetlbfs_pagecache_present(h, vma, vaddr)) {
4229 			if (pte)
4230 				spin_unlock(ptl);
4231 			remainder = 0;
4232 			break;
4233 		}
4234
4235 		/*
4236 		 * We need to call hugetlb_fault both for hugepages under
4237 		 * migration (in which case hugetlb_fault waits for the
4238 		 * migration) and for hwpoisoned hugepages (in which case we
4239 		 * need to prevent the caller from accessing them). To do
4240 		 * this we use is_swap_pte here instead of
4241 		 * is_hugetlb_entry_migration and is_hugetlb_entry_hwpoisoned,
4242 		 * because it simply covers both cases and because we cannot
4243 		 * follow correct pages directly from any kind of swap entry.
4244 		 */
4245 		if (absent || is_swap_pte(huge_ptep_get(pte)) ||
4246 		    ((flags & FOLL_WRITE) &&
4247 		      !huge_pte_write(huge_ptep_get(pte)))) {
4248 			vm_fault_t ret;
4249 			unsigned int fault_flags = 0;
4250
4251 			if (pte)
4252 				spin_unlock(ptl);
4253 			if (flags & FOLL_WRITE)
4254 				fault_flags |= FAULT_FLAG_WRITE;
4255 			if (nonblocking)
4256 				fault_flags |= FAULT_FLAG_ALLOW_RETRY;
4257 			if (flags & FOLL_NOWAIT)
4258 				fault_flags |= FAULT_FLAG_ALLOW_RETRY |
4259 					FAULT_FLAG_RETRY_NOWAIT;
4260 			if (flags & FOLL_TRIED) {
4261 				VM_WARN_ON_ONCE(fault_flags &
4262 						FAULT_FLAG_ALLOW_RETRY);
4263 				fault_flags |= FAULT_FLAG_TRIED;
4264 			}
4265 			ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
4266 			if (ret & VM_FAULT_ERROR) {
4267 				err = vm_fault_to_errno(ret, flags);
4268 				remainder = 0;
4269 				break;
4270 			}
4271 			if (ret & VM_FAULT_RETRY) {
4272 				if (nonblocking)
4273 					*nonblocking = 0;
4274 				*nr_pages = 0;
4275 				/*
4276 				 * VM_FAULT_RETRY must not return an
4277 				 * error, it will return zero
4278 				 * instead.
4279 				 *
4280 				 * No need to update "position" as the
4281 				 * caller will not check it after
4282 				 * *nr_pages is set to 0.
4283 				 */
4284 				return i;
4285 			}
4286 			continue;
4287 		}
4288
4289 		pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
4290 		page = pte_page(huge_ptep_get(pte));
4291 same_page:
4292 		if (pages) {
4293 			pages[i] = mem_map_offset(page, pfn_offset);
4294 			get_page(pages[i]);
4295 		}
4296
4297 		if (vmas)
4298 			vmas[i] = vma;
4299
4300 		vaddr += PAGE_SIZE;
4301 		++pfn_offset;
4302 		--remainder;
4303 		++i;
4304 		if (vaddr < vma->vm_end && remainder &&
4305 				pfn_offset < pages_per_huge_page(h)) {
4306 			/*
4307 			 * We use pfn_offset to avoid touching the pageframes
4308 			 * of this compound page.
4309 			 */
4310 			goto same_page;
4311 		}
4312 		spin_unlock(ptl);
4313 	}
4314 	*nr_pages = remainder;
4315 	/*
4316 	 * Setting position is actually required only if remainder is
4317 	 * not zero, but it's faster not to add an "if (remainder)"
4318 	 * branch.
4319 	 */
4320 	*position = vaddr;
4321
4322 	return i ? i : err;
4323 }
4324
4325 #ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
4326 /*
4327  * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
4328  * implement this.
4329  */
4330 #define flush_hugetlb_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
4331 #endif
4332
4333 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
4334 		unsigned long address, unsigned long end, pgprot_t newprot)
4335 {
4336 	struct mm_struct *mm = vma->vm_mm;
4337 	unsigned long start = address;
4338 	pte_t *ptep;
4339 	pte_t pte;
4340 	struct hstate *h = hstate_vma(vma);
4341 	unsigned long pages = 0;
4342 	unsigned long f_start = start;
4343 	unsigned long f_end = end;
4344 	bool shared_pmd = false;
4345
4346 	/*
4347 	 * In the case of shared PMDs, the area to flush could be beyond
4348 	 * start/end.
Set f_start/f_end to cover the maximum possible 4349 * range if PMD sharing is possible. 4350 */ 4351 adjust_range_if_pmd_sharing_possible(vma, &f_start, &f_end); 4352 4353 BUG_ON(address >= end); 4354 flush_cache_range(vma, f_start, f_end); 4355 4356 mmu_notifier_invalidate_range_start(mm, f_start, f_end); 4357 i_mmap_lock_write(vma->vm_file->f_mapping); 4358 for (; address < end; address += huge_page_size(h)) { 4359 spinlock_t *ptl; 4360 ptep = huge_pte_offset(mm, address, huge_page_size(h)); 4361 if (!ptep) 4362 continue; 4363 ptl = huge_pte_lock(h, mm, ptep); 4364 if (huge_pmd_unshare(mm, &address, ptep)) { 4365 pages++; 4366 spin_unlock(ptl); 4367 shared_pmd = true; 4368 continue; 4369 } 4370 pte = huge_ptep_get(ptep); 4371 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) { 4372 spin_unlock(ptl); 4373 continue; 4374 } 4375 if (unlikely(is_hugetlb_entry_migration(pte))) { 4376 swp_entry_t entry = pte_to_swp_entry(pte); 4377 4378 if (is_write_migration_entry(entry)) { 4379 pte_t newpte; 4380 4381 make_migration_entry_read(&entry); 4382 newpte = swp_entry_to_pte(entry); 4383 set_huge_swap_pte_at(mm, address, ptep, 4384 newpte, huge_page_size(h)); 4385 pages++; 4386 } 4387 spin_unlock(ptl); 4388 continue; 4389 } 4390 if (!huge_pte_none(pte)) { 4391 pte = huge_ptep_get_and_clear(mm, address, ptep); 4392 pte = pte_mkhuge(huge_pte_modify(pte, newprot)); 4393 pte = arch_make_huge_pte(pte, vma, NULL, 0); 4394 set_huge_pte_at(mm, address, ptep, pte); 4395 pages++; 4396 } 4397 spin_unlock(ptl); 4398 } 4399 /* 4400 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare 4401 * may have cleared our pud entry and done put_page on the page table: 4402 * once we release i_mmap_rwsem, another task can do the final put_page 4403 * and that page table be reused and filled with junk. If we actually 4404 * did unshare a page of pmds, flush the range corresponding to the pud. 4405 */ 4406 if (shared_pmd) 4407 flush_hugetlb_tlb_range(vma, f_start, f_end); 4408 else 4409 flush_hugetlb_tlb_range(vma, start, end); 4410 /* 4411 * No need to call mmu_notifier_invalidate_range() we are downgrading 4412 * page table protection not changing it to point to a new page. 4413 * 4414 * See Documentation/vm/mmu_notifier.rst 4415 */ 4416 i_mmap_unlock_write(vma->vm_file->f_mapping); 4417 mmu_notifier_invalidate_range_end(mm, f_start, f_end); 4418 4419 return pages << h->order; 4420 } 4421 4422 int hugetlb_reserve_pages(struct inode *inode, 4423 long from, long to, 4424 struct vm_area_struct *vma, 4425 vm_flags_t vm_flags) 4426 { 4427 long ret, chg; 4428 struct hstate *h = hstate_inode(inode); 4429 struct hugepage_subpool *spool = subpool_inode(inode); 4430 struct resv_map *resv_map; 4431 long gbl_reserve; 4432 4433 /* This should never happen */ 4434 if (from > to) { 4435 VM_WARN(1, "%s called with a negative range\n", __func__); 4436 return -EINVAL; 4437 } 4438 4439 /* 4440 * Only apply hugepage reservation if asked. At fault time, an 4441 * attempt will be made for VM_NORESERVE to allocate a page 4442 * without using reserves 4443 */ 4444 if (vm_flags & VM_NORESERVE) 4445 return 0; 4446 4447 /* 4448 * Shared mappings base their reservation on the number of pages that 4449 * are already allocated on behalf of the file. Private mappings need 4450 * to reserve the full area even if read-only as mprotect() may be 4451 * called to make the mapping read-write. 
Assume !vma is a shm mapping 4452 */ 4453 if (!vma || vma->vm_flags & VM_MAYSHARE) { 4454 resv_map = inode_resv_map(inode); 4455 4456 chg = region_chg(resv_map, from, to); 4457 4458 } else { 4459 resv_map = resv_map_alloc(); 4460 if (!resv_map) 4461 return -ENOMEM; 4462 4463 chg = to - from; 4464 4465 set_vma_resv_map(vma, resv_map); 4466 set_vma_resv_flags(vma, HPAGE_RESV_OWNER); 4467 } 4468 4469 if (chg < 0) { 4470 ret = chg; 4471 goto out_err; 4472 } 4473 4474 /* 4475 * There must be enough pages in the subpool for the mapping. If 4476 * the subpool has a minimum size, there may be some global 4477 * reservations already in place (gbl_reserve). 4478 */ 4479 gbl_reserve = hugepage_subpool_get_pages(spool, chg); 4480 if (gbl_reserve < 0) { 4481 ret = -ENOSPC; 4482 goto out_err; 4483 } 4484 4485 /* 4486 * Check enough hugepages are available for the reservation. 4487 * Hand the pages back to the subpool if there are not 4488 */ 4489 ret = hugetlb_acct_memory(h, gbl_reserve); 4490 if (ret < 0) { 4491 /* put back original number of pages, chg */ 4492 (void)hugepage_subpool_put_pages(spool, chg); 4493 goto out_err; 4494 } 4495 4496 /* 4497 * Account for the reservations made. Shared mappings record regions 4498 * that have reservations as they are shared by multiple VMAs. 4499 * When the last VMA disappears, the region map says how much 4500 * the reservation was and the page cache tells how much of 4501 * the reservation was consumed. Private mappings are per-VMA and 4502 * only the consumed reservations are tracked. When the VMA 4503 * disappears, the original reservation is the VMA size and the 4504 * consumed reservations are stored in the map. Hence, nothing 4505 * else has to be done for private mappings here 4506 */ 4507 if (!vma || vma->vm_flags & VM_MAYSHARE) { 4508 long add = region_add(resv_map, from, to); 4509 4510 if (unlikely(chg > add)) { 4511 /* 4512 * pages in this range were added to the reserve 4513 * map between region_chg and region_add. This 4514 * indicates a race with alloc_huge_page. Adjust 4515 * the subpool and reserve counts modified above 4516 * based on the difference. 4517 */ 4518 long rsv_adjust; 4519 4520 rsv_adjust = hugepage_subpool_put_pages(spool, 4521 chg - add); 4522 hugetlb_acct_memory(h, -rsv_adjust); 4523 } 4524 } 4525 return 0; 4526 out_err: 4527 if (!vma || vma->vm_flags & VM_MAYSHARE) 4528 /* Don't call region_abort if region_chg failed */ 4529 if (chg >= 0) 4530 region_abort(resv_map, from, to); 4531 if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) 4532 kref_put(&resv_map->refs, resv_map_release); 4533 return ret; 4534 } 4535 4536 long hugetlb_unreserve_pages(struct inode *inode, long start, long end, 4537 long freed) 4538 { 4539 struct hstate *h = hstate_inode(inode); 4540 struct resv_map *resv_map = inode_resv_map(inode); 4541 long chg = 0; 4542 struct hugepage_subpool *spool = subpool_inode(inode); 4543 long gbl_reserve; 4544 4545 if (resv_map) { 4546 chg = region_del(resv_map, start, end); 4547 /* 4548 * region_del() can fail in the rare case where a region 4549 * must be split and another region descriptor can not be 4550 * allocated. If end == LONG_MAX, it will not fail. 4551 */ 4552 if (chg < 0) 4553 return chg; 4554 } 4555 4556 spin_lock(&inode->i_lock); 4557 inode->i_blocks -= (blocks_per_huge_page(h) * freed); 4558 spin_unlock(&inode->i_lock); 4559 4560 /* 4561 * If the subpool has a minimum size, the number of global 4562 * reservations to be released may be adjusted. 
4563 */ 4564 gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed)); 4565 hugetlb_acct_memory(h, -gbl_reserve); 4566 4567 return 0; 4568 } 4569 4570 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE 4571 static unsigned long page_table_shareable(struct vm_area_struct *svma, 4572 struct vm_area_struct *vma, 4573 unsigned long addr, pgoff_t idx) 4574 { 4575 unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) + 4576 svma->vm_start; 4577 unsigned long sbase = saddr & PUD_MASK; 4578 unsigned long s_end = sbase + PUD_SIZE; 4579 4580 /* Allow segments to share if only one is marked locked */ 4581 unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK; 4582 unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK; 4583 4584 /* 4585 * match the virtual addresses, permission and the alignment of the 4586 * page table page. 4587 */ 4588 if (pmd_index(addr) != pmd_index(saddr) || 4589 vm_flags != svm_flags || 4590 sbase < svma->vm_start || svma->vm_end < s_end) 4591 return 0; 4592 4593 return saddr; 4594 } 4595 4596 static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr) 4597 { 4598 unsigned long base = addr & PUD_MASK; 4599 unsigned long end = base + PUD_SIZE; 4600 4601 /* 4602 * check on proper vm_flags and page table alignment 4603 */ 4604 if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end)) 4605 return true; 4606 return false; 4607 } 4608 4609 /* 4610 * Determine if start,end range within vma could be mapped by shared pmd. 4611 * If yes, adjust start and end to cover range associated with possible 4612 * shared pmd mappings. 4613 */ 4614 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, 4615 unsigned long *start, unsigned long *end) 4616 { 4617 unsigned long check_addr = *start; 4618 4619 if (!(vma->vm_flags & VM_MAYSHARE)) 4620 return; 4621 4622 for (check_addr = *start; check_addr < *end; check_addr += PUD_SIZE) { 4623 unsigned long a_start = check_addr & PUD_MASK; 4624 unsigned long a_end = a_start + PUD_SIZE; 4625 4626 /* 4627 * If sharing is possible, adjust start/end if necessary. 4628 */ 4629 if (range_in_vma(vma, a_start, a_end)) { 4630 if (a_start < *start) 4631 *start = a_start; 4632 if (a_end > *end) 4633 *end = a_end; 4634 } 4635 } 4636 } 4637 4638 /* 4639 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc() 4640 * and returns the corresponding pte. While this is not necessary for the 4641 * !shared pmd case because we can allocate the pmd later as well, it makes the 4642 * code much cleaner. pmd allocation is essential for the shared case because 4643 * pud has to be populated inside the same i_mmap_rwsem section - otherwise 4644 * racing tasks could either miss the sharing (see huge_pte_offset) or select a 4645 * bad pmd for sharing. 
4646 */ 4647 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) 4648 { 4649 struct vm_area_struct *vma = find_vma(mm, addr); 4650 struct address_space *mapping = vma->vm_file->f_mapping; 4651 pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + 4652 vma->vm_pgoff; 4653 struct vm_area_struct *svma; 4654 unsigned long saddr; 4655 pte_t *spte = NULL; 4656 pte_t *pte; 4657 spinlock_t *ptl; 4658 4659 if (!vma_shareable(vma, addr)) 4660 return (pte_t *)pmd_alloc(mm, pud, addr); 4661 4662 i_mmap_lock_write(mapping); 4663 vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) { 4664 if (svma == vma) 4665 continue; 4666 4667 saddr = page_table_shareable(svma, vma, addr, idx); 4668 if (saddr) { 4669 spte = huge_pte_offset(svma->vm_mm, saddr, 4670 vma_mmu_pagesize(svma)); 4671 if (spte) { 4672 get_page(virt_to_page(spte)); 4673 break; 4674 } 4675 } 4676 } 4677 4678 if (!spte) 4679 goto out; 4680 4681 ptl = huge_pte_lock(hstate_vma(vma), mm, spte); 4682 if (pud_none(*pud)) { 4683 pud_populate(mm, pud, 4684 (pmd_t *)((unsigned long)spte & PAGE_MASK)); 4685 mm_inc_nr_pmds(mm); 4686 } else { 4687 put_page(virt_to_page(spte)); 4688 } 4689 spin_unlock(ptl); 4690 out: 4691 pte = (pte_t *)pmd_alloc(mm, pud, addr); 4692 i_mmap_unlock_write(mapping); 4693 return pte; 4694 } 4695 4696 /* 4697 * unmap huge page backed by shared pte. 4698 * 4699 * Hugetlb pte page is ref counted at the time of mapping. If pte is shared 4700 * indicated by page_count > 1, unmap is achieved by clearing pud and 4701 * decrementing the ref count. If count == 1, the pte page is not shared. 4702 * 4703 * called with page table lock held. 4704 * 4705 * returns: 1 successfully unmapped a shared pte page 4706 * 0 the underlying pte page is not shared, or it is the last user 4707 */ 4708 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) 4709 { 4710 pgd_t *pgd = pgd_offset(mm, *addr); 4711 p4d_t *p4d = p4d_offset(pgd, *addr); 4712 pud_t *pud = pud_offset(p4d, *addr); 4713 4714 BUG_ON(page_count(virt_to_page(ptep)) == 0); 4715 if (page_count(virt_to_page(ptep)) == 1) 4716 return 0; 4717 4718 pud_clear(pud); 4719 put_page(virt_to_page(ptep)); 4720 mm_dec_nr_pmds(mm); 4721 *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE; 4722 return 1; 4723 } 4724 #define want_pmd_share() (1) 4725 #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */ 4726 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) 4727 { 4728 return NULL; 4729 } 4730 4731 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) 4732 { 4733 return 0; 4734 } 4735 4736 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, 4737 unsigned long *start, unsigned long *end) 4738 { 4739 } 4740 #define want_pmd_share() (0) 4741 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */ 4742 4743 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB 4744 pte_t *huge_pte_alloc(struct mm_struct *mm, 4745 unsigned long addr, unsigned long sz) 4746 { 4747 pgd_t *pgd; 4748 p4d_t *p4d; 4749 pud_t *pud; 4750 pte_t *pte = NULL; 4751 4752 pgd = pgd_offset(mm, addr); 4753 p4d = p4d_alloc(mm, pgd, addr); 4754 if (!p4d) 4755 return NULL; 4756 pud = pud_alloc(mm, p4d, addr); 4757 if (pud) { 4758 if (sz == PUD_SIZE) { 4759 pte = (pte_t *)pud; 4760 } else { 4761 BUG_ON(sz != PMD_SIZE); 4762 if (want_pmd_share() && pud_none(*pud)) 4763 pte = huge_pmd_share(mm, addr, pud); 4764 else 4765 pte = (pte_t *)pmd_alloc(mm, pud, addr); 4766 } 4767 } 4768 BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte)); 4769 4770 return pte; 
4771 } 4772 4773 /* 4774 * huge_pte_offset() - Walk the page table to resolve the hugepage 4775 * entry at address @addr 4776 * 4777 * Return: Pointer to page table or swap entry (PUD or PMD) for 4778 * address @addr, or NULL if a p*d_none() entry is encountered and the 4779 * size @sz doesn't match the hugepage size at this level of the page 4780 * table. 4781 */ 4782 pte_t *huge_pte_offset(struct mm_struct *mm, 4783 unsigned long addr, unsigned long sz) 4784 { 4785 pgd_t *pgd; 4786 p4d_t *p4d; 4787 pud_t *pud; 4788 pmd_t *pmd; 4789 4790 pgd = pgd_offset(mm, addr); 4791 if (!pgd_present(*pgd)) 4792 return NULL; 4793 p4d = p4d_offset(pgd, addr); 4794 if (!p4d_present(*p4d)) 4795 return NULL; 4796 4797 pud = pud_offset(p4d, addr); 4798 if (sz != PUD_SIZE && pud_none(*pud)) 4799 return NULL; 4800 /* hugepage or swap? */ 4801 if (pud_huge(*pud) || !pud_present(*pud)) 4802 return (pte_t *)pud; 4803 4804 pmd = pmd_offset(pud, addr); 4805 if (sz != PMD_SIZE && pmd_none(*pmd)) 4806 return NULL; 4807 /* hugepage or swap? */ 4808 if (pmd_huge(*pmd) || !pmd_present(*pmd)) 4809 return (pte_t *)pmd; 4810 4811 return NULL; 4812 } 4813 4814 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */ 4815 4816 /* 4817 * These functions are overwritable if your architecture needs its own 4818 * behavior. 4819 */ 4820 struct page * __weak 4821 follow_huge_addr(struct mm_struct *mm, unsigned long address, 4822 int write) 4823 { 4824 return ERR_PTR(-EINVAL); 4825 } 4826 4827 struct page * __weak 4828 follow_huge_pd(struct vm_area_struct *vma, 4829 unsigned long address, hugepd_t hpd, int flags, int pdshift) 4830 { 4831 WARN(1, "hugepd follow called with no support for hugepage directory format\n"); 4832 return NULL; 4833 } 4834 4835 struct page * __weak 4836 follow_huge_pmd(struct mm_struct *mm, unsigned long address, 4837 pmd_t *pmd, int flags) 4838 { 4839 struct page *page = NULL; 4840 spinlock_t *ptl; 4841 pte_t pte; 4842 retry: 4843 ptl = pmd_lockptr(mm, pmd); 4844 spin_lock(ptl); 4845 /* 4846 * make sure that the address range covered by this pmd is not 4847 * unmapped from other threads. 4848 */ 4849 if (!pmd_huge(*pmd)) 4850 goto out; 4851 pte = huge_ptep_get((pte_t *)pmd); 4852 if (pte_present(pte)) { 4853 page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT); 4854 if (flags & FOLL_GET) 4855 get_page(page); 4856 } else { 4857 if (is_hugetlb_entry_migration(pte)) { 4858 spin_unlock(ptl); 4859 __migration_entry_wait(mm, (pte_t *)pmd, ptl); 4860 goto retry; 4861 } 4862 /* 4863 * hwpoisoned entry is treated as no_page_table in 4864 * follow_page_mask(). 
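		 * (page is left NULL in that case, so we fall through to
		 * "out" below and return NULL.)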
4865 */ 4866 } 4867 out: 4868 spin_unlock(ptl); 4869 return page; 4870 } 4871 4872 struct page * __weak 4873 follow_huge_pud(struct mm_struct *mm, unsigned long address, 4874 pud_t *pud, int flags) 4875 { 4876 if (flags & FOLL_GET) 4877 return NULL; 4878 4879 return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT); 4880 } 4881 4882 struct page * __weak 4883 follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags) 4884 { 4885 if (flags & FOLL_GET) 4886 return NULL; 4887 4888 return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT); 4889 } 4890 4891 bool isolate_huge_page(struct page *page, struct list_head *list) 4892 { 4893 bool ret = true; 4894 4895 VM_BUG_ON_PAGE(!PageHead(page), page); 4896 spin_lock(&hugetlb_lock); 4897 if (!page_huge_active(page) || !get_page_unless_zero(page)) { 4898 ret = false; 4899 goto unlock; 4900 } 4901 clear_page_huge_active(page); 4902 list_move_tail(&page->lru, list); 4903 unlock: 4904 spin_unlock(&hugetlb_lock); 4905 return ret; 4906 } 4907 4908 void putback_active_hugepage(struct page *page) 4909 { 4910 VM_BUG_ON_PAGE(!PageHead(page), page); 4911 spin_lock(&hugetlb_lock); 4912 set_page_huge_active(page); 4913 list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist); 4914 spin_unlock(&hugetlb_lock); 4915 put_page(page); 4916 } 4917 4918 void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason) 4919 { 4920 struct hstate *h = page_hstate(oldpage); 4921 4922 hugetlb_cgroup_migrate(oldpage, newpage); 4923 set_page_owner_migrate_reason(newpage, reason); 4924 4925 /* 4926 * transfer temporary state of the new huge page. This is 4927 * reverse to other transitions because the newpage is going to 4928 * be final while the old one will be freed so it takes over 4929 * the temporary status. 4930 * 4931 * Also note that we have to transfer the per-node surplus state 4932 * here as well otherwise the global surplus count will not match 4933 * the per-node's. 4934 */ 4935 if (PageHugeTemporary(newpage)) { 4936 int old_nid = page_to_nid(oldpage); 4937 int new_nid = page_to_nid(newpage); 4938 4939 SetPageHugeTemporary(oldpage); 4940 ClearPageHugeTemporary(newpage); 4941 4942 spin_lock(&hugetlb_lock); 4943 if (h->surplus_huge_pages_node[old_nid]) { 4944 h->surplus_huge_pages_node[old_nid]--; 4945 h->surplus_huge_pages_node[new_nid]++; 4946 } 4947 spin_unlock(&hugetlb_lock); 4948 } 4949 } 4950
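
/*
 * Illustrative sketch (editor's addition, not compiled): how a migration
 * or memory-offlining path is expected to pair isolate_huge_page() with
 * putback_active_hugepage() above.  The example_* names are hypothetical;
 * the real callers live in the page-migration and memory-offlining code.
 */
#if 0
static void example_isolate_for_migration(struct page *hpage,
					   struct list_head *pagelist)
{
	/*
	 * Only an active huge page can be isolated; on success it is moved
	 * from the hstate's activelist onto @pagelist and a reference is
	 * taken, keeping it stable while migration is set up.
	 */
	if (!isolate_huge_page(hpage, pagelist))
		return;		/* already isolated, being freed, or inactive */
}

static void example_undo_isolation(struct list_head *pagelist)
{
	struct page *hpage, *next;

	/* If migration is aborted, every isolated page must be put back. */
	list_for_each_entry_safe(hpage, next, pagelist, lru)
		putback_active_hugepage(hpage);	/* re-activates, drops the ref */
}
#endif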