// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic hugetlb support.
 * (C) Nadia Yvette Chambers, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/memblock.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/mmdebug.h>
#include <linux/sched/signal.h>
#include <linux/rmap.h>
#include <linux/string_helpers.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/jhash.h>
#include <linux/numa.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>

#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/node.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_owner.h>
#include "internal.h"

int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];
/*
 * Minimum page order among possible hugepage sizes, set to a proper value
 * at boot time.
 */
static unsigned int minimum_order __read_mostly = UINT_MAX;

__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static unsigned long __initdata default_hstate_size;
static bool __initdata parsed_valid_hugepagesz = true;

/*
 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
 * free_huge_pages, and surplus_huge_pages.
 */
DEFINE_SPINLOCK(hugetlb_lock);

/*
 * Serializes faults on the same logical page.  This is used to
 * prevent spurious OOMs when the hugepage pool is fully utilized.
 */
static int num_fault_mutexes;
struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;

/* Forward declaration */
static int hugetlb_acct_memory(struct hstate *h, long delta);

static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
{
	bool free = (spool->count == 0) && (spool->used_hpages == 0);

	spin_unlock(&spool->lock);

	/* If no pages are used, and no other handles to the subpool
	 * remain, give up any reservations based on minimum size and
	 * free the subpool */
	if (free) {
		if (spool->min_hpages != -1)
			hugetlb_acct_memory(spool->hstate,
						-spool->min_hpages);
		kfree(spool);
	}
}

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages)
{
	struct hugepage_subpool *spool;

	spool = kzalloc(sizeof(*spool), GFP_KERNEL);
	if (!spool)
		return NULL;

	spin_lock_init(&spool->lock);
	spool->count = 1;
	spool->max_hpages = max_hpages;
	spool->hstate = h;
	spool->min_hpages = min_hpages;

	if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
		kfree(spool);
		return NULL;
	}
	spool->rsv_hpages = min_hpages;

	return spool;
}

void hugepage_put_subpool(struct hugepage_subpool *spool)
{
	spin_lock(&spool->lock);
	BUG_ON(!spool->count);
	spool->count--;
	unlock_or_release_subpool(spool);
}

/*
 * Subpool accounting for allocating and reserving pages.
 * Return -ENOMEM if there are not enough resources to satisfy the
 * request.  Otherwise, return the number of pages by which the
 * global pools must be adjusted (upward).  The returned value may
 * only be different than the passed value (delta) in the case where
 * a subpool minimum size must be maintained.
 */
static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
				       long delta)
{
	long ret = delta;

	if (!spool)
		return ret;

	spin_lock(&spool->lock);

	if (spool->max_hpages != -1) {		/* maximum size accounting */
		if ((spool->used_hpages + delta) <= spool->max_hpages)
			spool->used_hpages += delta;
		else {
			ret = -ENOMEM;
			goto unlock_ret;
		}
	}

	/* minimum size accounting */
	if (spool->min_hpages != -1 && spool->rsv_hpages) {
		if (delta > spool->rsv_hpages) {
			/*
			 * Asking for more reserves than those already taken on
			 * behalf of subpool.  Return difference.
			 */
			ret = delta - spool->rsv_hpages;
			spool->rsv_hpages = 0;
		} else {
			ret = 0;	/* reserves already accounted for */
			spool->rsv_hpages -= delta;
		}
	}

unlock_ret:
	spin_unlock(&spool->lock);
	return ret;
}
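/*
 * Worked example (illustrative values only, not kernel state): a subpool
 * created with min_hpages = 10 starts with rsv_hpages = 10.  A first call
 * hugepage_subpool_get_pages(spool, 3) is fully covered by that reserve,
 * so it returns 0 and leaves rsv_hpages = 7.  A later call
 * hugepage_subpool_get_pages(spool, 12) exceeds the remaining reserve, so
 * it returns 12 - 7 = 5 (the amount the global pools must absorb) and
 * zeroes rsv_hpages.
 */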
/*
 * Subpool accounting for freeing and unreserving pages.
 * Return the number of global page reservations that must be dropped.
 * The return value may only be different than the passed value (delta)
 * in the case where a subpool minimum size must be maintained.
 */
static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
				       long delta)
{
	long ret = delta;

	if (!spool)
		return delta;

	spin_lock(&spool->lock);

	if (spool->max_hpages != -1)		/* maximum size accounting */
		spool->used_hpages -= delta;

	 /* minimum size accounting */
	if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
		if (spool->rsv_hpages + delta <= spool->min_hpages)
			ret = 0;
		else
			ret = spool->rsv_hpages + delta - spool->min_hpages;

		spool->rsv_hpages += delta;
		if (spool->rsv_hpages > spool->min_hpages)
			spool->rsv_hpages = spool->min_hpages;
	}

	/*
	 * If hugetlbfs_put_super couldn't free spool due to an outstanding
	 * quota reference, free it now.
	 */
	unlock_or_release_subpool(spool);

	return ret;
}

static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{
	return HUGETLBFS_SB(inode->i_sb)->spool;
}

static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{
	return subpool_inode(file_inode(vma->vm_file));
}

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 *                    across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock.  The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map.  The from and to elements are huge page
 * indices into the associated mapping.  from indicates the starting index
 * of the region.  to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping.  It is important to note that the to element
 * represents the first element past the end of the region.  This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
};

/*
 * Add the huge page range represented by [f, t) to the reserve
 * map.  In the normal case, existing regions will be expanded
 * to accommodate the specified range.  Sufficient regions should
 * exist for expansion due to the previous call to region_chg
 * with the same range.  However, it is possible that region_del
 * could have been called after region_chg and modified the map
 * in such a way that no region exists to be expanded.  In this
 * case, pull a region descriptor from the cache associated with
 * the map and use that for the new range.
 *
 * Return the number of new huge pages added to the map.  This
 * number is greater than or equal to zero.
 */
static long region_add(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *nrg, *trg;
	long add = 0;

	spin_lock(&resv->lock);
	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/*
	 * If no region exists which can be expanded to include the
	 * specified range, the list must have been modified by an
	 * interleaving call to region_del().
	 * Pull a region descriptor from the cache and use it for this
	 * range.
	 */
	if (&rg->link == head || t < rg->from) {
		VM_BUG_ON(resv->region_cache_count <= 0);

		resv->region_cache_count--;
		nrg = list_first_entry(&resv->region_cache, struct file_region,
					link);
		list_del(&nrg->link);

		nrg->from = f;
		nrg->to = t;
		list_add(&nrg->link, rg->link.prev);

		add += t - f;
		goto out_locked;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;

	/* Check for and consume any regions we now overlap with. */
	nrg = rg;
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			break;

		/* If this area reaches higher then extend our area to
		 * include it completely.  If this is not the first area
		 * which we intend to reuse, free it. */
		if (rg->to > t)
			t = rg->to;
		if (rg != nrg) {
			/* Decrement return value by the deleted range.
			 * Another range will span this area so that by
			 * end of routine add will be >= zero
			 */
			add -= (rg->to - rg->from);
			list_del(&rg->link);
			kfree(rg);
		}
	}

	add += (nrg->from - f);		/* Added to beginning of region */
	nrg->from = f;
	add += t - nrg->to;		/* Added to end of region */
	nrg->to = t;

out_locked:
	resv->adds_in_progress--;
	spin_unlock(&resv->lock);
	VM_BUG_ON(add < 0);
	return add;
}

/*
 * Examine the existing reserve map and determine how many
 * huge pages in the specified range [f, t) are NOT currently
 * represented.  This routine is called before a subsequent
 * call to region_add that will actually modify the reserve
 * map to add the specified range [f, t).  region_chg does
 * not change the number of huge pages represented by the
 * map.  However, if the existing regions in the map can not
 * be expanded to represent the new range, a new file_region
 * structure is added to the map as a placeholder.  This is
 * so that the subsequent region_add call will have all the
 * regions it needs and will not fail.
 *
 * Upon entry, region_chg will also examine the cache of region descriptors
 * associated with the map.  If there are not enough descriptors cached, one
 * will be allocated for the in progress add operation.
 *
 * Returns the number of huge pages that need to be added to the existing
 * reservation map for the range [f, t).  This number is greater than or
 * equal to zero.  -ENOMEM is returned if a new file_region structure or
 * cache entry is needed and can not be allocated.
 */
static long region_chg(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *nrg = NULL;
	long chg = 0;

retry:
	spin_lock(&resv->lock);
retry_locked:
	resv->adds_in_progress++;

	/*
	 * Check for sufficient descriptors in the cache to accommodate
	 * the number of in progress add operations.
	 */
	if (resv->adds_in_progress > resv->region_cache_count) {
		struct file_region *trg;

		VM_BUG_ON(resv->adds_in_progress - resv->region_cache_count > 1);
		/* Must drop lock to allocate a new descriptor. */
		resv->adds_in_progress--;
		spin_unlock(&resv->lock);

		trg = kmalloc(sizeof(*trg), GFP_KERNEL);
		if (!trg) {
			kfree(nrg);
			return -ENOMEM;
		}

		spin_lock(&resv->lock);
		list_add(&trg->link, &resv->region_cache);
		resv->region_cache_count++;
		goto retry_locked;
	}

	/* Locate the region we are before or in. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* If we are below the current region then a new region is required.
	 * Subtle, allocate a new region at the position but make it zero
	 * size such that we can guarantee to record the reservation. */
	if (&rg->link == head || t < rg->from) {
		if (!nrg) {
			resv->adds_in_progress--;
			spin_unlock(&resv->lock);
			nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
			if (!nrg)
				return -ENOMEM;

			nrg->from = f;
			nrg->to   = f;
			INIT_LIST_HEAD(&nrg->link);
			goto retry;
		}

		list_add(&nrg->link, rg->link.prev);
		chg = t - f;
		goto out_nrg;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;
	chg = t - f;

	/* Check for and consume any regions we now overlap with. */
	list_for_each_entry(rg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			goto out;

		/* We overlap with this area, if it extends further than
		 * us then we must extend ourselves.  Account for its
		 * existing reservation. */
		if (rg->to > t) {
			chg += rg->to - t;
			t = rg->to;
		}
		chg -= rg->to - rg->from;
	}

out:
	spin_unlock(&resv->lock);
	/* We already know we raced and no longer need the new region */
	kfree(nrg);
	return chg;
out_nrg:
	spin_unlock(&resv->lock);
	return chg;
}

/*
 * Abort the in progress add operation.  The adds_in_progress field
 * of the resv_map keeps track of the operations in progress between
 * calls to region_chg and region_add.  Operations are sometimes
 * aborted after the call to region_chg.  In such cases, region_abort
 * is called to decrement the adds_in_progress counter.
 *
 * NOTE: The range arguments [f, t) are not needed or used in this
 * routine.  They are kept to make reading the calling code easier as
 * arguments will match the associated region_chg call.
 */
static void region_abort(struct resv_map *resv, long f, long t)
{
	spin_lock(&resv->lock);
	VM_BUG_ON(!resv->region_cache_count);
	resv->adds_in_progress--;
	spin_unlock(&resv->lock);
}
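/*
 * Illustrative sketch only (not part of the kernel API): callers are
 * expected to pair the routines above roughly as follows, mirroring what
 * __vma_reservation_common() does later in this file.  "allocation_ok" is
 * a hypothetical placeholder for the caller's success condition.
 *
 *	chg = region_chg(resv, idx, idx + 1);
 *	if (chg < 0)
 *		return chg;
 *	if (allocation_ok)
 *		added = region_add(resv, idx, idx + 1);
 *	else
 *		region_abort(resv, idx, idx + 1);
 */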
/*
 * Delete the specified range [f, t) from the reserve map.  If the
 * t parameter is LONG_MAX, this indicates that ALL regions after f
 * should be deleted.  Locate the regions which intersect [f, t)
 * and either trim, delete or split the existing regions.
 *
 * Returns the number of huge pages deleted from the reserve map.
 * In the normal case, the return value is zero or more.  In the
 * case where a region must be split, a new region descriptor must
 * be allocated.  If the allocation fails, -ENOMEM will be returned.
 * NOTE: If the parameter t == LONG_MAX, then we will never split
 * a region and possibly return -ENOMEM.  Callers specifying
 * t == LONG_MAX do not need to check for -ENOMEM error.
 */
static long region_del(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *trg;
	struct file_region *nrg = NULL;
	long del = 0;

retry:
	spin_lock(&resv->lock);
	list_for_each_entry_safe(rg, trg, head, link) {
		/*
		 * Skip regions before the range to be deleted.  file_region
		 * ranges are normally of the form [from, to).  However, there
		 * may be a "placeholder" entry in the map which is of the form
		 * (from, to) with from == to.  Check for placeholder entries
		 * at the beginning of the range to be deleted.
		 */
		if (rg->to <= f && (rg->to != rg->from || rg->to != f))
			continue;

		if (rg->from >= t)
			break;

		if (f > rg->from && t < rg->to) { /* Must split region */
			/*
			 * Check for an entry in the cache before dropping
			 * lock and attempting allocation.
			 */
			if (!nrg &&
			    resv->region_cache_count > resv->adds_in_progress) {
				nrg = list_first_entry(&resv->region_cache,
							struct file_region,
							link);
				list_del(&nrg->link);
				resv->region_cache_count--;
			}

			if (!nrg) {
				spin_unlock(&resv->lock);
				nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
				if (!nrg)
					return -ENOMEM;
				goto retry;
			}

			del += t - f;

			/* New entry for end of split region */
			nrg->from = t;
			nrg->to = rg->to;
			INIT_LIST_HEAD(&nrg->link);

			/* Original entry is trimmed */
			rg->to = f;

			list_add(&nrg->link, &rg->link);
			nrg = NULL;
			break;
		}

		if (f <= rg->from && t >= rg->to) { /* Remove entire region */
			del += rg->to - rg->from;
			list_del(&rg->link);
			kfree(rg);
			continue;
		}

		if (f <= rg->from) {	/* Trim beginning of region */
			del += t - rg->from;
			rg->from = t;
		} else {		/* Trim end of region */
			del += rg->to - f;
			rg->to = f;
		}
	}

	spin_unlock(&resv->lock);
	kfree(nrg);
	return del;
}
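/*
 * Worked example (illustrative values): with a single region [0, 5) in the
 * map, region_del(resv, 2, 3) must split that region.  The original entry
 * is trimmed to [0, 2), a new descriptor [3, 5) is inserted after it, and
 * the return value is 1 (one huge page removed from the reserve map).
 */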
/*
 * A rare out of memory error was encountered which prevented removal of
 * the reserve map region for a page.  The huge page itself was freed
 * and removed from the page cache.  This routine will adjust the subpool
 * usage count, and the global reserve count if needed.  By incrementing
 * these counts, the reserve map entry which could not be deleted will
 * appear as a "reserved" entry instead of simply dangling with incorrect
 * counts.
 */
void hugetlb_fix_reserve_counts(struct inode *inode)
{
	struct hugepage_subpool *spool = subpool_inode(inode);
	long rsv_adjust;

	rsv_adjust = hugepage_subpool_get_pages(spool, 1);
	if (rsv_adjust) {
		struct hstate *h = hstate_inode(inode);

		hugetlb_acct_memory(h, 1);
	}
}

/*
 * Count and return the number of huge pages in the reserve map
 * that intersect with the range [f, t).
 */
static long region_count(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg;
	long chg = 0;

	spin_lock(&resv->lock);
	/* Locate each segment we overlap with, and count that overlap. */
	list_for_each_entry(rg, head, link) {
		long seg_from;
		long seg_to;

		if (rg->to <= f)
			continue;
		if (rg->from >= t)
			break;

		seg_from = max(rg->from, f);
		seg_to = min(rg->to, t);

		chg += seg_to - seg_from;
	}
	spin_unlock(&resv->lock);

	return chg;
}

/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	return ((address - vma->vm_start) >> huge_page_shift(h)) +
			(vma->vm_pgoff >> huge_page_order(h));
}

pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address)
{
	return vma_hugecache_offset(hstate_vma(vma), vma, address);
}
EXPORT_SYMBOL_GPL(linear_hugepage_index);

/*
 * Return the size of the pages allocated when backing a VMA.  In the majority
 * of cases this will be the same size as that used by the page table entries.
 */
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	if (vma->vm_ops && vma->vm_ops->pagesize)
		return vma->vm_ops->pagesize(vma);
	return PAGE_SIZE;
}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);

/*
 * Return the page size being used by the MMU to back a VMA.  In the majority
 * of cases, the page size used by the kernel matches the MMU size.  On
 * architectures where it differs, an architecture-specific 'strong'
 * version of this symbol is required.
 */
__weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return vma_kernel_pagesize(vma);
}

/*
 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping.  Only the process that called mmap()
 * is guaranteed to have their future faults succeed.
 *
 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held.  It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping.  A shared mapping has a region map associated
 * with the underlying file; this region map represents the backing file
 * pages which have ever had a reservation assigned, and it persists even
 * after the page is instantiated.  A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it; this region map represents those offsets which have consumed
 * reservation, ie. where pages have been instantiated.
 */
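/*
 * Illustrative example (not actual kernel state): because a kmalloc'ed
 * resv_map is at least pointer aligned, the low bits of its address are
 * clear.  A private mapping that owns its reservation may therefore store
 *
 *	vm_private_data = (void *)((unsigned long)resv_map | HPAGE_RESV_OWNER);
 *
 * and the helpers below recover the map by masking with ~HPAGE_RESV_MASK
 * and test the individual HPAGE_RESV_* flag bits directly.
 */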
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
	return (unsigned long)vma->vm_private_data;
}

static void set_vma_private_data(struct vm_area_struct *vma,
							unsigned long value)
{
	vma->vm_private_data = (void *)value;
}

struct resv_map *resv_map_alloc(void)
{
	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
	struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);

	if (!resv_map || !rg) {
		kfree(resv_map);
		kfree(rg);
		return NULL;
	}

	kref_init(&resv_map->refs);
	spin_lock_init(&resv_map->lock);
	INIT_LIST_HEAD(&resv_map->regions);

	resv_map->adds_in_progress = 0;

	INIT_LIST_HEAD(&resv_map->region_cache);
	list_add(&rg->link, &resv_map->region_cache);
	resv_map->region_cache_count = 1;

	return resv_map;
}

void resv_map_release(struct kref *ref)
{
	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
	struct list_head *head = &resv_map->region_cache;
	struct file_region *rg, *trg;

	/* Clear out any active regions before we release the map. */
	region_del(resv_map, 0, LONG_MAX);

	/* ... and any entries left in the cache */
	list_for_each_entry_safe(rg, trg, head, link) {
		list_del(&rg->link);
		kfree(rg);
	}

	VM_BUG_ON(resv_map->adds_in_progress);

	kfree(resv_map);
}

static inline struct resv_map *inode_resv_map(struct inode *inode)
{
	/*
	 * At inode evict time, i_mapping may not point to the original
	 * address space within the inode.  This original address space
	 * contains the pointer to the resv_map.  So, always use the
	 * address space embedded within the inode.
	 * The VERY common case is inode->mapping == &inode->i_data but,
	 * this may not be true for device special inodes.
	 */
	return (struct resv_map *)(&inode->i_data)->private_data;
}

static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	if (vma->vm_flags & VM_MAYSHARE) {
		struct address_space *mapping = vma->vm_file->f_mapping;
		struct inode *inode = mapping->host;

		return inode_resv_map(inode);

	} else {
		return (struct resv_map *)(get_vma_private_data(vma) &
							~HPAGE_RESV_MASK);
	}
}

static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, (get_vma_private_data(vma) &
				HPAGE_RESV_MASK) | (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);

	return (get_vma_private_data(vma) & flag) != 0;
}

/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	if (!(vma->vm_flags & VM_MAYSHARE))
		vma->vm_private_data = (void *)0;
}

/* Returns true if the VMA has associated reserve pages */
static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
{
	if (vma->vm_flags & VM_NORESERVE) {
		/*
		 * This address is already reserved by another process
		 * (chg == 0), so we should decrement the reserved count.
		 * Without decrementing, the reserve count remains after
		 * releasing the inode, because this allocated page will go
		 * into the page cache and is regarded as coming from the
		 * reserved pool in the releasing step.  Currently, we
		 * don't have any other solution to deal with this situation
		 * properly, so add a work-around here.
		 */
		if (vma->vm_flags & VM_MAYSHARE && chg == 0)
			return true;
		else
			return false;
	}

	/* Shared mappings always use reserves */
	if (vma->vm_flags & VM_MAYSHARE) {
		/*
		 * We know VM_NORESERVE is not set.  Therefore, there SHOULD
		 * be a region map for all pages.  The only situation where
		 * there is no region map is if a hole was punched via
		 * fallocate.  In this case, there really are no reserves to
		 * use.  This situation is indicated if chg != 0.
		 */
		if (chg)
			return false;
		else
			return true;
	}

	/*
	 * Only the process that called mmap() has reserves for
	 * private mappings.
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		/*
		 * Like the shared case above, a hole punch or truncate
		 * could have been performed on the private mapping.
		 * Examine the value of chg to determine if reserves
		 * actually exist or were previously consumed.
		 * Very Subtle - The value of chg comes from a previous
		 * call to vma_needs_reserves().  The reserve map for
		 * private mappings has different (opposite) semantics
		 * than that of shared mappings.  vma_needs_reserves()
		 * has already taken this difference in semantics into
		 * account.  Therefore, the meaning of chg is the same
		 * as in the shared case above.
		 * Code could easily be combined, but keeping it separate
		 * draws attention to subtle differences.
		 */
		if (chg)
			return false;
		else
			return true;
	}

	return false;
}
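/*
 * Summary of the decision above (derived from the code, for quick
 * reference): with VM_NORESERVE, reserves are only reported for a shared
 * mapping whose range is already reserved (chg == 0); otherwise shared
 * mappings and reservation-owning private mappings have reserves exactly
 * when chg == 0, and all other VMAs have none.
 */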
static void enqueue_huge_page(struct hstate *h, struct page *page)
{
	int nid = page_to_nid(page);
	list_move(&page->lru, &h->hugepage_freelists[nid]);
	h->free_huge_pages++;
	h->free_huge_pages_node[nid]++;
}

static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
{
	struct page *page;

	list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
		if (!PageHWPoison(page))
			break;
	/*
	 * if 'non-isolated free hugepage' not found on the list,
	 * the allocation fails.
	 */
	if (&h->hugepage_freelists[nid] == &page->lru)
		return NULL;
	list_move(&page->lru, &h->hugepage_activelist);
	set_page_refcounted(page);
	h->free_huge_pages--;
	h->free_huge_pages_node[nid]--;
	return page;
}

static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,
		nodemask_t *nmask)
{
	unsigned int cpuset_mems_cookie;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;
	int node = NUMA_NO_NODE;

	zonelist = node_zonelist(nid, gfp_mask);

retry_cpuset:
	cpuset_mems_cookie = read_mems_allowed_begin();
	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
		struct page *page;

		if (!cpuset_zone_allowed(zone, gfp_mask))
			continue;
		/*
		 * no need to ask again on the same node. Pool is node rather than
		 * zone aware
		 */
		if (zone_to_nid(zone) == node)
			continue;
		node = zone_to_nid(zone);

		page = dequeue_huge_page_node_exact(h, node);
		if (page)
			return page;
	}
	if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
		goto retry_cpuset;

	return NULL;
}

/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	if (hugepage_movable_supported(h))
		return GFP_HIGHUSER_MOVABLE;
	else
		return GFP_HIGHUSER;
}

static struct page *dequeue_huge_page_vma(struct hstate *h,
				struct vm_area_struct *vma,
				unsigned long address, int avoid_reserve,
				long chg)
{
	struct page *page;
	struct mempolicy *mpol;
	gfp_t gfp_mask;
	nodemask_t *nodemask;
	int nid;

	/*
	 * A child process with MAP_PRIVATE mappings created by its parent
	 * has no page reserves.  This check ensures that reservations are
	 * not "stolen".  The child may still get SIGKILLed
	 */
	if (!vma_has_reserves(vma, chg) &&
			h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

	/* If reserves cannot be used, ensure enough pages are in the pool */
	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

	gfp_mask = htlb_alloc_mask(h);
	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
	page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
	if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
		SetPagePrivate(page);
		h->resv_huge_pages--;
	}

	mpol_cond_put(mpol);
	return page;

err:
	return NULL;
}

/*
 * common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 * be outside of *nodes_allowed.  Ensure that we use an allowed
 * node for alloc or free.
 */
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	nid = next_node_in(nid, *nodes_allowed);
	VM_BUG_ON(nid >= MAX_NUMNODES);

	return nid;
}

static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	if (!node_isset(nid, *nodes_allowed))
		nid = next_node_allowed(nid, nodes_allowed);
	return nid;
}

/*
 * returns the previously saved node ["this node"] from which to
 * allocate a persistent huge page for the pool and advance the
 * next node from which to allocate, handling wrap at end of node
 * mask.
 */
static int hstate_next_node_to_alloc(struct hstate *h,
					nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);

	return nid;
}

/*
 * helper for free_pool_huge_page() - return the previously saved
 * node ["this node"] from which to free a huge page.  Advance the
 * next node id whether or not we find a free huge page to free so
 * that the next attempt to free addresses the next node.
 */
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);

	return nid;
}

#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_alloc(hs, mask)) || 1);	\
		nr_nodes--)

#define for_each_node_mask_to_free(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
		nr_nodes--)

#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
static void destroy_compound_gigantic_page(struct page *page,
					unsigned int order)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p = page + 1;

	atomic_set(compound_mapcount_ptr(page), 0);
	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
		clear_compound_head(p);
		set_page_refcounted(p);
	}

	set_compound_order(page, 0);
	__ClearPageHead(page);
}

static void free_gigantic_page(struct page *page, unsigned int order)
{
	free_contig_range(page_to_pfn(page), 1 << order);
}

#ifdef CONFIG_CONTIG_ALLOC
static int __alloc_gigantic_page(unsigned long start_pfn,
				unsigned long nr_pages, gfp_t gfp_mask)
{
	unsigned long end_pfn = start_pfn + nr_pages;
	return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
				  gfp_mask);
}

static bool pfn_range_valid_gigantic(struct zone *z,
			unsigned long start_pfn, unsigned long nr_pages)
{
	unsigned long i, end_pfn = start_pfn + nr_pages;
	struct page *page;

	for (i = start_pfn; i < end_pfn; i++) {
		if (!pfn_valid(i))
			return false;

		page = pfn_to_page(i);

		if (page_zone(page) != z)
			return false;

		if (PageReserved(page))
			return false;

		if (page_count(page) > 0)
			return false;

		if (PageHuge(page))
			return false;
	}

	return true;
}

static bool zone_spans_last_pfn(const struct zone *zone,
			unsigned long start_pfn, unsigned long nr_pages)
{
	unsigned long last_pfn = start_pfn + nr_pages - 1;
	return zone_spans_pfn(zone, last_pfn);
}

static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
		int nid, nodemask_t *nodemask)
{
	unsigned int order = huge_page_order(h);
	unsigned long nr_pages = 1 << order;
	unsigned long ret, pfn, flags;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;

	zonelist = node_zonelist(nid, gfp_mask);
	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nodemask) {
		spin_lock_irqsave(&zone->lock, flags);

		pfn = ALIGN(zone->zone_start_pfn, nr_pages);
		while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
			if (pfn_range_valid_gigantic(zone, pfn, nr_pages)) {
				/*
				 * We release the zone lock here because
				 * alloc_contig_range() will also lock the zone
				 * at some point. If there's an allocation
				 * spinning on this lock, it may win the race
				 * and cause alloc_contig_range() to fail...
				 */
				spin_unlock_irqrestore(&zone->lock, flags);
				ret = __alloc_gigantic_page(pfn, nr_pages, gfp_mask);
				if (!ret)
					return pfn_to_page(pfn);
				spin_lock_irqsave(&zone->lock, flags);
			}
			pfn += nr_pages;
		}

		spin_unlock_irqrestore(&zone->lock, flags);
	}

	return NULL;
}

static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
static void prep_compound_gigantic_page(struct page *page, unsigned int order);
#else /* !CONFIG_CONTIG_ALLOC */
static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
					int nid, nodemask_t *nodemask)
{
	return NULL;
}
#endif /* CONFIG_CONTIG_ALLOC */

#else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
					int nid, nodemask_t *nodemask)
{
	return NULL;
}
static inline void free_gigantic_page(struct page *page, unsigned int order) { }
static inline void destroy_compound_gigantic_page(struct page *page,
						unsigned int order) { }
#endif

static void update_and_free_page(struct hstate *h, struct page *page)
{
	int i;

	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
		return;

	h->nr_huge_pages--;
	h->nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < pages_per_huge_page(h); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
				1 << PG_referenced | 1 << PG_dirty |
				1 << PG_active | 1 << PG_private |
				1 << PG_writeback);
	}
	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
	set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
	set_page_refcounted(page);
	if (hstate_is_gigantic(h)) {
		destroy_compound_gigantic_page(page, huge_page_order(h));
		free_gigantic_page(page, huge_page_order(h));
	} else {
		__free_pages(page, huge_page_order(h));
	}
}

struct hstate *size_to_hstate(unsigned long size)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (huge_page_size(h) == size)
			return h;
	}
	return NULL;
}
/*
 * Test to determine whether the hugepage is "active/in-use" (i.e. being linked
 * to hstate->hugepage_activelist.)
 *
 * This function can be called for tail pages, but never returns true for them.
 */
bool page_huge_active(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return PageHead(page) && PagePrivate(&page[1]);
}

/* never called for tail page */
static void set_page_huge_active(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
	SetPagePrivate(&page[1]);
}

static void clear_page_huge_active(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
	ClearPagePrivate(&page[1]);
}

/*
 * Internal hugetlb specific page flag. Do not use outside of the hugetlb
 * code
 */
static inline bool PageHugeTemporary(struct page *page)
{
	if (!PageHuge(page))
		return false;

	return (unsigned long)page[2].mapping == -1U;
}

static inline void SetPageHugeTemporary(struct page *page)
{
	page[2].mapping = (void *)-1U;
}

static inline void ClearPageHugeTemporary(struct page *page)
{
	page[2].mapping = NULL;
}

void free_huge_page(struct page *page)
{
	/*
	 * Can't pass hstate in here because it is called from the
	 * compound page destructor.
	 */
	struct hstate *h = page_hstate(page);
	int nid = page_to_nid(page);
	struct hugepage_subpool *spool =
		(struct hugepage_subpool *)page_private(page);
	bool restore_reserve;

	VM_BUG_ON_PAGE(page_count(page), page);
	VM_BUG_ON_PAGE(page_mapcount(page), page);

	set_page_private(page, 0);
	page->mapping = NULL;
	restore_reserve = PagePrivate(page);
	ClearPagePrivate(page);

	/*
	 * If PagePrivate() was set on page, page allocation consumed a
	 * reservation.  If the page was associated with a subpool, there
	 * would have been a page reserved in the subpool before allocation
	 * via hugepage_subpool_get_pages().  Since we are 'restoring' the
	 * reservation, do not call hugepage_subpool_put_pages() as this will
	 * remove the reserved page from the subpool.
	 */
	if (!restore_reserve) {
		/*
		 * A return code of zero implies that the subpool will be
		 * under its minimum size if the reservation is not restored
		 * after page is free.  Therefore, force restore_reserve
		 * operation.
		 */
		if (hugepage_subpool_put_pages(spool, 1) == 0)
			restore_reserve = true;
	}

	spin_lock(&hugetlb_lock);
	clear_page_huge_active(page);
	hugetlb_cgroup_uncharge_page(hstate_index(h),
				     pages_per_huge_page(h), page);
	if (restore_reserve)
		h->resv_huge_pages++;

	if (PageHugeTemporary(page)) {
		list_del(&page->lru);
		ClearPageHugeTemporary(page);
		update_and_free_page(h, page);
	} else if (h->surplus_huge_pages_node[nid]) {
		/* remove the page from active list */
		list_del(&page->lru);
		update_and_free_page(h, page);
		h->surplus_huge_pages--;
		h->surplus_huge_pages_node[nid]--;
	} else {
		arch_clear_hugepage_flags(page);
		enqueue_huge_page(h, page);
	}
	spin_unlock(&hugetlb_lock);
}

static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
{
	INIT_LIST_HEAD(&page->lru);
	set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
	spin_lock(&hugetlb_lock);
	set_hugetlb_cgroup(page, NULL);
	h->nr_huge_pages++;
	h->nr_huge_pages_node[nid]++;
	spin_unlock(&hugetlb_lock);
}

static void prep_compound_gigantic_page(struct page *page, unsigned int order)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p = page + 1;

	/* we rely on prep_new_huge_page to set the destructor */
	set_compound_order(page, order);
	__ClearPageReserved(page);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
		/*
		 * For gigantic hugepages allocated through bootmem at
		 * boot, it's safer to be consistent with the not-gigantic
		 * hugepages and clear the PG_reserved bit from all tail pages
		 * too.  Otherwise drivers using get_user_pages() to access tail
		 * pages may get the reference counting wrong if they see
		 * PG_reserved set on a tail page (despite the head page not
		 * having PG_reserved set).  Enforcing this consistency between
		 * head and tail pages allows drivers to optimize away a check
		 * on the head page when they need to know if put_page() is
		 * needed after get_user_pages().
		 */
		__ClearPageReserved(p);
		set_page_count(p, 0);
		set_compound_head(p, page);
	}
	atomic_set(compound_mapcount_ptr(page), -1);
}

/*
 * PageHuge() only returns true for hugetlbfs pages, but not for normal or
 * transparent huge pages.  See the PageTransHuge() documentation for more
 * details.
 */
int PageHuge(struct page *page)
{
	if (!PageCompound(page))
		return 0;

	page = compound_head(page);
	return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
}
EXPORT_SYMBOL_GPL(PageHuge);

/*
 * PageHeadHuge() only returns true for hugetlbfs head page, but not for
 * normal or transparent huge pages.
 */
int PageHeadHuge(struct page *page_head)
{
	if (!PageHead(page_head))
		return 0;

	return get_compound_page_dtor(page_head) == free_huge_page;
}

pgoff_t __basepage_index(struct page *page)
{
	struct page *page_head = compound_head(page);
	pgoff_t index = page_index(page_head);
	unsigned long compound_idx;

	if (!PageHuge(page_head))
		return page_index(page);

	if (compound_order(page_head) >= MAX_ORDER)
		compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
	else
		compound_idx = page - page_head;

	return (index << compound_order(page_head)) + compound_idx;
}

static struct page *alloc_buddy_huge_page(struct hstate *h,
		gfp_t gfp_mask, int nid, nodemask_t *nmask)
{
	int order = huge_page_order(h);
	struct page *page;

	gfp_mask |= __GFP_COMP|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();
	page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
	if (page)
		__count_vm_event(HTLB_BUDDY_PGALLOC);
	else
		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);

	return page;
}

/*
 * Common helper to allocate a fresh hugetlb page. All specific allocators
 * should use this function to get new hugetlb pages
 */
static struct page *alloc_fresh_huge_page(struct hstate *h,
		gfp_t gfp_mask, int nid, nodemask_t *nmask)
{
	struct page *page;

	if (hstate_is_gigantic(h))
		page = alloc_gigantic_page(h, gfp_mask, nid, nmask);
	else
		page = alloc_buddy_huge_page(h, gfp_mask,
				nid, nmask);
	if (!page)
		return NULL;

	if (hstate_is_gigantic(h))
		prep_compound_gigantic_page(page, huge_page_order(h));
	prep_new_huge_page(h, page, page_to_nid(page));

	return page;
}

/*
 * Allocates a fresh page to the hugetlb allocator pool in the node interleaved
 * manner.
 */
static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
{
	struct page *page;
	int nr_nodes, node;
	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;

	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
		page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed);
		if (page)
			break;
	}

	if (!page)
		return 0;

	put_page(page); /* free it into the hugepage allocator */

	return 1;
}

/*
 * Free huge page from pool from next node to free.
 * Attempt to keep persistent huge pages more or less
 * balanced over allowed nodes.
 * Called with hugetlb_lock locked.
 */
static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
							 bool acct_surplus)
{
	int nr_nodes, node;
	int ret = 0;

	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
		/*
		 * If we're returning unused surplus pages, only examine
		 * nodes with surplus pages.
		 */
		if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
		    !list_empty(&h->hugepage_freelists[node])) {
			struct page *page =
				list_entry(h->hugepage_freelists[node].next,
					  struct page, lru);
			list_del(&page->lru);
			h->free_huge_pages--;
			h->free_huge_pages_node[node]--;
			if (acct_surplus) {
				h->surplus_huge_pages--;
				h->surplus_huge_pages_node[node]--;
			}
			update_and_free_page(h, page);
			ret = 1;
			break;
		}
	}

	return ret;
}

/*
 * Dissolve a given free hugepage into free buddy pages. This function does
 * nothing for in-use hugepages and non-hugepages.
 * This function returns values like below:
 *
 *  -EBUSY: failed to dissolve free hugepages or the hugepage is in-use
 *          (allocated or reserved.)
 *       0: successfully dissolved free hugepages or the page is not a
 *          hugepage (considered as already dissolved)
 */
int dissolve_free_huge_page(struct page *page)
{
	int rc = -EBUSY;

	/* Not to disrupt normal path by vainly holding hugetlb_lock */
	if (!PageHuge(page))
		return 0;

	spin_lock(&hugetlb_lock);
	if (!PageHuge(page)) {
		rc = 0;
		goto out;
	}

	if (!page_count(page)) {
		struct page *head = compound_head(page);
		struct hstate *h = page_hstate(head);
		int nid = page_to_nid(head);
		if (h->free_huge_pages - h->resv_huge_pages == 0)
			goto out;
		/*
		 * Move PageHWPoison flag from head page to the raw error page,
		 * which makes any subpages rather than the error page reusable.
		 */
		if (PageHWPoison(head) && page != head) {
			SetPageHWPoison(page);
			ClearPageHWPoison(head);
		}
		list_del(&head->lru);
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
		h->max_huge_pages--;
		update_and_free_page(h, head);
		rc = 0;
	}
out:
	spin_unlock(&hugetlb_lock);
	return rc;
}

/*
 * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
 * make specified memory blocks removable from the system.
 * Note that this will dissolve a free gigantic hugepage completely, if any
 * part of it lies within the given range.
 * Also note that if dissolve_free_huge_page() returns with an error, all
 * free hugepages that were dissolved before that error are lost.
 */
int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct page *page;
	int rc = 0;

	if (!hugepages_supported())
		return rc;

	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
		page = pfn_to_page(pfn);
		rc = dissolve_free_huge_page(page);
		if (rc)
			break;
	}

	return rc;
}

/*
 * Allocates a fresh surplus page from the page allocator.
 */
static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
		int nid, nodemask_t *nmask)
{
	struct page *page = NULL;

	if (hstate_is_gigantic(h))
		return NULL;

	spin_lock(&hugetlb_lock);
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
		goto out_unlock;
	spin_unlock(&hugetlb_lock);

	page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask);
	if (!page)
		return NULL;

	spin_lock(&hugetlb_lock);
	/*
	 * We could have raced with the pool size change.
	 * Double check that and simply deallocate the new page
	 * if we would end up overcommitting the surpluses.  Abuse
	 * temporary page to work around the nasty free_huge_page
	 * codeflow
	 */
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
		SetPageHugeTemporary(page);
		spin_unlock(&hugetlb_lock);
		put_page(page);
		return NULL;
	} else {
		h->surplus_huge_pages++;
		h->surplus_huge_pages_node[page_to_nid(page)]++;
	}

out_unlock:
	spin_unlock(&hugetlb_lock);

	return page;
}

struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
				     int nid, nodemask_t *nmask)
{
	struct page *page;

	if (hstate_is_gigantic(h))
		return NULL;

	page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask);
	if (!page)
		return NULL;

	/*
	 * We do not account these pages as surplus because they are only
	 * temporary and will be released properly on the last reference
	 */
	SetPageHugeTemporary(page);

	return page;
}

/*
 * Use the VMA's mpolicy to allocate a huge page from the buddy.
 */
static
struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	struct mempolicy *mpol;
	gfp_t gfp_mask = htlb_alloc_mask(h);
	int nid;
	nodemask_t *nodemask;

	nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
	page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask);
	mpol_cond_put(mpol);

	return page;
}

/* page migration callback function */
struct page *alloc_huge_page_node(struct hstate *h, int nid)
{
	gfp_t gfp_mask = htlb_alloc_mask(h);
	struct page *page = NULL;

	if (nid != NUMA_NO_NODE)
		gfp_mask |= __GFP_THISNODE;

	spin_lock(&hugetlb_lock);
	if (h->free_huge_pages - h->resv_huge_pages > 0)
		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, NULL);
	spin_unlock(&hugetlb_lock);

	if (!page)
		page = alloc_migrate_huge_page(h, gfp_mask, nid, NULL);

	return page;
}

/* page migration callback function */
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
		nodemask_t *nmask)
{
	gfp_t gfp_mask = htlb_alloc_mask(h);

	spin_lock(&hugetlb_lock);
	if (h->free_huge_pages - h->resv_huge_pages > 0) {
		struct page *page;

		page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
		if (page) {
			spin_unlock(&hugetlb_lock);
			return page;
		}
	}
	spin_unlock(&hugetlb_lock);

	return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask);
}

/* mempolicy aware migration callback */
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
		unsigned long address)
{
	struct mempolicy *mpol;
	nodemask_t *nodemask;
	struct page *page;
	gfp_t gfp_mask;
	int node;

	gfp_mask = htlb_alloc_mask(h);
	node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
	page = alloc_huge_page_nodemask(h, node, nodemask);
	mpol_cond_put(mpol);

	return page;
}

/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.
 */
static int gather_surplus_pages(struct hstate *h, int delta)
{
	struct list_head surplus_list;
	struct page *page, *tmp;
	int ret, i;
	int needed, allocated;
	bool alloc_ok = true;

	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
	if (needed <= 0) {
		h->resv_huge_pages += delta;
		return 0;
	}

	allocated = 0;
	INIT_LIST_HEAD(&surplus_list);

	ret = -ENOMEM;
retry:
	spin_unlock(&hugetlb_lock);
	for (i = 0; i < needed; i++) {
		page = alloc_surplus_huge_page(h, htlb_alloc_mask(h),
				NUMA_NO_NODE, NULL);
		if (!page) {
			alloc_ok = false;
			break;
		}
		list_add(&page->lru, &surplus_list);
		cond_resched();
	}
	allocated += i;

	/*
	 * After retaking hugetlb_lock, we need to recalculate 'needed'
	 * because either resv_huge_pages or free_huge_pages may have changed.
	 */
	spin_lock(&hugetlb_lock);
	needed = (h->resv_huge_pages + delta) -
			(h->free_huge_pages + allocated);
	if (needed > 0) {
		if (alloc_ok)
			goto retry;
		/*
		 * We were not able to allocate enough pages to
		 * satisfy the entire reservation so we free what
		 * we've allocated so far.
		 */
		goto free;
	}
	/*
	 * The surplus_list now contains _at_least_ the number of extra pages
	 * needed to accommodate the reservation.  Add the appropriate number
	 * of pages to the hugetlb pool and free the extras back to the buddy
	 * allocator.  Commit the entire reservation here to prevent another
	 * process from stealing the pages as they are added to the pool but
	 * before they are reserved.
	 */
	needed += allocated;
	h->resv_huge_pages += delta;
	ret = 0;

	/* Free the needed pages to the hugetlb pool */
	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
		if ((--needed) < 0)
			break;
		/*
		 * This page is now managed by the hugetlb allocator and has
		 * no users -- drop the buddy allocator's reference.
		 */
		put_page_testzero(page);
		VM_BUG_ON_PAGE(page_count(page), page);
		enqueue_huge_page(h, page);
	}
free:
	spin_unlock(&hugetlb_lock);

	/* Free unnecessary surplus pages to the buddy allocator */
	list_for_each_entry_safe(page, tmp, &surplus_list, lru)
		put_page(page);
	spin_lock(&hugetlb_lock);

	return ret;
}
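/*
 * Worked example (illustrative numbers): with resv_huge_pages == 5,
 * free_huge_pages == 6 and a new reservation of delta == 4,
 * needed = (5 + 4) - 6 = 3, so three surplus pages are requested from the
 * buddy allocator before the reservation can be committed.  If the counts
 * change while the lock is dropped, 'needed' is recomputed and the loop
 * either retries or frees the excess.
 */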
/*
 * This routine has two main purposes:
 * 1) Decrement the reservation count (resv_huge_pages) by the value passed
 *    in unused_resv_pages.  This corresponds to the prior adjustments made
 *    to the associated reservation map.
 * 2) Free any unused surplus pages that may have been allocated to satisfy
 *    the reservation.  As many as unused_resv_pages may be freed.
 *
 * Called with hugetlb_lock held.  However, the lock could be dropped (and
 * reacquired) during calls to cond_resched_lock.  Whenever dropping the lock,
 * we must make sure nobody else can claim pages we are in the process of
 * freeing.  Do this by ensuring resv_huge_pages is always greater than the
 * number of huge pages we plan to free when dropping the lock.
 */
static void return_unused_surplus_pages(struct hstate *h,
					unsigned long unused_resv_pages)
{
	unsigned long nr_pages;

	/* Cannot return gigantic pages currently */
	if (hstate_is_gigantic(h))
		goto out;

	/*
	 * Part (or even all) of the reservation could have been backed
	 * by pre-allocated pages. Only free surplus pages.
	 */
	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);

	/*
	 * We want to release as many surplus pages as possible, spread
	 * evenly across all nodes with memory. Iterate across these nodes
	 * until we can no longer free unreserved surplus pages. This occurs
	 * when the nodes with surplus pages have no free pages.
	 * free_pool_huge_page() will balance the freed pages across the
	 * on-line nodes with memory and will handle the hstate accounting.
	 *
	 * Note that we decrement resv_huge_pages as we free the pages.  If
	 * we drop the lock, resv_huge_pages will still be sufficiently large
	 * to cover subsequent pages we may free.
	 */
	while (nr_pages--) {
		h->resv_huge_pages--;
		unused_resv_pages--;
		if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
			goto out;
		cond_resched_lock(&hugetlb_lock);
	}

out:
	/* Fully uncommit the reservation */
	h->resv_huge_pages -= unused_resv_pages;
}


/*
 * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
 * are used by the huge page allocation routines to manage reservations.
 *
 * vma_needs_reservation is called to determine if the huge page at addr
 * within the vma has an associated reservation.  If a reservation is
 * needed, the value 1 is returned.  The caller is then responsible for
 * managing the global reservation and subpool usage counts.  After
 * the huge page has been allocated, vma_commit_reservation is called
 * to add the page to the reservation map.  If the page allocation fails,
 * the reservation must be ended instead of committed.  vma_end_reservation
 * is called in such cases.
 *
 * In the normal case, vma_commit_reservation returns the same value
 * as the preceding vma_needs_reservation call.  The only time this
 * is not the case is if a reserve map was changed between calls.  It
 * is the responsibility of the caller to notice the difference and
 * take appropriate action.
 *
 * vma_add_reservation is used in error paths where a reservation must
 * be restored when a newly allocated huge page must be freed.  It is
 * to be called after calling vma_needs_reservation to determine if a
 * reservation exists.
1898 */ 1899 enum vma_resv_mode { 1900 VMA_NEEDS_RESV, 1901 VMA_COMMIT_RESV, 1902 VMA_END_RESV, 1903 VMA_ADD_RESV, 1904 }; 1905 static long __vma_reservation_common(struct hstate *h, 1906 struct vm_area_struct *vma, unsigned long addr, 1907 enum vma_resv_mode mode) 1908 { 1909 struct resv_map *resv; 1910 pgoff_t idx; 1911 long ret; 1912 1913 resv = vma_resv_map(vma); 1914 if (!resv) 1915 return 1; 1916 1917 idx = vma_hugecache_offset(h, vma, addr); 1918 switch (mode) { 1919 case VMA_NEEDS_RESV: 1920 ret = region_chg(resv, idx, idx + 1); 1921 break; 1922 case VMA_COMMIT_RESV: 1923 ret = region_add(resv, idx, idx + 1); 1924 break; 1925 case VMA_END_RESV: 1926 region_abort(resv, idx, idx + 1); 1927 ret = 0; 1928 break; 1929 case VMA_ADD_RESV: 1930 if (vma->vm_flags & VM_MAYSHARE) 1931 ret = region_add(resv, idx, idx + 1); 1932 else { 1933 region_abort(resv, idx, idx + 1); 1934 ret = region_del(resv, idx, idx + 1); 1935 } 1936 break; 1937 default: 1938 BUG(); 1939 } 1940 1941 if (vma->vm_flags & VM_MAYSHARE) 1942 return ret; 1943 else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) { 1944 /* 1945 * In most cases, reserves always exist for private mappings. 1946 * However, a file associated with mapping could have been 1947 * hole punched or truncated after reserves were consumed. 1948 * As subsequent fault on such a range will not use reserves. 1949 * Subtle - The reserve map for private mappings has the 1950 * opposite meaning than that of shared mappings. If NO 1951 * entry is in the reserve map, it means a reservation exists. 1952 * If an entry exists in the reserve map, it means the 1953 * reservation has already been consumed. As a result, the 1954 * return value of this routine is the opposite of the 1955 * value returned from reserve map manipulation routines above. 1956 */ 1957 if (ret) 1958 return 0; 1959 else 1960 return 1; 1961 } 1962 else 1963 return ret < 0 ? ret : 0; 1964 } 1965 1966 static long vma_needs_reservation(struct hstate *h, 1967 struct vm_area_struct *vma, unsigned long addr) 1968 { 1969 return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV); 1970 } 1971 1972 static long vma_commit_reservation(struct hstate *h, 1973 struct vm_area_struct *vma, unsigned long addr) 1974 { 1975 return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV); 1976 } 1977 1978 static void vma_end_reservation(struct hstate *h, 1979 struct vm_area_struct *vma, unsigned long addr) 1980 { 1981 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV); 1982 } 1983 1984 static long vma_add_reservation(struct hstate *h, 1985 struct vm_area_struct *vma, unsigned long addr) 1986 { 1987 return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV); 1988 } 1989 1990 /* 1991 * This routine is called to restore a reservation on error paths. In the 1992 * specific error paths, a huge page was allocated (via alloc_huge_page) 1993 * and is about to be freed. If a reservation for the page existed, 1994 * alloc_huge_page would have consumed the reservation and set PagePrivate 1995 * in the newly allocated page. When the page is freed via free_huge_page, 1996 * the global reservation count will be incremented if PagePrivate is set. 1997 * However, free_huge_page can not adjust the reserve map. Adjust the 1998 * reserve map here to be consistent with global reserve count adjustments 1999 * to be made by free_huge_page. 
2000 */ 2001 static void restore_reserve_on_error(struct hstate *h, 2002 struct vm_area_struct *vma, unsigned long address, 2003 struct page *page) 2004 { 2005 if (unlikely(PagePrivate(page))) { 2006 long rc = vma_needs_reservation(h, vma, address); 2007 2008 if (unlikely(rc < 0)) { 2009 /* 2010 * Rare out of memory condition in reserve map 2011 * manipulation. Clear PagePrivate so that 2012 * global reserve count will not be incremented 2013 * by free_huge_page. This will make it appear 2014 * as though the reservation for this page was 2015 * consumed. This may prevent the task from 2016 * faulting in the page at a later time. This 2017 * is better than inconsistent global huge page 2018 * accounting of reserve counts. 2019 */ 2020 ClearPagePrivate(page); 2021 } else if (rc) { 2022 rc = vma_add_reservation(h, vma, address); 2023 if (unlikely(rc < 0)) 2024 /* 2025 * See above comment about rare out of 2026 * memory condition. 2027 */ 2028 ClearPagePrivate(page); 2029 } else 2030 vma_end_reservation(h, vma, address); 2031 } 2032 } 2033 2034 struct page *alloc_huge_page(struct vm_area_struct *vma, 2035 unsigned long addr, int avoid_reserve) 2036 { 2037 struct hugepage_subpool *spool = subpool_vma(vma); 2038 struct hstate *h = hstate_vma(vma); 2039 struct page *page; 2040 long map_chg, map_commit; 2041 long gbl_chg; 2042 int ret, idx; 2043 struct hugetlb_cgroup *h_cg; 2044 2045 idx = hstate_index(h); 2046 /* 2047 * Examine the region/reserve map to determine if the process 2048 * has a reservation for the page to be allocated. A return 2049 * code of zero indicates a reservation exists (no change). 2050 */ 2051 map_chg = gbl_chg = vma_needs_reservation(h, vma, addr); 2052 if (map_chg < 0) 2053 return ERR_PTR(-ENOMEM); 2054 2055 /* 2056 * Processes that did not create the mapping will have no 2057 * reserves as indicated by the region/reserve map. Check 2058 * that the allocation will not exceed the subpool limit. 2059 * Allocations for MAP_NORESERVE mappings also need to be 2060 * checked against any subpool limit. 2061 */ 2062 if (map_chg || avoid_reserve) { 2063 gbl_chg = hugepage_subpool_get_pages(spool, 1); 2064 if (gbl_chg < 0) { 2065 vma_end_reservation(h, vma, addr); 2066 return ERR_PTR(-ENOSPC); 2067 } 2068 2069 /* 2070 * Even though there was no reservation in the region/reserve 2071 * map, there could be reservations associated with the 2072 * subpool that can be used. This would be indicated if the 2073 * return value of hugepage_subpool_get_pages() is zero. 2074 * However, if avoid_reserve is specified we still avoid even 2075 * the subpool reservations. 2076 */ 2077 if (avoid_reserve) 2078 gbl_chg = 1; 2079 } 2080 2081 ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg); 2082 if (ret) 2083 goto out_subpool_put; 2084 2085 spin_lock(&hugetlb_lock); 2086 /* 2087 * glb_chg is passed to indicate whether or not a page must be taken 2088 * from the global free pool (global change). gbl_chg == 0 indicates 2089 * a reservation exists for the allocation. 
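 *
 * Informally, the cases set up above are:
 *   map_chg == 0 and !avoid_reserve: a reserve map entry covers this page,
 *     so gbl_chg stays 0 and the page may come from the reserved pool.
 *   map_chg != 0: gbl_chg is the subpool's answer; 0 means an existing
 *     subpool reservation is consumed, 1 means a page must be taken from
 *     the global free pool.
 *   avoid_reserve: gbl_chg is forced to 1 so that no reservation is used.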
2090 */ 2091 page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg); 2092 if (!page) { 2093 spin_unlock(&hugetlb_lock); 2094 page = alloc_buddy_huge_page_with_mpol(h, vma, addr); 2095 if (!page) 2096 goto out_uncharge_cgroup; 2097 if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) { 2098 SetPagePrivate(page); 2099 h->resv_huge_pages--; 2100 } 2101 spin_lock(&hugetlb_lock); 2102 list_move(&page->lru, &h->hugepage_activelist); 2103 /* Fall through */ 2104 } 2105 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page); 2106 spin_unlock(&hugetlb_lock); 2107 2108 set_page_private(page, (unsigned long)spool); 2109 2110 map_commit = vma_commit_reservation(h, vma, addr); 2111 if (unlikely(map_chg > map_commit)) { 2112 /* 2113 * The page was added to the reservation map between 2114 * vma_needs_reservation and vma_commit_reservation. 2115 * This indicates a race with hugetlb_reserve_pages. 2116 * Adjust for the subpool count incremented above AND 2117 * in hugetlb_reserve_pages for the same page. Also, 2118 * the reservation count added in hugetlb_reserve_pages 2119 * no longer applies. 2120 */ 2121 long rsv_adjust; 2122 2123 rsv_adjust = hugepage_subpool_put_pages(spool, 1); 2124 hugetlb_acct_memory(h, -rsv_adjust); 2125 } 2126 return page; 2127 2128 out_uncharge_cgroup: 2129 hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg); 2130 out_subpool_put: 2131 if (map_chg || avoid_reserve) 2132 hugepage_subpool_put_pages(spool, 1); 2133 vma_end_reservation(h, vma, addr); 2134 return ERR_PTR(-ENOSPC); 2135 } 2136 2137 int alloc_bootmem_huge_page(struct hstate *h) 2138 __attribute__ ((weak, alias("__alloc_bootmem_huge_page"))); 2139 int __alloc_bootmem_huge_page(struct hstate *h) 2140 { 2141 struct huge_bootmem_page *m; 2142 int nr_nodes, node; 2143 2144 for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) { 2145 void *addr; 2146 2147 addr = memblock_alloc_try_nid_raw( 2148 huge_page_size(h), huge_page_size(h), 2149 0, MEMBLOCK_ALLOC_ACCESSIBLE, node); 2150 if (addr) { 2151 /* 2152 * Use the beginning of the huge page to store the 2153 * huge_bootmem_page struct (until gather_bootmem 2154 * puts them into the mem_map). 
2155 */ 2156 m = addr; 2157 goto found; 2158 } 2159 } 2160 return 0; 2161 2162 found: 2163 BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h))); 2164 /* Put them into a private list first because mem_map is not up yet */ 2165 INIT_LIST_HEAD(&m->list); 2166 list_add(&m->list, &huge_boot_pages); 2167 m->hstate = h; 2168 return 1; 2169 } 2170 2171 static void __init prep_compound_huge_page(struct page *page, 2172 unsigned int order) 2173 { 2174 if (unlikely(order > (MAX_ORDER - 1))) 2175 prep_compound_gigantic_page(page, order); 2176 else 2177 prep_compound_page(page, order); 2178 } 2179 2180 /* Put bootmem huge pages into the standard lists after mem_map is up */ 2181 static void __init gather_bootmem_prealloc(void) 2182 { 2183 struct huge_bootmem_page *m; 2184 2185 list_for_each_entry(m, &huge_boot_pages, list) { 2186 struct page *page = virt_to_page(m); 2187 struct hstate *h = m->hstate; 2188 2189 WARN_ON(page_count(page) != 1); 2190 prep_compound_huge_page(page, h->order); 2191 WARN_ON(PageReserved(page)); 2192 prep_new_huge_page(h, page, page_to_nid(page)); 2193 put_page(page); /* free it into the hugepage allocator */ 2194 2195 /* 2196 * If we had gigantic hugepages allocated at boot time, we need 2197 * to restore the 'stolen' pages to totalram_pages in order to 2198 * fix confusing memory reports from free(1) and another 2199 * side-effects, like CommitLimit going negative. 2200 */ 2201 if (hstate_is_gigantic(h)) 2202 adjust_managed_page_count(page, 1 << h->order); 2203 cond_resched(); 2204 } 2205 } 2206 2207 static void __init hugetlb_hstate_alloc_pages(struct hstate *h) 2208 { 2209 unsigned long i; 2210 2211 for (i = 0; i < h->max_huge_pages; ++i) { 2212 if (hstate_is_gigantic(h)) { 2213 if (!alloc_bootmem_huge_page(h)) 2214 break; 2215 } else if (!alloc_pool_huge_page(h, 2216 &node_states[N_MEMORY])) 2217 break; 2218 cond_resched(); 2219 } 2220 if (i < h->max_huge_pages) { 2221 char buf[32]; 2222 2223 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); 2224 pr_warn("HugeTLB: allocating %lu of page size %s failed. 
Only allocated %lu hugepages.\n", 2225 h->max_huge_pages, buf, i); 2226 h->max_huge_pages = i; 2227 } 2228 } 2229 2230 static void __init hugetlb_init_hstates(void) 2231 { 2232 struct hstate *h; 2233 2234 for_each_hstate(h) { 2235 if (minimum_order > huge_page_order(h)) 2236 minimum_order = huge_page_order(h); 2237 2238 /* oversize hugepages were init'ed in early boot */ 2239 if (!hstate_is_gigantic(h)) 2240 hugetlb_hstate_alloc_pages(h); 2241 } 2242 VM_BUG_ON(minimum_order == UINT_MAX); 2243 } 2244 2245 static void __init report_hugepages(void) 2246 { 2247 struct hstate *h; 2248 2249 for_each_hstate(h) { 2250 char buf[32]; 2251 2252 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); 2253 pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n", 2254 buf, h->free_huge_pages); 2255 } 2256 } 2257 2258 #ifdef CONFIG_HIGHMEM 2259 static void try_to_free_low(struct hstate *h, unsigned long count, 2260 nodemask_t *nodes_allowed) 2261 { 2262 int i; 2263 2264 if (hstate_is_gigantic(h)) 2265 return; 2266 2267 for_each_node_mask(i, *nodes_allowed) { 2268 struct page *page, *next; 2269 struct list_head *freel = &h->hugepage_freelists[i]; 2270 list_for_each_entry_safe(page, next, freel, lru) { 2271 if (count >= h->nr_huge_pages) 2272 return; 2273 if (PageHighMem(page)) 2274 continue; 2275 list_del(&page->lru); 2276 update_and_free_page(h, page); 2277 h->free_huge_pages--; 2278 h->free_huge_pages_node[page_to_nid(page)]--; 2279 } 2280 } 2281 } 2282 #else 2283 static inline void try_to_free_low(struct hstate *h, unsigned long count, 2284 nodemask_t *nodes_allowed) 2285 { 2286 } 2287 #endif 2288 2289 /* 2290 * Increment or decrement surplus_huge_pages. Keep node-specific counters 2291 * balanced by operating on them in a round-robin fashion. 2292 * Returns 1 if an adjustment was made. 2293 */ 2294 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed, 2295 int delta) 2296 { 2297 int nr_nodes, node; 2298 2299 VM_BUG_ON(delta != -1 && delta != 1); 2300 2301 if (delta < 0) { 2302 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) { 2303 if (h->surplus_huge_pages_node[node]) 2304 goto found; 2305 } 2306 } else { 2307 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { 2308 if (h->surplus_huge_pages_node[node] < 2309 h->nr_huge_pages_node[node]) 2310 goto found; 2311 } 2312 } 2313 return 0; 2314 2315 found: 2316 h->surplus_huge_pages += delta; 2317 h->surplus_huge_pages_node[node] += delta; 2318 return 1; 2319 } 2320 2321 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages) 2322 static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid, 2323 nodemask_t *nodes_allowed) 2324 { 2325 unsigned long min_count, ret; 2326 2327 spin_lock(&hugetlb_lock); 2328 2329 /* 2330 * Check for a node specific request. 2331 * Changing node specific huge page count may require a corresponding 2332 * change to the global count. In any case, the passed node mask 2333 * (nodes_allowed) will restrict alloc/free to the specified node. 2334 */ 2335 if (nid != NUMA_NO_NODE) { 2336 unsigned long old_count = count; 2337 2338 count += h->nr_huge_pages - h->nr_huge_pages_node[nid]; 2339 /* 2340 * User may have specified a large count value which caused the 2341 * above calculation to overflow. In this case, they wanted 2342 * to allocate as many huge pages as possible. Set count to 2343 * largest possible value to align with their intention. 
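 *
 * As a concrete (hypothetical) illustration of the adjustment above: if
 * this node currently holds 10 huge pages, the global total is 50, and
 * the user writes 30 for this node, count becomes 30 + (50 - 10) = 70;
 * the code below then works on the global counters while nodes_allowed
 * limits the actual alloc/free to this node.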
2344 */ 2345 if (count < old_count) 2346 count = ULONG_MAX; 2347 } 2348 2349 /* 2350 * Gigantic pages runtime allocation depend on the capability for large 2351 * page range allocation. 2352 * If the system does not provide this feature, return an error when 2353 * the user tries to allocate gigantic pages but let the user free the 2354 * boottime allocated gigantic pages. 2355 */ 2356 if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) { 2357 if (count > persistent_huge_pages(h)) { 2358 spin_unlock(&hugetlb_lock); 2359 return -EINVAL; 2360 } 2361 /* Fall through to decrease pool */ 2362 } 2363 2364 /* 2365 * Increase the pool size 2366 * First take pages out of surplus state. Then make up the 2367 * remaining difference by allocating fresh huge pages. 2368 * 2369 * We might race with alloc_surplus_huge_page() here and be unable 2370 * to convert a surplus huge page to a normal huge page. That is 2371 * not critical, though, it just means the overall size of the 2372 * pool might be one hugepage larger than it needs to be, but 2373 * within all the constraints specified by the sysctls. 2374 */ 2375 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) { 2376 if (!adjust_pool_surplus(h, nodes_allowed, -1)) 2377 break; 2378 } 2379 2380 while (count > persistent_huge_pages(h)) { 2381 /* 2382 * If this allocation races such that we no longer need the 2383 * page, free_huge_page will handle it by freeing the page 2384 * and reducing the surplus. 2385 */ 2386 spin_unlock(&hugetlb_lock); 2387 2388 /* yield cpu to avoid soft lockup */ 2389 cond_resched(); 2390 2391 ret = alloc_pool_huge_page(h, nodes_allowed); 2392 spin_lock(&hugetlb_lock); 2393 if (!ret) 2394 goto out; 2395 2396 /* Bail for signals. Probably ctrl-c from user */ 2397 if (signal_pending(current)) 2398 goto out; 2399 } 2400 2401 /* 2402 * Decrease the pool size 2403 * First return free pages to the buddy allocator (being careful 2404 * to keep enough around to satisfy reservations). Then place 2405 * pages into surplus state as needed so the pool will shrink 2406 * to the desired size as pages become free. 2407 * 2408 * By placing pages into the surplus state independent of the 2409 * overcommit value, we are allowing the surplus pool size to 2410 * exceed overcommit. There are few sane options here. Since 2411 * alloc_surplus_huge_page() is checking the global counter, 2412 * though, we'll note that we're not allowed to exceed surplus 2413 * and won't grow the pool anywhere else. Not until one of the 2414 * sysctls are changed, or the surplus pages go out of use. 
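 *
 * For example (hypothetical numbers): with nr_huge_pages = 100,
 * free_huge_pages = 30 and resv_huge_pages = 10, min_count below is
 * 10 + 100 - 30 = 80; the 70 in-use pages plus the 10 free pages backing
 * outstanding reservations are never handed back to the buddy allocator,
 * however small the requested count is.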
2415 */ 2416 min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages; 2417 min_count = max(count, min_count); 2418 try_to_free_low(h, min_count, nodes_allowed); 2419 while (min_count < persistent_huge_pages(h)) { 2420 if (!free_pool_huge_page(h, nodes_allowed, 0)) 2421 break; 2422 cond_resched_lock(&hugetlb_lock); 2423 } 2424 while (count < persistent_huge_pages(h)) { 2425 if (!adjust_pool_surplus(h, nodes_allowed, 1)) 2426 break; 2427 } 2428 out: 2429 h->max_huge_pages = persistent_huge_pages(h); 2430 spin_unlock(&hugetlb_lock); 2431 2432 return 0; 2433 } 2434 2435 #define HSTATE_ATTR_RO(_name) \ 2436 static struct kobj_attribute _name##_attr = __ATTR_RO(_name) 2437 2438 #define HSTATE_ATTR(_name) \ 2439 static struct kobj_attribute _name##_attr = \ 2440 __ATTR(_name, 0644, _name##_show, _name##_store) 2441 2442 static struct kobject *hugepages_kobj; 2443 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; 2444 2445 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp); 2446 2447 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp) 2448 { 2449 int i; 2450 2451 for (i = 0; i < HUGE_MAX_HSTATE; i++) 2452 if (hstate_kobjs[i] == kobj) { 2453 if (nidp) 2454 *nidp = NUMA_NO_NODE; 2455 return &hstates[i]; 2456 } 2457 2458 return kobj_to_node_hstate(kobj, nidp); 2459 } 2460 2461 static ssize_t nr_hugepages_show_common(struct kobject *kobj, 2462 struct kobj_attribute *attr, char *buf) 2463 { 2464 struct hstate *h; 2465 unsigned long nr_huge_pages; 2466 int nid; 2467 2468 h = kobj_to_hstate(kobj, &nid); 2469 if (nid == NUMA_NO_NODE) 2470 nr_huge_pages = h->nr_huge_pages; 2471 else 2472 nr_huge_pages = h->nr_huge_pages_node[nid]; 2473 2474 return sprintf(buf, "%lu\n", nr_huge_pages); 2475 } 2476 2477 static ssize_t __nr_hugepages_store_common(bool obey_mempolicy, 2478 struct hstate *h, int nid, 2479 unsigned long count, size_t len) 2480 { 2481 int err; 2482 nodemask_t nodes_allowed, *n_mask; 2483 2484 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) 2485 return -EINVAL; 2486 2487 if (nid == NUMA_NO_NODE) { 2488 /* 2489 * global hstate attribute 2490 */ 2491 if (!(obey_mempolicy && 2492 init_nodemask_of_mempolicy(&nodes_allowed))) 2493 n_mask = &node_states[N_MEMORY]; 2494 else 2495 n_mask = &nodes_allowed; 2496 } else { 2497 /* 2498 * Node specific request. count adjustment happens in 2499 * set_max_huge_pages() after acquiring hugetlb_lock. 2500 */ 2501 init_nodemask_of_node(&nodes_allowed, nid); 2502 n_mask = &nodes_allowed; 2503 } 2504 2505 err = set_max_huge_pages(h, count, nid, n_mask); 2506 2507 return err ? 
err : len; 2508 } 2509 2510 static ssize_t nr_hugepages_store_common(bool obey_mempolicy, 2511 struct kobject *kobj, const char *buf, 2512 size_t len) 2513 { 2514 struct hstate *h; 2515 unsigned long count; 2516 int nid; 2517 int err; 2518 2519 err = kstrtoul(buf, 10, &count); 2520 if (err) 2521 return err; 2522 2523 h = kobj_to_hstate(kobj, &nid); 2524 return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len); 2525 } 2526 2527 static ssize_t nr_hugepages_show(struct kobject *kobj, 2528 struct kobj_attribute *attr, char *buf) 2529 { 2530 return nr_hugepages_show_common(kobj, attr, buf); 2531 } 2532 2533 static ssize_t nr_hugepages_store(struct kobject *kobj, 2534 struct kobj_attribute *attr, const char *buf, size_t len) 2535 { 2536 return nr_hugepages_store_common(false, kobj, buf, len); 2537 } 2538 HSTATE_ATTR(nr_hugepages); 2539 2540 #ifdef CONFIG_NUMA 2541 2542 /* 2543 * hstate attribute for optionally mempolicy-based constraint on persistent 2544 * huge page alloc/free. 2545 */ 2546 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj, 2547 struct kobj_attribute *attr, char *buf) 2548 { 2549 return nr_hugepages_show_common(kobj, attr, buf); 2550 } 2551 2552 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj, 2553 struct kobj_attribute *attr, const char *buf, size_t len) 2554 { 2555 return nr_hugepages_store_common(true, kobj, buf, len); 2556 } 2557 HSTATE_ATTR(nr_hugepages_mempolicy); 2558 #endif 2559 2560 2561 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj, 2562 struct kobj_attribute *attr, char *buf) 2563 { 2564 struct hstate *h = kobj_to_hstate(kobj, NULL); 2565 return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages); 2566 } 2567 2568 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj, 2569 struct kobj_attribute *attr, const char *buf, size_t count) 2570 { 2571 int err; 2572 unsigned long input; 2573 struct hstate *h = kobj_to_hstate(kobj, NULL); 2574 2575 if (hstate_is_gigantic(h)) 2576 return -EINVAL; 2577 2578 err = kstrtoul(buf, 10, &input); 2579 if (err) 2580 return err; 2581 2582 spin_lock(&hugetlb_lock); 2583 h->nr_overcommit_huge_pages = input; 2584 spin_unlock(&hugetlb_lock); 2585 2586 return count; 2587 } 2588 HSTATE_ATTR(nr_overcommit_hugepages); 2589 2590 static ssize_t free_hugepages_show(struct kobject *kobj, 2591 struct kobj_attribute *attr, char *buf) 2592 { 2593 struct hstate *h; 2594 unsigned long free_huge_pages; 2595 int nid; 2596 2597 h = kobj_to_hstate(kobj, &nid); 2598 if (nid == NUMA_NO_NODE) 2599 free_huge_pages = h->free_huge_pages; 2600 else 2601 free_huge_pages = h->free_huge_pages_node[nid]; 2602 2603 return sprintf(buf, "%lu\n", free_huge_pages); 2604 } 2605 HSTATE_ATTR_RO(free_hugepages); 2606 2607 static ssize_t resv_hugepages_show(struct kobject *kobj, 2608 struct kobj_attribute *attr, char *buf) 2609 { 2610 struct hstate *h = kobj_to_hstate(kobj, NULL); 2611 return sprintf(buf, "%lu\n", h->resv_huge_pages); 2612 } 2613 HSTATE_ATTR_RO(resv_hugepages); 2614 2615 static ssize_t surplus_hugepages_show(struct kobject *kobj, 2616 struct kobj_attribute *attr, char *buf) 2617 { 2618 struct hstate *h; 2619 unsigned long surplus_huge_pages; 2620 int nid; 2621 2622 h = kobj_to_hstate(kobj, &nid); 2623 if (nid == NUMA_NO_NODE) 2624 surplus_huge_pages = h->surplus_huge_pages; 2625 else 2626 surplus_huge_pages = h->surplus_huge_pages_node[nid]; 2627 2628 return sprintf(buf, "%lu\n", surplus_huge_pages); 2629 } 2630 HSTATE_ATTR_RO(surplus_hugepages); 2631 2632 static struct attribute 
*hstate_attrs[] = { 2633 &nr_hugepages_attr.attr, 2634 &nr_overcommit_hugepages_attr.attr, 2635 &free_hugepages_attr.attr, 2636 &resv_hugepages_attr.attr, 2637 &surplus_hugepages_attr.attr, 2638 #ifdef CONFIG_NUMA 2639 &nr_hugepages_mempolicy_attr.attr, 2640 #endif 2641 NULL, 2642 }; 2643 2644 static const struct attribute_group hstate_attr_group = { 2645 .attrs = hstate_attrs, 2646 }; 2647 2648 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent, 2649 struct kobject **hstate_kobjs, 2650 const struct attribute_group *hstate_attr_group) 2651 { 2652 int retval; 2653 int hi = hstate_index(h); 2654 2655 hstate_kobjs[hi] = kobject_create_and_add(h->name, parent); 2656 if (!hstate_kobjs[hi]) 2657 return -ENOMEM; 2658 2659 retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group); 2660 if (retval) 2661 kobject_put(hstate_kobjs[hi]); 2662 2663 return retval; 2664 } 2665 2666 static void __init hugetlb_sysfs_init(void) 2667 { 2668 struct hstate *h; 2669 int err; 2670 2671 hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj); 2672 if (!hugepages_kobj) 2673 return; 2674 2675 for_each_hstate(h) { 2676 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj, 2677 hstate_kobjs, &hstate_attr_group); 2678 if (err) 2679 pr_err("Hugetlb: Unable to add hstate %s", h->name); 2680 } 2681 } 2682 2683 #ifdef CONFIG_NUMA 2684 2685 /* 2686 * node_hstate/s - associate per node hstate attributes, via their kobjects, 2687 * with node devices in node_devices[] using a parallel array. The array 2688 * index of a node device or _hstate == node id. 2689 * This is here to avoid any static dependency of the node device driver, in 2690 * the base kernel, on the hugetlb module. 2691 */ 2692 struct node_hstate { 2693 struct kobject *hugepages_kobj; 2694 struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; 2695 }; 2696 static struct node_hstate node_hstates[MAX_NUMNODES]; 2697 2698 /* 2699 * A subset of global hstate attributes for node devices 2700 */ 2701 static struct attribute *per_node_hstate_attrs[] = { 2702 &nr_hugepages_attr.attr, 2703 &free_hugepages_attr.attr, 2704 &surplus_hugepages_attr.attr, 2705 NULL, 2706 }; 2707 2708 static const struct attribute_group per_node_hstate_attr_group = { 2709 .attrs = per_node_hstate_attrs, 2710 }; 2711 2712 /* 2713 * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj. 2714 * Returns node id via non-NULL nidp. 2715 */ 2716 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp) 2717 { 2718 int nid; 2719 2720 for (nid = 0; nid < nr_node_ids; nid++) { 2721 struct node_hstate *nhs = &node_hstates[nid]; 2722 int i; 2723 for (i = 0; i < HUGE_MAX_HSTATE; i++) 2724 if (nhs->hstate_kobjs[i] == kobj) { 2725 if (nidp) 2726 *nidp = nid; 2727 return &hstates[i]; 2728 } 2729 } 2730 2731 BUG(); 2732 return NULL; 2733 } 2734 2735 /* 2736 * Unregister hstate attributes from a single node device. 2737 * No-op if no hstate attributes attached. 2738 */ 2739 static void hugetlb_unregister_node(struct node *node) 2740 { 2741 struct hstate *h; 2742 struct node_hstate *nhs = &node_hstates[node->dev.id]; 2743 2744 if (!nhs->hugepages_kobj) 2745 return; /* no hstate attributes */ 2746 2747 for_each_hstate(h) { 2748 int idx = hstate_index(h); 2749 if (nhs->hstate_kobjs[idx]) { 2750 kobject_put(nhs->hstate_kobjs[idx]); 2751 nhs->hstate_kobjs[idx] = NULL; 2752 } 2753 } 2754 2755 kobject_put(nhs->hugepages_kobj); 2756 nhs->hugepages_kobj = NULL; 2757 } 2758 2759 2760 /* 2761 * Register hstate attributes for a single node device. 
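 * These typically appear under
 * /sys/devices/system/node/node<nid>/hugepages/hugepages-<size>kB/.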
2762 * No-op if attributes already registered. 2763 */ 2764 static void hugetlb_register_node(struct node *node) 2765 { 2766 struct hstate *h; 2767 struct node_hstate *nhs = &node_hstates[node->dev.id]; 2768 int err; 2769 2770 if (nhs->hugepages_kobj) 2771 return; /* already allocated */ 2772 2773 nhs->hugepages_kobj = kobject_create_and_add("hugepages", 2774 &node->dev.kobj); 2775 if (!nhs->hugepages_kobj) 2776 return; 2777 2778 for_each_hstate(h) { 2779 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj, 2780 nhs->hstate_kobjs, 2781 &per_node_hstate_attr_group); 2782 if (err) { 2783 pr_err("Hugetlb: Unable to add hstate %s for node %d\n", 2784 h->name, node->dev.id); 2785 hugetlb_unregister_node(node); 2786 break; 2787 } 2788 } 2789 } 2790 2791 /* 2792 * hugetlb init time: register hstate attributes for all registered node 2793 * devices of nodes that have memory. All on-line nodes should have 2794 * registered their associated device by this time. 2795 */ 2796 static void __init hugetlb_register_all_nodes(void) 2797 { 2798 int nid; 2799 2800 for_each_node_state(nid, N_MEMORY) { 2801 struct node *node = node_devices[nid]; 2802 if (node->dev.id == nid) 2803 hugetlb_register_node(node); 2804 } 2805 2806 /* 2807 * Let the node device driver know we're here so it can 2808 * [un]register hstate attributes on node hotplug. 2809 */ 2810 register_hugetlbfs_with_node(hugetlb_register_node, 2811 hugetlb_unregister_node); 2812 } 2813 #else /* !CONFIG_NUMA */ 2814 2815 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp) 2816 { 2817 BUG(); 2818 if (nidp) 2819 *nidp = -1; 2820 return NULL; 2821 } 2822 2823 static void hugetlb_register_all_nodes(void) { } 2824 2825 #endif 2826 2827 static int __init hugetlb_init(void) 2828 { 2829 int i; 2830 2831 if (!hugepages_supported()) 2832 return 0; 2833 2834 if (!size_to_hstate(default_hstate_size)) { 2835 if (default_hstate_size != 0) { 2836 pr_err("HugeTLB: unsupported default_hugepagesz %lu. Reverting to %lu\n", 2837 default_hstate_size, HPAGE_SIZE); 2838 } 2839 2840 default_hstate_size = HPAGE_SIZE; 2841 if (!size_to_hstate(default_hstate_size)) 2842 hugetlb_add_hstate(HUGETLB_PAGE_ORDER); 2843 } 2844 default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size)); 2845 if (default_hstate_max_huge_pages) { 2846 if (!default_hstate.max_huge_pages) 2847 default_hstate.max_huge_pages = default_hstate_max_huge_pages; 2848 } 2849 2850 hugetlb_init_hstates(); 2851 gather_bootmem_prealloc(); 2852 report_hugepages(); 2853 2854 hugetlb_sysfs_init(); 2855 hugetlb_register_all_nodes(); 2856 hugetlb_cgroup_file_init(); 2857 2858 #ifdef CONFIG_SMP 2859 num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus()); 2860 #else 2861 num_fault_mutexes = 1; 2862 #endif 2863 hugetlb_fault_mutex_table = 2864 kmalloc_array(num_fault_mutexes, sizeof(struct mutex), 2865 GFP_KERNEL); 2866 BUG_ON(!hugetlb_fault_mutex_table); 2867 2868 for (i = 0; i < num_fault_mutexes; i++) 2869 mutex_init(&hugetlb_fault_mutex_table[i]); 2870 return 0; 2871 } 2872 subsys_initcall(hugetlb_init); 2873 2874 /* Should be called on processing a hugepagesz=... 
option */ 2875 void __init hugetlb_bad_size(void) 2876 { 2877 parsed_valid_hugepagesz = false; 2878 } 2879 2880 void __init hugetlb_add_hstate(unsigned int order) 2881 { 2882 struct hstate *h; 2883 unsigned long i; 2884 2885 if (size_to_hstate(PAGE_SIZE << order)) { 2886 pr_warn("hugepagesz= specified twice, ignoring\n"); 2887 return; 2888 } 2889 BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE); 2890 BUG_ON(order == 0); 2891 h = &hstates[hugetlb_max_hstate++]; 2892 h->order = order; 2893 h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1); 2894 h->nr_huge_pages = 0; 2895 h->free_huge_pages = 0; 2896 for (i = 0; i < MAX_NUMNODES; ++i) 2897 INIT_LIST_HEAD(&h->hugepage_freelists[i]); 2898 INIT_LIST_HEAD(&h->hugepage_activelist); 2899 h->next_nid_to_alloc = first_memory_node; 2900 h->next_nid_to_free = first_memory_node; 2901 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB", 2902 huge_page_size(h)/1024); 2903 2904 parsed_hstate = h; 2905 } 2906 2907 static int __init hugetlb_nrpages_setup(char *s) 2908 { 2909 unsigned long *mhp; 2910 static unsigned long *last_mhp; 2911 2912 if (!parsed_valid_hugepagesz) { 2913 pr_warn("hugepages = %s preceded by " 2914 "an unsupported hugepagesz, ignoring\n", s); 2915 parsed_valid_hugepagesz = true; 2916 return 1; 2917 } 2918 /* 2919 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet, 2920 * so this hugepages= parameter goes to the "default hstate". 2921 */ 2922 else if (!hugetlb_max_hstate) 2923 mhp = &default_hstate_max_huge_pages; 2924 else 2925 mhp = &parsed_hstate->max_huge_pages; 2926 2927 if (mhp == last_mhp) { 2928 pr_warn("hugepages= specified twice without interleaving hugepagesz=, ignoring\n"); 2929 return 1; 2930 } 2931 2932 if (sscanf(s, "%lu", mhp) <= 0) 2933 *mhp = 0; 2934 2935 /* 2936 * Global state is always initialized later in hugetlb_init. 2937 * But we need to allocate >= MAX_ORDER hstates here early to still 2938 * use the bootmem allocator. 
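 *
 * For example, on x86-64 a (hypothetical) command line of
 *   hugepagesz=1G hugepages=2 hugepagesz=2M hugepages=512
 * allocates the two 1G pages from bootmem right here at parse time, while
 * the 2M pages are allocated later from the buddy allocator during
 * hugetlb_init().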
2939 */ 2940 if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER) 2941 hugetlb_hstate_alloc_pages(parsed_hstate); 2942 2943 last_mhp = mhp; 2944 2945 return 1; 2946 } 2947 __setup("hugepages=", hugetlb_nrpages_setup); 2948 2949 static int __init hugetlb_default_setup(char *s) 2950 { 2951 default_hstate_size = memparse(s, &s); 2952 return 1; 2953 } 2954 __setup("default_hugepagesz=", hugetlb_default_setup); 2955 2956 static unsigned int cpuset_mems_nr(unsigned int *array) 2957 { 2958 int node; 2959 unsigned int nr = 0; 2960 2961 for_each_node_mask(node, cpuset_current_mems_allowed) 2962 nr += array[node]; 2963 2964 return nr; 2965 } 2966 2967 #ifdef CONFIG_SYSCTL 2968 static int hugetlb_sysctl_handler_common(bool obey_mempolicy, 2969 struct ctl_table *table, int write, 2970 void __user *buffer, size_t *length, loff_t *ppos) 2971 { 2972 struct hstate *h = &default_hstate; 2973 unsigned long tmp = h->max_huge_pages; 2974 int ret; 2975 2976 if (!hugepages_supported()) 2977 return -EOPNOTSUPP; 2978 2979 table->data = &tmp; 2980 table->maxlen = sizeof(unsigned long); 2981 ret = proc_doulongvec_minmax(table, write, buffer, length, ppos); 2982 if (ret) 2983 goto out; 2984 2985 if (write) 2986 ret = __nr_hugepages_store_common(obey_mempolicy, h, 2987 NUMA_NO_NODE, tmp, *length); 2988 out: 2989 return ret; 2990 } 2991 2992 int hugetlb_sysctl_handler(struct ctl_table *table, int write, 2993 void __user *buffer, size_t *length, loff_t *ppos) 2994 { 2995 2996 return hugetlb_sysctl_handler_common(false, table, write, 2997 buffer, length, ppos); 2998 } 2999 3000 #ifdef CONFIG_NUMA 3001 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write, 3002 void __user *buffer, size_t *length, loff_t *ppos) 3003 { 3004 return hugetlb_sysctl_handler_common(true, table, write, 3005 buffer, length, ppos); 3006 } 3007 #endif /* CONFIG_NUMA */ 3008 3009 int hugetlb_overcommit_handler(struct ctl_table *table, int write, 3010 void __user *buffer, 3011 size_t *length, loff_t *ppos) 3012 { 3013 struct hstate *h = &default_hstate; 3014 unsigned long tmp; 3015 int ret; 3016 3017 if (!hugepages_supported()) 3018 return -EOPNOTSUPP; 3019 3020 tmp = h->nr_overcommit_huge_pages; 3021 3022 if (write && hstate_is_gigantic(h)) 3023 return -EINVAL; 3024 3025 table->data = &tmp; 3026 table->maxlen = sizeof(unsigned long); 3027 ret = proc_doulongvec_minmax(table, write, buffer, length, ppos); 3028 if (ret) 3029 goto out; 3030 3031 if (write) { 3032 spin_lock(&hugetlb_lock); 3033 h->nr_overcommit_huge_pages = tmp; 3034 spin_unlock(&hugetlb_lock); 3035 } 3036 out: 3037 return ret; 3038 } 3039 3040 #endif /* CONFIG_SYSCTL */ 3041 3042 void hugetlb_report_meminfo(struct seq_file *m) 3043 { 3044 struct hstate *h; 3045 unsigned long total = 0; 3046 3047 if (!hugepages_supported()) 3048 return; 3049 3050 for_each_hstate(h) { 3051 unsigned long count = h->nr_huge_pages; 3052 3053 total += (PAGE_SIZE << huge_page_order(h)) * count; 3054 3055 if (h == &default_hstate) 3056 seq_printf(m, 3057 "HugePages_Total: %5lu\n" 3058 "HugePages_Free: %5lu\n" 3059 "HugePages_Rsvd: %5lu\n" 3060 "HugePages_Surp: %5lu\n" 3061 "Hugepagesize: %8lu kB\n", 3062 count, 3063 h->free_huge_pages, 3064 h->resv_huge_pages, 3065 h->surplus_huge_pages, 3066 (PAGE_SIZE << huge_page_order(h)) / 1024); 3067 } 3068 3069 seq_printf(m, "Hugetlb: %8lu kB\n", total / 1024); 3070 } 3071 3072 int hugetlb_report_node_meminfo(int nid, char *buf) 3073 { 3074 struct hstate *h = &default_hstate; 3075 if (!hugepages_supported()) 3076 return 0; 3077 return 
sprintf(buf, 3078 "Node %d HugePages_Total: %5u\n" 3079 "Node %d HugePages_Free: %5u\n" 3080 "Node %d HugePages_Surp: %5u\n", 3081 nid, h->nr_huge_pages_node[nid], 3082 nid, h->free_huge_pages_node[nid], 3083 nid, h->surplus_huge_pages_node[nid]); 3084 } 3085 3086 void hugetlb_show_meminfo(void) 3087 { 3088 struct hstate *h; 3089 int nid; 3090 3091 if (!hugepages_supported()) 3092 return; 3093 3094 for_each_node_state(nid, N_MEMORY) 3095 for_each_hstate(h) 3096 pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n", 3097 nid, 3098 h->nr_huge_pages_node[nid], 3099 h->free_huge_pages_node[nid], 3100 h->surplus_huge_pages_node[nid], 3101 1UL << (huge_page_order(h) + PAGE_SHIFT - 10)); 3102 } 3103 3104 void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm) 3105 { 3106 seq_printf(m, "HugetlbPages:\t%8lu kB\n", 3107 atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10)); 3108 } 3109 3110 /* Return the number pages of memory we physically have, in PAGE_SIZE units. */ 3111 unsigned long hugetlb_total_pages(void) 3112 { 3113 struct hstate *h; 3114 unsigned long nr_total_pages = 0; 3115 3116 for_each_hstate(h) 3117 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h); 3118 return nr_total_pages; 3119 } 3120 3121 static int hugetlb_acct_memory(struct hstate *h, long delta) 3122 { 3123 int ret = -ENOMEM; 3124 3125 spin_lock(&hugetlb_lock); 3126 /* 3127 * When cpuset is configured, it breaks the strict hugetlb page 3128 * reservation as the accounting is done on a global variable. Such 3129 * reservation is completely rubbish in the presence of cpuset because 3130 * the reservation is not checked against page availability for the 3131 * current cpuset. Application can still potentially OOM'ed by kernel 3132 * with lack of free htlb page in cpuset that the task is in. 3133 * Attempt to enforce strict accounting with cpuset is almost 3134 * impossible (or too ugly) because cpuset is too fluid that 3135 * task or memory node can be dynamically moved between cpusets. 3136 * 3137 * The change of semantics for shared hugetlb mapping with cpuset is 3138 * undesirable. However, in order to preserve some of the semantics, 3139 * we fall back to check against current free page availability as 3140 * a best attempt and hopefully to minimize the impact of changing 3141 * semantics that cpuset has. 3142 */ 3143 if (delta > 0) { 3144 if (gather_surplus_pages(h, delta) < 0) 3145 goto out; 3146 3147 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) { 3148 return_unused_surplus_pages(h, delta); 3149 goto out; 3150 } 3151 } 3152 3153 ret = 0; 3154 if (delta < 0) 3155 return_unused_surplus_pages(h, (unsigned long) -delta); 3156 3157 out: 3158 spin_unlock(&hugetlb_lock); 3159 return ret; 3160 } 3161 3162 static void hugetlb_vm_op_open(struct vm_area_struct *vma) 3163 { 3164 struct resv_map *resv = vma_resv_map(vma); 3165 3166 /* 3167 * This new VMA should share its siblings reservation map if present. 3168 * The VMA will only ever have a valid reservation map pointer where 3169 * it is being copied for another still existing VMA. As that VMA 3170 * has a reference to the reservation map it cannot disappear until 3171 * after this open call completes. It is therefore safe to take a 3172 * new reference here without additional locking. 
3173 */
3174 if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3175 kref_get(&resv->refs);
3176 }
3177
3178 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
3179 {
3180 struct hstate *h = hstate_vma(vma);
3181 struct resv_map *resv = vma_resv_map(vma);
3182 struct hugepage_subpool *spool = subpool_vma(vma);
3183 unsigned long reserve, start, end;
3184 long gbl_reserve;
3185
3186 if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3187 return;
3188
3189 start = vma_hugecache_offset(h, vma, vma->vm_start);
3190 end = vma_hugecache_offset(h, vma, vma->vm_end);
3191
3192 reserve = (end - start) - region_count(resv, start, end);
3193
3194 kref_put(&resv->refs, resv_map_release);
3195
3196 if (reserve) {
3197 /*
3198 * Decrement reserve counts. The global reserve count may be
3199 * adjusted if the subpool has a minimum size.
3200 */
3201 gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
3202 hugetlb_acct_memory(h, -gbl_reserve);
3203 }
3204 }
3205
3206 static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
3207 {
3208 if (addr & ~(huge_page_mask(hstate_vma(vma))))
3209 return -EINVAL;
3210 return 0;
3211 }
3212
3213 static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
3214 {
3215 struct hstate *hstate = hstate_vma(vma);
3216
3217 return 1UL << huge_page_shift(hstate);
3218 }
3219
3220 /*
3221 * We cannot handle pagefaults against hugetlb pages at all. They cause
3222 * handle_mm_fault() to try to instantiate regular-sized pages in the
3223 * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get
3224 * this far.
3225 */
3226 static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
3227 {
3228 BUG();
3229 return 0;
3230 }
3231
3232 /*
3233 * When a new function is introduced to vm_operations_struct and added
3234 * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
3235 * This is because under the System V memory model, mappings created via
3236 * shmget/shmat with "huge page" specified are backed by hugetlbfs files,
3237 * and their original vm_ops are overwritten with shm_vm_ops.
3238 */ 3239 const struct vm_operations_struct hugetlb_vm_ops = { 3240 .fault = hugetlb_vm_op_fault, 3241 .open = hugetlb_vm_op_open, 3242 .close = hugetlb_vm_op_close, 3243 .split = hugetlb_vm_op_split, 3244 .pagesize = hugetlb_vm_op_pagesize, 3245 }; 3246 3247 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page, 3248 int writable) 3249 { 3250 pte_t entry; 3251 3252 if (writable) { 3253 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page, 3254 vma->vm_page_prot))); 3255 } else { 3256 entry = huge_pte_wrprotect(mk_huge_pte(page, 3257 vma->vm_page_prot)); 3258 } 3259 entry = pte_mkyoung(entry); 3260 entry = pte_mkhuge(entry); 3261 entry = arch_make_huge_pte(entry, vma, page, writable); 3262 3263 return entry; 3264 } 3265 3266 static void set_huge_ptep_writable(struct vm_area_struct *vma, 3267 unsigned long address, pte_t *ptep) 3268 { 3269 pte_t entry; 3270 3271 entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep))); 3272 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) 3273 update_mmu_cache(vma, address, ptep); 3274 } 3275 3276 bool is_hugetlb_entry_migration(pte_t pte) 3277 { 3278 swp_entry_t swp; 3279 3280 if (huge_pte_none(pte) || pte_present(pte)) 3281 return false; 3282 swp = pte_to_swp_entry(pte); 3283 if (non_swap_entry(swp) && is_migration_entry(swp)) 3284 return true; 3285 else 3286 return false; 3287 } 3288 3289 static int is_hugetlb_entry_hwpoisoned(pte_t pte) 3290 { 3291 swp_entry_t swp; 3292 3293 if (huge_pte_none(pte) || pte_present(pte)) 3294 return 0; 3295 swp = pte_to_swp_entry(pte); 3296 if (non_swap_entry(swp) && is_hwpoison_entry(swp)) 3297 return 1; 3298 else 3299 return 0; 3300 } 3301 3302 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, 3303 struct vm_area_struct *vma) 3304 { 3305 pte_t *src_pte, *dst_pte, entry, dst_entry; 3306 struct page *ptepage; 3307 unsigned long addr; 3308 int cow; 3309 struct hstate *h = hstate_vma(vma); 3310 unsigned long sz = huge_page_size(h); 3311 struct mmu_notifier_range range; 3312 int ret = 0; 3313 3314 cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; 3315 3316 if (cow) { 3317 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, src, 3318 vma->vm_start, 3319 vma->vm_end); 3320 mmu_notifier_invalidate_range_start(&range); 3321 } 3322 3323 for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) { 3324 spinlock_t *src_ptl, *dst_ptl; 3325 src_pte = huge_pte_offset(src, addr, sz); 3326 if (!src_pte) 3327 continue; 3328 dst_pte = huge_pte_alloc(dst, addr, sz); 3329 if (!dst_pte) { 3330 ret = -ENOMEM; 3331 break; 3332 } 3333 3334 /* 3335 * If the pagetables are shared don't copy or take references. 3336 * dst_pte == src_pte is the common case of src/dest sharing. 3337 * 3338 * However, src could have 'unshared' and dst shares with 3339 * another vma. If dst_pte !none, this implies sharing. 3340 * Check here before taking page table lock, and once again 3341 * after taking the lock below. 3342 */ 3343 dst_entry = huge_ptep_get(dst_pte); 3344 if ((dst_pte == src_pte) || !huge_pte_none(dst_entry)) 3345 continue; 3346 3347 dst_ptl = huge_pte_lock(h, dst, dst_pte); 3348 src_ptl = huge_pte_lockptr(h, src, src_pte); 3349 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 3350 entry = huge_ptep_get(src_pte); 3351 dst_entry = huge_ptep_get(dst_pte); 3352 if (huge_pte_none(entry) || !huge_pte_none(dst_entry)) { 3353 /* 3354 * Skip if src entry none. Also, skip in the 3355 * unlikely case dst entry !none as this implies 3356 * sharing with another vma. 
3357 */ 3358 ; 3359 } else if (unlikely(is_hugetlb_entry_migration(entry) || 3360 is_hugetlb_entry_hwpoisoned(entry))) { 3361 swp_entry_t swp_entry = pte_to_swp_entry(entry); 3362 3363 if (is_write_migration_entry(swp_entry) && cow) { 3364 /* 3365 * COW mappings require pages in both 3366 * parent and child to be set to read. 3367 */ 3368 make_migration_entry_read(&swp_entry); 3369 entry = swp_entry_to_pte(swp_entry); 3370 set_huge_swap_pte_at(src, addr, src_pte, 3371 entry, sz); 3372 } 3373 set_huge_swap_pte_at(dst, addr, dst_pte, entry, sz); 3374 } else { 3375 if (cow) { 3376 /* 3377 * No need to notify as we are downgrading page 3378 * table protection not changing it to point 3379 * to a new page. 3380 * 3381 * See Documentation/vm/mmu_notifier.rst 3382 */ 3383 huge_ptep_set_wrprotect(src, addr, src_pte); 3384 } 3385 entry = huge_ptep_get(src_pte); 3386 ptepage = pte_page(entry); 3387 get_page(ptepage); 3388 page_dup_rmap(ptepage, true); 3389 set_huge_pte_at(dst, addr, dst_pte, entry); 3390 hugetlb_count_add(pages_per_huge_page(h), dst); 3391 } 3392 spin_unlock(src_ptl); 3393 spin_unlock(dst_ptl); 3394 } 3395 3396 if (cow) 3397 mmu_notifier_invalidate_range_end(&range); 3398 3399 return ret; 3400 } 3401 3402 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, 3403 unsigned long start, unsigned long end, 3404 struct page *ref_page) 3405 { 3406 struct mm_struct *mm = vma->vm_mm; 3407 unsigned long address; 3408 pte_t *ptep; 3409 pte_t pte; 3410 spinlock_t *ptl; 3411 struct page *page; 3412 struct hstate *h = hstate_vma(vma); 3413 unsigned long sz = huge_page_size(h); 3414 struct mmu_notifier_range range; 3415 3416 WARN_ON(!is_vm_hugetlb_page(vma)); 3417 BUG_ON(start & ~huge_page_mask(h)); 3418 BUG_ON(end & ~huge_page_mask(h)); 3419 3420 /* 3421 * This is a hugetlb vma, all the pte entries should point 3422 * to huge page. 3423 */ 3424 tlb_change_page_size(tlb, sz); 3425 tlb_start_vma(tlb, vma); 3426 3427 /* 3428 * If sharing possible, alert mmu notifiers of worst case. 3429 */ 3430 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm, start, 3431 end); 3432 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); 3433 mmu_notifier_invalidate_range_start(&range); 3434 address = start; 3435 for (; address < end; address += sz) { 3436 ptep = huge_pte_offset(mm, address, sz); 3437 if (!ptep) 3438 continue; 3439 3440 ptl = huge_pte_lock(h, mm, ptep); 3441 if (huge_pmd_unshare(mm, &address, ptep)) { 3442 spin_unlock(ptl); 3443 /* 3444 * We just unmapped a page of PMDs by clearing a PUD. 3445 * The caller's TLB flush range should cover this area. 3446 */ 3447 continue; 3448 } 3449 3450 pte = huge_ptep_get(ptep); 3451 if (huge_pte_none(pte)) { 3452 spin_unlock(ptl); 3453 continue; 3454 } 3455 3456 /* 3457 * Migrating hugepage or HWPoisoned hugepage is already 3458 * unmapped and its refcount is dropped, so just clear pte here. 3459 */ 3460 if (unlikely(!pte_present(pte))) { 3461 huge_pte_clear(mm, address, ptep, sz); 3462 spin_unlock(ptl); 3463 continue; 3464 } 3465 3466 page = pte_page(pte); 3467 /* 3468 * If a reference page is supplied, it is because a specific 3469 * page is being unmapped, not a range. Ensure the page we 3470 * are about to unmap is the actual page of interest. 
3471 */ 3472 if (ref_page) { 3473 if (page != ref_page) { 3474 spin_unlock(ptl); 3475 continue; 3476 } 3477 /* 3478 * Mark the VMA as having unmapped its page so that 3479 * future faults in this VMA will fail rather than 3480 * looking like data was lost 3481 */ 3482 set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED); 3483 } 3484 3485 pte = huge_ptep_get_and_clear(mm, address, ptep); 3486 tlb_remove_huge_tlb_entry(h, tlb, ptep, address); 3487 if (huge_pte_dirty(pte)) 3488 set_page_dirty(page); 3489 3490 hugetlb_count_sub(pages_per_huge_page(h), mm); 3491 page_remove_rmap(page, true); 3492 3493 spin_unlock(ptl); 3494 tlb_remove_page_size(tlb, page, huge_page_size(h)); 3495 /* 3496 * Bail out after unmapping reference page if supplied 3497 */ 3498 if (ref_page) 3499 break; 3500 } 3501 mmu_notifier_invalidate_range_end(&range); 3502 tlb_end_vma(tlb, vma); 3503 } 3504 3505 void __unmap_hugepage_range_final(struct mmu_gather *tlb, 3506 struct vm_area_struct *vma, unsigned long start, 3507 unsigned long end, struct page *ref_page) 3508 { 3509 __unmap_hugepage_range(tlb, vma, start, end, ref_page); 3510 3511 /* 3512 * Clear this flag so that x86's huge_pmd_share page_table_shareable 3513 * test will fail on a vma being torn down, and not grab a page table 3514 * on its way out. We're lucky that the flag has such an appropriate 3515 * name, and can in fact be safely cleared here. We could clear it 3516 * before the __unmap_hugepage_range above, but all that's necessary 3517 * is to clear it before releasing the i_mmap_rwsem. This works 3518 * because in the context this is called, the VMA is about to be 3519 * destroyed and the i_mmap_rwsem is held. 3520 */ 3521 vma->vm_flags &= ~VM_MAYSHARE; 3522 } 3523 3524 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, 3525 unsigned long end, struct page *ref_page) 3526 { 3527 struct mm_struct *mm; 3528 struct mmu_gather tlb; 3529 unsigned long tlb_start = start; 3530 unsigned long tlb_end = end; 3531 3532 /* 3533 * If shared PMDs were possibly used within this vma range, adjust 3534 * start/end for worst case tlb flushing. 3535 * Note that we can not be sure if PMDs are shared until we try to 3536 * unmap pages. However, we want to make sure TLB flushing covers 3537 * the largest possible range. 3538 */ 3539 adjust_range_if_pmd_sharing_possible(vma, &tlb_start, &tlb_end); 3540 3541 mm = vma->vm_mm; 3542 3543 tlb_gather_mmu(&tlb, mm, tlb_start, tlb_end); 3544 __unmap_hugepage_range(&tlb, vma, start, end, ref_page); 3545 tlb_finish_mmu(&tlb, tlb_start, tlb_end); 3546 } 3547 3548 /* 3549 * This is called when the original mapper is failing to COW a MAP_PRIVATE 3550 * mappping it owns the reserve page for. The intention is to unmap the page 3551 * from other VMAs and let the children be SIGKILLed if they are faulting the 3552 * same region. 3553 */ 3554 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, 3555 struct page *page, unsigned long address) 3556 { 3557 struct hstate *h = hstate_vma(vma); 3558 struct vm_area_struct *iter_vma; 3559 struct address_space *mapping; 3560 pgoff_t pgoff; 3561 3562 /* 3563 * vm_pgoff is in PAGE_SIZE units, hence the different calculation 3564 * from page cache lookup which is in HPAGE_SIZE units. 3565 */ 3566 address = address & huge_page_mask(h); 3567 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + 3568 vma->vm_pgoff; 3569 mapping = vma->vm_file->f_mapping; 3570 3571 /* 3572 * Take the mapping lock for the duration of the table walk. 
As 3573 * this mapping should be shared between all the VMAs, 3574 * __unmap_hugepage_range() is called as the lock is already held 3575 */ 3576 i_mmap_lock_write(mapping); 3577 vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) { 3578 /* Do not unmap the current VMA */ 3579 if (iter_vma == vma) 3580 continue; 3581 3582 /* 3583 * Shared VMAs have their own reserves and do not affect 3584 * MAP_PRIVATE accounting but it is possible that a shared 3585 * VMA is using the same page so check and skip such VMAs. 3586 */ 3587 if (iter_vma->vm_flags & VM_MAYSHARE) 3588 continue; 3589 3590 /* 3591 * Unmap the page from other VMAs without their own reserves. 3592 * They get marked to be SIGKILLed if they fault in these 3593 * areas. This is because a future no-page fault on this VMA 3594 * could insert a zeroed page instead of the data existing 3595 * from the time of fork. This would look like data corruption 3596 */ 3597 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER)) 3598 unmap_hugepage_range(iter_vma, address, 3599 address + huge_page_size(h), page); 3600 } 3601 i_mmap_unlock_write(mapping); 3602 } 3603 3604 /* 3605 * Hugetlb_cow() should be called with page lock of the original hugepage held. 3606 * Called with hugetlb_instantiation_mutex held and pte_page locked so we 3607 * cannot race with other handlers or page migration. 3608 * Keep the pte_same checks anyway to make transition from the mutex easier. 3609 */ 3610 static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma, 3611 unsigned long address, pte_t *ptep, 3612 struct page *pagecache_page, spinlock_t *ptl) 3613 { 3614 pte_t pte; 3615 struct hstate *h = hstate_vma(vma); 3616 struct page *old_page, *new_page; 3617 int outside_reserve = 0; 3618 vm_fault_t ret = 0; 3619 unsigned long haddr = address & huge_page_mask(h); 3620 struct mmu_notifier_range range; 3621 3622 pte = huge_ptep_get(ptep); 3623 old_page = pte_page(pte); 3624 3625 retry_avoidcopy: 3626 /* If no-one else is actually using this page, avoid the copy 3627 * and just make the page writable */ 3628 if (page_mapcount(old_page) == 1 && PageAnon(old_page)) { 3629 page_move_anon_rmap(old_page, vma); 3630 set_huge_ptep_writable(vma, haddr, ptep); 3631 return 0; 3632 } 3633 3634 /* 3635 * If the process that created a MAP_PRIVATE mapping is about to 3636 * perform a COW due to a shared page count, attempt to satisfy 3637 * the allocation without using the existing reserves. The pagecache 3638 * page is used to determine if the reserve at this address was 3639 * consumed or not. If reserves were used, a partial faulted mapping 3640 * at the time of fork() could consume its reserves on COW instead 3641 * of the full address range. 3642 */ 3643 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && 3644 old_page != pagecache_page) 3645 outside_reserve = 1; 3646 3647 get_page(old_page); 3648 3649 /* 3650 * Drop page table lock as buddy allocator may be called. It will 3651 * be acquired again before returning to the caller, as expected. 3652 */ 3653 spin_unlock(ptl); 3654 new_page = alloc_huge_page(vma, haddr, outside_reserve); 3655 3656 if (IS_ERR(new_page)) { 3657 /* 3658 * If a process owning a MAP_PRIVATE mapping fails to COW, 3659 * it is due to references held by a child and an insufficient 3660 * huge page pool. To guarantee the original mappers 3661 * reliability, unmap the page from child processes. The child 3662 * may get SIGKILLed if it later faults. 
3663 */ 3664 if (outside_reserve) { 3665 put_page(old_page); 3666 BUG_ON(huge_pte_none(pte)); 3667 unmap_ref_private(mm, vma, old_page, haddr); 3668 BUG_ON(huge_pte_none(pte)); 3669 spin_lock(ptl); 3670 ptep = huge_pte_offset(mm, haddr, huge_page_size(h)); 3671 if (likely(ptep && 3672 pte_same(huge_ptep_get(ptep), pte))) 3673 goto retry_avoidcopy; 3674 /* 3675 * race occurs while re-acquiring page table 3676 * lock, and our job is done. 3677 */ 3678 return 0; 3679 } 3680 3681 ret = vmf_error(PTR_ERR(new_page)); 3682 goto out_release_old; 3683 } 3684 3685 /* 3686 * When the original hugepage is shared one, it does not have 3687 * anon_vma prepared. 3688 */ 3689 if (unlikely(anon_vma_prepare(vma))) { 3690 ret = VM_FAULT_OOM; 3691 goto out_release_all; 3692 } 3693 3694 copy_user_huge_page(new_page, old_page, address, vma, 3695 pages_per_huge_page(h)); 3696 __SetPageUptodate(new_page); 3697 3698 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, haddr, 3699 haddr + huge_page_size(h)); 3700 mmu_notifier_invalidate_range_start(&range); 3701 3702 /* 3703 * Retake the page table lock to check for racing updates 3704 * before the page tables are altered 3705 */ 3706 spin_lock(ptl); 3707 ptep = huge_pte_offset(mm, haddr, huge_page_size(h)); 3708 if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) { 3709 ClearPagePrivate(new_page); 3710 3711 /* Break COW */ 3712 huge_ptep_clear_flush(vma, haddr, ptep); 3713 mmu_notifier_invalidate_range(mm, range.start, range.end); 3714 set_huge_pte_at(mm, haddr, ptep, 3715 make_huge_pte(vma, new_page, 1)); 3716 page_remove_rmap(old_page, true); 3717 hugepage_add_new_anon_rmap(new_page, vma, haddr); 3718 set_page_huge_active(new_page); 3719 /* Make the old page be freed below */ 3720 new_page = old_page; 3721 } 3722 spin_unlock(ptl); 3723 mmu_notifier_invalidate_range_end(&range); 3724 out_release_all: 3725 restore_reserve_on_error(h, vma, haddr, new_page); 3726 put_page(new_page); 3727 out_release_old: 3728 put_page(old_page); 3729 3730 spin_lock(ptl); /* Caller expects lock to be held */ 3731 return ret; 3732 } 3733 3734 /* Return the pagecache page at a given address within a VMA */ 3735 static struct page *hugetlbfs_pagecache_page(struct hstate *h, 3736 struct vm_area_struct *vma, unsigned long address) 3737 { 3738 struct address_space *mapping; 3739 pgoff_t idx; 3740 3741 mapping = vma->vm_file->f_mapping; 3742 idx = vma_hugecache_offset(h, vma, address); 3743 3744 return find_lock_page(mapping, idx); 3745 } 3746 3747 /* 3748 * Return whether there is a pagecache page to back given address within VMA. 3749 * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page. 3750 */ 3751 static bool hugetlbfs_pagecache_present(struct hstate *h, 3752 struct vm_area_struct *vma, unsigned long address) 3753 { 3754 struct address_space *mapping; 3755 pgoff_t idx; 3756 struct page *page; 3757 3758 mapping = vma->vm_file->f_mapping; 3759 idx = vma_hugecache_offset(h, vma, address); 3760 3761 page = find_get_page(mapping, idx); 3762 if (page) 3763 put_page(page); 3764 return page != NULL; 3765 } 3766 3767 int huge_add_to_page_cache(struct page *page, struct address_space *mapping, 3768 pgoff_t idx) 3769 { 3770 struct inode *inode = mapping->host; 3771 struct hstate *h = hstate_inode(inode); 3772 int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL); 3773 3774 if (err) 3775 return err; 3776 ClearPagePrivate(page); 3777 3778 /* 3779 * set page dirty so that it will not be removed from cache/file 3780 * by non-hugetlbfs specific code paths. 
3781 */ 3782 set_page_dirty(page); 3783 3784 spin_lock(&inode->i_lock); 3785 inode->i_blocks += blocks_per_huge_page(h); 3786 spin_unlock(&inode->i_lock); 3787 return 0; 3788 } 3789 3790 static vm_fault_t hugetlb_no_page(struct mm_struct *mm, 3791 struct vm_area_struct *vma, 3792 struct address_space *mapping, pgoff_t idx, 3793 unsigned long address, pte_t *ptep, unsigned int flags) 3794 { 3795 struct hstate *h = hstate_vma(vma); 3796 vm_fault_t ret = VM_FAULT_SIGBUS; 3797 int anon_rmap = 0; 3798 unsigned long size; 3799 struct page *page; 3800 pte_t new_pte; 3801 spinlock_t *ptl; 3802 unsigned long haddr = address & huge_page_mask(h); 3803 bool new_page = false; 3804 3805 /* 3806 * Currently, we are forced to kill the process in the event the 3807 * original mapper has unmapped pages from the child due to a failed 3808 * COW. Warn that such a situation has occurred as it may not be obvious 3809 */ 3810 if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) { 3811 pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n", 3812 current->pid); 3813 return ret; 3814 } 3815 3816 /* 3817 * Use page lock to guard against racing truncation 3818 * before we get page_table_lock. 3819 */ 3820 retry: 3821 page = find_lock_page(mapping, idx); 3822 if (!page) { 3823 size = i_size_read(mapping->host) >> huge_page_shift(h); 3824 if (idx >= size) 3825 goto out; 3826 3827 /* 3828 * Check for page in userfault range 3829 */ 3830 if (userfaultfd_missing(vma)) { 3831 u32 hash; 3832 struct vm_fault vmf = { 3833 .vma = vma, 3834 .address = haddr, 3835 .flags = flags, 3836 /* 3837 * Hard to debug if it ends up being 3838 * used by a callee that assumes 3839 * something about the other 3840 * uninitialized fields... same as in 3841 * memory.c 3842 */ 3843 }; 3844 3845 /* 3846 * hugetlb_fault_mutex must be dropped before 3847 * handling userfault. Reacquire after handling 3848 * fault to make calling code simpler. 3849 */ 3850 hash = hugetlb_fault_mutex_hash(h, mapping, idx, haddr); 3851 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 3852 ret = handle_userfault(&vmf, VM_UFFD_MISSING); 3853 mutex_lock(&hugetlb_fault_mutex_table[hash]); 3854 goto out; 3855 } 3856 3857 page = alloc_huge_page(vma, haddr, 0); 3858 if (IS_ERR(page)) { 3859 ret = vmf_error(PTR_ERR(page)); 3860 goto out; 3861 } 3862 clear_huge_page(page, address, pages_per_huge_page(h)); 3863 __SetPageUptodate(page); 3864 new_page = true; 3865 3866 if (vma->vm_flags & VM_MAYSHARE) { 3867 int err = huge_add_to_page_cache(page, mapping, idx); 3868 if (err) { 3869 put_page(page); 3870 if (err == -EEXIST) 3871 goto retry; 3872 goto out; 3873 } 3874 } else { 3875 lock_page(page); 3876 if (unlikely(anon_vma_prepare(vma))) { 3877 ret = VM_FAULT_OOM; 3878 goto backout_unlocked; 3879 } 3880 anon_rmap = 1; 3881 } 3882 } else { 3883 /* 3884 * If memory error occurs between mmap() and fault, some process 3885 * don't have hwpoisoned swap entry for errored virtual address. 3886 * So we need to block hugepage fault by PG_hwpoison bit check. 3887 */ 3888 if (unlikely(PageHWPoison(page))) { 3889 ret = VM_FAULT_HWPOISON | 3890 VM_FAULT_SET_HINDEX(hstate_index(h)); 3891 goto backout_unlocked; 3892 } 3893 } 3894 3895 /* 3896 * If we are going to COW a private mapping later, we examine the 3897 * pending reservations for this page now. This will ensure that 3898 * any allocations necessary to record that reservation occur outside 3899 * the spinlock. 
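	 *
	 * vma_needs_reservation() may have to allocate a struct file_region
	 * for the reserve map and can therefore sleep, which is why it must
	 * run before huge_pte_lock() is taken below.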
3900 */ 3901 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { 3902 if (vma_needs_reservation(h, vma, haddr) < 0) { 3903 ret = VM_FAULT_OOM; 3904 goto backout_unlocked; 3905 } 3906 /* Just decrements count, does not deallocate */ 3907 vma_end_reservation(h, vma, haddr); 3908 } 3909 3910 ptl = huge_pte_lock(h, mm, ptep); 3911 size = i_size_read(mapping->host) >> huge_page_shift(h); 3912 if (idx >= size) 3913 goto backout; 3914 3915 ret = 0; 3916 if (!huge_pte_none(huge_ptep_get(ptep))) 3917 goto backout; 3918 3919 if (anon_rmap) { 3920 ClearPagePrivate(page); 3921 hugepage_add_new_anon_rmap(page, vma, haddr); 3922 } else 3923 page_dup_rmap(page, true); 3924 new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE) 3925 && (vma->vm_flags & VM_SHARED))); 3926 set_huge_pte_at(mm, haddr, ptep, new_pte); 3927 3928 hugetlb_count_add(pages_per_huge_page(h), mm); 3929 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { 3930 /* Optimization, do the COW without a second fault */ 3931 ret = hugetlb_cow(mm, vma, address, ptep, page, ptl); 3932 } 3933 3934 spin_unlock(ptl); 3935 3936 /* 3937 * Only make newly allocated pages active. Existing pages found 3938 * in the pagecache could be !page_huge_active() if they have been 3939 * isolated for migration. 3940 */ 3941 if (new_page) 3942 set_page_huge_active(page); 3943 3944 unlock_page(page); 3945 out: 3946 return ret; 3947 3948 backout: 3949 spin_unlock(ptl); 3950 backout_unlocked: 3951 unlock_page(page); 3952 restore_reserve_on_error(h, vma, haddr, page); 3953 put_page(page); 3954 goto out; 3955 } 3956 3957 #ifdef CONFIG_SMP 3958 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping, 3959 pgoff_t idx, unsigned long address) 3960 { 3961 unsigned long key[2]; 3962 u32 hash; 3963 3964 key[0] = (unsigned long) mapping; 3965 key[1] = idx; 3966 3967 hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0); 3968 3969 return hash & (num_fault_mutexes - 1); 3970 } 3971 #else 3972 /* 3973 * For uniprocesor systems we always use a single mutex, so just 3974 * return 0 and avoid the hashing overhead. 3975 */ 3976 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping, 3977 pgoff_t idx, unsigned long address) 3978 { 3979 return 0; 3980 } 3981 #endif 3982 3983 vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, 3984 unsigned long address, unsigned int flags) 3985 { 3986 pte_t *ptep, entry; 3987 spinlock_t *ptl; 3988 vm_fault_t ret; 3989 u32 hash; 3990 pgoff_t idx; 3991 struct page *page = NULL; 3992 struct page *pagecache_page = NULL; 3993 struct hstate *h = hstate_vma(vma); 3994 struct address_space *mapping; 3995 int need_wait_lock = 0; 3996 unsigned long haddr = address & huge_page_mask(h); 3997 3998 ptep = huge_pte_offset(mm, haddr, huge_page_size(h)); 3999 if (ptep) { 4000 entry = huge_ptep_get(ptep); 4001 if (unlikely(is_hugetlb_entry_migration(entry))) { 4002 migration_entry_wait_huge(vma, mm, ptep); 4003 return 0; 4004 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) 4005 return VM_FAULT_HWPOISON_LARGE | 4006 VM_FAULT_SET_HINDEX(hstate_index(h)); 4007 } else { 4008 ptep = huge_pte_alloc(mm, haddr, huge_page_size(h)); 4009 if (!ptep) 4010 return VM_FAULT_OOM; 4011 } 4012 4013 mapping = vma->vm_file->f_mapping; 4014 idx = vma_hugecache_offset(h, vma, haddr); 4015 4016 /* 4017 * Serialize hugepage allocation and instantiation, so that we don't 4018 * get spurious allocation failures if two CPUs race to instantiate 4019 * the same page in the page cache. 
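	 *
	 * Faults on the same (mapping, index) hash to the same mutex in
	 * hugetlb_fault_mutex_table, so two CPUs faulting the same page
	 * serialize here while faults on unrelated pages usually proceed
	 * in parallel (UP builds use a single mutex, see above).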
4020 */ 4021 hash = hugetlb_fault_mutex_hash(h, mapping, idx, haddr); 4022 mutex_lock(&hugetlb_fault_mutex_table[hash]); 4023 4024 entry = huge_ptep_get(ptep); 4025 if (huge_pte_none(entry)) { 4026 ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags); 4027 goto out_mutex; 4028 } 4029 4030 ret = 0; 4031 4032 /* 4033 * entry could be a migration/hwpoison entry at this point, so this 4034 * check prevents the kernel from going below assuming that we have 4035 * a active hugepage in pagecache. This goto expects the 2nd page fault, 4036 * and is_hugetlb_entry_(migration|hwpoisoned) check will properly 4037 * handle it. 4038 */ 4039 if (!pte_present(entry)) 4040 goto out_mutex; 4041 4042 /* 4043 * If we are going to COW the mapping later, we examine the pending 4044 * reservations for this page now. This will ensure that any 4045 * allocations necessary to record that reservation occur outside the 4046 * spinlock. For private mappings, we also lookup the pagecache 4047 * page now as it is used to determine if a reservation has been 4048 * consumed. 4049 */ 4050 if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) { 4051 if (vma_needs_reservation(h, vma, haddr) < 0) { 4052 ret = VM_FAULT_OOM; 4053 goto out_mutex; 4054 } 4055 /* Just decrements count, does not deallocate */ 4056 vma_end_reservation(h, vma, haddr); 4057 4058 if (!(vma->vm_flags & VM_MAYSHARE)) 4059 pagecache_page = hugetlbfs_pagecache_page(h, 4060 vma, haddr); 4061 } 4062 4063 ptl = huge_pte_lock(h, mm, ptep); 4064 4065 /* Check for a racing update before calling hugetlb_cow */ 4066 if (unlikely(!pte_same(entry, huge_ptep_get(ptep)))) 4067 goto out_ptl; 4068 4069 /* 4070 * hugetlb_cow() requires page locks of pte_page(entry) and 4071 * pagecache_page, so here we need take the former one 4072 * when page != pagecache_page or !pagecache_page. 4073 */ 4074 page = pte_page(entry); 4075 if (page != pagecache_page) 4076 if (!trylock_page(page)) { 4077 need_wait_lock = 1; 4078 goto out_ptl; 4079 } 4080 4081 get_page(page); 4082 4083 if (flags & FAULT_FLAG_WRITE) { 4084 if (!huge_pte_write(entry)) { 4085 ret = hugetlb_cow(mm, vma, address, ptep, 4086 pagecache_page, ptl); 4087 goto out_put_page; 4088 } 4089 entry = huge_pte_mkdirty(entry); 4090 } 4091 entry = pte_mkyoung(entry); 4092 if (huge_ptep_set_access_flags(vma, haddr, ptep, entry, 4093 flags & FAULT_FLAG_WRITE)) 4094 update_mmu_cache(vma, haddr, ptep); 4095 out_put_page: 4096 if (page != pagecache_page) 4097 unlock_page(page); 4098 put_page(page); 4099 out_ptl: 4100 spin_unlock(ptl); 4101 4102 if (pagecache_page) { 4103 unlock_page(pagecache_page); 4104 put_page(pagecache_page); 4105 } 4106 out_mutex: 4107 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 4108 /* 4109 * Generally it's safe to hold refcount during waiting page lock. But 4110 * here we just wait to defer the next page fault to avoid busy loop and 4111 * the page is not used after unlocked before returning from the current 4112 * page fault. So we are safe from accessing freed page, even if we wait 4113 * here without taking refcount. 4114 */ 4115 if (need_wait_lock) 4116 wait_on_page_locked(page); 4117 return ret; 4118 } 4119 4120 /* 4121 * Used by userfaultfd UFFDIO_COPY. Based on mcopy_atomic_pte with 4122 * modifications for huge pages. 
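 *
 * The caller (__mcopy_atomic_hugetlb) holds the hugetlb fault mutex for
 * this (mapping, index); returning -ENOENT with *pagep set asks it to
 * redo the user copy outside mmap_sem and then call here again.
 *
 * Purely as an illustrative sketch (not part of this file, names are
 * placeholders), the userspace side drives this path roughly like:
 *
 *	struct uffdio_copy copy = {
 *		.dst  = dst_addr,	/* huge page aligned */
 *		.src  = src_addr,
 *		.len  = huge_page_size,
 *		.mode = 0,
 *	};
 *	ioctl(uffd, UFFDIO_COPY, &copy);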
4123 */ 4124 int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, 4125 pte_t *dst_pte, 4126 struct vm_area_struct *dst_vma, 4127 unsigned long dst_addr, 4128 unsigned long src_addr, 4129 struct page **pagep) 4130 { 4131 struct address_space *mapping; 4132 pgoff_t idx; 4133 unsigned long size; 4134 int vm_shared = dst_vma->vm_flags & VM_SHARED; 4135 struct hstate *h = hstate_vma(dst_vma); 4136 pte_t _dst_pte; 4137 spinlock_t *ptl; 4138 int ret; 4139 struct page *page; 4140 4141 if (!*pagep) { 4142 ret = -ENOMEM; 4143 page = alloc_huge_page(dst_vma, dst_addr, 0); 4144 if (IS_ERR(page)) 4145 goto out; 4146 4147 ret = copy_huge_page_from_user(page, 4148 (const void __user *) src_addr, 4149 pages_per_huge_page(h), false); 4150 4151 /* fallback to copy_from_user outside mmap_sem */ 4152 if (unlikely(ret)) { 4153 ret = -ENOENT; 4154 *pagep = page; 4155 /* don't free the page */ 4156 goto out; 4157 } 4158 } else { 4159 page = *pagep; 4160 *pagep = NULL; 4161 } 4162 4163 /* 4164 * The memory barrier inside __SetPageUptodate makes sure that 4165 * preceding stores to the page contents become visible before 4166 * the set_pte_at() write. 4167 */ 4168 __SetPageUptodate(page); 4169 4170 mapping = dst_vma->vm_file->f_mapping; 4171 idx = vma_hugecache_offset(h, dst_vma, dst_addr); 4172 4173 /* 4174 * If shared, add to page cache 4175 */ 4176 if (vm_shared) { 4177 size = i_size_read(mapping->host) >> huge_page_shift(h); 4178 ret = -EFAULT; 4179 if (idx >= size) 4180 goto out_release_nounlock; 4181 4182 /* 4183 * Serialization between remove_inode_hugepages() and 4184 * huge_add_to_page_cache() below happens through the 4185 * hugetlb_fault_mutex_table that here must be hold by 4186 * the caller. 4187 */ 4188 ret = huge_add_to_page_cache(page, mapping, idx); 4189 if (ret) 4190 goto out_release_nounlock; 4191 } 4192 4193 ptl = huge_pte_lockptr(h, dst_mm, dst_pte); 4194 spin_lock(ptl); 4195 4196 /* 4197 * Recheck the i_size after holding PT lock to make sure not 4198 * to leave any page mapped (as page_mapped()) beyond the end 4199 * of the i_size (remove_inode_hugepages() is strict about 4200 * enforcing that). If we bail out here, we'll also leave a 4201 * page in the radix tree in the vm_shared case beyond the end 4202 * of the i_size, but remove_inode_hugepages() will take care 4203 * of it as soon as we drop the hugetlb_fault_mutex_table. 
4204 */ 4205 size = i_size_read(mapping->host) >> huge_page_shift(h); 4206 ret = -EFAULT; 4207 if (idx >= size) 4208 goto out_release_unlock; 4209 4210 ret = -EEXIST; 4211 if (!huge_pte_none(huge_ptep_get(dst_pte))) 4212 goto out_release_unlock; 4213 4214 if (vm_shared) { 4215 page_dup_rmap(page, true); 4216 } else { 4217 ClearPagePrivate(page); 4218 hugepage_add_new_anon_rmap(page, dst_vma, dst_addr); 4219 } 4220 4221 _dst_pte = make_huge_pte(dst_vma, page, dst_vma->vm_flags & VM_WRITE); 4222 if (dst_vma->vm_flags & VM_WRITE) 4223 _dst_pte = huge_pte_mkdirty(_dst_pte); 4224 _dst_pte = pte_mkyoung(_dst_pte); 4225 4226 set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte); 4227 4228 (void)huge_ptep_set_access_flags(dst_vma, dst_addr, dst_pte, _dst_pte, 4229 dst_vma->vm_flags & VM_WRITE); 4230 hugetlb_count_add(pages_per_huge_page(h), dst_mm); 4231 4232 /* No need to invalidate - it was non-present before */ 4233 update_mmu_cache(dst_vma, dst_addr, dst_pte); 4234 4235 spin_unlock(ptl); 4236 set_page_huge_active(page); 4237 if (vm_shared) 4238 unlock_page(page); 4239 ret = 0; 4240 out: 4241 return ret; 4242 out_release_unlock: 4243 spin_unlock(ptl); 4244 if (vm_shared) 4245 unlock_page(page); 4246 out_release_nounlock: 4247 put_page(page); 4248 goto out; 4249 } 4250 4251 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, 4252 struct page **pages, struct vm_area_struct **vmas, 4253 unsigned long *position, unsigned long *nr_pages, 4254 long i, unsigned int flags, int *nonblocking) 4255 { 4256 unsigned long pfn_offset; 4257 unsigned long vaddr = *position; 4258 unsigned long remainder = *nr_pages; 4259 struct hstate *h = hstate_vma(vma); 4260 int err = -EFAULT; 4261 4262 while (vaddr < vma->vm_end && remainder) { 4263 pte_t *pte; 4264 spinlock_t *ptl = NULL; 4265 int absent; 4266 struct page *page; 4267 4268 /* 4269 * If we have a pending SIGKILL, don't keep faulting pages and 4270 * potentially allocating memory. 4271 */ 4272 if (fatal_signal_pending(current)) { 4273 remainder = 0; 4274 break; 4275 } 4276 4277 /* 4278 * Some archs (sparc64, sh*) have multiple pte_ts to 4279 * each hugepage. We have to make sure we get the 4280 * first, for the page indexing below to work. 4281 * 4282 * Note that page table lock is not held when pte is null. 4283 */ 4284 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h), 4285 huge_page_size(h)); 4286 if (pte) 4287 ptl = huge_pte_lock(h, mm, pte); 4288 absent = !pte || huge_pte_none(huge_ptep_get(pte)); 4289 4290 /* 4291 * When coredumping, it suits get_dump_page if we just return 4292 * an error where there's an empty slot with no huge pagecache 4293 * to back it. This way, we avoid allocating a hugepage, and 4294 * the sparse dumpfile avoids allocating disk blocks, but its 4295 * huge holes still show up with zeroes where they need to be. 4296 */ 4297 if (absent && (flags & FOLL_DUMP) && 4298 !hugetlbfs_pagecache_present(h, vma, vaddr)) { 4299 if (pte) 4300 spin_unlock(ptl); 4301 remainder = 0; 4302 break; 4303 } 4304 4305 /* 4306 * We need call hugetlb_fault for both hugepages under migration 4307 * (in which case hugetlb_fault waits for the migration,) and 4308 * hwpoisoned hugepages (in which case we need to prevent the 4309 * caller from accessing to them.) In order to do this, we use 4310 * here is_swap_pte instead of is_hugetlb_entry_migration and 4311 * is_hugetlb_entry_hwpoisoned. This is because it simply covers 4312 * both cases, and because we can't follow correct pages 4313 * directly from any kind of swap entries. 
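		 *
		 * hugetlb_fault() sorts the cases out for us: it waits for a
		 * migration entry to be resolved, returns
		 * VM_FAULT_HWPOISON_LARGE for a poisoned entry, and otherwise
		 * faults the page in (or COWs it for a write to a read-only
		 * PTE).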
4314 */ 4315 if (absent || is_swap_pte(huge_ptep_get(pte)) || 4316 ((flags & FOLL_WRITE) && 4317 !huge_pte_write(huge_ptep_get(pte)))) { 4318 vm_fault_t ret; 4319 unsigned int fault_flags = 0; 4320 4321 if (pte) 4322 spin_unlock(ptl); 4323 if (flags & FOLL_WRITE) 4324 fault_flags |= FAULT_FLAG_WRITE; 4325 if (nonblocking) 4326 fault_flags |= FAULT_FLAG_ALLOW_RETRY; 4327 if (flags & FOLL_NOWAIT) 4328 fault_flags |= FAULT_FLAG_ALLOW_RETRY | 4329 FAULT_FLAG_RETRY_NOWAIT; 4330 if (flags & FOLL_TRIED) { 4331 VM_WARN_ON_ONCE(fault_flags & 4332 FAULT_FLAG_ALLOW_RETRY); 4333 fault_flags |= FAULT_FLAG_TRIED; 4334 } 4335 ret = hugetlb_fault(mm, vma, vaddr, fault_flags); 4336 if (ret & VM_FAULT_ERROR) { 4337 err = vm_fault_to_errno(ret, flags); 4338 remainder = 0; 4339 break; 4340 } 4341 if (ret & VM_FAULT_RETRY) { 4342 if (nonblocking && 4343 !(fault_flags & FAULT_FLAG_RETRY_NOWAIT)) 4344 *nonblocking = 0; 4345 *nr_pages = 0; 4346 /* 4347 * VM_FAULT_RETRY must not return an 4348 * error, it will return zero 4349 * instead. 4350 * 4351 * No need to update "position" as the 4352 * caller will not check it after 4353 * *nr_pages is set to 0. 4354 */ 4355 return i; 4356 } 4357 continue; 4358 } 4359 4360 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT; 4361 page = pte_page(huge_ptep_get(pte)); 4362 4363 /* 4364 * Instead of doing 'try_get_page()' below in the same_page 4365 * loop, just check the count once here. 4366 */ 4367 if (unlikely(page_count(page) <= 0)) { 4368 if (pages) { 4369 spin_unlock(ptl); 4370 remainder = 0; 4371 err = -ENOMEM; 4372 break; 4373 } 4374 } 4375 same_page: 4376 if (pages) { 4377 pages[i] = mem_map_offset(page, pfn_offset); 4378 get_page(pages[i]); 4379 } 4380 4381 if (vmas) 4382 vmas[i] = vma; 4383 4384 vaddr += PAGE_SIZE; 4385 ++pfn_offset; 4386 --remainder; 4387 ++i; 4388 if (vaddr < vma->vm_end && remainder && 4389 pfn_offset < pages_per_huge_page(h)) { 4390 /* 4391 * We use pfn_offset to avoid touching the pageframes 4392 * of this compound page. 4393 */ 4394 goto same_page; 4395 } 4396 spin_unlock(ptl); 4397 } 4398 *nr_pages = remainder; 4399 /* 4400 * setting position is actually required only if remainder is 4401 * not zero but it's faster not to add a "if (remainder)" 4402 * branch. 4403 */ 4404 *position = vaddr; 4405 4406 return i ? i : err; 4407 } 4408 4409 #ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE 4410 /* 4411 * ARCHes with special requirements for evicting HUGETLB backing TLB entries can 4412 * implement this. 4413 */ 4414 #define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) 4415 #endif 4416 4417 unsigned long hugetlb_change_protection(struct vm_area_struct *vma, 4418 unsigned long address, unsigned long end, pgprot_t newprot) 4419 { 4420 struct mm_struct *mm = vma->vm_mm; 4421 unsigned long start = address; 4422 pte_t *ptep; 4423 pte_t pte; 4424 struct hstate *h = hstate_vma(vma); 4425 unsigned long pages = 0; 4426 bool shared_pmd = false; 4427 struct mmu_notifier_range range; 4428 4429 /* 4430 * In the case of shared PMDs, the area to flush could be beyond 4431 * start/end. Set range.start/range.end to cover the maximum possible 4432 * range if PMD sharing is possible. 
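	 *
	 * huge_pmd_unshare() only clears this mm's PUD entry; the shared
	 * PMD page may still be in use by other processes, which is why
	 * the flush below must cover the whole possible sharing range
	 * whenever shared_pmd is set.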
4433 */ 4434 mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA, 4435 0, vma, mm, start, end); 4436 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); 4437 4438 BUG_ON(address >= end); 4439 flush_cache_range(vma, range.start, range.end); 4440 4441 mmu_notifier_invalidate_range_start(&range); 4442 i_mmap_lock_write(vma->vm_file->f_mapping); 4443 for (; address < end; address += huge_page_size(h)) { 4444 spinlock_t *ptl; 4445 ptep = huge_pte_offset(mm, address, huge_page_size(h)); 4446 if (!ptep) 4447 continue; 4448 ptl = huge_pte_lock(h, mm, ptep); 4449 if (huge_pmd_unshare(mm, &address, ptep)) { 4450 pages++; 4451 spin_unlock(ptl); 4452 shared_pmd = true; 4453 continue; 4454 } 4455 pte = huge_ptep_get(ptep); 4456 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) { 4457 spin_unlock(ptl); 4458 continue; 4459 } 4460 if (unlikely(is_hugetlb_entry_migration(pte))) { 4461 swp_entry_t entry = pte_to_swp_entry(pte); 4462 4463 if (is_write_migration_entry(entry)) { 4464 pte_t newpte; 4465 4466 make_migration_entry_read(&entry); 4467 newpte = swp_entry_to_pte(entry); 4468 set_huge_swap_pte_at(mm, address, ptep, 4469 newpte, huge_page_size(h)); 4470 pages++; 4471 } 4472 spin_unlock(ptl); 4473 continue; 4474 } 4475 if (!huge_pte_none(pte)) { 4476 pte_t old_pte; 4477 4478 old_pte = huge_ptep_modify_prot_start(vma, address, ptep); 4479 pte = pte_mkhuge(huge_pte_modify(old_pte, newprot)); 4480 pte = arch_make_huge_pte(pte, vma, NULL, 0); 4481 huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte); 4482 pages++; 4483 } 4484 spin_unlock(ptl); 4485 } 4486 /* 4487 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare 4488 * may have cleared our pud entry and done put_page on the page table: 4489 * once we release i_mmap_rwsem, another task can do the final put_page 4490 * and that page table be reused and filled with junk. If we actually 4491 * did unshare a page of pmds, flush the range corresponding to the pud. 4492 */ 4493 if (shared_pmd) 4494 flush_hugetlb_tlb_range(vma, range.start, range.end); 4495 else 4496 flush_hugetlb_tlb_range(vma, start, end); 4497 /* 4498 * No need to call mmu_notifier_invalidate_range() we are downgrading 4499 * page table protection not changing it to point to a new page. 4500 * 4501 * See Documentation/vm/mmu_notifier.rst 4502 */ 4503 i_mmap_unlock_write(vma->vm_file->f_mapping); 4504 mmu_notifier_invalidate_range_end(&range); 4505 4506 return pages << h->order; 4507 } 4508 4509 int hugetlb_reserve_pages(struct inode *inode, 4510 long from, long to, 4511 struct vm_area_struct *vma, 4512 vm_flags_t vm_flags) 4513 { 4514 long ret, chg; 4515 struct hstate *h = hstate_inode(inode); 4516 struct hugepage_subpool *spool = subpool_inode(inode); 4517 struct resv_map *resv_map; 4518 long gbl_reserve; 4519 4520 /* This should never happen */ 4521 if (from > to) { 4522 VM_WARN(1, "%s called with a negative range\n", __func__); 4523 return -EINVAL; 4524 } 4525 4526 /* 4527 * Only apply hugepage reservation if asked. At fault time, an 4528 * attempt will be made for VM_NORESERVE to allocate a page 4529 * without using reserves 4530 */ 4531 if (vm_flags & VM_NORESERVE) 4532 return 0; 4533 4534 /* 4535 * Shared mappings base their reservation on the number of pages that 4536 * are already allocated on behalf of the file. Private mappings need 4537 * to reserve the full area even if read-only as mprotect() may be 4538 * called to make the mapping read-write. 
Assume !vma is a shm mapping 4539 */ 4540 if (!vma || vma->vm_flags & VM_MAYSHARE) { 4541 /* 4542 * resv_map can not be NULL as hugetlb_reserve_pages is only 4543 * called for inodes for which resv_maps were created (see 4544 * hugetlbfs_get_inode). 4545 */ 4546 resv_map = inode_resv_map(inode); 4547 4548 chg = region_chg(resv_map, from, to); 4549 4550 } else { 4551 resv_map = resv_map_alloc(); 4552 if (!resv_map) 4553 return -ENOMEM; 4554 4555 chg = to - from; 4556 4557 set_vma_resv_map(vma, resv_map); 4558 set_vma_resv_flags(vma, HPAGE_RESV_OWNER); 4559 } 4560 4561 if (chg < 0) { 4562 ret = chg; 4563 goto out_err; 4564 } 4565 4566 /* 4567 * There must be enough pages in the subpool for the mapping. If 4568 * the subpool has a minimum size, there may be some global 4569 * reservations already in place (gbl_reserve). 4570 */ 4571 gbl_reserve = hugepage_subpool_get_pages(spool, chg); 4572 if (gbl_reserve < 0) { 4573 ret = -ENOSPC; 4574 goto out_err; 4575 } 4576 4577 /* 4578 * Check enough hugepages are available for the reservation. 4579 * Hand the pages back to the subpool if there are not 4580 */ 4581 ret = hugetlb_acct_memory(h, gbl_reserve); 4582 if (ret < 0) { 4583 /* put back original number of pages, chg */ 4584 (void)hugepage_subpool_put_pages(spool, chg); 4585 goto out_err; 4586 } 4587 4588 /* 4589 * Account for the reservations made. Shared mappings record regions 4590 * that have reservations as they are shared by multiple VMAs. 4591 * When the last VMA disappears, the region map says how much 4592 * the reservation was and the page cache tells how much of 4593 * the reservation was consumed. Private mappings are per-VMA and 4594 * only the consumed reservations are tracked. When the VMA 4595 * disappears, the original reservation is the VMA size and the 4596 * consumed reservations are stored in the map. Hence, nothing 4597 * else has to be done for private mappings here 4598 */ 4599 if (!vma || vma->vm_flags & VM_MAYSHARE) { 4600 long add = region_add(resv_map, from, to); 4601 4602 if (unlikely(chg > add)) { 4603 /* 4604 * pages in this range were added to the reserve 4605 * map between region_chg and region_add. This 4606 * indicates a race with alloc_huge_page. Adjust 4607 * the subpool and reserve counts modified above 4608 * based on the difference. 4609 */ 4610 long rsv_adjust; 4611 4612 rsv_adjust = hugepage_subpool_put_pages(spool, 4613 chg - add); 4614 hugetlb_acct_memory(h, -rsv_adjust); 4615 } 4616 } 4617 return 0; 4618 out_err: 4619 if (!vma || vma->vm_flags & VM_MAYSHARE) 4620 /* Don't call region_abort if region_chg failed */ 4621 if (chg >= 0) 4622 region_abort(resv_map, from, to); 4623 if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) 4624 kref_put(&resv_map->refs, resv_map_release); 4625 return ret; 4626 } 4627 4628 long hugetlb_unreserve_pages(struct inode *inode, long start, long end, 4629 long freed) 4630 { 4631 struct hstate *h = hstate_inode(inode); 4632 struct resv_map *resv_map = inode_resv_map(inode); 4633 long chg = 0; 4634 struct hugepage_subpool *spool = subpool_inode(inode); 4635 long gbl_reserve; 4636 4637 /* 4638 * Since this routine can be called in the evict inode path for all 4639 * hugetlbfs inodes, resv_map could be NULL. 4640 */ 4641 if (resv_map) { 4642 chg = region_del(resv_map, start, end); 4643 /* 4644 * region_del() can fail in the rare case where a region 4645 * must be split and another region descriptor can not be 4646 * allocated. If end == LONG_MAX, it will not fail. 
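		 *
		 * region_del() only needs to allocate when an existing region
		 * has to be split in two; a truncation to LONG_MAX never
		 * splits a region, so that call cannot fail.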
4647 */ 4648 if (chg < 0) 4649 return chg; 4650 } 4651 4652 spin_lock(&inode->i_lock); 4653 inode->i_blocks -= (blocks_per_huge_page(h) * freed); 4654 spin_unlock(&inode->i_lock); 4655 4656 /* 4657 * If the subpool has a minimum size, the number of global 4658 * reservations to be released may be adjusted. 4659 */ 4660 gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed)); 4661 hugetlb_acct_memory(h, -gbl_reserve); 4662 4663 return 0; 4664 } 4665 4666 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE 4667 static unsigned long page_table_shareable(struct vm_area_struct *svma, 4668 struct vm_area_struct *vma, 4669 unsigned long addr, pgoff_t idx) 4670 { 4671 unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) + 4672 svma->vm_start; 4673 unsigned long sbase = saddr & PUD_MASK; 4674 unsigned long s_end = sbase + PUD_SIZE; 4675 4676 /* Allow segments to share if only one is marked locked */ 4677 unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK; 4678 unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK; 4679 4680 /* 4681 * match the virtual addresses, permission and the alignment of the 4682 * page table page. 4683 */ 4684 if (pmd_index(addr) != pmd_index(saddr) || 4685 vm_flags != svm_flags || 4686 sbase < svma->vm_start || svma->vm_end < s_end) 4687 return 0; 4688 4689 return saddr; 4690 } 4691 4692 static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr) 4693 { 4694 unsigned long base = addr & PUD_MASK; 4695 unsigned long end = base + PUD_SIZE; 4696 4697 /* 4698 * check on proper vm_flags and page table alignment 4699 */ 4700 if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end)) 4701 return true; 4702 return false; 4703 } 4704 4705 /* 4706 * Determine if start,end range within vma could be mapped by shared pmd. 4707 * If yes, adjust start and end to cover range associated with possible 4708 * shared pmd mappings. 4709 */ 4710 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, 4711 unsigned long *start, unsigned long *end) 4712 { 4713 unsigned long check_addr = *start; 4714 4715 if (!(vma->vm_flags & VM_MAYSHARE)) 4716 return; 4717 4718 for (check_addr = *start; check_addr < *end; check_addr += PUD_SIZE) { 4719 unsigned long a_start = check_addr & PUD_MASK; 4720 unsigned long a_end = a_start + PUD_SIZE; 4721 4722 /* 4723 * If sharing is possible, adjust start/end if necessary. 4724 */ 4725 if (range_in_vma(vma, a_start, a_end)) { 4726 if (a_start < *start) 4727 *start = a_start; 4728 if (a_end > *end) 4729 *end = a_end; 4730 } 4731 } 4732 } 4733 4734 /* 4735 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc() 4736 * and returns the corresponding pte. While this is not necessary for the 4737 * !shared pmd case because we can allocate the pmd later as well, it makes the 4738 * code much cleaner. pmd allocation is essential for the shared case because 4739 * pud has to be populated inside the same i_mmap_rwsem section - otherwise 4740 * racing tasks could either miss the sharing (see huge_pte_offset) or select a 4741 * bad pmd for sharing. 
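 *
 * The shared page table page itself is reference counted: each mm that
 * maps it takes a reference (get_page() on the page table page below),
 * and huge_pmd_unshare() drops that reference again when one of the
 * sharers goes away.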
4742 */ 4743 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) 4744 { 4745 struct vm_area_struct *vma = find_vma(mm, addr); 4746 struct address_space *mapping = vma->vm_file->f_mapping; 4747 pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + 4748 vma->vm_pgoff; 4749 struct vm_area_struct *svma; 4750 unsigned long saddr; 4751 pte_t *spte = NULL; 4752 pte_t *pte; 4753 spinlock_t *ptl; 4754 4755 if (!vma_shareable(vma, addr)) 4756 return (pte_t *)pmd_alloc(mm, pud, addr); 4757 4758 i_mmap_lock_write(mapping); 4759 vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) { 4760 if (svma == vma) 4761 continue; 4762 4763 saddr = page_table_shareable(svma, vma, addr, idx); 4764 if (saddr) { 4765 spte = huge_pte_offset(svma->vm_mm, saddr, 4766 vma_mmu_pagesize(svma)); 4767 if (spte) { 4768 get_page(virt_to_page(spte)); 4769 break; 4770 } 4771 } 4772 } 4773 4774 if (!spte) 4775 goto out; 4776 4777 ptl = huge_pte_lock(hstate_vma(vma), mm, spte); 4778 if (pud_none(*pud)) { 4779 pud_populate(mm, pud, 4780 (pmd_t *)((unsigned long)spte & PAGE_MASK)); 4781 mm_inc_nr_pmds(mm); 4782 } else { 4783 put_page(virt_to_page(spte)); 4784 } 4785 spin_unlock(ptl); 4786 out: 4787 pte = (pte_t *)pmd_alloc(mm, pud, addr); 4788 i_mmap_unlock_write(mapping); 4789 return pte; 4790 } 4791 4792 /* 4793 * unmap huge page backed by shared pte. 4794 * 4795 * Hugetlb pte page is ref counted at the time of mapping. If pte is shared 4796 * indicated by page_count > 1, unmap is achieved by clearing pud and 4797 * decrementing the ref count. If count == 1, the pte page is not shared. 4798 * 4799 * called with page table lock held. 4800 * 4801 * returns: 1 successfully unmapped a shared pte page 4802 * 0 the underlying pte page is not shared, or it is the last user 4803 */ 4804 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) 4805 { 4806 pgd_t *pgd = pgd_offset(mm, *addr); 4807 p4d_t *p4d = p4d_offset(pgd, *addr); 4808 pud_t *pud = pud_offset(p4d, *addr); 4809 4810 BUG_ON(page_count(virt_to_page(ptep)) == 0); 4811 if (page_count(virt_to_page(ptep)) == 1) 4812 return 0; 4813 4814 pud_clear(pud); 4815 put_page(virt_to_page(ptep)); 4816 mm_dec_nr_pmds(mm); 4817 *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE; 4818 return 1; 4819 } 4820 #define want_pmd_share() (1) 4821 #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */ 4822 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) 4823 { 4824 return NULL; 4825 } 4826 4827 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) 4828 { 4829 return 0; 4830 } 4831 4832 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, 4833 unsigned long *start, unsigned long *end) 4834 { 4835 } 4836 #define want_pmd_share() (0) 4837 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */ 4838 4839 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB 4840 pte_t *huge_pte_alloc(struct mm_struct *mm, 4841 unsigned long addr, unsigned long sz) 4842 { 4843 pgd_t *pgd; 4844 p4d_t *p4d; 4845 pud_t *pud; 4846 pte_t *pte = NULL; 4847 4848 pgd = pgd_offset(mm, addr); 4849 p4d = p4d_alloc(mm, pgd, addr); 4850 if (!p4d) 4851 return NULL; 4852 pud = pud_alloc(mm, p4d, addr); 4853 if (pud) { 4854 if (sz == PUD_SIZE) { 4855 pte = (pte_t *)pud; 4856 } else { 4857 BUG_ON(sz != PMD_SIZE); 4858 if (want_pmd_share() && pud_none(*pud)) 4859 pte = huge_pmd_share(mm, addr, pud); 4860 else 4861 pte = (pte_t *)pmd_alloc(mm, pud, addr); 4862 } 4863 } 4864 BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte)); 4865 4866 return pte; 
4867 } 4868 4869 /* 4870 * huge_pte_offset() - Walk the page table to resolve the hugepage 4871 * entry at address @addr 4872 * 4873 * Return: Pointer to page table or swap entry (PUD or PMD) for 4874 * address @addr, or NULL if a p*d_none() entry is encountered and the 4875 * size @sz doesn't match the hugepage size at this level of the page 4876 * table. 4877 */ 4878 pte_t *huge_pte_offset(struct mm_struct *mm, 4879 unsigned long addr, unsigned long sz) 4880 { 4881 pgd_t *pgd; 4882 p4d_t *p4d; 4883 pud_t *pud; 4884 pmd_t *pmd; 4885 4886 pgd = pgd_offset(mm, addr); 4887 if (!pgd_present(*pgd)) 4888 return NULL; 4889 p4d = p4d_offset(pgd, addr); 4890 if (!p4d_present(*p4d)) 4891 return NULL; 4892 4893 pud = pud_offset(p4d, addr); 4894 if (sz != PUD_SIZE && pud_none(*pud)) 4895 return NULL; 4896 /* hugepage or swap? */ 4897 if (pud_huge(*pud) || !pud_present(*pud)) 4898 return (pte_t *)pud; 4899 4900 pmd = pmd_offset(pud, addr); 4901 if (sz != PMD_SIZE && pmd_none(*pmd)) 4902 return NULL; 4903 /* hugepage or swap? */ 4904 if (pmd_huge(*pmd) || !pmd_present(*pmd)) 4905 return (pte_t *)pmd; 4906 4907 return NULL; 4908 } 4909 4910 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */ 4911 4912 /* 4913 * These functions are overwritable if your architecture needs its own 4914 * behavior. 4915 */ 4916 struct page * __weak 4917 follow_huge_addr(struct mm_struct *mm, unsigned long address, 4918 int write) 4919 { 4920 return ERR_PTR(-EINVAL); 4921 } 4922 4923 struct page * __weak 4924 follow_huge_pd(struct vm_area_struct *vma, 4925 unsigned long address, hugepd_t hpd, int flags, int pdshift) 4926 { 4927 WARN(1, "hugepd follow called with no support for hugepage directory format\n"); 4928 return NULL; 4929 } 4930 4931 struct page * __weak 4932 follow_huge_pmd(struct mm_struct *mm, unsigned long address, 4933 pmd_t *pmd, int flags) 4934 { 4935 struct page *page = NULL; 4936 spinlock_t *ptl; 4937 pte_t pte; 4938 retry: 4939 ptl = pmd_lockptr(mm, pmd); 4940 spin_lock(ptl); 4941 /* 4942 * make sure that the address range covered by this pmd is not 4943 * unmapped from other threads. 4944 */ 4945 if (!pmd_huge(*pmd)) 4946 goto out; 4947 pte = huge_ptep_get((pte_t *)pmd); 4948 if (pte_present(pte)) { 4949 page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT); 4950 if (flags & FOLL_GET) 4951 get_page(page); 4952 } else { 4953 if (is_hugetlb_entry_migration(pte)) { 4954 spin_unlock(ptl); 4955 __migration_entry_wait(mm, (pte_t *)pmd, ptl); 4956 goto retry; 4957 } 4958 /* 4959 * hwpoisoned entry is treated as no_page_table in 4960 * follow_page_mask(). 
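		 *
		 * i.e. page stays NULL and we simply fall through to the
		 * unlock and return below.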
4961 		 */
4962 	}
4963 out:
4964 	spin_unlock(ptl);
4965 	return page;
4966 }
4967 
4968 struct page * __weak
4969 follow_huge_pud(struct mm_struct *mm, unsigned long address,
4970 		pud_t *pud, int flags)
4971 {
4972 	if (flags & FOLL_GET)
4973 		return NULL;
4974 
4975 	return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
4976 }
4977 
4978 struct page * __weak
4979 follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags)
4980 {
4981 	if (flags & FOLL_GET)
4982 		return NULL;
4983 
4984 	return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT);
4985 }
4986 
4987 bool isolate_huge_page(struct page *page, struct list_head *list)
4988 {
4989 	bool ret = true;
4990 
4991 	VM_BUG_ON_PAGE(!PageHead(page), page);
4992 	spin_lock(&hugetlb_lock);
4993 	if (!page_huge_active(page) || !get_page_unless_zero(page)) {
4994 		ret = false;
4995 		goto unlock;
4996 	}
4997 	clear_page_huge_active(page);
4998 	list_move_tail(&page->lru, list);
4999 unlock:
5000 	spin_unlock(&hugetlb_lock);
5001 	return ret;
5002 }
5003 
5004 void putback_active_hugepage(struct page *page)
5005 {
5006 	VM_BUG_ON_PAGE(!PageHead(page), page);
5007 	spin_lock(&hugetlb_lock);
5008 	set_page_huge_active(page);
5009 	list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
5010 	spin_unlock(&hugetlb_lock);
5011 	put_page(page);
5012 }
5013 
5014 void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
5015 {
5016 	struct hstate *h = page_hstate(oldpage);
5017 
5018 	hugetlb_cgroup_migrate(oldpage, newpage);
5019 	set_page_owner_migrate_reason(newpage, reason);
5020 
5021 	/*
5022 	 * Transfer the temporary state of the new huge page. This is the
5023 	 * reverse of the other transitions because the new page is going
5024 	 * to be final while the old one will be freed, so the old page
5025 	 * takes over the temporary status.
5026 	 *
5027 	 * Also note that we have to transfer the per-node surplus state
5028 	 * here as well, otherwise the global surplus count will not match
5029 	 * the per-node counts.
5030 	 */
5031 	if (PageHugeTemporary(newpage)) {
5032 		int old_nid = page_to_nid(oldpage);
5033 		int new_nid = page_to_nid(newpage);
5034 
5035 		SetPageHugeTemporary(oldpage);
5036 		ClearPageHugeTemporary(newpage);
5037 
5038 		spin_lock(&hugetlb_lock);
5039 		if (h->surplus_huge_pages_node[old_nid]) {
5040 			h->surplus_huge_pages_node[old_nid]--;
5041 			h->surplus_huge_pages_node[new_nid]++;
5042 		}
5043 		spin_unlock(&hugetlb_lock);
5044 	}
5045 }
5046 
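
/*
 * Illustrative usage sketch only (not part of this file, helper and
 * argument names are placeholders): migration callers pair
 * isolate_huge_page() and putback_active_hugepage() roughly as below,
 * with putback_movable_pages() routing hugetlb pages back through
 * putback_active_hugepage() on failure:
 *
 *	LIST_HEAD(pagelist);
 *
 *	if (isolate_huge_page(page, &pagelist)) {
 *		err = migrate_pages(&pagelist, new_page_fn, NULL, private,
 *				    MIGRATE_SYNC, MR_MEMORY_FAILURE);
 *		if (err)
 *			putback_movable_pages(&pagelist);
 *	}
 */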