1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Generic hugetlb support. 4 * (C) Nadia Yvette Chambers, April 2004 5 */ 6 #include <linux/list.h> 7 #include <linux/init.h> 8 #include <linux/mm.h> 9 #include <linux/seq_file.h> 10 #include <linux/sysctl.h> 11 #include <linux/highmem.h> 12 #include <linux/mmu_notifier.h> 13 #include <linux/nodemask.h> 14 #include <linux/pagemap.h> 15 #include <linux/mempolicy.h> 16 #include <linux/compiler.h> 17 #include <linux/cpuset.h> 18 #include <linux/mutex.h> 19 #include <linux/memblock.h> 20 #include <linux/sysfs.h> 21 #include <linux/slab.h> 22 #include <linux/sched/mm.h> 23 #include <linux/mmdebug.h> 24 #include <linux/sched/signal.h> 25 #include <linux/rmap.h> 26 #include <linux/string_helpers.h> 27 #include <linux/swap.h> 28 #include <linux/swapops.h> 29 #include <linux/jhash.h> 30 #include <linux/numa.h> 31 #include <linux/llist.h> 32 #include <linux/cma.h> 33 #include <linux/migrate.h> 34 #include <linux/nospec.h> 35 #include <linux/delayacct.h> 36 #include <linux/memory.h> 37 #include <linux/mm_inline.h> 38 39 #include <asm/page.h> 40 #include <asm/pgalloc.h> 41 #include <asm/tlb.h> 42 43 #include <linux/io.h> 44 #include <linux/hugetlb.h> 45 #include <linux/hugetlb_cgroup.h> 46 #include <linux/node.h> 47 #include <linux/page_owner.h> 48 #include "internal.h" 49 #include "hugetlb_vmemmap.h" 50 51 int hugetlb_max_hstate __read_mostly; 52 unsigned int default_hstate_idx; 53 struct hstate hstates[HUGE_MAX_HSTATE]; 54 55 #ifdef CONFIG_CMA 56 static struct cma *hugetlb_cma[MAX_NUMNODES]; 57 static unsigned long hugetlb_cma_size_in_node[MAX_NUMNODES] __initdata; 58 static bool hugetlb_cma_folio(struct folio *folio, unsigned int order) 59 { 60 return cma_pages_valid(hugetlb_cma[folio_nid(folio)], &folio->page, 61 1 << order); 62 } 63 #else 64 static bool hugetlb_cma_folio(struct folio *folio, unsigned int order) 65 { 66 return false; 67 } 68 #endif 69 static unsigned long hugetlb_cma_size __initdata; 70 71 __initdata LIST_HEAD(huge_boot_pages); 72 73 /* for command line parsing */ 74 static struct hstate * __initdata parsed_hstate; 75 static unsigned long __initdata default_hstate_max_huge_pages; 76 static bool __initdata parsed_valid_hugepagesz = true; 77 static bool __initdata parsed_default_hugepagesz; 78 static unsigned int default_hugepages_in_node[MAX_NUMNODES] __initdata; 79 80 /* 81 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages, 82 * free_huge_pages, and surplus_huge_pages. 83 */ 84 DEFINE_SPINLOCK(hugetlb_lock); 85 86 /* 87 * Serializes faults on the same logical page. This is used to 88 * prevent spurious OOMs when the hugepage pool is fully utilized. 
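 *
 * Illustrative sketch (not code from this section): fault and truncation
 * paths pick one of these mutexes by hashing the mapping and the file
 * offset of the page being faulted, roughly:
 *
 *	u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
 *
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	... look up or allocate the page for idx ...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);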
89 */ 90 static int num_fault_mutexes; 91 struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp; 92 93 /* Forward declaration */ 94 static int hugetlb_acct_memory(struct hstate *h, long delta); 95 static void hugetlb_vma_lock_free(struct vm_area_struct *vma); 96 static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma); 97 static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma); 98 static void hugetlb_unshare_pmds(struct vm_area_struct *vma, 99 unsigned long start, unsigned long end); 100 101 static inline bool subpool_is_free(struct hugepage_subpool *spool) 102 { 103 if (spool->count) 104 return false; 105 if (spool->max_hpages != -1) 106 return spool->used_hpages == 0; 107 if (spool->min_hpages != -1) 108 return spool->rsv_hpages == spool->min_hpages; 109 110 return true; 111 } 112 113 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool, 114 unsigned long irq_flags) 115 { 116 spin_unlock_irqrestore(&spool->lock, irq_flags); 117 118 /* If no pages are used, and no other handles to the subpool 119 * remain, give up any reservations based on minimum size and 120 * free the subpool */ 121 if (subpool_is_free(spool)) { 122 if (spool->min_hpages != -1) 123 hugetlb_acct_memory(spool->hstate, 124 -spool->min_hpages); 125 kfree(spool); 126 } 127 } 128 129 struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages, 130 long min_hpages) 131 { 132 struct hugepage_subpool *spool; 133 134 spool = kzalloc(sizeof(*spool), GFP_KERNEL); 135 if (!spool) 136 return NULL; 137 138 spin_lock_init(&spool->lock); 139 spool->count = 1; 140 spool->max_hpages = max_hpages; 141 spool->hstate = h; 142 spool->min_hpages = min_hpages; 143 144 if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) { 145 kfree(spool); 146 return NULL; 147 } 148 spool->rsv_hpages = min_hpages; 149 150 return spool; 151 } 152 153 void hugepage_put_subpool(struct hugepage_subpool *spool) 154 { 155 unsigned long flags; 156 157 spin_lock_irqsave(&spool->lock, flags); 158 BUG_ON(!spool->count); 159 spool->count--; 160 unlock_or_release_subpool(spool, flags); 161 } 162 163 /* 164 * Subpool accounting for allocating and reserving pages. 165 * Return -ENOMEM if there are not enough resources to satisfy the 166 * request. Otherwise, return the number of pages by which the 167 * global pools must be adjusted (upward). The returned value may 168 * only be different than the passed value (delta) in the case where 169 * a subpool minimum size must be maintained. 170 */ 171 static long hugepage_subpool_get_pages(struct hugepage_subpool *spool, 172 long delta) 173 { 174 long ret = delta; 175 176 if (!spool) 177 return ret; 178 179 spin_lock_irq(&spool->lock); 180 181 if (spool->max_hpages != -1) { /* maximum size accounting */ 182 if ((spool->used_hpages + delta) <= spool->max_hpages) 183 spool->used_hpages += delta; 184 else { 185 ret = -ENOMEM; 186 goto unlock_ret; 187 } 188 } 189 190 /* minimum size accounting */ 191 if (spool->min_hpages != -1 && spool->rsv_hpages) { 192 if (delta > spool->rsv_hpages) { 193 /* 194 * Asking for more reserves than those already taken on 195 * behalf of subpool. Return difference. 196 */ 197 ret = delta - spool->rsv_hpages; 198 spool->rsv_hpages = 0; 199 } else { 200 ret = 0; /* reserves already accounted for */ 201 spool->rsv_hpages -= delta; 202 } 203 } 204 205 unlock_ret: 206 spin_unlock_irq(&spool->lock); 207 return ret; 208 } 209 210 /* 211 * Subpool accounting for freeing and unreserving pages. 
212 * Return the number of global page reservations that must be dropped. 213 * The return value may only be different than the passed value (delta) 214 * in the case where a subpool minimum size must be maintained. 215 */ 216 static long hugepage_subpool_put_pages(struct hugepage_subpool *spool, 217 long delta) 218 { 219 long ret = delta; 220 unsigned long flags; 221 222 if (!spool) 223 return delta; 224 225 spin_lock_irqsave(&spool->lock, flags); 226 227 if (spool->max_hpages != -1) /* maximum size accounting */ 228 spool->used_hpages -= delta; 229 230 /* minimum size accounting */ 231 if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) { 232 if (spool->rsv_hpages + delta <= spool->min_hpages) 233 ret = 0; 234 else 235 ret = spool->rsv_hpages + delta - spool->min_hpages; 236 237 spool->rsv_hpages += delta; 238 if (spool->rsv_hpages > spool->min_hpages) 239 spool->rsv_hpages = spool->min_hpages; 240 } 241 242 /* 243 * If hugetlbfs_put_super couldn't free spool due to an outstanding 244 * quota reference, free it now. 245 */ 246 unlock_or_release_subpool(spool, flags); 247 248 return ret; 249 } 250 251 static inline struct hugepage_subpool *subpool_inode(struct inode *inode) 252 { 253 return HUGETLBFS_SB(inode->i_sb)->spool; 254 } 255 256 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma) 257 { 258 return subpool_inode(file_inode(vma->vm_file)); 259 } 260 261 /* 262 * hugetlb vma_lock helper routines 263 */ 264 void hugetlb_vma_lock_read(struct vm_area_struct *vma) 265 { 266 if (__vma_shareable_lock(vma)) { 267 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; 268 269 down_read(&vma_lock->rw_sema); 270 } 271 } 272 273 void hugetlb_vma_unlock_read(struct vm_area_struct *vma) 274 { 275 if (__vma_shareable_lock(vma)) { 276 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; 277 278 up_read(&vma_lock->rw_sema); 279 } 280 } 281 282 void hugetlb_vma_lock_write(struct vm_area_struct *vma) 283 { 284 if (__vma_shareable_lock(vma)) { 285 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; 286 287 down_write(&vma_lock->rw_sema); 288 } 289 } 290 291 void hugetlb_vma_unlock_write(struct vm_area_struct *vma) 292 { 293 if (__vma_shareable_lock(vma)) { 294 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; 295 296 up_write(&vma_lock->rw_sema); 297 } 298 } 299 300 int hugetlb_vma_trylock_write(struct vm_area_struct *vma) 301 { 302 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; 303 304 if (!__vma_shareable_lock(vma)) 305 return 1; 306 307 return down_write_trylock(&vma_lock->rw_sema); 308 } 309 310 void hugetlb_vma_assert_locked(struct vm_area_struct *vma) 311 { 312 if (__vma_shareable_lock(vma)) { 313 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; 314 315 lockdep_assert_held(&vma_lock->rw_sema); 316 } 317 } 318 319 void hugetlb_vma_lock_release(struct kref *kref) 320 { 321 struct hugetlb_vma_lock *vma_lock = container_of(kref, 322 struct hugetlb_vma_lock, refs); 323 324 kfree(vma_lock); 325 } 326 327 static void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock) 328 { 329 struct vm_area_struct *vma = vma_lock->vma; 330 331 /* 332 * vma_lock structure may or not be released as a result of put, 333 * it certainly will no longer be attached to vma so clear pointer. 334 * Semaphore synchronizes access to vma_lock->vma field. 
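 *
 * Illustrative sketch (an assumption, not code from this section): a
 * thread that still holds a kref on vma_lock must take rw_sema before
 * trusting the back pointer, which reads as NULL once the structure has
 * been detached below:
 *
 *	down_read(&vma_lock->rw_sema);
 *	vma = vma_lock->vma;
 *	if (vma)
 *		... vma is stable while rw_sema is held ...
 *	up_read(&vma_lock->rw_sema);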
335 */ 336 vma_lock->vma = NULL; 337 vma->vm_private_data = NULL; 338 up_write(&vma_lock->rw_sema); 339 kref_put(&vma_lock->refs, hugetlb_vma_lock_release); 340 } 341 342 static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma) 343 { 344 if (__vma_shareable_lock(vma)) { 345 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; 346 347 __hugetlb_vma_unlock_write_put(vma_lock); 348 } 349 } 350 351 static void hugetlb_vma_lock_free(struct vm_area_struct *vma) 352 { 353 /* 354 * Only present in sharable vmas. 355 */ 356 if (!vma || !__vma_shareable_lock(vma)) 357 return; 358 359 if (vma->vm_private_data) { 360 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; 361 362 down_write(&vma_lock->rw_sema); 363 __hugetlb_vma_unlock_write_put(vma_lock); 364 } 365 } 366 367 static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma) 368 { 369 struct hugetlb_vma_lock *vma_lock; 370 371 /* Only establish in (flags) sharable vmas */ 372 if (!vma || !(vma->vm_flags & VM_MAYSHARE)) 373 return; 374 375 /* Should never get here with non-NULL vm_private_data */ 376 if (vma->vm_private_data) 377 return; 378 379 vma_lock = kmalloc(sizeof(*vma_lock), GFP_KERNEL); 380 if (!vma_lock) { 381 /* 382 * If we can not allocate structure, then vma can not 383 * participate in pmd sharing. This is only a possible 384 * performance enhancement and memory saving issue. 385 * However, the lock is also used to synchronize page 386 * faults with truncation. If the lock is not present, 387 * unlikely races could leave pages in a file past i_size 388 * until the file is removed. Warn in the unlikely case of 389 * allocation failure. 390 */ 391 pr_warn_once("HugeTLB: unable to allocate vma specific lock\n"); 392 return; 393 } 394 395 kref_init(&vma_lock->refs); 396 init_rwsem(&vma_lock->rw_sema); 397 vma_lock->vma = vma; 398 vma->vm_private_data = vma_lock; 399 } 400 401 /* Helper that removes a struct file_region from the resv_map cache and returns 402 * it for use. 403 */ 404 static struct file_region * 405 get_file_region_entry_from_cache(struct resv_map *resv, long from, long to) 406 { 407 struct file_region *nrg; 408 409 VM_BUG_ON(resv->region_cache_count <= 0); 410 411 resv->region_cache_count--; 412 nrg = list_first_entry(&resv->region_cache, struct file_region, link); 413 list_del(&nrg->link); 414 415 nrg->from = from; 416 nrg->to = to; 417 418 return nrg; 419 } 420 421 static void copy_hugetlb_cgroup_uncharge_info(struct file_region *nrg, 422 struct file_region *rg) 423 { 424 #ifdef CONFIG_CGROUP_HUGETLB 425 nrg->reservation_counter = rg->reservation_counter; 426 nrg->css = rg->css; 427 if (rg->css) 428 css_get(rg->css); 429 #endif 430 } 431 432 /* Helper that records hugetlb_cgroup uncharge info. */ 433 static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg, 434 struct hstate *h, 435 struct resv_map *resv, 436 struct file_region *nrg) 437 { 438 #ifdef CONFIG_CGROUP_HUGETLB 439 if (h_cg) { 440 nrg->reservation_counter = 441 &h_cg->rsvd_hugepage[hstate_index(h)]; 442 nrg->css = &h_cg->css; 443 /* 444 * The caller will hold exactly one h_cg->css reference for the 445 * whole contiguous reservation region. But this area might be 446 * scattered when there are already some file_regions reside in 447 * it. As a result, many file_regions may share only one css 448 * reference. In order to ensure that one file_region must hold 449 * exactly one h_cg->css reference, we should do css_get for 450 * each file_region and leave the reference held by caller 451 * untouched. 
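 *
 * Worked example (illustrative): reserving [0, 10) when the map already
 * contains [3, 5) adds two new file_regions, [0, 3) and [5, 10).  Each
 * of them does its own css_get() here, so the regions hold two
 * references while the caller's single reference for the whole [0, 10)
 * reservation stays untouched.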
452 */ 453 css_get(&h_cg->css); 454 if (!resv->pages_per_hpage) 455 resv->pages_per_hpage = pages_per_huge_page(h); 456 /* pages_per_hpage should be the same for all entries in 457 * a resv_map. 458 */ 459 VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h)); 460 } else { 461 nrg->reservation_counter = NULL; 462 nrg->css = NULL; 463 } 464 #endif 465 } 466 467 static void put_uncharge_info(struct file_region *rg) 468 { 469 #ifdef CONFIG_CGROUP_HUGETLB 470 if (rg->css) 471 css_put(rg->css); 472 #endif 473 } 474 475 static bool has_same_uncharge_info(struct file_region *rg, 476 struct file_region *org) 477 { 478 #ifdef CONFIG_CGROUP_HUGETLB 479 return rg->reservation_counter == org->reservation_counter && 480 rg->css == org->css; 481 482 #else 483 return true; 484 #endif 485 } 486 487 static void coalesce_file_region(struct resv_map *resv, struct file_region *rg) 488 { 489 struct file_region *nrg, *prg; 490 491 prg = list_prev_entry(rg, link); 492 if (&prg->link != &resv->regions && prg->to == rg->from && 493 has_same_uncharge_info(prg, rg)) { 494 prg->to = rg->to; 495 496 list_del(&rg->link); 497 put_uncharge_info(rg); 498 kfree(rg); 499 500 rg = prg; 501 } 502 503 nrg = list_next_entry(rg, link); 504 if (&nrg->link != &resv->regions && nrg->from == rg->to && 505 has_same_uncharge_info(nrg, rg)) { 506 nrg->from = rg->from; 507 508 list_del(&rg->link); 509 put_uncharge_info(rg); 510 kfree(rg); 511 } 512 } 513 514 static inline long 515 hugetlb_resv_map_add(struct resv_map *map, struct list_head *rg, long from, 516 long to, struct hstate *h, struct hugetlb_cgroup *cg, 517 long *regions_needed) 518 { 519 struct file_region *nrg; 520 521 if (!regions_needed) { 522 nrg = get_file_region_entry_from_cache(map, from, to); 523 record_hugetlb_cgroup_uncharge_info(cg, h, map, nrg); 524 list_add(&nrg->link, rg); 525 coalesce_file_region(map, nrg); 526 } else 527 *regions_needed += 1; 528 529 return to - from; 530 } 531 532 /* 533 * Must be called with resv->lock held. 534 * 535 * Calling this with regions_needed != NULL will count the number of pages 536 * to be added but will not modify the linked list. And regions_needed will 537 * indicate the number of file_regions needed in the cache to carry out to add 538 * the regions for this range. 539 */ 540 static long add_reservation_in_range(struct resv_map *resv, long f, long t, 541 struct hugetlb_cgroup *h_cg, 542 struct hstate *h, long *regions_needed) 543 { 544 long add = 0; 545 struct list_head *head = &resv->regions; 546 long last_accounted_offset = f; 547 struct file_region *iter, *trg = NULL; 548 struct list_head *rg = NULL; 549 550 if (regions_needed) 551 *regions_needed = 0; 552 553 /* In this loop, we essentially handle an entry for the range 554 * [last_accounted_offset, iter->from), at every iteration, with some 555 * bounds checking. 556 */ 557 list_for_each_entry_safe(iter, trg, head, link) { 558 /* Skip irrelevant regions that start before our range. */ 559 if (iter->from < f) { 560 /* If this region ends after the last accounted offset, 561 * then we need to update last_accounted_offset. 562 */ 563 if (iter->to > last_accounted_offset) 564 last_accounted_offset = iter->to; 565 continue; 566 } 567 568 /* When we find a region that starts beyond our range, we've 569 * finished. 570 */ 571 if (iter->from >= t) { 572 rg = iter->link.prev; 573 break; 574 } 575 576 /* Add an entry for last_accounted_offset -> iter->from, and 577 * update last_accounted_offset. 
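 *
 * Worked example (illustrative): with existing regions [2, 4) and
 * [6, 8), a call for [f, t) = [0, 10) adds [0, 2) and [4, 6) in this
 * loop, adds [8, 10) after it, and returns add = 6.  With
 * regions_needed != NULL the same count is returned, but nothing is
 * inserted and *regions_needed is set to 3.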
578 */ 579 if (iter->from > last_accounted_offset) 580 add += hugetlb_resv_map_add(resv, iter->link.prev, 581 last_accounted_offset, 582 iter->from, h, h_cg, 583 regions_needed); 584 585 last_accounted_offset = iter->to; 586 } 587 588 /* Handle the case where our range extends beyond 589 * last_accounted_offset. 590 */ 591 if (!rg) 592 rg = head->prev; 593 if (last_accounted_offset < t) 594 add += hugetlb_resv_map_add(resv, rg, last_accounted_offset, 595 t, h, h_cg, regions_needed); 596 597 return add; 598 } 599 600 /* Must be called with resv->lock acquired. Will drop lock to allocate entries. 601 */ 602 static int allocate_file_region_entries(struct resv_map *resv, 603 int regions_needed) 604 __must_hold(&resv->lock) 605 { 606 LIST_HEAD(allocated_regions); 607 int to_allocate = 0, i = 0; 608 struct file_region *trg = NULL, *rg = NULL; 609 610 VM_BUG_ON(regions_needed < 0); 611 612 /* 613 * Check for sufficient descriptors in the cache to accommodate 614 * the number of in progress add operations plus regions_needed. 615 * 616 * This is a while loop because when we drop the lock, some other call 617 * to region_add or region_del may have consumed some region_entries, 618 * so we keep looping here until we finally have enough entries for 619 * (adds_in_progress + regions_needed). 620 */ 621 while (resv->region_cache_count < 622 (resv->adds_in_progress + regions_needed)) { 623 to_allocate = resv->adds_in_progress + regions_needed - 624 resv->region_cache_count; 625 626 /* At this point, we should have enough entries in the cache 627 * for all the existing adds_in_progress. We should only be 628 * needing to allocate for regions_needed. 629 */ 630 VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress); 631 632 spin_unlock(&resv->lock); 633 for (i = 0; i < to_allocate; i++) { 634 trg = kmalloc(sizeof(*trg), GFP_KERNEL); 635 if (!trg) 636 goto out_of_memory; 637 list_add(&trg->link, &allocated_regions); 638 } 639 640 spin_lock(&resv->lock); 641 642 list_splice(&allocated_regions, &resv->region_cache); 643 resv->region_cache_count += to_allocate; 644 } 645 646 return 0; 647 648 out_of_memory: 649 list_for_each_entry_safe(rg, trg, &allocated_regions, link) { 650 list_del(&rg->link); 651 kfree(rg); 652 } 653 return -ENOMEM; 654 } 655 656 /* 657 * Add the huge page range represented by [f, t) to the reserve 658 * map. Regions will be taken from the cache to fill in this range. 659 * Sufficient regions should exist in the cache due to the previous 660 * call to region_chg with the same range, but in some cases the cache will not 661 * have sufficient entries due to races with other code doing region_add or 662 * region_del. The extra needed entries will be allocated. 663 * 664 * regions_needed is the out value provided by a previous call to region_chg. 665 * 666 * Return the number of new huge pages added to the map. This number is greater 667 * than or equal to zero. If file_region entries needed to be allocated for 668 * this operation and we were not able to allocate, it returns -ENOMEM. 669 * region_add of regions of length 1 never allocate file_regions and cannot 670 * fail; region_chg will always allocate at least 1 entry and a region_add for 671 * 1 page will only require at most 1 entry. 
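 *
 * Illustrative sketch of the calling protocol (an assumption, not the
 * exact caller code elsewhere in this file):
 *
 *	long needed;
 *	long chg = region_chg(resv, f, t, &needed);
 *
 *	if (chg < 0)
 *		return chg;
 *	... charge cgroups, take the pages from the pool ...
 *	if (that failed)
 *		region_abort(resv, f, t, needed);
 *	else
 *		region_add(resv, f, t, needed, h, h_cg);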
672 */ 673 static long region_add(struct resv_map *resv, long f, long t, 674 long in_regions_needed, struct hstate *h, 675 struct hugetlb_cgroup *h_cg) 676 { 677 long add = 0, actual_regions_needed = 0; 678 679 spin_lock(&resv->lock); 680 retry: 681 682 /* Count how many regions are actually needed to execute this add. */ 683 add_reservation_in_range(resv, f, t, NULL, NULL, 684 &actual_regions_needed); 685 686 /* 687 * Check for sufficient descriptors in the cache to accommodate 688 * this add operation. Note that actual_regions_needed may be greater 689 * than in_regions_needed, as the resv_map may have been modified since 690 * the region_chg call. In this case, we need to make sure that we 691 * allocate extra entries, such that we have enough for all the 692 * existing adds_in_progress, plus the excess needed for this 693 * operation. 694 */ 695 if (actual_regions_needed > in_regions_needed && 696 resv->region_cache_count < 697 resv->adds_in_progress + 698 (actual_regions_needed - in_regions_needed)) { 699 /* region_add operation of range 1 should never need to 700 * allocate file_region entries. 701 */ 702 VM_BUG_ON(t - f <= 1); 703 704 if (allocate_file_region_entries( 705 resv, actual_regions_needed - in_regions_needed)) { 706 return -ENOMEM; 707 } 708 709 goto retry; 710 } 711 712 add = add_reservation_in_range(resv, f, t, h_cg, h, NULL); 713 714 resv->adds_in_progress -= in_regions_needed; 715 716 spin_unlock(&resv->lock); 717 return add; 718 } 719 720 /* 721 * Examine the existing reserve map and determine how many 722 * huge pages in the specified range [f, t) are NOT currently 723 * represented. This routine is called before a subsequent 724 * call to region_add that will actually modify the reserve 725 * map to add the specified range [f, t). region_chg does 726 * not change the number of huge pages represented by the 727 * map. A number of new file_region structures is added to the cache as a 728 * placeholder, for the subsequent region_add call to use. At least 1 729 * file_region structure is added. 730 * 731 * out_regions_needed is the number of regions added to the 732 * resv->adds_in_progress. This value needs to be provided to a follow up call 733 * to region_add or region_abort for proper accounting. 734 * 735 * Returns the number of huge pages that need to be added to the existing 736 * reservation map for the range [f, t). This number is greater or equal to 737 * zero. -ENOMEM is returned if a new file_region structure or cache entry 738 * is needed and can not be allocated. 739 */ 740 static long region_chg(struct resv_map *resv, long f, long t, 741 long *out_regions_needed) 742 { 743 long chg = 0; 744 745 spin_lock(&resv->lock); 746 747 /* Count how many hugepages in this range are NOT represented. */ 748 chg = add_reservation_in_range(resv, f, t, NULL, NULL, 749 out_regions_needed); 750 751 if (*out_regions_needed == 0) 752 *out_regions_needed = 1; 753 754 if (allocate_file_region_entries(resv, *out_regions_needed)) 755 return -ENOMEM; 756 757 resv->adds_in_progress += *out_regions_needed; 758 759 spin_unlock(&resv->lock); 760 return chg; 761 } 762 763 /* 764 * Abort the in progress add operation. The adds_in_progress field 765 * of the resv_map keeps track of the operations in progress between 766 * calls to region_chg and region_add. Operations are sometimes 767 * aborted after the call to region_chg. In such cases, region_abort 768 * is called to decrement the adds_in_progress counter. 
regions_needed 769 * is the value returned by the region_chg call, it is used to decrement 770 * the adds_in_progress counter. 771 * 772 * NOTE: The range arguments [f, t) are not needed or used in this 773 * routine. They are kept to make reading the calling code easier as 774 * arguments will match the associated region_chg call. 775 */ 776 static void region_abort(struct resv_map *resv, long f, long t, 777 long regions_needed) 778 { 779 spin_lock(&resv->lock); 780 VM_BUG_ON(!resv->region_cache_count); 781 resv->adds_in_progress -= regions_needed; 782 spin_unlock(&resv->lock); 783 } 784 785 /* 786 * Delete the specified range [f, t) from the reserve map. If the 787 * t parameter is LONG_MAX, this indicates that ALL regions after f 788 * should be deleted. Locate the regions which intersect [f, t) 789 * and either trim, delete or split the existing regions. 790 * 791 * Returns the number of huge pages deleted from the reserve map. 792 * In the normal case, the return value is zero or more. In the 793 * case where a region must be split, a new region descriptor must 794 * be allocated. If the allocation fails, -ENOMEM will be returned. 795 * NOTE: If the parameter t == LONG_MAX, then we will never split 796 * a region and possibly return -ENOMEM. Callers specifying 797 * t == LONG_MAX do not need to check for -ENOMEM error. 798 */ 799 static long region_del(struct resv_map *resv, long f, long t) 800 { 801 struct list_head *head = &resv->regions; 802 struct file_region *rg, *trg; 803 struct file_region *nrg = NULL; 804 long del = 0; 805 806 retry: 807 spin_lock(&resv->lock); 808 list_for_each_entry_safe(rg, trg, head, link) { 809 /* 810 * Skip regions before the range to be deleted. file_region 811 * ranges are normally of the form [from, to). However, there 812 * may be a "placeholder" entry in the map which is of the form 813 * (from, to) with from == to. Check for placeholder entries 814 * at the beginning of the range to be deleted. 815 */ 816 if (rg->to <= f && (rg->to != rg->from || rg->to != f)) 817 continue; 818 819 if (rg->from >= t) 820 break; 821 822 if (f > rg->from && t < rg->to) { /* Must split region */ 823 /* 824 * Check for an entry in the cache before dropping 825 * lock and attempting allocation. 
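 *
 * Worked example (illustrative): punching a hole [3, 5) out of an
 * existing region [0, 10) trims it to [0, 3) and inserts a new
 * descriptor for [5, 10); that new descriptor is the spare entry (nrg)
 * obtained here, and del ends up being 2.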
826 */ 827 if (!nrg && 828 resv->region_cache_count > resv->adds_in_progress) { 829 nrg = list_first_entry(&resv->region_cache, 830 struct file_region, 831 link); 832 list_del(&nrg->link); 833 resv->region_cache_count--; 834 } 835 836 if (!nrg) { 837 spin_unlock(&resv->lock); 838 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL); 839 if (!nrg) 840 return -ENOMEM; 841 goto retry; 842 } 843 844 del += t - f; 845 hugetlb_cgroup_uncharge_file_region( 846 resv, rg, t - f, false); 847 848 /* New entry for end of split region */ 849 nrg->from = t; 850 nrg->to = rg->to; 851 852 copy_hugetlb_cgroup_uncharge_info(nrg, rg); 853 854 INIT_LIST_HEAD(&nrg->link); 855 856 /* Original entry is trimmed */ 857 rg->to = f; 858 859 list_add(&nrg->link, &rg->link); 860 nrg = NULL; 861 break; 862 } 863 864 if (f <= rg->from && t >= rg->to) { /* Remove entire region */ 865 del += rg->to - rg->from; 866 hugetlb_cgroup_uncharge_file_region(resv, rg, 867 rg->to - rg->from, true); 868 list_del(&rg->link); 869 kfree(rg); 870 continue; 871 } 872 873 if (f <= rg->from) { /* Trim beginning of region */ 874 hugetlb_cgroup_uncharge_file_region(resv, rg, 875 t - rg->from, false); 876 877 del += t - rg->from; 878 rg->from = t; 879 } else { /* Trim end of region */ 880 hugetlb_cgroup_uncharge_file_region(resv, rg, 881 rg->to - f, false); 882 883 del += rg->to - f; 884 rg->to = f; 885 } 886 } 887 888 spin_unlock(&resv->lock); 889 kfree(nrg); 890 return del; 891 } 892 893 /* 894 * A rare out of memory error was encountered which prevented removal of 895 * the reserve map region for a page. The huge page itself was free'ed 896 * and removed from the page cache. This routine will adjust the subpool 897 * usage count, and the global reserve count if needed. By incrementing 898 * these counts, the reserve map entry which could not be deleted will 899 * appear as a "reserved" entry instead of simply dangling with incorrect 900 * counts. 901 */ 902 void hugetlb_fix_reserve_counts(struct inode *inode) 903 { 904 struct hugepage_subpool *spool = subpool_inode(inode); 905 long rsv_adjust; 906 bool reserved = false; 907 908 rsv_adjust = hugepage_subpool_get_pages(spool, 1); 909 if (rsv_adjust > 0) { 910 struct hstate *h = hstate_inode(inode); 911 912 if (!hugetlb_acct_memory(h, 1)) 913 reserved = true; 914 } else if (!rsv_adjust) { 915 reserved = true; 916 } 917 918 if (!reserved) 919 pr_warn("hugetlb: Huge Page Reserved count may go negative.\n"); 920 } 921 922 /* 923 * Count and return the number of huge pages in the reserve map 924 * that intersect with the range [f, t). 925 */ 926 static long region_count(struct resv_map *resv, long f, long t) 927 { 928 struct list_head *head = &resv->regions; 929 struct file_region *rg; 930 long chg = 0; 931 932 spin_lock(&resv->lock); 933 /* Locate each segment we overlap with, and count that overlap. */ 934 list_for_each_entry(rg, head, link) { 935 long seg_from; 936 long seg_to; 937 938 if (rg->to <= f) 939 continue; 940 if (rg->from >= t) 941 break; 942 943 seg_from = max(rg->from, f); 944 seg_to = min(rg->to, t); 945 946 chg += seg_to - seg_from; 947 } 948 spin_unlock(&resv->lock); 949 950 return chg; 951 } 952 953 /* 954 * Convert the address within this vma to the page offset within 955 * the mapping, in pagecache page units; huge pages here. 
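 *
 * Worked example (illustrative, assuming 4K base pages and a 2MB
 * hstate): huge_page_shift() is 21 and huge_page_order() is 9, so a
 * fault 4MB past vm_start in a vma with vm_pgoff == 1024 maps to
 * (4MB >> 21) + (1024 >> 9) = 2 + 2 = 4 huge pages into the file.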
956 */ 957 static pgoff_t vma_hugecache_offset(struct hstate *h, 958 struct vm_area_struct *vma, unsigned long address) 959 { 960 return ((address - vma->vm_start) >> huge_page_shift(h)) + 961 (vma->vm_pgoff >> huge_page_order(h)); 962 } 963 964 pgoff_t linear_hugepage_index(struct vm_area_struct *vma, 965 unsigned long address) 966 { 967 return vma_hugecache_offset(hstate_vma(vma), vma, address); 968 } 969 EXPORT_SYMBOL_GPL(linear_hugepage_index); 970 971 /** 972 * vma_kernel_pagesize - Page size granularity for this VMA. 973 * @vma: The user mapping. 974 * 975 * Folios in this VMA will be aligned to, and at least the size of the 976 * number of bytes returned by this function. 977 * 978 * Return: The default size of the folios allocated when backing a VMA. 979 */ 980 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma) 981 { 982 if (vma->vm_ops && vma->vm_ops->pagesize) 983 return vma->vm_ops->pagesize(vma); 984 return PAGE_SIZE; 985 } 986 EXPORT_SYMBOL_GPL(vma_kernel_pagesize); 987 988 /* 989 * Return the page size being used by the MMU to back a VMA. In the majority 990 * of cases, the page size used by the kernel matches the MMU size. On 991 * architectures where it differs, an architecture-specific 'strong' 992 * version of this symbol is required. 993 */ 994 __weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma) 995 { 996 return vma_kernel_pagesize(vma); 997 } 998 999 /* 1000 * Flags for MAP_PRIVATE reservations. These are stored in the bottom 1001 * bits of the reservation map pointer, which are always clear due to 1002 * alignment. 1003 */ 1004 #define HPAGE_RESV_OWNER (1UL << 0) 1005 #define HPAGE_RESV_UNMAPPED (1UL << 1) 1006 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED) 1007 1008 /* 1009 * These helpers are used to track how many pages are reserved for 1010 * faults in a MAP_PRIVATE mapping. Only the process that called mmap() 1011 * is guaranteed to have their future faults succeed. 1012 * 1013 * With the exception of hugetlb_dup_vma_private() which is called at fork(), 1014 * the reserve counters are updated with the hugetlb_lock held. It is safe 1015 * to reset the VMA at fork() time as it is not in use yet and there is no 1016 * chance of the global counters getting corrupted as a result of the values. 1017 * 1018 * The private mapping reservation is represented in a subtly different 1019 * manner to a shared mapping. A shared mapping has a region map associated 1020 * with the underlying file, this region map represents the backing file 1021 * pages which have ever had a reservation assigned which this persists even 1022 * after the page is instantiated. A private mapping has a region map 1023 * associated with the original mmap which is attached to all VMAs which 1024 * reference it, this region map represents those offsets which have consumed 1025 * reservation ie. where pages have been instantiated. 
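 *
 * Illustrative example (an assumption about the reservation setup done
 * elsewhere in this file): for the task that mmap()ed a private
 * mapping, vm_private_data ends up holding
 *
 *	(unsigned long)resv_map | HPAGE_RESV_OWNER
 *
 * so the helpers below can recover both the map pointer and the flag
 * bits from the same word.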
1026 */ 1027 static unsigned long get_vma_private_data(struct vm_area_struct *vma) 1028 { 1029 return (unsigned long)vma->vm_private_data; 1030 } 1031 1032 static void set_vma_private_data(struct vm_area_struct *vma, 1033 unsigned long value) 1034 { 1035 vma->vm_private_data = (void *)value; 1036 } 1037 1038 static void 1039 resv_map_set_hugetlb_cgroup_uncharge_info(struct resv_map *resv_map, 1040 struct hugetlb_cgroup *h_cg, 1041 struct hstate *h) 1042 { 1043 #ifdef CONFIG_CGROUP_HUGETLB 1044 if (!h_cg || !h) { 1045 resv_map->reservation_counter = NULL; 1046 resv_map->pages_per_hpage = 0; 1047 resv_map->css = NULL; 1048 } else { 1049 resv_map->reservation_counter = 1050 &h_cg->rsvd_hugepage[hstate_index(h)]; 1051 resv_map->pages_per_hpage = pages_per_huge_page(h); 1052 resv_map->css = &h_cg->css; 1053 } 1054 #endif 1055 } 1056 1057 struct resv_map *resv_map_alloc(void) 1058 { 1059 struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL); 1060 struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL); 1061 1062 if (!resv_map || !rg) { 1063 kfree(resv_map); 1064 kfree(rg); 1065 return NULL; 1066 } 1067 1068 kref_init(&resv_map->refs); 1069 spin_lock_init(&resv_map->lock); 1070 INIT_LIST_HEAD(&resv_map->regions); 1071 1072 resv_map->adds_in_progress = 0; 1073 /* 1074 * Initialize these to 0. On shared mappings, 0's here indicate these 1075 * fields don't do cgroup accounting. On private mappings, these will be 1076 * re-initialized to the proper values, to indicate that hugetlb cgroup 1077 * reservations are to be un-charged from here. 1078 */ 1079 resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, NULL, NULL); 1080 1081 INIT_LIST_HEAD(&resv_map->region_cache); 1082 list_add(&rg->link, &resv_map->region_cache); 1083 resv_map->region_cache_count = 1; 1084 1085 return resv_map; 1086 } 1087 1088 void resv_map_release(struct kref *ref) 1089 { 1090 struct resv_map *resv_map = container_of(ref, struct resv_map, refs); 1091 struct list_head *head = &resv_map->region_cache; 1092 struct file_region *rg, *trg; 1093 1094 /* Clear out any active regions before we release the map. */ 1095 region_del(resv_map, 0, LONG_MAX); 1096 1097 /* ... and any entries left in the cache */ 1098 list_for_each_entry_safe(rg, trg, head, link) { 1099 list_del(&rg->link); 1100 kfree(rg); 1101 } 1102 1103 VM_BUG_ON(resv_map->adds_in_progress); 1104 1105 kfree(resv_map); 1106 } 1107 1108 static inline struct resv_map *inode_resv_map(struct inode *inode) 1109 { 1110 /* 1111 * At inode evict time, i_mapping may not point to the original 1112 * address space within the inode. This original address space 1113 * contains the pointer to the resv_map. So, always use the 1114 * address space embedded within the inode. 1115 * The VERY common case is inode->mapping == &inode->i_data but, 1116 * this may not be true for device special inodes. 
1117 */ 1118 return (struct resv_map *)(&inode->i_data)->private_data; 1119 } 1120 1121 static struct resv_map *vma_resv_map(struct vm_area_struct *vma) 1122 { 1123 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); 1124 if (vma->vm_flags & VM_MAYSHARE) { 1125 struct address_space *mapping = vma->vm_file->f_mapping; 1126 struct inode *inode = mapping->host; 1127 1128 return inode_resv_map(inode); 1129 1130 } else { 1131 return (struct resv_map *)(get_vma_private_data(vma) & 1132 ~HPAGE_RESV_MASK); 1133 } 1134 } 1135 1136 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map) 1137 { 1138 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); 1139 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma); 1140 1141 set_vma_private_data(vma, (get_vma_private_data(vma) & 1142 HPAGE_RESV_MASK) | (unsigned long)map); 1143 } 1144 1145 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags) 1146 { 1147 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); 1148 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma); 1149 1150 set_vma_private_data(vma, get_vma_private_data(vma) | flags); 1151 } 1152 1153 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag) 1154 { 1155 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); 1156 1157 return (get_vma_private_data(vma) & flag) != 0; 1158 } 1159 1160 void hugetlb_dup_vma_private(struct vm_area_struct *vma) 1161 { 1162 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); 1163 /* 1164 * Clear vm_private_data 1165 * - For shared mappings this is a per-vma semaphore that may be 1166 * allocated in a subsequent call to hugetlb_vm_op_open. 1167 * Before clearing, make sure pointer is not associated with vma 1168 * as this will leak the structure. This is the case when called 1169 * via clear_vma_resv_huge_pages() and hugetlb_vm_op_open has already 1170 * been called to allocate a new structure. 1171 * - For MAP_PRIVATE mappings, this is the reserve map which does 1172 * not apply to children. Faults generated by the children are 1173 * not guaranteed to succeed, even if read-only. 1174 */ 1175 if (vma->vm_flags & VM_MAYSHARE) { 1176 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; 1177 1178 if (vma_lock && vma_lock->vma != vma) 1179 vma->vm_private_data = NULL; 1180 } else 1181 vma->vm_private_data = NULL; 1182 } 1183 1184 /* 1185 * Reset and decrement one ref on hugepage private reservation. 1186 * Called with mm->mmap_lock writer semaphore held. 1187 * This function should be only used by move_vma() and operate on 1188 * same sized vma. It should never come here with last ref on the 1189 * reservation. 1190 */ 1191 void clear_vma_resv_huge_pages(struct vm_area_struct *vma) 1192 { 1193 /* 1194 * Clear the old hugetlb private page reservation. 1195 * It has already been transferred to new_vma. 1196 * 1197 * During a mremap() operation of a hugetlb vma we call move_vma() 1198 * which copies vma into new_vma and unmaps vma. After the copy 1199 * operation both new_vma and vma share a reference to the resv_map 1200 * struct, and at that point vma is about to be unmapped. We don't 1201 * want to return the reservation to the pool at unmap of vma because 1202 * the reservation still lives on in new_vma, so simply decrement the 1203 * ref here and remove the resv_map reference from this vma. 
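 *
 * Rough call sequence this is written for (illustrative; the callers
 * live outside this file):
 *
 *	mremap()
 *	  move_vma()
 *	    copy_vma()                      - new_vma shares the resv_map
 *	    ...
 *	    clear_vma_resv_huge_pages(vma)  - drop the old vma's reference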
1204 */ 1205 struct resv_map *reservations = vma_resv_map(vma); 1206 1207 if (reservations && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { 1208 resv_map_put_hugetlb_cgroup_uncharge_info(reservations); 1209 kref_put(&reservations->refs, resv_map_release); 1210 } 1211 1212 hugetlb_dup_vma_private(vma); 1213 } 1214 1215 /* Returns true if the VMA has associated reserve pages */ 1216 static bool vma_has_reserves(struct vm_area_struct *vma, long chg) 1217 { 1218 if (vma->vm_flags & VM_NORESERVE) { 1219 /* 1220 * This address is already reserved by other process(chg == 0), 1221 * so, we should decrement reserved count. Without decrementing, 1222 * reserve count remains after releasing inode, because this 1223 * allocated page will go into page cache and is regarded as 1224 * coming from reserved pool in releasing step. Currently, we 1225 * don't have any other solution to deal with this situation 1226 * properly, so add work-around here. 1227 */ 1228 if (vma->vm_flags & VM_MAYSHARE && chg == 0) 1229 return true; 1230 else 1231 return false; 1232 } 1233 1234 /* Shared mappings always use reserves */ 1235 if (vma->vm_flags & VM_MAYSHARE) { 1236 /* 1237 * We know VM_NORESERVE is not set. Therefore, there SHOULD 1238 * be a region map for all pages. The only situation where 1239 * there is no region map is if a hole was punched via 1240 * fallocate. In this case, there really are no reserves to 1241 * use. This situation is indicated if chg != 0. 1242 */ 1243 if (chg) 1244 return false; 1245 else 1246 return true; 1247 } 1248 1249 /* 1250 * Only the process that called mmap() has reserves for 1251 * private mappings. 1252 */ 1253 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { 1254 /* 1255 * Like the shared case above, a hole punch or truncate 1256 * could have been performed on the private mapping. 1257 * Examine the value of chg to determine if reserves 1258 * actually exist or were previously consumed. 1259 * Very Subtle - The value of chg comes from a previous 1260 * call to vma_needs_reserves(). The reserve map for 1261 * private mappings has different (opposite) semantics 1262 * than that of shared mappings. vma_needs_reserves() 1263 * has already taken this difference in semantics into 1264 * account. Therefore, the meaning of chg is the same 1265 * as in the shared case above. Code could easily be 1266 * combined, but keeping it separate draws attention to 1267 * subtle differences. 
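 *
 * For reference, an illustrative summary of the decisions this function
 * makes:
 *
 *	VM_NORESERVE:       true only for shared mappings with chg == 0
 *	VM_MAYSHARE:        true iff chg == 0
 *	HPAGE_RESV_OWNER:   true iff chg == 0
 *	anything else:      false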
1268 */ 1269 if (chg) 1270 return false; 1271 else 1272 return true; 1273 } 1274 1275 return false; 1276 } 1277 1278 static void enqueue_hugetlb_folio(struct hstate *h, struct folio *folio) 1279 { 1280 int nid = folio_nid(folio); 1281 1282 lockdep_assert_held(&hugetlb_lock); 1283 VM_BUG_ON_FOLIO(folio_ref_count(folio), folio); 1284 1285 list_move(&folio->lru, &h->hugepage_freelists[nid]); 1286 h->free_huge_pages++; 1287 h->free_huge_pages_node[nid]++; 1288 folio_set_hugetlb_freed(folio); 1289 } 1290 1291 static struct folio *dequeue_hugetlb_folio_node_exact(struct hstate *h, 1292 int nid) 1293 { 1294 struct folio *folio; 1295 bool pin = !!(current->flags & PF_MEMALLOC_PIN); 1296 1297 lockdep_assert_held(&hugetlb_lock); 1298 list_for_each_entry(folio, &h->hugepage_freelists[nid], lru) { 1299 if (pin && !folio_is_longterm_pinnable(folio)) 1300 continue; 1301 1302 if (folio_test_hwpoison(folio)) 1303 continue; 1304 1305 list_move(&folio->lru, &h->hugepage_activelist); 1306 folio_ref_unfreeze(folio, 1); 1307 folio_clear_hugetlb_freed(folio); 1308 h->free_huge_pages--; 1309 h->free_huge_pages_node[nid]--; 1310 return folio; 1311 } 1312 1313 return NULL; 1314 } 1315 1316 static struct folio *dequeue_hugetlb_folio_nodemask(struct hstate *h, gfp_t gfp_mask, 1317 int nid, nodemask_t *nmask) 1318 { 1319 unsigned int cpuset_mems_cookie; 1320 struct zonelist *zonelist; 1321 struct zone *zone; 1322 struct zoneref *z; 1323 int node = NUMA_NO_NODE; 1324 1325 zonelist = node_zonelist(nid, gfp_mask); 1326 1327 retry_cpuset: 1328 cpuset_mems_cookie = read_mems_allowed_begin(); 1329 for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) { 1330 struct folio *folio; 1331 1332 if (!cpuset_zone_allowed(zone, gfp_mask)) 1333 continue; 1334 /* 1335 * no need to ask again on the same node. Pool is node rather than 1336 * zone aware 1337 */ 1338 if (zone_to_nid(zone) == node) 1339 continue; 1340 node = zone_to_nid(zone); 1341 1342 folio = dequeue_hugetlb_folio_node_exact(h, node); 1343 if (folio) 1344 return folio; 1345 } 1346 if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie))) 1347 goto retry_cpuset; 1348 1349 return NULL; 1350 } 1351 1352 static unsigned long available_huge_pages(struct hstate *h) 1353 { 1354 return h->free_huge_pages - h->resv_huge_pages; 1355 } 1356 1357 static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h, 1358 struct vm_area_struct *vma, 1359 unsigned long address, int avoid_reserve, 1360 long chg) 1361 { 1362 struct folio *folio = NULL; 1363 struct mempolicy *mpol; 1364 gfp_t gfp_mask; 1365 nodemask_t *nodemask; 1366 int nid; 1367 1368 /* 1369 * A child process with MAP_PRIVATE mappings created by their parent 1370 * have no page reserves. This check ensures that reservations are 1371 * not "stolen". 
The child may still get SIGKILLed 1372 */ 1373 if (!vma_has_reserves(vma, chg) && !available_huge_pages(h)) 1374 goto err; 1375 1376 /* If reserves cannot be used, ensure enough pages are in the pool */ 1377 if (avoid_reserve && !available_huge_pages(h)) 1378 goto err; 1379 1380 gfp_mask = htlb_alloc_mask(h); 1381 nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask); 1382 1383 if (mpol_is_preferred_many(mpol)) { 1384 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, 1385 nid, nodemask); 1386 1387 /* Fallback to all nodes if page==NULL */ 1388 nodemask = NULL; 1389 } 1390 1391 if (!folio) 1392 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, 1393 nid, nodemask); 1394 1395 if (folio && !avoid_reserve && vma_has_reserves(vma, chg)) { 1396 folio_set_hugetlb_restore_reserve(folio); 1397 h->resv_huge_pages--; 1398 } 1399 1400 mpol_cond_put(mpol); 1401 return folio; 1402 1403 err: 1404 return NULL; 1405 } 1406 1407 /* 1408 * common helper functions for hstate_next_node_to_{alloc|free}. 1409 * We may have allocated or freed a huge page based on a different 1410 * nodes_allowed previously, so h->next_node_to_{alloc|free} might 1411 * be outside of *nodes_allowed. Ensure that we use an allowed 1412 * node for alloc or free. 1413 */ 1414 static int next_node_allowed(int nid, nodemask_t *nodes_allowed) 1415 { 1416 nid = next_node_in(nid, *nodes_allowed); 1417 VM_BUG_ON(nid >= MAX_NUMNODES); 1418 1419 return nid; 1420 } 1421 1422 static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed) 1423 { 1424 if (!node_isset(nid, *nodes_allowed)) 1425 nid = next_node_allowed(nid, nodes_allowed); 1426 return nid; 1427 } 1428 1429 /* 1430 * returns the previously saved node ["this node"] from which to 1431 * allocate a persistent huge page for the pool and advance the 1432 * next node from which to allocate, handling wrap at end of node 1433 * mask. 1434 */ 1435 static int hstate_next_node_to_alloc(struct hstate *h, 1436 nodemask_t *nodes_allowed) 1437 { 1438 int nid; 1439 1440 VM_BUG_ON(!nodes_allowed); 1441 1442 nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed); 1443 h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed); 1444 1445 return nid; 1446 } 1447 1448 /* 1449 * helper for remove_pool_huge_page() - return the previously saved 1450 * node ["this node"] from which to free a huge page. Advance the 1451 * next node id whether or not we find a free huge page to free so 1452 * that the next attempt to free addresses the next node. 
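 *
 * Worked example (illustrative, applies equally to the _to_alloc
 * variant above): with nodes_allowed = {0, 2, 3} and a saved next node
 * of 1, successive calls return 2, 3, 0, 2, ... so pool adjustments are
 * spread round-robin across the allowed nodes.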
1453 */ 1454 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed) 1455 { 1456 int nid; 1457 1458 VM_BUG_ON(!nodes_allowed); 1459 1460 nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed); 1461 h->next_nid_to_free = next_node_allowed(nid, nodes_allowed); 1462 1463 return nid; 1464 } 1465 1466 #define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask) \ 1467 for (nr_nodes = nodes_weight(*mask); \ 1468 nr_nodes > 0 && \ 1469 ((node = hstate_next_node_to_alloc(hs, mask)) || 1); \ 1470 nr_nodes--) 1471 1472 #define for_each_node_mask_to_free(hs, nr_nodes, node, mask) \ 1473 for (nr_nodes = nodes_weight(*mask); \ 1474 nr_nodes > 0 && \ 1475 ((node = hstate_next_node_to_free(hs, mask)) || 1); \ 1476 nr_nodes--) 1477 1478 /* used to demote non-gigantic_huge pages as well */ 1479 static void __destroy_compound_gigantic_folio(struct folio *folio, 1480 unsigned int order, bool demote) 1481 { 1482 int i; 1483 int nr_pages = 1 << order; 1484 struct page *p; 1485 1486 atomic_set(&folio->_entire_mapcount, 0); 1487 atomic_set(&folio->_nr_pages_mapped, 0); 1488 atomic_set(&folio->_pincount, 0); 1489 1490 for (i = 1; i < nr_pages; i++) { 1491 p = folio_page(folio, i); 1492 p->flags &= ~PAGE_FLAGS_CHECK_AT_FREE; 1493 p->mapping = NULL; 1494 clear_compound_head(p); 1495 if (!demote) 1496 set_page_refcounted(p); 1497 } 1498 1499 __folio_clear_head(folio); 1500 } 1501 1502 static void destroy_compound_hugetlb_folio_for_demote(struct folio *folio, 1503 unsigned int order) 1504 { 1505 __destroy_compound_gigantic_folio(folio, order, true); 1506 } 1507 1508 #ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE 1509 static void destroy_compound_gigantic_folio(struct folio *folio, 1510 unsigned int order) 1511 { 1512 __destroy_compound_gigantic_folio(folio, order, false); 1513 } 1514 1515 static void free_gigantic_folio(struct folio *folio, unsigned int order) 1516 { 1517 /* 1518 * If the page isn't allocated using the cma allocator, 1519 * cma_release() returns false. 1520 */ 1521 #ifdef CONFIG_CMA 1522 int nid = folio_nid(folio); 1523 1524 if (cma_release(hugetlb_cma[nid], &folio->page, 1 << order)) 1525 return; 1526 #endif 1527 1528 free_contig_range(folio_pfn(folio), 1 << order); 1529 } 1530 1531 #ifdef CONFIG_CONTIG_ALLOC 1532 static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask, 1533 int nid, nodemask_t *nodemask) 1534 { 1535 struct page *page; 1536 unsigned long nr_pages = pages_per_huge_page(h); 1537 if (nid == NUMA_NO_NODE) 1538 nid = numa_mem_id(); 1539 1540 #ifdef CONFIG_CMA 1541 { 1542 int node; 1543 1544 if (hugetlb_cma[nid]) { 1545 page = cma_alloc(hugetlb_cma[nid], nr_pages, 1546 huge_page_order(h), true); 1547 if (page) 1548 return page_folio(page); 1549 } 1550 1551 if (!(gfp_mask & __GFP_THISNODE)) { 1552 for_each_node_mask(node, *nodemask) { 1553 if (node == nid || !hugetlb_cma[node]) 1554 continue; 1555 1556 page = cma_alloc(hugetlb_cma[node], nr_pages, 1557 huge_page_order(h), true); 1558 if (page) 1559 return page_folio(page); 1560 } 1561 } 1562 } 1563 #endif 1564 1565 page = alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask); 1566 return page ? 
page_folio(page) : NULL; 1567 } 1568 1569 #else /* !CONFIG_CONTIG_ALLOC */ 1570 static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask, 1571 int nid, nodemask_t *nodemask) 1572 { 1573 return NULL; 1574 } 1575 #endif /* CONFIG_CONTIG_ALLOC */ 1576 1577 #else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */ 1578 static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask, 1579 int nid, nodemask_t *nodemask) 1580 { 1581 return NULL; 1582 } 1583 static inline void free_gigantic_folio(struct folio *folio, 1584 unsigned int order) { } 1585 static inline void destroy_compound_gigantic_folio(struct folio *folio, 1586 unsigned int order) { } 1587 #endif 1588 1589 static inline void __clear_hugetlb_destructor(struct hstate *h, 1590 struct folio *folio) 1591 { 1592 lockdep_assert_held(&hugetlb_lock); 1593 1594 folio_clear_hugetlb(folio); 1595 } 1596 1597 /* 1598 * Remove hugetlb folio from lists. 1599 * If vmemmap exists for the folio, update dtor so that the folio appears 1600 * as just a compound page. Otherwise, wait until after allocating vmemmap 1601 * to update dtor. 1602 * 1603 * A reference is held on the folio, except in the case of demote. 1604 * 1605 * Must be called with hugetlb lock held. 1606 */ 1607 static void __remove_hugetlb_folio(struct hstate *h, struct folio *folio, 1608 bool adjust_surplus, 1609 bool demote) 1610 { 1611 int nid = folio_nid(folio); 1612 1613 VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio(folio), folio); 1614 VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio_rsvd(folio), folio); 1615 1616 lockdep_assert_held(&hugetlb_lock); 1617 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) 1618 return; 1619 1620 list_del(&folio->lru); 1621 1622 if (folio_test_hugetlb_freed(folio)) { 1623 h->free_huge_pages--; 1624 h->free_huge_pages_node[nid]--; 1625 } 1626 if (adjust_surplus) { 1627 h->surplus_huge_pages--; 1628 h->surplus_huge_pages_node[nid]--; 1629 } 1630 1631 /* 1632 * We can only clear the hugetlb destructor after allocating vmemmap 1633 * pages. Otherwise, someone (memory error handling) may try to write 1634 * to tail struct pages. 1635 */ 1636 if (!folio_test_hugetlb_vmemmap_optimized(folio)) 1637 __clear_hugetlb_destructor(h, folio); 1638 1639 /* 1640 * In the case of demote we do not ref count the page as it will soon 1641 * be turned into a page of smaller size. 
1642 */ 1643 if (!demote) 1644 folio_ref_unfreeze(folio, 1); 1645 1646 h->nr_huge_pages--; 1647 h->nr_huge_pages_node[nid]--; 1648 } 1649 1650 static void remove_hugetlb_folio(struct hstate *h, struct folio *folio, 1651 bool adjust_surplus) 1652 { 1653 __remove_hugetlb_folio(h, folio, adjust_surplus, false); 1654 } 1655 1656 static void remove_hugetlb_folio_for_demote(struct hstate *h, struct folio *folio, 1657 bool adjust_surplus) 1658 { 1659 __remove_hugetlb_folio(h, folio, adjust_surplus, true); 1660 } 1661 1662 static void add_hugetlb_folio(struct hstate *h, struct folio *folio, 1663 bool adjust_surplus) 1664 { 1665 int zeroed; 1666 int nid = folio_nid(folio); 1667 1668 VM_BUG_ON_FOLIO(!folio_test_hugetlb_vmemmap_optimized(folio), folio); 1669 1670 lockdep_assert_held(&hugetlb_lock); 1671 1672 INIT_LIST_HEAD(&folio->lru); 1673 h->nr_huge_pages++; 1674 h->nr_huge_pages_node[nid]++; 1675 1676 if (adjust_surplus) { 1677 h->surplus_huge_pages++; 1678 h->surplus_huge_pages_node[nid]++; 1679 } 1680 1681 folio_set_hugetlb(folio); 1682 folio_change_private(folio, NULL); 1683 /* 1684 * We have to set hugetlb_vmemmap_optimized again as above 1685 * folio_change_private(folio, NULL) cleared it. 1686 */ 1687 folio_set_hugetlb_vmemmap_optimized(folio); 1688 1689 /* 1690 * This folio is about to be managed by the hugetlb allocator and 1691 * should have no users. Drop our reference, and check for others 1692 * just in case. 1693 */ 1694 zeroed = folio_put_testzero(folio); 1695 if (unlikely(!zeroed)) 1696 /* 1697 * It is VERY unlikely soneone else has taken a ref 1698 * on the folio. In this case, we simply return as 1699 * free_huge_folio() will be called when this other ref 1700 * is dropped. 1701 */ 1702 return; 1703 1704 arch_clear_hugepage_flags(&folio->page); 1705 enqueue_hugetlb_folio(h, folio); 1706 } 1707 1708 static void __update_and_free_hugetlb_folio(struct hstate *h, 1709 struct folio *folio) 1710 { 1711 bool clear_dtor = folio_test_hugetlb_vmemmap_optimized(folio); 1712 1713 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) 1714 return; 1715 1716 /* 1717 * If we don't know which subpages are hwpoisoned, we can't free 1718 * the hugepage, so it's leaked intentionally. 1719 */ 1720 if (folio_test_hugetlb_raw_hwp_unreliable(folio)) 1721 return; 1722 1723 if (hugetlb_vmemmap_restore(h, &folio->page)) { 1724 spin_lock_irq(&hugetlb_lock); 1725 /* 1726 * If we cannot allocate vmemmap pages, just refuse to free the 1727 * page and put the page back on the hugetlb free list and treat 1728 * as a surplus page. 1729 */ 1730 add_hugetlb_folio(h, folio, true); 1731 spin_unlock_irq(&hugetlb_lock); 1732 return; 1733 } 1734 1735 /* 1736 * Move PageHWPoison flag from head page to the raw error pages, 1737 * which makes any healthy subpages reusable. 1738 */ 1739 if (unlikely(folio_test_hwpoison(folio))) 1740 folio_clear_hugetlb_hwpoison(folio); 1741 1742 /* 1743 * If vmemmap pages were allocated above, then we need to clear the 1744 * hugetlb destructor under the hugetlb lock. 1745 */ 1746 if (clear_dtor) { 1747 spin_lock_irq(&hugetlb_lock); 1748 __clear_hugetlb_destructor(h, folio); 1749 spin_unlock_irq(&hugetlb_lock); 1750 } 1751 1752 /* 1753 * Non-gigantic pages demoted from CMA allocated gigantic pages 1754 * need to be given back to CMA in free_gigantic_folio. 
1755 */ 1756 if (hstate_is_gigantic(h) || 1757 hugetlb_cma_folio(folio, huge_page_order(h))) { 1758 destroy_compound_gigantic_folio(folio, huge_page_order(h)); 1759 free_gigantic_folio(folio, huge_page_order(h)); 1760 } else { 1761 __free_pages(&folio->page, huge_page_order(h)); 1762 } 1763 } 1764 1765 /* 1766 * As update_and_free_hugetlb_folio() can be called under any context, so we cannot 1767 * use GFP_KERNEL to allocate vmemmap pages. However, we can defer the 1768 * actual freeing in a workqueue to prevent from using GFP_ATOMIC to allocate 1769 * the vmemmap pages. 1770 * 1771 * free_hpage_workfn() locklessly retrieves the linked list of pages to be 1772 * freed and frees them one-by-one. As the page->mapping pointer is going 1773 * to be cleared in free_hpage_workfn() anyway, it is reused as the llist_node 1774 * structure of a lockless linked list of huge pages to be freed. 1775 */ 1776 static LLIST_HEAD(hpage_freelist); 1777 1778 static void free_hpage_workfn(struct work_struct *work) 1779 { 1780 struct llist_node *node; 1781 1782 node = llist_del_all(&hpage_freelist); 1783 1784 while (node) { 1785 struct page *page; 1786 struct hstate *h; 1787 1788 page = container_of((struct address_space **)node, 1789 struct page, mapping); 1790 node = node->next; 1791 page->mapping = NULL; 1792 /* 1793 * The VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio) in 1794 * folio_hstate() is going to trigger because a previous call to 1795 * remove_hugetlb_folio() will clear the hugetlb bit, so do 1796 * not use folio_hstate() directly. 1797 */ 1798 h = size_to_hstate(page_size(page)); 1799 1800 __update_and_free_hugetlb_folio(h, page_folio(page)); 1801 1802 cond_resched(); 1803 } 1804 } 1805 static DECLARE_WORK(free_hpage_work, free_hpage_workfn); 1806 1807 static inline void flush_free_hpage_work(struct hstate *h) 1808 { 1809 if (hugetlb_vmemmap_optimizable(h)) 1810 flush_work(&free_hpage_work); 1811 } 1812 1813 static void update_and_free_hugetlb_folio(struct hstate *h, struct folio *folio, 1814 bool atomic) 1815 { 1816 if (!folio_test_hugetlb_vmemmap_optimized(folio) || !atomic) { 1817 __update_and_free_hugetlb_folio(h, folio); 1818 return; 1819 } 1820 1821 /* 1822 * Defer freeing to avoid using GFP_ATOMIC to allocate vmemmap pages. 1823 * 1824 * Only call schedule_work() if hpage_freelist is previously 1825 * empty. Otherwise, schedule_work() had been called but the workfn 1826 * hasn't retrieved the list yet. 1827 */ 1828 if (llist_add((struct llist_node *)&folio->mapping, &hpage_freelist)) 1829 schedule_work(&free_hpage_work); 1830 } 1831 1832 static void update_and_free_pages_bulk(struct hstate *h, struct list_head *list) 1833 { 1834 struct page *page, *t_page; 1835 struct folio *folio; 1836 1837 list_for_each_entry_safe(page, t_page, list, lru) { 1838 folio = page_folio(page); 1839 update_and_free_hugetlb_folio(h, folio, false); 1840 cond_resched(); 1841 } 1842 } 1843 1844 struct hstate *size_to_hstate(unsigned long size) 1845 { 1846 struct hstate *h; 1847 1848 for_each_hstate(h) { 1849 if (huge_page_size(h) == size) 1850 return h; 1851 } 1852 return NULL; 1853 } 1854 1855 void free_huge_folio(struct folio *folio) 1856 { 1857 /* 1858 * Can't pass hstate in here because it is called from the 1859 * compound page destructor. 
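 *
 * Rough path by which we get here (illustrative; the exact call chain
 * is an assumption about code outside this section):
 *
 *	folio_put(folio)                - last reference dropped
 *	  __folio_put()
 *	    destroy_large_folio()
 *	      free_huge_folio(folio)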
1860 */ 1861 struct hstate *h = folio_hstate(folio); 1862 int nid = folio_nid(folio); 1863 struct hugepage_subpool *spool = hugetlb_folio_subpool(folio); 1864 bool restore_reserve; 1865 unsigned long flags; 1866 1867 VM_BUG_ON_FOLIO(folio_ref_count(folio), folio); 1868 VM_BUG_ON_FOLIO(folio_mapcount(folio), folio); 1869 1870 hugetlb_set_folio_subpool(folio, NULL); 1871 if (folio_test_anon(folio)) 1872 __ClearPageAnonExclusive(&folio->page); 1873 folio->mapping = NULL; 1874 restore_reserve = folio_test_hugetlb_restore_reserve(folio); 1875 folio_clear_hugetlb_restore_reserve(folio); 1876 1877 /* 1878 * If HPageRestoreReserve was set on page, page allocation consumed a 1879 * reservation. If the page was associated with a subpool, there 1880 * would have been a page reserved in the subpool before allocation 1881 * via hugepage_subpool_get_pages(). Since we are 'restoring' the 1882 * reservation, do not call hugepage_subpool_put_pages() as this will 1883 * remove the reserved page from the subpool. 1884 */ 1885 if (!restore_reserve) { 1886 /* 1887 * A return code of zero implies that the subpool will be 1888 * under its minimum size if the reservation is not restored 1889 * after page is free. Therefore, force restore_reserve 1890 * operation. 1891 */ 1892 if (hugepage_subpool_put_pages(spool, 1) == 0) 1893 restore_reserve = true; 1894 } 1895 1896 spin_lock_irqsave(&hugetlb_lock, flags); 1897 folio_clear_hugetlb_migratable(folio); 1898 hugetlb_cgroup_uncharge_folio(hstate_index(h), 1899 pages_per_huge_page(h), folio); 1900 hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h), 1901 pages_per_huge_page(h), folio); 1902 if (restore_reserve) 1903 h->resv_huge_pages++; 1904 1905 if (folio_test_hugetlb_temporary(folio)) { 1906 remove_hugetlb_folio(h, folio, false); 1907 spin_unlock_irqrestore(&hugetlb_lock, flags); 1908 update_and_free_hugetlb_folio(h, folio, true); 1909 } else if (h->surplus_huge_pages_node[nid]) { 1910 /* remove the page from active list */ 1911 remove_hugetlb_folio(h, folio, true); 1912 spin_unlock_irqrestore(&hugetlb_lock, flags); 1913 update_and_free_hugetlb_folio(h, folio, true); 1914 } else { 1915 arch_clear_hugepage_flags(&folio->page); 1916 enqueue_hugetlb_folio(h, folio); 1917 spin_unlock_irqrestore(&hugetlb_lock, flags); 1918 } 1919 } 1920 1921 /* 1922 * Must be called with the hugetlb lock held 1923 */ 1924 static void __prep_account_new_huge_page(struct hstate *h, int nid) 1925 { 1926 lockdep_assert_held(&hugetlb_lock); 1927 h->nr_huge_pages++; 1928 h->nr_huge_pages_node[nid]++; 1929 } 1930 1931 static void __prep_new_hugetlb_folio(struct hstate *h, struct folio *folio) 1932 { 1933 hugetlb_vmemmap_optimize(h, &folio->page); 1934 INIT_LIST_HEAD(&folio->lru); 1935 folio_set_hugetlb(folio); 1936 hugetlb_set_folio_subpool(folio, NULL); 1937 set_hugetlb_cgroup(folio, NULL); 1938 set_hugetlb_cgroup_rsvd(folio, NULL); 1939 } 1940 1941 static void prep_new_hugetlb_folio(struct hstate *h, struct folio *folio, int nid) 1942 { 1943 __prep_new_hugetlb_folio(h, folio); 1944 spin_lock_irq(&hugetlb_lock); 1945 __prep_account_new_huge_page(h, nid); 1946 spin_unlock_irq(&hugetlb_lock); 1947 } 1948 1949 static bool __prep_compound_gigantic_folio(struct folio *folio, 1950 unsigned int order, bool demote) 1951 { 1952 int i, j; 1953 int nr_pages = 1 << order; 1954 struct page *p; 1955 1956 __folio_clear_reserved(folio); 1957 for (i = 0; i < nr_pages; i++) { 1958 p = folio_page(folio, i); 1959 1960 /* 1961 * For gigantic hugepages allocated through bootmem at 1962 * boot, it's safer to be 
consistent with the not-gigantic 1963 * hugepages and clear the PG_reserved bit from all tail pages 1964 * too. Otherwise drivers using get_user_pages() to access tail 1965 * pages may get the reference counting wrong if they see 1966 * PG_reserved set on a tail page (despite the head page not 1967 * having PG_reserved set). Enforcing this consistency between 1968 * head and tail pages allows drivers to optimize away a check 1969 * on the head page when they need know if put_page() is needed 1970 * after get_user_pages(). 1971 */ 1972 if (i != 0) /* head page cleared above */ 1973 __ClearPageReserved(p); 1974 /* 1975 * Subtle and very unlikely 1976 * 1977 * Gigantic 'page allocators' such as memblock or cma will 1978 * return a set of pages with each page ref counted. We need 1979 * to turn this set of pages into a compound page with tail 1980 * page ref counts set to zero. Code such as speculative page 1981 * cache adding could take a ref on a 'to be' tail page. 1982 * We need to respect any increased ref count, and only set 1983 * the ref count to zero if count is currently 1. If count 1984 * is not 1, we return an error. An error return indicates 1985 * the set of pages can not be converted to a gigantic page. 1986 * The caller who allocated the pages should then discard the 1987 * pages using the appropriate free interface. 1988 * 1989 * In the case of demote, the ref count will be zero. 1990 */ 1991 if (!demote) { 1992 if (!page_ref_freeze(p, 1)) { 1993 pr_warn("HugeTLB page can not be used due to unexpected inflated ref count\n"); 1994 goto out_error; 1995 } 1996 } else { 1997 VM_BUG_ON_PAGE(page_count(p), p); 1998 } 1999 if (i != 0) 2000 set_compound_head(p, &folio->page); 2001 } 2002 __folio_set_head(folio); 2003 /* we rely on prep_new_hugetlb_folio to set the destructor */ 2004 folio_set_order(folio, order); 2005 atomic_set(&folio->_entire_mapcount, -1); 2006 atomic_set(&folio->_nr_pages_mapped, 0); 2007 atomic_set(&folio->_pincount, 0); 2008 return true; 2009 2010 out_error: 2011 /* undo page modifications made above */ 2012 for (j = 0; j < i; j++) { 2013 p = folio_page(folio, j); 2014 if (j != 0) 2015 clear_compound_head(p); 2016 set_page_refcounted(p); 2017 } 2018 /* need to clear PG_reserved on remaining tail pages */ 2019 for (; j < nr_pages; j++) { 2020 p = folio_page(folio, j); 2021 __ClearPageReserved(p); 2022 } 2023 return false; 2024 } 2025 2026 static bool prep_compound_gigantic_folio(struct folio *folio, 2027 unsigned int order) 2028 { 2029 return __prep_compound_gigantic_folio(folio, order, false); 2030 } 2031 2032 static bool prep_compound_gigantic_folio_for_demote(struct folio *folio, 2033 unsigned int order) 2034 { 2035 return __prep_compound_gigantic_folio(folio, order, true); 2036 } 2037 2038 /* 2039 * PageHuge() only returns true for hugetlbfs pages, but not for normal or 2040 * transparent huge pages. See the PageTransHuge() documentation for more 2041 * details. 2042 */ 2043 int PageHuge(struct page *page) 2044 { 2045 struct folio *folio; 2046 2047 if (!PageCompound(page)) 2048 return 0; 2049 folio = page_folio(page); 2050 return folio_test_hugetlb(folio); 2051 } 2052 EXPORT_SYMBOL_GPL(PageHuge); 2053 2054 /* 2055 * Find and lock address space (mapping) in write mode. 2056 * 2057 * Upon entry, the page is locked which means that page_mapping() is 2058 * stable. Due to locking order, we can only trylock_write. If we can 2059 * not get the lock, simply return NULL to caller. 
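 *
 * A minimal caller sketch (illustrative only, not a specific in-tree user;
 * it assumes the caller already holds the page lock and releases the
 * i_mmap_rwsem with i_mmap_unlock_write() when done):
 *
 *	mapping = hugetlb_page_mapping_lock_write(hpage);
 *	if (!mapping)
 *		return;
 *	... operate on the shared mapping ...
 *	i_mmap_unlock_write(mapping);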
2060 */ 2061 struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage) 2062 { 2063 struct address_space *mapping = page_mapping(hpage); 2064 2065 if (!mapping) 2066 return mapping; 2067 2068 if (i_mmap_trylock_write(mapping)) 2069 return mapping; 2070 2071 return NULL; 2072 } 2073 2074 pgoff_t hugetlb_basepage_index(struct page *page) 2075 { 2076 struct page *page_head = compound_head(page); 2077 pgoff_t index = page_index(page_head); 2078 unsigned long compound_idx; 2079 2080 if (compound_order(page_head) > MAX_ORDER) 2081 compound_idx = page_to_pfn(page) - page_to_pfn(page_head); 2082 else 2083 compound_idx = page - page_head; 2084 2085 return (index << compound_order(page_head)) + compound_idx; 2086 } 2087 2088 static struct folio *alloc_buddy_hugetlb_folio(struct hstate *h, 2089 gfp_t gfp_mask, int nid, nodemask_t *nmask, 2090 nodemask_t *node_alloc_noretry) 2091 { 2092 int order = huge_page_order(h); 2093 struct page *page; 2094 bool alloc_try_hard = true; 2095 bool retry = true; 2096 2097 /* 2098 * By default we always try hard to allocate the page with 2099 * __GFP_RETRY_MAYFAIL flag. However, if we are allocating pages in 2100 * a loop (to adjust global huge page counts) and previous allocation 2101 * failed, do not continue to try hard on the same node. Use the 2102 * node_alloc_noretry bitmap to manage this state information. 2103 */ 2104 if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry)) 2105 alloc_try_hard = false; 2106 gfp_mask |= __GFP_COMP|__GFP_NOWARN; 2107 if (alloc_try_hard) 2108 gfp_mask |= __GFP_RETRY_MAYFAIL; 2109 if (nid == NUMA_NO_NODE) 2110 nid = numa_mem_id(); 2111 retry: 2112 page = __alloc_pages(gfp_mask, order, nid, nmask); 2113 2114 /* Freeze head page */ 2115 if (page && !page_ref_freeze(page, 1)) { 2116 __free_pages(page, order); 2117 if (retry) { /* retry once */ 2118 retry = false; 2119 goto retry; 2120 } 2121 /* WOW! twice in a row. */ 2122 pr_warn("HugeTLB head page unexpected inflated ref count\n"); 2123 page = NULL; 2124 } 2125 2126 /* 2127 * If we did not specify __GFP_RETRY_MAYFAIL, but still got a page this 2128 * indicates an overall state change. Clear bit so that we resume 2129 * normal 'try hard' allocations. 2130 */ 2131 if (node_alloc_noretry && page && !alloc_try_hard) 2132 node_clear(nid, *node_alloc_noretry); 2133 2134 /* 2135 * If we tried hard to get a page but failed, set bit so that 2136 * subsequent attempts will not try as hard until there is an 2137 * overall state change. 2138 */ 2139 if (node_alloc_noretry && !page && alloc_try_hard) 2140 node_set(nid, *node_alloc_noretry); 2141 2142 if (!page) { 2143 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL); 2144 return NULL; 2145 } 2146 2147 __count_vm_event(HTLB_BUDDY_PGALLOC); 2148 return page_folio(page); 2149 } 2150 2151 /* 2152 * Common helper to allocate a fresh hugetlb page. All specific allocators 2153 * should use this function to get new hugetlb pages 2154 * 2155 * Note that returned page is 'frozen': ref count of head page and all tail 2156 * pages is zero. 
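 *
 * Sketch of how callers in this file handle the frozen reference; both
 * patterns appear further below and are shown here only as an illustration.
 * Either hand the folio straight to the pool:
 *
 *	free_huge_folio(folio);
 *
 * or give it a normal reference count before handing it out:
 *
 *	folio_ref_unfreeze(folio, 1);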
2157 */ 2158 static struct folio *alloc_fresh_hugetlb_folio(struct hstate *h, 2159 gfp_t gfp_mask, int nid, nodemask_t *nmask, 2160 nodemask_t *node_alloc_noretry) 2161 { 2162 struct folio *folio; 2163 bool retry = false; 2164 2165 retry: 2166 if (hstate_is_gigantic(h)) 2167 folio = alloc_gigantic_folio(h, gfp_mask, nid, nmask); 2168 else 2169 folio = alloc_buddy_hugetlb_folio(h, gfp_mask, 2170 nid, nmask, node_alloc_noretry); 2171 if (!folio) 2172 return NULL; 2173 if (hstate_is_gigantic(h)) { 2174 if (!prep_compound_gigantic_folio(folio, huge_page_order(h))) { 2175 /* 2176 * Rare failure to convert pages to compound page. 2177 * Free pages and try again - ONCE! 2178 */ 2179 free_gigantic_folio(folio, huge_page_order(h)); 2180 if (!retry) { 2181 retry = true; 2182 goto retry; 2183 } 2184 return NULL; 2185 } 2186 } 2187 prep_new_hugetlb_folio(h, folio, folio_nid(folio)); 2188 2189 return folio; 2190 } 2191 2192 /* 2193 * Allocates a fresh page to the hugetlb allocator pool in the node interleaved 2194 * manner. 2195 */ 2196 static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed, 2197 nodemask_t *node_alloc_noretry) 2198 { 2199 struct folio *folio; 2200 int nr_nodes, node; 2201 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; 2202 2203 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) { 2204 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, node, 2205 nodes_allowed, node_alloc_noretry); 2206 if (folio) { 2207 free_huge_folio(folio); /* free it into the hugepage allocator */ 2208 return 1; 2209 } 2210 } 2211 2212 return 0; 2213 } 2214 2215 /* 2216 * Remove huge page from pool from next node to free. Attempt to keep 2217 * persistent huge pages more or less balanced over allowed nodes. 2218 * This routine only 'removes' the hugetlb page. The caller must make 2219 * an additional call to free the page to low level allocators. 2220 * Called with hugetlb_lock locked. 2221 */ 2222 static struct page *remove_pool_huge_page(struct hstate *h, 2223 nodemask_t *nodes_allowed, 2224 bool acct_surplus) 2225 { 2226 int nr_nodes, node; 2227 struct page *page = NULL; 2228 struct folio *folio; 2229 2230 lockdep_assert_held(&hugetlb_lock); 2231 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { 2232 /* 2233 * If we're returning unused surplus pages, only examine 2234 * nodes with surplus pages. 2235 */ 2236 if ((!acct_surplus || h->surplus_huge_pages_node[node]) && 2237 !list_empty(&h->hugepage_freelists[node])) { 2238 page = list_entry(h->hugepage_freelists[node].next, 2239 struct page, lru); 2240 folio = page_folio(page); 2241 remove_hugetlb_folio(h, folio, acct_surplus); 2242 break; 2243 } 2244 } 2245 2246 return page; 2247 } 2248 2249 /* 2250 * Dissolve a given free hugepage into free buddy pages. This function does 2251 * nothing for in-use hugepages and non-hugepages. 2252 * This function returns values like below: 2253 * 2254 * -ENOMEM: failed to allocate vmemmap pages to free the freed hugepages 2255 * when the system is under memory pressure and the feature of 2256 * freeing unused vmemmap pages associated with each hugetlb page 2257 * is enabled. 2258 * -EBUSY: failed to dissolved free hugepages or the hugepage is in-use 2259 * (allocated or reserved.) 
 *	0: successfully dissolved free hugepages or the page is not a
 *	   hugepage (considered as already dissolved)
 */
int dissolve_free_huge_page(struct page *page)
{
	int rc = -EBUSY;
	struct folio *folio = page_folio(page);

retry:
	/* Do not disrupt the normal path by needlessly taking hugetlb_lock */
	if (!folio_test_hugetlb(folio))
		return 0;

	spin_lock_irq(&hugetlb_lock);
	if (!folio_test_hugetlb(folio)) {
		rc = 0;
		goto out;
	}

	if (!folio_ref_count(folio)) {
		struct hstate *h = folio_hstate(folio);

		if (!available_huge_pages(h))
			goto out;

		/*
		 * We should make sure that the page is already on the free list
		 * when it is dissolved.
		 */
		if (unlikely(!folio_test_hugetlb_freed(folio))) {
			spin_unlock_irq(&hugetlb_lock);
			cond_resched();

			/*
			 * Theoretically we should return -EBUSY when we
			 * encounter this race.  In practice the race window is
			 * quite small, so retrying gives us a good chance of
			 * dissolving the page successfully and improves the
			 * overall success rate.
			 */
			goto retry;
		}

		remove_hugetlb_folio(h, folio, false);
		h->max_huge_pages--;
		spin_unlock_irq(&hugetlb_lock);

		/*
		 * Normally update_and_free_hugetlb_folio will allocate the
		 * required vmemmap before freeing the page.  It will fail to
		 * free the page if it cannot allocate the required vmemmap, in
		 * which case max_huge_pages must be adjusted back.  Attempt to
		 * allocate the vmemmap here so that we can take appropriate
		 * action on failure.
		 */
		rc = hugetlb_vmemmap_restore(h, &folio->page);
		if (!rc) {
			update_and_free_hugetlb_folio(h, folio, false);
		} else {
			spin_lock_irq(&hugetlb_lock);
			add_hugetlb_folio(h, folio, false);
			h->max_huge_pages++;
			spin_unlock_irq(&hugetlb_lock);
		}

		return rc;
	}
out:
	spin_unlock_irq(&hugetlb_lock);
	return rc;
}

/*
 * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
 * make specified memory blocks removable from the system.
 * Note that this will dissolve a free gigantic hugepage completely, if any
 * part of it lies within the given range.
 * Also note that if dissolve_free_huge_page() returns with an error, all
 * free hugepages that were dissolved before that error are lost.
 */
int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct page *page;
	int rc = 0;
	unsigned int order;
	struct hstate *h;

	if (!hugepages_supported())
		return rc;

	order = huge_page_order(&default_hstate);
	for_each_hstate(h)
		order = min(order, huge_page_order(h));

	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order) {
		page = pfn_to_page(pfn);
		rc = dissolve_free_huge_page(page);
		if (rc)
			break;
	}

	return rc;
}

/*
 * Allocates a fresh surplus page from the page allocator.
 */
static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
		gfp_t gfp_mask, int nid, nodemask_t *nmask)
{
	struct folio *folio = NULL;

	if (hstate_is_gigantic(h))
		return NULL;

	spin_lock_irq(&hugetlb_lock);
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
		goto out_unlock;
	spin_unlock_irq(&hugetlb_lock);

	folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
	if (!folio)
		return NULL;

	spin_lock_irq(&hugetlb_lock);
	/*
	 * We could have raced with the pool size change.
	 * Double check that and simply deallocate the new page
	 * if we would end up overcommitting the surplus.  Abuse the
	 * temporary-page flag to work around the nasty free_huge_folio
	 * code flow.
	 */
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
		folio_set_hugetlb_temporary(folio);
		spin_unlock_irq(&hugetlb_lock);
		free_huge_folio(folio);
		return NULL;
	}

	h->surplus_huge_pages++;
	h->surplus_huge_pages_node[folio_nid(folio)]++;

out_unlock:
	spin_unlock_irq(&hugetlb_lock);

	return folio;
}

static struct folio *alloc_migrate_hugetlb_folio(struct hstate *h, gfp_t gfp_mask,
				     int nid, nodemask_t *nmask)
{
	struct folio *folio;

	if (hstate_is_gigantic(h))
		return NULL;

	folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
	if (!folio)
		return NULL;

	/* fresh huge pages are frozen */
	folio_ref_unfreeze(folio, 1);
	/*
	 * We do not account these pages as surplus because they are only
	 * temporary and will be released properly on the last reference.
	 */
	folio_set_hugetlb_temporary(folio);

	return folio;
}

/*
 * Use the VMA's mpolicy to allocate a huge page from the buddy.
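 *
 * Illustrative use (this mirrors the fallback in alloc_hugetlb_folio()
 * further below; it is a sketch, not an additional caller):
 *
 *	folio = dequeue_hugetlb_folio_vma(h, vma, addr, avoid_reserve, gbl_chg);
 *	if (!folio)
 *		folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr);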
2434 */ 2435 static 2436 struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h, 2437 struct vm_area_struct *vma, unsigned long addr) 2438 { 2439 struct folio *folio = NULL; 2440 struct mempolicy *mpol; 2441 gfp_t gfp_mask = htlb_alloc_mask(h); 2442 int nid; 2443 nodemask_t *nodemask; 2444 2445 nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask); 2446 if (mpol_is_preferred_many(mpol)) { 2447 gfp_t gfp = gfp_mask | __GFP_NOWARN; 2448 2449 gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL); 2450 folio = alloc_surplus_hugetlb_folio(h, gfp, nid, nodemask); 2451 2452 /* Fallback to all nodes if page==NULL */ 2453 nodemask = NULL; 2454 } 2455 2456 if (!folio) 2457 folio = alloc_surplus_hugetlb_folio(h, gfp_mask, nid, nodemask); 2458 mpol_cond_put(mpol); 2459 return folio; 2460 } 2461 2462 /* folio migration callback function */ 2463 struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid, 2464 nodemask_t *nmask, gfp_t gfp_mask) 2465 { 2466 spin_lock_irq(&hugetlb_lock); 2467 if (available_huge_pages(h)) { 2468 struct folio *folio; 2469 2470 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, 2471 preferred_nid, nmask); 2472 if (folio) { 2473 spin_unlock_irq(&hugetlb_lock); 2474 return folio; 2475 } 2476 } 2477 spin_unlock_irq(&hugetlb_lock); 2478 2479 return alloc_migrate_hugetlb_folio(h, gfp_mask, preferred_nid, nmask); 2480 } 2481 2482 /* mempolicy aware migration callback */ 2483 struct folio *alloc_hugetlb_folio_vma(struct hstate *h, struct vm_area_struct *vma, 2484 unsigned long address) 2485 { 2486 struct mempolicy *mpol; 2487 nodemask_t *nodemask; 2488 struct folio *folio; 2489 gfp_t gfp_mask; 2490 int node; 2491 2492 gfp_mask = htlb_alloc_mask(h); 2493 node = huge_node(vma, address, gfp_mask, &mpol, &nodemask); 2494 folio = alloc_hugetlb_folio_nodemask(h, node, nodemask, gfp_mask); 2495 mpol_cond_put(mpol); 2496 2497 return folio; 2498 } 2499 2500 /* 2501 * Increase the hugetlb pool such that it can accommodate a reservation 2502 * of size 'delta'. 2503 */ 2504 static int gather_surplus_pages(struct hstate *h, long delta) 2505 __must_hold(&hugetlb_lock) 2506 { 2507 LIST_HEAD(surplus_list); 2508 struct folio *folio, *tmp; 2509 int ret; 2510 long i; 2511 long needed, allocated; 2512 bool alloc_ok = true; 2513 2514 lockdep_assert_held(&hugetlb_lock); 2515 needed = (h->resv_huge_pages + delta) - h->free_huge_pages; 2516 if (needed <= 0) { 2517 h->resv_huge_pages += delta; 2518 return 0; 2519 } 2520 2521 allocated = 0; 2522 2523 ret = -ENOMEM; 2524 retry: 2525 spin_unlock_irq(&hugetlb_lock); 2526 for (i = 0; i < needed; i++) { 2527 folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h), 2528 NUMA_NO_NODE, NULL); 2529 if (!folio) { 2530 alloc_ok = false; 2531 break; 2532 } 2533 list_add(&folio->lru, &surplus_list); 2534 cond_resched(); 2535 } 2536 allocated += i; 2537 2538 /* 2539 * After retaking hugetlb_lock, we need to recalculate 'needed' 2540 * because either resv_huge_pages or free_huge_pages may have changed. 2541 */ 2542 spin_lock_irq(&hugetlb_lock); 2543 needed = (h->resv_huge_pages + delta) - 2544 (h->free_huge_pages + allocated); 2545 if (needed > 0) { 2546 if (alloc_ok) 2547 goto retry; 2548 /* 2549 * We were not able to allocate enough pages to 2550 * satisfy the entire reservation so we free what 2551 * we've allocated so far. 2552 */ 2553 goto free; 2554 } 2555 /* 2556 * The surplus_list now contains _at_least_ the number of extra pages 2557 * needed to accommodate the reservation. 
Add the appropriate number 2558 * of pages to the hugetlb pool and free the extras back to the buddy 2559 * allocator. Commit the entire reservation here to prevent another 2560 * process from stealing the pages as they are added to the pool but 2561 * before they are reserved. 2562 */ 2563 needed += allocated; 2564 h->resv_huge_pages += delta; 2565 ret = 0; 2566 2567 /* Free the needed pages to the hugetlb pool */ 2568 list_for_each_entry_safe(folio, tmp, &surplus_list, lru) { 2569 if ((--needed) < 0) 2570 break; 2571 /* Add the page to the hugetlb allocator */ 2572 enqueue_hugetlb_folio(h, folio); 2573 } 2574 free: 2575 spin_unlock_irq(&hugetlb_lock); 2576 2577 /* 2578 * Free unnecessary surplus pages to the buddy allocator. 2579 * Pages have no ref count, call free_huge_folio directly. 2580 */ 2581 list_for_each_entry_safe(folio, tmp, &surplus_list, lru) 2582 free_huge_folio(folio); 2583 spin_lock_irq(&hugetlb_lock); 2584 2585 return ret; 2586 } 2587 2588 /* 2589 * This routine has two main purposes: 2590 * 1) Decrement the reservation count (resv_huge_pages) by the value passed 2591 * in unused_resv_pages. This corresponds to the prior adjustments made 2592 * to the associated reservation map. 2593 * 2) Free any unused surplus pages that may have been allocated to satisfy 2594 * the reservation. As many as unused_resv_pages may be freed. 2595 */ 2596 static void return_unused_surplus_pages(struct hstate *h, 2597 unsigned long unused_resv_pages) 2598 { 2599 unsigned long nr_pages; 2600 struct page *page; 2601 LIST_HEAD(page_list); 2602 2603 lockdep_assert_held(&hugetlb_lock); 2604 /* Uncommit the reservation */ 2605 h->resv_huge_pages -= unused_resv_pages; 2606 2607 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) 2608 goto out; 2609 2610 /* 2611 * Part (or even all) of the reservation could have been backed 2612 * by pre-allocated pages. Only free surplus pages. 2613 */ 2614 nr_pages = min(unused_resv_pages, h->surplus_huge_pages); 2615 2616 /* 2617 * We want to release as many surplus pages as possible, spread 2618 * evenly across all nodes with memory. Iterate across these nodes 2619 * until we can no longer free unreserved surplus pages. This occurs 2620 * when the nodes with surplus pages have no free pages. 2621 * remove_pool_huge_page() will balance the freed pages across the 2622 * on-line nodes with memory and will handle the hstate accounting. 2623 */ 2624 while (nr_pages--) { 2625 page = remove_pool_huge_page(h, &node_states[N_MEMORY], 1); 2626 if (!page) 2627 goto out; 2628 2629 list_add(&page->lru, &page_list); 2630 } 2631 2632 out: 2633 spin_unlock_irq(&hugetlb_lock); 2634 update_and_free_pages_bulk(h, &page_list); 2635 spin_lock_irq(&hugetlb_lock); 2636 } 2637 2638 2639 /* 2640 * vma_needs_reservation, vma_commit_reservation and vma_end_reservation 2641 * are used by the huge page allocation routines to manage reservations. 2642 * 2643 * vma_needs_reservation is called to determine if the huge page at addr 2644 * within the vma has an associated reservation. If a reservation is 2645 * needed, the value 1 is returned. The caller is then responsible for 2646 * managing the global reservation and subpool usage counts. After 2647 * the huge page has been allocated, vma_commit_reservation is called 2648 * to add the page to the reservation map. If the page allocation fails, 2649 * the reservation must be ended instead of committed. vma_end_reservation 2650 * is called in such cases. 
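 *
 * A minimal sketch of the protocol (illustrative; alloc_hugetlb_folio()
 * further below follows this pattern):
 *
 *	chg = vma_needs_reservation(h, vma, addr);
 *	if (chg < 0)
 *		return -ENOMEM;
 *	... allocate the huge page ...
 *	if (allocation failed) {
 *		vma_end_reservation(h, vma, addr);
 *		return -ENOSPC;
 *	}
 *	vma_commit_reservation(h, vma, addr);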
2651 * 2652 * In the normal case, vma_commit_reservation returns the same value 2653 * as the preceding vma_needs_reservation call. The only time this 2654 * is not the case is if a reserve map was changed between calls. It 2655 * is the responsibility of the caller to notice the difference and 2656 * take appropriate action. 2657 * 2658 * vma_add_reservation is used in error paths where a reservation must 2659 * be restored when a newly allocated huge page must be freed. It is 2660 * to be called after calling vma_needs_reservation to determine if a 2661 * reservation exists. 2662 * 2663 * vma_del_reservation is used in error paths where an entry in the reserve 2664 * map was created during huge page allocation and must be removed. It is to 2665 * be called after calling vma_needs_reservation to determine if a reservation 2666 * exists. 2667 */ 2668 enum vma_resv_mode { 2669 VMA_NEEDS_RESV, 2670 VMA_COMMIT_RESV, 2671 VMA_END_RESV, 2672 VMA_ADD_RESV, 2673 VMA_DEL_RESV, 2674 }; 2675 static long __vma_reservation_common(struct hstate *h, 2676 struct vm_area_struct *vma, unsigned long addr, 2677 enum vma_resv_mode mode) 2678 { 2679 struct resv_map *resv; 2680 pgoff_t idx; 2681 long ret; 2682 long dummy_out_regions_needed; 2683 2684 resv = vma_resv_map(vma); 2685 if (!resv) 2686 return 1; 2687 2688 idx = vma_hugecache_offset(h, vma, addr); 2689 switch (mode) { 2690 case VMA_NEEDS_RESV: 2691 ret = region_chg(resv, idx, idx + 1, &dummy_out_regions_needed); 2692 /* We assume that vma_reservation_* routines always operate on 2693 * 1 page, and that adding to resv map a 1 page entry can only 2694 * ever require 1 region. 2695 */ 2696 VM_BUG_ON(dummy_out_regions_needed != 1); 2697 break; 2698 case VMA_COMMIT_RESV: 2699 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL); 2700 /* region_add calls of range 1 should never fail. */ 2701 VM_BUG_ON(ret < 0); 2702 break; 2703 case VMA_END_RESV: 2704 region_abort(resv, idx, idx + 1, 1); 2705 ret = 0; 2706 break; 2707 case VMA_ADD_RESV: 2708 if (vma->vm_flags & VM_MAYSHARE) { 2709 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL); 2710 /* region_add calls of range 1 should never fail. */ 2711 VM_BUG_ON(ret < 0); 2712 } else { 2713 region_abort(resv, idx, idx + 1, 1); 2714 ret = region_del(resv, idx, idx + 1); 2715 } 2716 break; 2717 case VMA_DEL_RESV: 2718 if (vma->vm_flags & VM_MAYSHARE) { 2719 region_abort(resv, idx, idx + 1, 1); 2720 ret = region_del(resv, idx, idx + 1); 2721 } else { 2722 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL); 2723 /* region_add calls of range 1 should never fail. */ 2724 VM_BUG_ON(ret < 0); 2725 } 2726 break; 2727 default: 2728 BUG(); 2729 } 2730 2731 if (vma->vm_flags & VM_MAYSHARE || mode == VMA_DEL_RESV) 2732 return ret; 2733 /* 2734 * We know private mapping must have HPAGE_RESV_OWNER set. 2735 * 2736 * In most cases, reserves always exist for private mappings. 2737 * However, a file associated with mapping could have been 2738 * hole punched or truncated after reserves were consumed. 2739 * As subsequent fault on such a range will not use reserves. 2740 * Subtle - The reserve map for private mappings has the 2741 * opposite meaning than that of shared mappings. If NO 2742 * entry is in the reserve map, it means a reservation exists. 2743 * If an entry exists in the reserve map, it means the 2744 * reservation has already been consumed. As a result, the 2745 * return value of this routine is the opposite of the 2746 * value returned from reserve map manipulation routines above. 
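	 *
	 * Concretely, for the private-mapping case (a summary of the code
	 * below, not a new rule):
	 *
	 *	reserve map routine returned > 0 (entry exists)  ->  return 0
	 *	reserve map routine returned   0 (no entry)      ->  return 1
	 *	reserve map routine returned < 0 (error)         ->  return ret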
2747 */ 2748 if (ret > 0) 2749 return 0; 2750 if (ret == 0) 2751 return 1; 2752 return ret; 2753 } 2754 2755 static long vma_needs_reservation(struct hstate *h, 2756 struct vm_area_struct *vma, unsigned long addr) 2757 { 2758 return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV); 2759 } 2760 2761 static long vma_commit_reservation(struct hstate *h, 2762 struct vm_area_struct *vma, unsigned long addr) 2763 { 2764 return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV); 2765 } 2766 2767 static void vma_end_reservation(struct hstate *h, 2768 struct vm_area_struct *vma, unsigned long addr) 2769 { 2770 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV); 2771 } 2772 2773 static long vma_add_reservation(struct hstate *h, 2774 struct vm_area_struct *vma, unsigned long addr) 2775 { 2776 return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV); 2777 } 2778 2779 static long vma_del_reservation(struct hstate *h, 2780 struct vm_area_struct *vma, unsigned long addr) 2781 { 2782 return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV); 2783 } 2784 2785 /* 2786 * This routine is called to restore reservation information on error paths. 2787 * It should ONLY be called for folios allocated via alloc_hugetlb_folio(), 2788 * and the hugetlb mutex should remain held when calling this routine. 2789 * 2790 * It handles two specific cases: 2791 * 1) A reservation was in place and the folio consumed the reservation. 2792 * hugetlb_restore_reserve is set in the folio. 2793 * 2) No reservation was in place for the page, so hugetlb_restore_reserve is 2794 * not set. However, alloc_hugetlb_folio always updates the reserve map. 2795 * 2796 * In case 1, free_huge_folio later in the error path will increment the 2797 * global reserve count. But, free_huge_folio does not have enough context 2798 * to adjust the reservation map. This case deals primarily with private 2799 * mappings. Adjust the reserve map here to be consistent with global 2800 * reserve count adjustments to be made by free_huge_folio. Make sure the 2801 * reserve map indicates there is a reservation present. 2802 * 2803 * In case 2, simply undo reserve map modifications done by alloc_hugetlb_folio. 2804 */ 2805 void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma, 2806 unsigned long address, struct folio *folio) 2807 { 2808 long rc = vma_needs_reservation(h, vma, address); 2809 2810 if (folio_test_hugetlb_restore_reserve(folio)) { 2811 if (unlikely(rc < 0)) 2812 /* 2813 * Rare out of memory condition in reserve map 2814 * manipulation. Clear hugetlb_restore_reserve so 2815 * that global reserve count will not be incremented 2816 * by free_huge_folio. This will make it appear 2817 * as though the reservation for this folio was 2818 * consumed. This may prevent the task from 2819 * faulting in the folio at a later time. This 2820 * is better than inconsistent global huge page 2821 * accounting of reserve counts. 2822 */ 2823 folio_clear_hugetlb_restore_reserve(folio); 2824 else if (rc) 2825 (void)vma_add_reservation(h, vma, address); 2826 else 2827 vma_end_reservation(h, vma, address); 2828 } else { 2829 if (!rc) { 2830 /* 2831 * This indicates there is an entry in the reserve map 2832 * not added by alloc_hugetlb_folio. We know it was added 2833 * before the alloc_hugetlb_folio call, otherwise 2834 * hugetlb_restore_reserve would be set on the folio. 2835 * Remove the entry so that a subsequent allocation 2836 * does not consume a reservation. 
2837 */ 2838 rc = vma_del_reservation(h, vma, address); 2839 if (rc < 0) 2840 /* 2841 * VERY rare out of memory condition. Since 2842 * we can not delete the entry, set 2843 * hugetlb_restore_reserve so that the reserve 2844 * count will be incremented when the folio 2845 * is freed. This reserve will be consumed 2846 * on a subsequent allocation. 2847 */ 2848 folio_set_hugetlb_restore_reserve(folio); 2849 } else if (rc < 0) { 2850 /* 2851 * Rare out of memory condition from 2852 * vma_needs_reservation call. Memory allocation is 2853 * only attempted if a new entry is needed. Therefore, 2854 * this implies there is not an entry in the 2855 * reserve map. 2856 * 2857 * For shared mappings, no entry in the map indicates 2858 * no reservation. We are done. 2859 */ 2860 if (!(vma->vm_flags & VM_MAYSHARE)) 2861 /* 2862 * For private mappings, no entry indicates 2863 * a reservation is present. Since we can 2864 * not add an entry, set hugetlb_restore_reserve 2865 * on the folio so reserve count will be 2866 * incremented when freed. This reserve will 2867 * be consumed on a subsequent allocation. 2868 */ 2869 folio_set_hugetlb_restore_reserve(folio); 2870 } else 2871 /* 2872 * No reservation present, do nothing 2873 */ 2874 vma_end_reservation(h, vma, address); 2875 } 2876 } 2877 2878 /* 2879 * alloc_and_dissolve_hugetlb_folio - Allocate a new folio and dissolve 2880 * the old one 2881 * @h: struct hstate old page belongs to 2882 * @old_folio: Old folio to dissolve 2883 * @list: List to isolate the page in case we need to 2884 * Returns 0 on success, otherwise negated error. 2885 */ 2886 static int alloc_and_dissolve_hugetlb_folio(struct hstate *h, 2887 struct folio *old_folio, struct list_head *list) 2888 { 2889 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; 2890 int nid = folio_nid(old_folio); 2891 struct folio *new_folio; 2892 int ret = 0; 2893 2894 /* 2895 * Before dissolving the folio, we need to allocate a new one for the 2896 * pool to remain stable. Here, we allocate the folio and 'prep' it 2897 * by doing everything but actually updating counters and adding to 2898 * the pool. This simplifies and let us do most of the processing 2899 * under the lock. 2900 */ 2901 new_folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, NULL, NULL); 2902 if (!new_folio) 2903 return -ENOMEM; 2904 __prep_new_hugetlb_folio(h, new_folio); 2905 2906 retry: 2907 spin_lock_irq(&hugetlb_lock); 2908 if (!folio_test_hugetlb(old_folio)) { 2909 /* 2910 * Freed from under us. Drop new_folio too. 2911 */ 2912 goto free_new; 2913 } else if (folio_ref_count(old_folio)) { 2914 bool isolated; 2915 2916 /* 2917 * Someone has grabbed the folio, try to isolate it here. 2918 * Fail with -EBUSY if not possible. 2919 */ 2920 spin_unlock_irq(&hugetlb_lock); 2921 isolated = isolate_hugetlb(old_folio, list); 2922 ret = isolated ? 0 : -EBUSY; 2923 spin_lock_irq(&hugetlb_lock); 2924 goto free_new; 2925 } else if (!folio_test_hugetlb_freed(old_folio)) { 2926 /* 2927 * Folio's refcount is 0 but it has not been enqueued in the 2928 * freelist yet. Race window is small, so we can succeed here if 2929 * we retry. 2930 */ 2931 spin_unlock_irq(&hugetlb_lock); 2932 cond_resched(); 2933 goto retry; 2934 } else { 2935 /* 2936 * Ok, old_folio is still a genuine free hugepage. Remove it from 2937 * the freelist and decrease the counters. These will be 2938 * incremented again when calling __prep_account_new_huge_page() 2939 * and enqueue_hugetlb_folio() for new_folio. 
The counters will 2940 * remain stable since this happens under the lock. 2941 */ 2942 remove_hugetlb_folio(h, old_folio, false); 2943 2944 /* 2945 * Ref count on new_folio is already zero as it was dropped 2946 * earlier. It can be directly added to the pool free list. 2947 */ 2948 __prep_account_new_huge_page(h, nid); 2949 enqueue_hugetlb_folio(h, new_folio); 2950 2951 /* 2952 * Folio has been replaced, we can safely free the old one. 2953 */ 2954 spin_unlock_irq(&hugetlb_lock); 2955 update_and_free_hugetlb_folio(h, old_folio, false); 2956 } 2957 2958 return ret; 2959 2960 free_new: 2961 spin_unlock_irq(&hugetlb_lock); 2962 /* Folio has a zero ref count, but needs a ref to be freed */ 2963 folio_ref_unfreeze(new_folio, 1); 2964 update_and_free_hugetlb_folio(h, new_folio, false); 2965 2966 return ret; 2967 } 2968 2969 int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list) 2970 { 2971 struct hstate *h; 2972 struct folio *folio = page_folio(page); 2973 int ret = -EBUSY; 2974 2975 /* 2976 * The page might have been dissolved from under our feet, so make sure 2977 * to carefully check the state under the lock. 2978 * Return success when racing as if we dissolved the page ourselves. 2979 */ 2980 spin_lock_irq(&hugetlb_lock); 2981 if (folio_test_hugetlb(folio)) { 2982 h = folio_hstate(folio); 2983 } else { 2984 spin_unlock_irq(&hugetlb_lock); 2985 return 0; 2986 } 2987 spin_unlock_irq(&hugetlb_lock); 2988 2989 /* 2990 * Fence off gigantic pages as there is a cyclic dependency between 2991 * alloc_contig_range and them. Return -ENOMEM as this has the effect 2992 * of bailing out right away without further retrying. 2993 */ 2994 if (hstate_is_gigantic(h)) 2995 return -ENOMEM; 2996 2997 if (folio_ref_count(folio) && isolate_hugetlb(folio, list)) 2998 ret = 0; 2999 else if (!folio_ref_count(folio)) 3000 ret = alloc_and_dissolve_hugetlb_folio(h, folio, list); 3001 3002 return ret; 3003 } 3004 3005 struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, 3006 unsigned long addr, int avoid_reserve) 3007 { 3008 struct hugepage_subpool *spool = subpool_vma(vma); 3009 struct hstate *h = hstate_vma(vma); 3010 struct folio *folio; 3011 long map_chg, map_commit; 3012 long gbl_chg; 3013 int ret, idx; 3014 struct hugetlb_cgroup *h_cg = NULL; 3015 bool deferred_reserve; 3016 3017 idx = hstate_index(h); 3018 /* 3019 * Examine the region/reserve map to determine if the process 3020 * has a reservation for the page to be allocated. A return 3021 * code of zero indicates a reservation exists (no change). 3022 */ 3023 map_chg = gbl_chg = vma_needs_reservation(h, vma, addr); 3024 if (map_chg < 0) 3025 return ERR_PTR(-ENOMEM); 3026 3027 /* 3028 * Processes that did not create the mapping will have no 3029 * reserves as indicated by the region/reserve map. Check 3030 * that the allocation will not exceed the subpool limit. 3031 * Allocations for MAP_NORESERVE mappings also need to be 3032 * checked against any subpool limit. 3033 */ 3034 if (map_chg || avoid_reserve) { 3035 gbl_chg = hugepage_subpool_get_pages(spool, 1); 3036 if (gbl_chg < 0) { 3037 vma_end_reservation(h, vma, addr); 3038 return ERR_PTR(-ENOSPC); 3039 } 3040 3041 /* 3042 * Even though there was no reservation in the region/reserve 3043 * map, there could be reservations associated with the 3044 * subpool that can be used. This would be indicated if the 3045 * return value of hugepage_subpool_get_pages() is zero. 3046 * However, if avoid_reserve is specified we still avoid even 3047 * the subpool reservations. 
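		 *
		 * Worked example (illustrative): a fault on a range with no
		 * entry in the reserve map gets map_chg == 1, but if the
		 * subpool (e.g. a hugetlbfs mount with min_size) still holds
		 * reserves, hugepage_subpool_get_pages() returns 0, so
		 * gbl_chg stays 0 and the dequeue below may still be
		 * satisfied from the reserved pool.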
		 */
		if (avoid_reserve)
			gbl_chg = 1;
	}

	/*
	 * If this allocation is not consuming a reservation, charge it now.
	 */
	deferred_reserve = map_chg || avoid_reserve;
	if (deferred_reserve) {
		ret = hugetlb_cgroup_charge_cgroup_rsvd(
			idx, pages_per_huge_page(h), &h_cg);
		if (ret)
			goto out_subpool_put;
	}

	ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
	if (ret)
		goto out_uncharge_cgroup_reservation;

	spin_lock_irq(&hugetlb_lock);
	/*
	 * gbl_chg is passed to indicate whether or not a page must be taken
	 * from the global free pool (global change).  gbl_chg == 0 indicates
	 * a reservation exists for the allocation.
	 */
	folio = dequeue_hugetlb_folio_vma(h, vma, addr, avoid_reserve, gbl_chg);
	if (!folio) {
		spin_unlock_irq(&hugetlb_lock);
		folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr);
		if (!folio)
			goto out_uncharge_cgroup;
		spin_lock_irq(&hugetlb_lock);
		if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
			folio_set_hugetlb_restore_reserve(folio);
			h->resv_huge_pages--;
		}
		list_add(&folio->lru, &h->hugepage_activelist);
		folio_ref_unfreeze(folio, 1);
		/* Fall through */
	}

	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, folio);
	/*
	 * If the allocation is not consuming a reservation, also store the
	 * hugetlb_cgroup pointer on the page.
	 */
	if (deferred_reserve) {
		hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h),
						  h_cg, folio);
	}

	spin_unlock_irq(&hugetlb_lock);

	hugetlb_set_folio_subpool(folio, spool);

	map_commit = vma_commit_reservation(h, vma, addr);
	if (unlikely(map_chg > map_commit)) {
		/*
		 * The page was added to the reservation map between
		 * vma_needs_reservation and vma_commit_reservation.
		 * This indicates a race with hugetlb_reserve_pages.
		 * Adjust for the subpool count incremented above AND
		 * in hugetlb_reserve_pages for the same page.  Also,
		 * the reservation count added in hugetlb_reserve_pages
		 * no longer applies.
3112 */ 3113 long rsv_adjust; 3114 3115 rsv_adjust = hugepage_subpool_put_pages(spool, 1); 3116 hugetlb_acct_memory(h, -rsv_adjust); 3117 if (deferred_reserve) 3118 hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h), 3119 pages_per_huge_page(h), folio); 3120 } 3121 return folio; 3122 3123 out_uncharge_cgroup: 3124 hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg); 3125 out_uncharge_cgroup_reservation: 3126 if (deferred_reserve) 3127 hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h), 3128 h_cg); 3129 out_subpool_put: 3130 if (map_chg || avoid_reserve) 3131 hugepage_subpool_put_pages(spool, 1); 3132 vma_end_reservation(h, vma, addr); 3133 return ERR_PTR(-ENOSPC); 3134 } 3135 3136 int alloc_bootmem_huge_page(struct hstate *h, int nid) 3137 __attribute__ ((weak, alias("__alloc_bootmem_huge_page"))); 3138 int __alloc_bootmem_huge_page(struct hstate *h, int nid) 3139 { 3140 struct huge_bootmem_page *m = NULL; /* initialize for clang */ 3141 int nr_nodes, node; 3142 3143 /* do node specific alloc */ 3144 if (nid != NUMA_NO_NODE) { 3145 m = memblock_alloc_try_nid_raw(huge_page_size(h), huge_page_size(h), 3146 0, MEMBLOCK_ALLOC_ACCESSIBLE, nid); 3147 if (!m) 3148 return 0; 3149 goto found; 3150 } 3151 /* allocate from next node when distributing huge pages */ 3152 for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) { 3153 m = memblock_alloc_try_nid_raw( 3154 huge_page_size(h), huge_page_size(h), 3155 0, MEMBLOCK_ALLOC_ACCESSIBLE, node); 3156 /* 3157 * Use the beginning of the huge page to store the 3158 * huge_bootmem_page struct (until gather_bootmem 3159 * puts them into the mem_map). 3160 */ 3161 if (!m) 3162 return 0; 3163 goto found; 3164 } 3165 3166 found: 3167 /* Put them into a private list first because mem_map is not up yet */ 3168 INIT_LIST_HEAD(&m->list); 3169 list_add(&m->list, &huge_boot_pages); 3170 m->hstate = h; 3171 return 1; 3172 } 3173 3174 /* 3175 * Put bootmem huge pages into the standard lists after mem_map is up. 3176 * Note: This only applies to gigantic (order > MAX_ORDER) pages. 3177 */ 3178 static void __init gather_bootmem_prealloc(void) 3179 { 3180 struct huge_bootmem_page *m; 3181 3182 list_for_each_entry(m, &huge_boot_pages, list) { 3183 struct page *page = virt_to_page(m); 3184 struct folio *folio = page_folio(page); 3185 struct hstate *h = m->hstate; 3186 3187 VM_BUG_ON(!hstate_is_gigantic(h)); 3188 WARN_ON(folio_ref_count(folio) != 1); 3189 if (prep_compound_gigantic_folio(folio, huge_page_order(h))) { 3190 WARN_ON(folio_test_reserved(folio)); 3191 prep_new_hugetlb_folio(h, folio, folio_nid(folio)); 3192 free_huge_folio(folio); /* add to the hugepage allocator */ 3193 } else { 3194 /* VERY unlikely inflated ref count on a tail page */ 3195 free_gigantic_folio(folio, huge_page_order(h)); 3196 } 3197 3198 /* 3199 * We need to restore the 'stolen' pages to totalram_pages 3200 * in order to fix confusing memory reports from free(1) and 3201 * other side-effects, like CommitLimit going negative. 
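		 *
		 * For scale (arithmetic only, assuming 4 KiB base pages): each
		 * 1 GiB gigantic page handed back here re-adds
		 * pages_per_huge_page(h) == 262144 pages to totalram_pages.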
3202 */ 3203 adjust_managed_page_count(page, pages_per_huge_page(h)); 3204 cond_resched(); 3205 } 3206 } 3207 static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid) 3208 { 3209 unsigned long i; 3210 char buf[32]; 3211 3212 for (i = 0; i < h->max_huge_pages_node[nid]; ++i) { 3213 if (hstate_is_gigantic(h)) { 3214 if (!alloc_bootmem_huge_page(h, nid)) 3215 break; 3216 } else { 3217 struct folio *folio; 3218 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; 3219 3220 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, 3221 &node_states[N_MEMORY], NULL); 3222 if (!folio) 3223 break; 3224 free_huge_folio(folio); /* free it into the hugepage allocator */ 3225 } 3226 cond_resched(); 3227 } 3228 if (i == h->max_huge_pages_node[nid]) 3229 return; 3230 3231 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); 3232 pr_warn("HugeTLB: allocating %u of page size %s failed node%d. Only allocated %lu hugepages.\n", 3233 h->max_huge_pages_node[nid], buf, nid, i); 3234 h->max_huge_pages -= (h->max_huge_pages_node[nid] - i); 3235 h->max_huge_pages_node[nid] = i; 3236 } 3237 3238 static void __init hugetlb_hstate_alloc_pages(struct hstate *h) 3239 { 3240 unsigned long i; 3241 nodemask_t *node_alloc_noretry; 3242 bool node_specific_alloc = false; 3243 3244 /* skip gigantic hugepages allocation if hugetlb_cma enabled */ 3245 if (hstate_is_gigantic(h) && hugetlb_cma_size) { 3246 pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n"); 3247 return; 3248 } 3249 3250 /* do node specific alloc */ 3251 for_each_online_node(i) { 3252 if (h->max_huge_pages_node[i] > 0) { 3253 hugetlb_hstate_alloc_pages_onenode(h, i); 3254 node_specific_alloc = true; 3255 } 3256 } 3257 3258 if (node_specific_alloc) 3259 return; 3260 3261 /* below will do all node balanced alloc */ 3262 if (!hstate_is_gigantic(h)) { 3263 /* 3264 * Bit mask controlling how hard we retry per-node allocations. 3265 * Ignore errors as lower level routines can deal with 3266 * node_alloc_noretry == NULL. If this kmalloc fails at boot 3267 * time, we are likely in bigger trouble. 3268 */ 3269 node_alloc_noretry = kmalloc(sizeof(*node_alloc_noretry), 3270 GFP_KERNEL); 3271 } else { 3272 /* allocations done at boot time */ 3273 node_alloc_noretry = NULL; 3274 } 3275 3276 /* bit mask controlling how hard we retry per-node allocations */ 3277 if (node_alloc_noretry) 3278 nodes_clear(*node_alloc_noretry); 3279 3280 for (i = 0; i < h->max_huge_pages; ++i) { 3281 if (hstate_is_gigantic(h)) { 3282 if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE)) 3283 break; 3284 } else if (!alloc_pool_huge_page(h, 3285 &node_states[N_MEMORY], 3286 node_alloc_noretry)) 3287 break; 3288 cond_resched(); 3289 } 3290 if (i < h->max_huge_pages) { 3291 char buf[32]; 3292 3293 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); 3294 pr_warn("HugeTLB: allocating %lu of page size %s failed. Only allocated %lu hugepages.\n", 3295 h->max_huge_pages, buf, i); 3296 h->max_huge_pages = i; 3297 } 3298 kfree(node_alloc_noretry); 3299 } 3300 3301 static void __init hugetlb_init_hstates(void) 3302 { 3303 struct hstate *h, *h2; 3304 3305 for_each_hstate(h) { 3306 /* oversize hugepages were init'ed in early boot */ 3307 if (!hstate_is_gigantic(h)) 3308 hugetlb_hstate_alloc_pages(h); 3309 3310 /* 3311 * Set demote order for each hstate. Note that 3312 * h->demote_order is initially 0. 3313 * - We can not demote gigantic pages if runtime freeing 3314 * is not supported, so skip this. 
3315 * - If CMA allocation is possible, we can not demote 3316 * HUGETLB_PAGE_ORDER or smaller size pages. 3317 */ 3318 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) 3319 continue; 3320 if (hugetlb_cma_size && h->order <= HUGETLB_PAGE_ORDER) 3321 continue; 3322 for_each_hstate(h2) { 3323 if (h2 == h) 3324 continue; 3325 if (h2->order < h->order && 3326 h2->order > h->demote_order) 3327 h->demote_order = h2->order; 3328 } 3329 } 3330 } 3331 3332 static void __init report_hugepages(void) 3333 { 3334 struct hstate *h; 3335 3336 for_each_hstate(h) { 3337 char buf[32]; 3338 3339 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); 3340 pr_info("HugeTLB: registered %s page size, pre-allocated %ld pages\n", 3341 buf, h->free_huge_pages); 3342 pr_info("HugeTLB: %d KiB vmemmap can be freed for a %s page\n", 3343 hugetlb_vmemmap_optimizable_size(h) / SZ_1K, buf); 3344 } 3345 } 3346 3347 #ifdef CONFIG_HIGHMEM 3348 static void try_to_free_low(struct hstate *h, unsigned long count, 3349 nodemask_t *nodes_allowed) 3350 { 3351 int i; 3352 LIST_HEAD(page_list); 3353 3354 lockdep_assert_held(&hugetlb_lock); 3355 if (hstate_is_gigantic(h)) 3356 return; 3357 3358 /* 3359 * Collect pages to be freed on a list, and free after dropping lock 3360 */ 3361 for_each_node_mask(i, *nodes_allowed) { 3362 struct page *page, *next; 3363 struct list_head *freel = &h->hugepage_freelists[i]; 3364 list_for_each_entry_safe(page, next, freel, lru) { 3365 if (count >= h->nr_huge_pages) 3366 goto out; 3367 if (PageHighMem(page)) 3368 continue; 3369 remove_hugetlb_folio(h, page_folio(page), false); 3370 list_add(&page->lru, &page_list); 3371 } 3372 } 3373 3374 out: 3375 spin_unlock_irq(&hugetlb_lock); 3376 update_and_free_pages_bulk(h, &page_list); 3377 spin_lock_irq(&hugetlb_lock); 3378 } 3379 #else 3380 static inline void try_to_free_low(struct hstate *h, unsigned long count, 3381 nodemask_t *nodes_allowed) 3382 { 3383 } 3384 #endif 3385 3386 /* 3387 * Increment or decrement surplus_huge_pages. Keep node-specific counters 3388 * balanced by operating on them in a round-robin fashion. 3389 * Returns 1 if an adjustment was made. 3390 */ 3391 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed, 3392 int delta) 3393 { 3394 int nr_nodes, node; 3395 3396 lockdep_assert_held(&hugetlb_lock); 3397 VM_BUG_ON(delta != -1 && delta != 1); 3398 3399 if (delta < 0) { 3400 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) { 3401 if (h->surplus_huge_pages_node[node]) 3402 goto found; 3403 } 3404 } else { 3405 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { 3406 if (h->surplus_huge_pages_node[node] < 3407 h->nr_huge_pages_node[node]) 3408 goto found; 3409 } 3410 } 3411 return 0; 3412 3413 found: 3414 h->surplus_huge_pages += delta; 3415 h->surplus_huge_pages_node[node] += delta; 3416 return 1; 3417 } 3418 3419 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages) 3420 static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid, 3421 nodemask_t *nodes_allowed) 3422 { 3423 unsigned long min_count, ret; 3424 struct page *page; 3425 LIST_HEAD(page_list); 3426 NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL); 3427 3428 /* 3429 * Bit mask controlling how hard we retry per-node allocations. 3430 * If we can not allocate the bit mask, do not attempt to allocate 3431 * the requested huge pages. 
3432 */ 3433 if (node_alloc_noretry) 3434 nodes_clear(*node_alloc_noretry); 3435 else 3436 return -ENOMEM; 3437 3438 /* 3439 * resize_lock mutex prevents concurrent adjustments to number of 3440 * pages in hstate via the proc/sysfs interfaces. 3441 */ 3442 mutex_lock(&h->resize_lock); 3443 flush_free_hpage_work(h); 3444 spin_lock_irq(&hugetlb_lock); 3445 3446 /* 3447 * Check for a node specific request. 3448 * Changing node specific huge page count may require a corresponding 3449 * change to the global count. In any case, the passed node mask 3450 * (nodes_allowed) will restrict alloc/free to the specified node. 3451 */ 3452 if (nid != NUMA_NO_NODE) { 3453 unsigned long old_count = count; 3454 3455 count += h->nr_huge_pages - h->nr_huge_pages_node[nid]; 3456 /* 3457 * User may have specified a large count value which caused the 3458 * above calculation to overflow. In this case, they wanted 3459 * to allocate as many huge pages as possible. Set count to 3460 * largest possible value to align with their intention. 3461 */ 3462 if (count < old_count) 3463 count = ULONG_MAX; 3464 } 3465 3466 /* 3467 * Gigantic pages runtime allocation depend on the capability for large 3468 * page range allocation. 3469 * If the system does not provide this feature, return an error when 3470 * the user tries to allocate gigantic pages but let the user free the 3471 * boottime allocated gigantic pages. 3472 */ 3473 if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) { 3474 if (count > persistent_huge_pages(h)) { 3475 spin_unlock_irq(&hugetlb_lock); 3476 mutex_unlock(&h->resize_lock); 3477 NODEMASK_FREE(node_alloc_noretry); 3478 return -EINVAL; 3479 } 3480 /* Fall through to decrease pool */ 3481 } 3482 3483 /* 3484 * Increase the pool size 3485 * First take pages out of surplus state. Then make up the 3486 * remaining difference by allocating fresh huge pages. 3487 * 3488 * We might race with alloc_surplus_hugetlb_folio() here and be unable 3489 * to convert a surplus huge page to a normal huge page. That is 3490 * not critical, though, it just means the overall size of the 3491 * pool might be one hugepage larger than it needs to be, but 3492 * within all the constraints specified by the sysctls. 3493 */ 3494 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) { 3495 if (!adjust_pool_surplus(h, nodes_allowed, -1)) 3496 break; 3497 } 3498 3499 while (count > persistent_huge_pages(h)) { 3500 /* 3501 * If this allocation races such that we no longer need the 3502 * page, free_huge_folio will handle it by freeing the page 3503 * and reducing the surplus. 3504 */ 3505 spin_unlock_irq(&hugetlb_lock); 3506 3507 /* yield cpu to avoid soft lockup */ 3508 cond_resched(); 3509 3510 ret = alloc_pool_huge_page(h, nodes_allowed, 3511 node_alloc_noretry); 3512 spin_lock_irq(&hugetlb_lock); 3513 if (!ret) 3514 goto out; 3515 3516 /* Bail for signals. Probably ctrl-c from user */ 3517 if (signal_pending(current)) 3518 goto out; 3519 } 3520 3521 /* 3522 * Decrease the pool size 3523 * First return free pages to the buddy allocator (being careful 3524 * to keep enough around to satisfy reservations). Then place 3525 * pages into surplus state as needed so the pool will shrink 3526 * to the desired size as pages become free. 3527 * 3528 * By placing pages into the surplus state independent of the 3529 * overcommit value, we are allowing the surplus pool size to 3530 * exceed overcommit. There are few sane options here. 
Since 3531 * alloc_surplus_hugetlb_folio() is checking the global counter, 3532 * though, we'll note that we're not allowed to exceed surplus 3533 * and won't grow the pool anywhere else. Not until one of the 3534 * sysctls are changed, or the surplus pages go out of use. 3535 */ 3536 min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages; 3537 min_count = max(count, min_count); 3538 try_to_free_low(h, min_count, nodes_allowed); 3539 3540 /* 3541 * Collect pages to be removed on list without dropping lock 3542 */ 3543 while (min_count < persistent_huge_pages(h)) { 3544 page = remove_pool_huge_page(h, nodes_allowed, 0); 3545 if (!page) 3546 break; 3547 3548 list_add(&page->lru, &page_list); 3549 } 3550 /* free the pages after dropping lock */ 3551 spin_unlock_irq(&hugetlb_lock); 3552 update_and_free_pages_bulk(h, &page_list); 3553 flush_free_hpage_work(h); 3554 spin_lock_irq(&hugetlb_lock); 3555 3556 while (count < persistent_huge_pages(h)) { 3557 if (!adjust_pool_surplus(h, nodes_allowed, 1)) 3558 break; 3559 } 3560 out: 3561 h->max_huge_pages = persistent_huge_pages(h); 3562 spin_unlock_irq(&hugetlb_lock); 3563 mutex_unlock(&h->resize_lock); 3564 3565 NODEMASK_FREE(node_alloc_noretry); 3566 3567 return 0; 3568 } 3569 3570 static int demote_free_hugetlb_folio(struct hstate *h, struct folio *folio) 3571 { 3572 int i, nid = folio_nid(folio); 3573 struct hstate *target_hstate; 3574 struct page *subpage; 3575 struct folio *inner_folio; 3576 int rc = 0; 3577 3578 target_hstate = size_to_hstate(PAGE_SIZE << h->demote_order); 3579 3580 remove_hugetlb_folio_for_demote(h, folio, false); 3581 spin_unlock_irq(&hugetlb_lock); 3582 3583 rc = hugetlb_vmemmap_restore(h, &folio->page); 3584 if (rc) { 3585 /* Allocation of vmemmmap failed, we can not demote folio */ 3586 spin_lock_irq(&hugetlb_lock); 3587 folio_ref_unfreeze(folio, 1); 3588 add_hugetlb_folio(h, folio, false); 3589 return rc; 3590 } 3591 3592 /* 3593 * Use destroy_compound_hugetlb_folio_for_demote for all huge page 3594 * sizes as it will not ref count folios. 3595 */ 3596 destroy_compound_hugetlb_folio_for_demote(folio, huge_page_order(h)); 3597 3598 /* 3599 * Taking target hstate mutex synchronizes with set_max_huge_pages. 3600 * Without the mutex, pages added to target hstate could be marked 3601 * as surplus. 3602 * 3603 * Note that we already hold h->resize_lock. To prevent deadlock, 3604 * use the convention of always taking larger size hstate mutex first. 3605 */ 3606 mutex_lock(&target_hstate->resize_lock); 3607 for (i = 0; i < pages_per_huge_page(h); 3608 i += pages_per_huge_page(target_hstate)) { 3609 subpage = folio_page(folio, i); 3610 inner_folio = page_folio(subpage); 3611 if (hstate_is_gigantic(target_hstate)) 3612 prep_compound_gigantic_folio_for_demote(inner_folio, 3613 target_hstate->order); 3614 else 3615 prep_compound_page(subpage, target_hstate->order); 3616 folio_change_private(inner_folio, NULL); 3617 prep_new_hugetlb_folio(target_hstate, inner_folio, nid); 3618 free_huge_folio(inner_folio); 3619 } 3620 mutex_unlock(&target_hstate->resize_lock); 3621 3622 spin_lock_irq(&hugetlb_lock); 3623 3624 /* 3625 * Not absolutely necessary, but for consistency update max_huge_pages 3626 * based on pool changes for the demoted page. 
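	 *
	 * Worked example (illustration only, assuming 1 GiB pages demoted to
	 * 2 MiB pages): this hstate's max_huge_pages drops by 1 while the
	 * target hstate gains 1 GiB / 2 MiB == 512 pages, matching the ratio
	 * computed below.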
3627 */ 3628 h->max_huge_pages--; 3629 target_hstate->max_huge_pages += 3630 pages_per_huge_page(h) / pages_per_huge_page(target_hstate); 3631 3632 return rc; 3633 } 3634 3635 static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed) 3636 __must_hold(&hugetlb_lock) 3637 { 3638 int nr_nodes, node; 3639 struct folio *folio; 3640 3641 lockdep_assert_held(&hugetlb_lock); 3642 3643 /* We should never get here if no demote order */ 3644 if (!h->demote_order) { 3645 pr_warn("HugeTLB: NULL demote order passed to demote_pool_huge_page.\n"); 3646 return -EINVAL; /* internal error */ 3647 } 3648 3649 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { 3650 list_for_each_entry(folio, &h->hugepage_freelists[node], lru) { 3651 if (folio_test_hwpoison(folio)) 3652 continue; 3653 return demote_free_hugetlb_folio(h, folio); 3654 } 3655 } 3656 3657 /* 3658 * Only way to get here is if all pages on free lists are poisoned. 3659 * Return -EBUSY so that caller will not retry. 3660 */ 3661 return -EBUSY; 3662 } 3663 3664 #define HSTATE_ATTR_RO(_name) \ 3665 static struct kobj_attribute _name##_attr = __ATTR_RO(_name) 3666 3667 #define HSTATE_ATTR_WO(_name) \ 3668 static struct kobj_attribute _name##_attr = __ATTR_WO(_name) 3669 3670 #define HSTATE_ATTR(_name) \ 3671 static struct kobj_attribute _name##_attr = __ATTR_RW(_name) 3672 3673 static struct kobject *hugepages_kobj; 3674 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; 3675 3676 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp); 3677 3678 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp) 3679 { 3680 int i; 3681 3682 for (i = 0; i < HUGE_MAX_HSTATE; i++) 3683 if (hstate_kobjs[i] == kobj) { 3684 if (nidp) 3685 *nidp = NUMA_NO_NODE; 3686 return &hstates[i]; 3687 } 3688 3689 return kobj_to_node_hstate(kobj, nidp); 3690 } 3691 3692 static ssize_t nr_hugepages_show_common(struct kobject *kobj, 3693 struct kobj_attribute *attr, char *buf) 3694 { 3695 struct hstate *h; 3696 unsigned long nr_huge_pages; 3697 int nid; 3698 3699 h = kobj_to_hstate(kobj, &nid); 3700 if (nid == NUMA_NO_NODE) 3701 nr_huge_pages = h->nr_huge_pages; 3702 else 3703 nr_huge_pages = h->nr_huge_pages_node[nid]; 3704 3705 return sysfs_emit(buf, "%lu\n", nr_huge_pages); 3706 } 3707 3708 static ssize_t __nr_hugepages_store_common(bool obey_mempolicy, 3709 struct hstate *h, int nid, 3710 unsigned long count, size_t len) 3711 { 3712 int err; 3713 nodemask_t nodes_allowed, *n_mask; 3714 3715 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) 3716 return -EINVAL; 3717 3718 if (nid == NUMA_NO_NODE) { 3719 /* 3720 * global hstate attribute 3721 */ 3722 if (!(obey_mempolicy && 3723 init_nodemask_of_mempolicy(&nodes_allowed))) 3724 n_mask = &node_states[N_MEMORY]; 3725 else 3726 n_mask = &nodes_allowed; 3727 } else { 3728 /* 3729 * Node specific request. count adjustment happens in 3730 * set_max_huge_pages() after acquiring hugetlb_lock. 3731 */ 3732 init_nodemask_of_node(&nodes_allowed, nid); 3733 n_mask = &nodes_allowed; 3734 } 3735 3736 err = set_max_huge_pages(h, count, nid, n_mask); 3737 3738 return err ? 
err : len; 3739 } 3740 3741 static ssize_t nr_hugepages_store_common(bool obey_mempolicy, 3742 struct kobject *kobj, const char *buf, 3743 size_t len) 3744 { 3745 struct hstate *h; 3746 unsigned long count; 3747 int nid; 3748 int err; 3749 3750 err = kstrtoul(buf, 10, &count); 3751 if (err) 3752 return err; 3753 3754 h = kobj_to_hstate(kobj, &nid); 3755 return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len); 3756 } 3757 3758 static ssize_t nr_hugepages_show(struct kobject *kobj, 3759 struct kobj_attribute *attr, char *buf) 3760 { 3761 return nr_hugepages_show_common(kobj, attr, buf); 3762 } 3763 3764 static ssize_t nr_hugepages_store(struct kobject *kobj, 3765 struct kobj_attribute *attr, const char *buf, size_t len) 3766 { 3767 return nr_hugepages_store_common(false, kobj, buf, len); 3768 } 3769 HSTATE_ATTR(nr_hugepages); 3770 3771 #ifdef CONFIG_NUMA 3772 3773 /* 3774 * hstate attribute for optionally mempolicy-based constraint on persistent 3775 * huge page alloc/free. 3776 */ 3777 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj, 3778 struct kobj_attribute *attr, 3779 char *buf) 3780 { 3781 return nr_hugepages_show_common(kobj, attr, buf); 3782 } 3783 3784 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj, 3785 struct kobj_attribute *attr, const char *buf, size_t len) 3786 { 3787 return nr_hugepages_store_common(true, kobj, buf, len); 3788 } 3789 HSTATE_ATTR(nr_hugepages_mempolicy); 3790 #endif 3791 3792 3793 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj, 3794 struct kobj_attribute *attr, char *buf) 3795 { 3796 struct hstate *h = kobj_to_hstate(kobj, NULL); 3797 return sysfs_emit(buf, "%lu\n", h->nr_overcommit_huge_pages); 3798 } 3799 3800 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj, 3801 struct kobj_attribute *attr, const char *buf, size_t count) 3802 { 3803 int err; 3804 unsigned long input; 3805 struct hstate *h = kobj_to_hstate(kobj, NULL); 3806 3807 if (hstate_is_gigantic(h)) 3808 return -EINVAL; 3809 3810 err = kstrtoul(buf, 10, &input); 3811 if (err) 3812 return err; 3813 3814 spin_lock_irq(&hugetlb_lock); 3815 h->nr_overcommit_huge_pages = input; 3816 spin_unlock_irq(&hugetlb_lock); 3817 3818 return count; 3819 } 3820 HSTATE_ATTR(nr_overcommit_hugepages); 3821 3822 static ssize_t free_hugepages_show(struct kobject *kobj, 3823 struct kobj_attribute *attr, char *buf) 3824 { 3825 struct hstate *h; 3826 unsigned long free_huge_pages; 3827 int nid; 3828 3829 h = kobj_to_hstate(kobj, &nid); 3830 if (nid == NUMA_NO_NODE) 3831 free_huge_pages = h->free_huge_pages; 3832 else 3833 free_huge_pages = h->free_huge_pages_node[nid]; 3834 3835 return sysfs_emit(buf, "%lu\n", free_huge_pages); 3836 } 3837 HSTATE_ATTR_RO(free_hugepages); 3838 3839 static ssize_t resv_hugepages_show(struct kobject *kobj, 3840 struct kobj_attribute *attr, char *buf) 3841 { 3842 struct hstate *h = kobj_to_hstate(kobj, NULL); 3843 return sysfs_emit(buf, "%lu\n", h->resv_huge_pages); 3844 } 3845 HSTATE_ATTR_RO(resv_hugepages); 3846 3847 static ssize_t surplus_hugepages_show(struct kobject *kobj, 3848 struct kobj_attribute *attr, char *buf) 3849 { 3850 struct hstate *h; 3851 unsigned long surplus_huge_pages; 3852 int nid; 3853 3854 h = kobj_to_hstate(kobj, &nid); 3855 if (nid == NUMA_NO_NODE) 3856 surplus_huge_pages = h->surplus_huge_pages; 3857 else 3858 surplus_huge_pages = h->surplus_huge_pages_node[nid]; 3859 3860 return sysfs_emit(buf, "%lu\n", surplus_huge_pages); 3861 } 3862 HSTATE_ATTR_RO(surplus_hugepages); 3863 3864 static 
ssize_t demote_store(struct kobject *kobj, 3865 struct kobj_attribute *attr, const char *buf, size_t len) 3866 { 3867 unsigned long nr_demote; 3868 unsigned long nr_available; 3869 nodemask_t nodes_allowed, *n_mask; 3870 struct hstate *h; 3871 int err; 3872 int nid; 3873 3874 err = kstrtoul(buf, 10, &nr_demote); 3875 if (err) 3876 return err; 3877 h = kobj_to_hstate(kobj, &nid); 3878 3879 if (nid != NUMA_NO_NODE) { 3880 init_nodemask_of_node(&nodes_allowed, nid); 3881 n_mask = &nodes_allowed; 3882 } else { 3883 n_mask = &node_states[N_MEMORY]; 3884 } 3885 3886 /* Synchronize with other sysfs operations modifying huge pages */ 3887 mutex_lock(&h->resize_lock); 3888 spin_lock_irq(&hugetlb_lock); 3889 3890 while (nr_demote) { 3891 /* 3892 * Check for available pages to demote each time thorough the 3893 * loop as demote_pool_huge_page will drop hugetlb_lock. 3894 */ 3895 if (nid != NUMA_NO_NODE) 3896 nr_available = h->free_huge_pages_node[nid]; 3897 else 3898 nr_available = h->free_huge_pages; 3899 nr_available -= h->resv_huge_pages; 3900 if (!nr_available) 3901 break; 3902 3903 err = demote_pool_huge_page(h, n_mask); 3904 if (err) 3905 break; 3906 3907 nr_demote--; 3908 } 3909 3910 spin_unlock_irq(&hugetlb_lock); 3911 mutex_unlock(&h->resize_lock); 3912 3913 if (err) 3914 return err; 3915 return len; 3916 } 3917 HSTATE_ATTR_WO(demote); 3918 3919 static ssize_t demote_size_show(struct kobject *kobj, 3920 struct kobj_attribute *attr, char *buf) 3921 { 3922 struct hstate *h = kobj_to_hstate(kobj, NULL); 3923 unsigned long demote_size = (PAGE_SIZE << h->demote_order) / SZ_1K; 3924 3925 return sysfs_emit(buf, "%lukB\n", demote_size); 3926 } 3927 3928 static ssize_t demote_size_store(struct kobject *kobj, 3929 struct kobj_attribute *attr, 3930 const char *buf, size_t count) 3931 { 3932 struct hstate *h, *demote_hstate; 3933 unsigned long demote_size; 3934 unsigned int demote_order; 3935 3936 demote_size = (unsigned long)memparse(buf, NULL); 3937 3938 demote_hstate = size_to_hstate(demote_size); 3939 if (!demote_hstate) 3940 return -EINVAL; 3941 demote_order = demote_hstate->order; 3942 if (demote_order < HUGETLB_PAGE_ORDER) 3943 return -EINVAL; 3944 3945 /* demote order must be smaller than hstate order */ 3946 h = kobj_to_hstate(kobj, NULL); 3947 if (demote_order >= h->order) 3948 return -EINVAL; 3949 3950 /* resize_lock synchronizes access to demote size and writes */ 3951 mutex_lock(&h->resize_lock); 3952 h->demote_order = demote_order; 3953 mutex_unlock(&h->resize_lock); 3954 3955 return count; 3956 } 3957 HSTATE_ATTR(demote_size); 3958 3959 static struct attribute *hstate_attrs[] = { 3960 &nr_hugepages_attr.attr, 3961 &nr_overcommit_hugepages_attr.attr, 3962 &free_hugepages_attr.attr, 3963 &resv_hugepages_attr.attr, 3964 &surplus_hugepages_attr.attr, 3965 #ifdef CONFIG_NUMA 3966 &nr_hugepages_mempolicy_attr.attr, 3967 #endif 3968 NULL, 3969 }; 3970 3971 static const struct attribute_group hstate_attr_group = { 3972 .attrs = hstate_attrs, 3973 }; 3974 3975 static struct attribute *hstate_demote_attrs[] = { 3976 &demote_size_attr.attr, 3977 &demote_attr.attr, 3978 NULL, 3979 }; 3980 3981 static const struct attribute_group hstate_demote_attr_group = { 3982 .attrs = hstate_demote_attrs, 3983 }; 3984 3985 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent, 3986 struct kobject **hstate_kobjs, 3987 const struct attribute_group *hstate_attr_group) 3988 { 3989 int retval; 3990 int hi = hstate_index(h); 3991 3992 hstate_kobjs[hi] = kobject_create_and_add(h->name, parent); 
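/*
 * h->name is "hugepages-<size>kB", so with the global parent this shows up
 * as e.g. /sys/kernel/mm/hugepages/hugepages-2048kB (2 MiB pages on
 * x86-64); a NULL return below means the kobject could not be created.
 */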
3993 if (!hstate_kobjs[hi]) 3994 return -ENOMEM; 3995 3996 retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group); 3997 if (retval) { 3998 kobject_put(hstate_kobjs[hi]); 3999 hstate_kobjs[hi] = NULL; 4000 return retval; 4001 } 4002 4003 if (h->demote_order) { 4004 retval = sysfs_create_group(hstate_kobjs[hi], 4005 &hstate_demote_attr_group); 4006 if (retval) { 4007 pr_warn("HugeTLB unable to create demote interfaces for %s\n", h->name); 4008 sysfs_remove_group(hstate_kobjs[hi], hstate_attr_group); 4009 kobject_put(hstate_kobjs[hi]); 4010 hstate_kobjs[hi] = NULL; 4011 return retval; 4012 } 4013 } 4014 4015 return 0; 4016 } 4017 4018 #ifdef CONFIG_NUMA 4019 static bool hugetlb_sysfs_initialized __ro_after_init; 4020 4021 /* 4022 * node_hstate/s - associate per node hstate attributes, via their kobjects, 4023 * with node devices in node_devices[] using a parallel array. The array 4024 * index of a node device or _hstate == node id. 4025 * This is here to avoid any static dependency of the node device driver, in 4026 * the base kernel, on the hugetlb module. 4027 */ 4028 struct node_hstate { 4029 struct kobject *hugepages_kobj; 4030 struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; 4031 }; 4032 static struct node_hstate node_hstates[MAX_NUMNODES]; 4033 4034 /* 4035 * A subset of global hstate attributes for node devices 4036 */ 4037 static struct attribute *per_node_hstate_attrs[] = { 4038 &nr_hugepages_attr.attr, 4039 &free_hugepages_attr.attr, 4040 &surplus_hugepages_attr.attr, 4041 NULL, 4042 }; 4043 4044 static const struct attribute_group per_node_hstate_attr_group = { 4045 .attrs = per_node_hstate_attrs, 4046 }; 4047 4048 /* 4049 * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj. 4050 * Returns node id via non-NULL nidp. 4051 */ 4052 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp) 4053 { 4054 int nid; 4055 4056 for (nid = 0; nid < nr_node_ids; nid++) { 4057 struct node_hstate *nhs = &node_hstates[nid]; 4058 int i; 4059 for (i = 0; i < HUGE_MAX_HSTATE; i++) 4060 if (nhs->hstate_kobjs[i] == kobj) { 4061 if (nidp) 4062 *nidp = nid; 4063 return &hstates[i]; 4064 } 4065 } 4066 4067 BUG(); 4068 return NULL; 4069 } 4070 4071 /* 4072 * Unregister hstate attributes from a single node device. 4073 * No-op if no hstate attributes attached. 4074 */ 4075 void hugetlb_unregister_node(struct node *node) 4076 { 4077 struct hstate *h; 4078 struct node_hstate *nhs = &node_hstates[node->dev.id]; 4079 4080 if (!nhs->hugepages_kobj) 4081 return; /* no hstate attributes */ 4082 4083 for_each_hstate(h) { 4084 int idx = hstate_index(h); 4085 struct kobject *hstate_kobj = nhs->hstate_kobjs[idx]; 4086 4087 if (!hstate_kobj) 4088 continue; 4089 if (h->demote_order) 4090 sysfs_remove_group(hstate_kobj, &hstate_demote_attr_group); 4091 sysfs_remove_group(hstate_kobj, &per_node_hstate_attr_group); 4092 kobject_put(hstate_kobj); 4093 nhs->hstate_kobjs[idx] = NULL; 4094 } 4095 4096 kobject_put(nhs->hugepages_kobj); 4097 nhs->hugepages_kobj = NULL; 4098 } 4099 4100 4101 /* 4102 * Register hstate attributes for a single node device. 4103 * No-op if attributes already registered. 
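 * On success the per node files appear under
 * /sys/devices/system/node/node<N>/hugepages/hugepages-<size>kB/.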
4104 */ 4105 void hugetlb_register_node(struct node *node) 4106 { 4107 struct hstate *h; 4108 struct node_hstate *nhs = &node_hstates[node->dev.id]; 4109 int err; 4110 4111 if (!hugetlb_sysfs_initialized) 4112 return; 4113 4114 if (nhs->hugepages_kobj) 4115 return; /* already allocated */ 4116 4117 nhs->hugepages_kobj = kobject_create_and_add("hugepages", 4118 &node->dev.kobj); 4119 if (!nhs->hugepages_kobj) 4120 return; 4121 4122 for_each_hstate(h) { 4123 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj, 4124 nhs->hstate_kobjs, 4125 &per_node_hstate_attr_group); 4126 if (err) { 4127 pr_err("HugeTLB: Unable to add hstate %s for node %d\n", 4128 h->name, node->dev.id); 4129 hugetlb_unregister_node(node); 4130 break; 4131 } 4132 } 4133 } 4134 4135 /* 4136 * hugetlb init time: register hstate attributes for all registered node 4137 * devices of nodes that have memory. All on-line nodes should have 4138 * registered their associated device by this time. 4139 */ 4140 static void __init hugetlb_register_all_nodes(void) 4141 { 4142 int nid; 4143 4144 for_each_online_node(nid) 4145 hugetlb_register_node(node_devices[nid]); 4146 } 4147 #else /* !CONFIG_NUMA */ 4148 4149 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp) 4150 { 4151 BUG(); 4152 if (nidp) 4153 *nidp = -1; 4154 return NULL; 4155 } 4156 4157 static void hugetlb_register_all_nodes(void) { } 4158 4159 #endif 4160 4161 #ifdef CONFIG_CMA 4162 static void __init hugetlb_cma_check(void); 4163 #else 4164 static inline __init void hugetlb_cma_check(void) 4165 { 4166 } 4167 #endif 4168 4169 static void __init hugetlb_sysfs_init(void) 4170 { 4171 struct hstate *h; 4172 int err; 4173 4174 hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj); 4175 if (!hugepages_kobj) 4176 return; 4177 4178 for_each_hstate(h) { 4179 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj, 4180 hstate_kobjs, &hstate_attr_group); 4181 if (err) 4182 pr_err("HugeTLB: Unable to add hstate %s", h->name); 4183 } 4184 4185 #ifdef CONFIG_NUMA 4186 hugetlb_sysfs_initialized = true; 4187 #endif 4188 hugetlb_register_all_nodes(); 4189 } 4190 4191 #ifdef CONFIG_SYSCTL 4192 static void hugetlb_sysctl_init(void); 4193 #else 4194 static inline void hugetlb_sysctl_init(void) { } 4195 #endif 4196 4197 static int __init hugetlb_init(void) 4198 { 4199 int i; 4200 4201 BUILD_BUG_ON(sizeof_field(struct page, private) * BITS_PER_BYTE < 4202 __NR_HPAGEFLAGS); 4203 4204 if (!hugepages_supported()) { 4205 if (hugetlb_max_hstate || default_hstate_max_huge_pages) 4206 pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n"); 4207 return 0; 4208 } 4209 4210 /* 4211 * Make sure HPAGE_SIZE (HUGETLB_PAGE_ORDER) hstate exists. Some 4212 * architectures depend on setup being done here. 4213 */ 4214 hugetlb_add_hstate(HUGETLB_PAGE_ORDER); 4215 if (!parsed_default_hugepagesz) { 4216 /* 4217 * If we did not parse a default huge page size, set 4218 * default_hstate_idx to HPAGE_SIZE hstate. And, if the 4219 * number of huge pages for this default size was implicitly 4220 * specified, set that here as well. 4221 * Note that the implicit setting will overwrite an explicit 4222 * setting. A warning will be printed in this case. 
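 * For example, booting with "hugepages=512 hugepagesz=2M hugepages=256" on
 * a system whose default huge page size is 2M (e.g. x86-64) ends up with
 * 512 default huge pages: the implicit value wins and the explicit 256 is
 * reported as ignored.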
4223 */
4224 default_hstate_idx = hstate_index(size_to_hstate(HPAGE_SIZE));
4225 if (default_hstate_max_huge_pages) {
4226 if (default_hstate.max_huge_pages) {
4227 char buf[32];
4228
4229 string_get_size(huge_page_size(&default_hstate),
4230 1, STRING_UNITS_2, buf, 32);
4231 pr_warn("HugeTLB: Ignoring hugepages=%lu associated with %s page size\n",
4232 default_hstate.max_huge_pages, buf);
4233 pr_warn("HugeTLB: Using hugepages=%lu for number of default huge pages\n",
4234 default_hstate_max_huge_pages);
4235 }
4236 default_hstate.max_huge_pages =
4237 default_hstate_max_huge_pages;
4238
4239 for_each_online_node(i)
4240 default_hstate.max_huge_pages_node[i] =
4241 default_hugepages_in_node[i];
4242 }
4243 }
4244
4245 hugetlb_cma_check();
4246 hugetlb_init_hstates();
4247 gather_bootmem_prealloc();
4248 report_hugepages();
4249
4250 hugetlb_sysfs_init();
4251 hugetlb_cgroup_file_init();
4252 hugetlb_sysctl_init();
4253
4254 #ifdef CONFIG_SMP
4255 num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
4256 #else
4257 num_fault_mutexes = 1;
4258 #endif
4259 hugetlb_fault_mutex_table =
4260 kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
4261 GFP_KERNEL);
4262 BUG_ON(!hugetlb_fault_mutex_table);
4263
4264 for (i = 0; i < num_fault_mutexes; i++)
4265 mutex_init(&hugetlb_fault_mutex_table[i]);
4266 return 0;
4267 }
4268 subsys_initcall(hugetlb_init);
4269
4270 /* Overwritten by architectures with more huge page sizes */
4271 bool __init __attribute((weak)) arch_hugetlb_valid_size(unsigned long size)
4272 {
4273 return size == HPAGE_SIZE;
4274 }
4275
4276 void __init hugetlb_add_hstate(unsigned int order)
4277 {
4278 struct hstate *h;
4279 unsigned long i;
4280
4281 if (size_to_hstate(PAGE_SIZE << order)) {
4282 return;
4283 }
4284 BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
4285 BUG_ON(order == 0);
4286 h = &hstates[hugetlb_max_hstate++];
4287 mutex_init(&h->resize_lock);
4288 h->order = order;
4289 h->mask = ~(huge_page_size(h) - 1);
4290 for (i = 0; i < MAX_NUMNODES; ++i)
4291 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
4292 INIT_LIST_HEAD(&h->hugepage_activelist);
4293 h->next_nid_to_alloc = first_memory_node;
4294 h->next_nid_to_free = first_memory_node;
4295 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
4296 huge_page_size(h)/SZ_1K);
4297
4298 parsed_hstate = h;
4299 }
4300
4301 bool __init __weak hugetlb_node_alloc_supported(void)
4302 {
4303 return true;
4304 }
4305
4306 static void __init hugepages_clear_pages_in_node(void)
4307 {
4308 if (!hugetlb_max_hstate) {
4309 default_hstate_max_huge_pages = 0;
4310 memset(default_hugepages_in_node, 0,
4311 sizeof(default_hugepages_in_node));
4312 } else {
4313 parsed_hstate->max_huge_pages = 0;
4314 memset(parsed_hstate->max_huge_pages_node, 0,
4315 sizeof(parsed_hstate->max_huge_pages_node));
4316 }
4317 }
4318
4319 /*
4320 * hugepages command line processing
4321 * hugepages normally follows a valid hugepagesz or default_hugepagesz
4322 * specification. If not, ignore the hugepages value. hugepages can also
4323 * be the first huge page command line option in which case it implicitly
4324 * specifies the number of huge pages for the default size.
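 * For example, "hugepages=64" requests 64 pages of the preceding (or
 * default) size, while the node format "hugepages=0:32,1:32" requests 32
 * pages on each of nodes 0 and 1 (only honoured where the architecture
 * supports node specific allocation, see hugetlb_node_alloc_supported()).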
4325 */ 4326 static int __init hugepages_setup(char *s) 4327 { 4328 unsigned long *mhp; 4329 static unsigned long *last_mhp; 4330 int node = NUMA_NO_NODE; 4331 int count; 4332 unsigned long tmp; 4333 char *p = s; 4334 4335 if (!parsed_valid_hugepagesz) { 4336 pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s); 4337 parsed_valid_hugepagesz = true; 4338 return 1; 4339 } 4340 4341 /* 4342 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter 4343 * yet, so this hugepages= parameter goes to the "default hstate". 4344 * Otherwise, it goes with the previously parsed hugepagesz or 4345 * default_hugepagesz. 4346 */ 4347 else if (!hugetlb_max_hstate) 4348 mhp = &default_hstate_max_huge_pages; 4349 else 4350 mhp = &parsed_hstate->max_huge_pages; 4351 4352 if (mhp == last_mhp) { 4353 pr_warn("HugeTLB: hugepages= specified twice without interleaving hugepagesz=, ignoring hugepages=%s\n", s); 4354 return 1; 4355 } 4356 4357 while (*p) { 4358 count = 0; 4359 if (sscanf(p, "%lu%n", &tmp, &count) != 1) 4360 goto invalid; 4361 /* Parameter is node format */ 4362 if (p[count] == ':') { 4363 if (!hugetlb_node_alloc_supported()) { 4364 pr_warn("HugeTLB: architecture can't support node specific alloc, ignoring!\n"); 4365 return 1; 4366 } 4367 if (tmp >= MAX_NUMNODES || !node_online(tmp)) 4368 goto invalid; 4369 node = array_index_nospec(tmp, MAX_NUMNODES); 4370 p += count + 1; 4371 /* Parse hugepages */ 4372 if (sscanf(p, "%lu%n", &tmp, &count) != 1) 4373 goto invalid; 4374 if (!hugetlb_max_hstate) 4375 default_hugepages_in_node[node] = tmp; 4376 else 4377 parsed_hstate->max_huge_pages_node[node] = tmp; 4378 *mhp += tmp; 4379 /* Go to parse next node*/ 4380 if (p[count] == ',') 4381 p += count + 1; 4382 else 4383 break; 4384 } else { 4385 if (p != s) 4386 goto invalid; 4387 *mhp = tmp; 4388 break; 4389 } 4390 } 4391 4392 /* 4393 * Global state is always initialized later in hugetlb_init. 4394 * But we need to allocate gigantic hstates here early to still 4395 * use the bootmem allocator. 4396 */ 4397 if (hugetlb_max_hstate && hstate_is_gigantic(parsed_hstate)) 4398 hugetlb_hstate_alloc_pages(parsed_hstate); 4399 4400 last_mhp = mhp; 4401 4402 return 1; 4403 4404 invalid: 4405 pr_warn("HugeTLB: Invalid hugepages parameter %s\n", p); 4406 hugepages_clear_pages_in_node(); 4407 return 1; 4408 } 4409 __setup("hugepages=", hugepages_setup); 4410 4411 /* 4412 * hugepagesz command line processing 4413 * A specific huge page size can only be specified once with hugepagesz. 4414 * hugepagesz is followed by hugepages on the command line. The global 4415 * variable 'parsed_valid_hugepagesz' is used to determine if prior 4416 * hugepagesz argument was valid. 4417 */ 4418 static int __init hugepagesz_setup(char *s) 4419 { 4420 unsigned long size; 4421 struct hstate *h; 4422 4423 parsed_valid_hugepagesz = false; 4424 size = (unsigned long)memparse(s, NULL); 4425 4426 if (!arch_hugetlb_valid_size(size)) { 4427 pr_err("HugeTLB: unsupported hugepagesz=%s\n", s); 4428 return 1; 4429 } 4430 4431 h = size_to_hstate(size); 4432 if (h) { 4433 /* 4434 * hstate for this size already exists. This is normally 4435 * an error, but is allowed if the existing hstate is the 4436 * default hstate. More specifically, it is only allowed if 4437 * the number of huge pages for the default hstate was not 4438 * previously specified. 
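 * For example, "hugepagesz=1G hugepages=2 hugepagesz=1G" is rejected as a
 * duplicate, while "default_hugepagesz=1G hugepagesz=1G hugepages=2" is
 * accepted because the second mention only names the already created
 * default hstate.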
4439 */ 4440 if (!parsed_default_hugepagesz || h != &default_hstate || 4441 default_hstate.max_huge_pages) { 4442 pr_warn("HugeTLB: hugepagesz=%s specified twice, ignoring\n", s); 4443 return 1; 4444 } 4445 4446 /* 4447 * No need to call hugetlb_add_hstate() as hstate already 4448 * exists. But, do set parsed_hstate so that a following 4449 * hugepages= parameter will be applied to this hstate. 4450 */ 4451 parsed_hstate = h; 4452 parsed_valid_hugepagesz = true; 4453 return 1; 4454 } 4455 4456 hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT); 4457 parsed_valid_hugepagesz = true; 4458 return 1; 4459 } 4460 __setup("hugepagesz=", hugepagesz_setup); 4461 4462 /* 4463 * default_hugepagesz command line input 4464 * Only one instance of default_hugepagesz allowed on command line. 4465 */ 4466 static int __init default_hugepagesz_setup(char *s) 4467 { 4468 unsigned long size; 4469 int i; 4470 4471 parsed_valid_hugepagesz = false; 4472 if (parsed_default_hugepagesz) { 4473 pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s); 4474 return 1; 4475 } 4476 4477 size = (unsigned long)memparse(s, NULL); 4478 4479 if (!arch_hugetlb_valid_size(size)) { 4480 pr_err("HugeTLB: unsupported default_hugepagesz=%s\n", s); 4481 return 1; 4482 } 4483 4484 hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT); 4485 parsed_valid_hugepagesz = true; 4486 parsed_default_hugepagesz = true; 4487 default_hstate_idx = hstate_index(size_to_hstate(size)); 4488 4489 /* 4490 * The number of default huge pages (for this size) could have been 4491 * specified as the first hugetlb parameter: hugepages=X. If so, 4492 * then default_hstate_max_huge_pages is set. If the default huge 4493 * page size is gigantic (> MAX_ORDER), then the pages must be 4494 * allocated here from bootmem allocator. 
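 * For example, with "hugepages=2 default_hugepagesz=1G" (on an architecture
 * that supports 1G pages) the leading hugepages=2 applies to the 1G default
 * size, and because that size is gigantic the two pages are allocated from
 * bootmem right here rather than later in hugetlb_init().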
4495 */ 4496 if (default_hstate_max_huge_pages) { 4497 default_hstate.max_huge_pages = default_hstate_max_huge_pages; 4498 for_each_online_node(i) 4499 default_hstate.max_huge_pages_node[i] = 4500 default_hugepages_in_node[i]; 4501 if (hstate_is_gigantic(&default_hstate)) 4502 hugetlb_hstate_alloc_pages(&default_hstate); 4503 default_hstate_max_huge_pages = 0; 4504 } 4505 4506 return 1; 4507 } 4508 __setup("default_hugepagesz=", default_hugepagesz_setup); 4509 4510 static nodemask_t *policy_mbind_nodemask(gfp_t gfp) 4511 { 4512 #ifdef CONFIG_NUMA 4513 struct mempolicy *mpol = get_task_policy(current); 4514 4515 /* 4516 * Only enforce MPOL_BIND policy which overlaps with cpuset policy 4517 * (from policy_nodemask) specifically for hugetlb case 4518 */ 4519 if (mpol->mode == MPOL_BIND && 4520 (apply_policy_zone(mpol, gfp_zone(gfp)) && 4521 cpuset_nodemask_valid_mems_allowed(&mpol->nodes))) 4522 return &mpol->nodes; 4523 #endif 4524 return NULL; 4525 } 4526 4527 static unsigned int allowed_mems_nr(struct hstate *h) 4528 { 4529 int node; 4530 unsigned int nr = 0; 4531 nodemask_t *mbind_nodemask; 4532 unsigned int *array = h->free_huge_pages_node; 4533 gfp_t gfp_mask = htlb_alloc_mask(h); 4534 4535 mbind_nodemask = policy_mbind_nodemask(gfp_mask); 4536 for_each_node_mask(node, cpuset_current_mems_allowed) { 4537 if (!mbind_nodemask || node_isset(node, *mbind_nodemask)) 4538 nr += array[node]; 4539 } 4540 4541 return nr; 4542 } 4543 4544 #ifdef CONFIG_SYSCTL 4545 static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write, 4546 void *buffer, size_t *length, 4547 loff_t *ppos, unsigned long *out) 4548 { 4549 struct ctl_table dup_table; 4550 4551 /* 4552 * In order to avoid races with __do_proc_doulongvec_minmax(), we 4553 * can duplicate the @table and alter the duplicate of it. 
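 * The copy's ->data is pointed at the caller supplied 'out' variable, so
 * each writer parses into its own stack location instead of a shared
 * table field.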
4554 */ 4555 dup_table = *table; 4556 dup_table.data = out; 4557 4558 return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos); 4559 } 4560 4561 static int hugetlb_sysctl_handler_common(bool obey_mempolicy, 4562 struct ctl_table *table, int write, 4563 void *buffer, size_t *length, loff_t *ppos) 4564 { 4565 struct hstate *h = &default_hstate; 4566 unsigned long tmp = h->max_huge_pages; 4567 int ret; 4568 4569 if (!hugepages_supported()) 4570 return -EOPNOTSUPP; 4571 4572 ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos, 4573 &tmp); 4574 if (ret) 4575 goto out; 4576 4577 if (write) 4578 ret = __nr_hugepages_store_common(obey_mempolicy, h, 4579 NUMA_NO_NODE, tmp, *length); 4580 out: 4581 return ret; 4582 } 4583 4584 static int hugetlb_sysctl_handler(struct ctl_table *table, int write, 4585 void *buffer, size_t *length, loff_t *ppos) 4586 { 4587 4588 return hugetlb_sysctl_handler_common(false, table, write, 4589 buffer, length, ppos); 4590 } 4591 4592 #ifdef CONFIG_NUMA 4593 static int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write, 4594 void *buffer, size_t *length, loff_t *ppos) 4595 { 4596 return hugetlb_sysctl_handler_common(true, table, write, 4597 buffer, length, ppos); 4598 } 4599 #endif /* CONFIG_NUMA */ 4600 4601 static int hugetlb_overcommit_handler(struct ctl_table *table, int write, 4602 void *buffer, size_t *length, loff_t *ppos) 4603 { 4604 struct hstate *h = &default_hstate; 4605 unsigned long tmp; 4606 int ret; 4607 4608 if (!hugepages_supported()) 4609 return -EOPNOTSUPP; 4610 4611 tmp = h->nr_overcommit_huge_pages; 4612 4613 if (write && hstate_is_gigantic(h)) 4614 return -EINVAL; 4615 4616 ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos, 4617 &tmp); 4618 if (ret) 4619 goto out; 4620 4621 if (write) { 4622 spin_lock_irq(&hugetlb_lock); 4623 h->nr_overcommit_huge_pages = tmp; 4624 spin_unlock_irq(&hugetlb_lock); 4625 } 4626 out: 4627 return ret; 4628 } 4629 4630 static struct ctl_table hugetlb_table[] = { 4631 { 4632 .procname = "nr_hugepages", 4633 .data = NULL, 4634 .maxlen = sizeof(unsigned long), 4635 .mode = 0644, 4636 .proc_handler = hugetlb_sysctl_handler, 4637 }, 4638 #ifdef CONFIG_NUMA 4639 { 4640 .procname = "nr_hugepages_mempolicy", 4641 .data = NULL, 4642 .maxlen = sizeof(unsigned long), 4643 .mode = 0644, 4644 .proc_handler = &hugetlb_mempolicy_sysctl_handler, 4645 }, 4646 #endif 4647 { 4648 .procname = "hugetlb_shm_group", 4649 .data = &sysctl_hugetlb_shm_group, 4650 .maxlen = sizeof(gid_t), 4651 .mode = 0644, 4652 .proc_handler = proc_dointvec, 4653 }, 4654 { 4655 .procname = "nr_overcommit_hugepages", 4656 .data = NULL, 4657 .maxlen = sizeof(unsigned long), 4658 .mode = 0644, 4659 .proc_handler = hugetlb_overcommit_handler, 4660 }, 4661 { } 4662 }; 4663 4664 static void hugetlb_sysctl_init(void) 4665 { 4666 register_sysctl_init("vm", hugetlb_table); 4667 } 4668 #endif /* CONFIG_SYSCTL */ 4669 4670 void hugetlb_report_meminfo(struct seq_file *m) 4671 { 4672 struct hstate *h; 4673 unsigned long total = 0; 4674 4675 if (!hugepages_supported()) 4676 return; 4677 4678 for_each_hstate(h) { 4679 unsigned long count = h->nr_huge_pages; 4680 4681 total += huge_page_size(h) * count; 4682 4683 if (h == &default_hstate) 4684 seq_printf(m, 4685 "HugePages_Total: %5lu\n" 4686 "HugePages_Free: %5lu\n" 4687 "HugePages_Rsvd: %5lu\n" 4688 "HugePages_Surp: %5lu\n" 4689 "Hugepagesize: %8lu kB\n", 4690 count, 4691 h->free_huge_pages, 4692 h->resv_huge_pages, 4693 h->surplus_huge_pages, 4694 
huge_page_size(h) / SZ_1K); 4695 } 4696 4697 seq_printf(m, "Hugetlb: %8lu kB\n", total / SZ_1K); 4698 } 4699 4700 int hugetlb_report_node_meminfo(char *buf, int len, int nid) 4701 { 4702 struct hstate *h = &default_hstate; 4703 4704 if (!hugepages_supported()) 4705 return 0; 4706 4707 return sysfs_emit_at(buf, len, 4708 "Node %d HugePages_Total: %5u\n" 4709 "Node %d HugePages_Free: %5u\n" 4710 "Node %d HugePages_Surp: %5u\n", 4711 nid, h->nr_huge_pages_node[nid], 4712 nid, h->free_huge_pages_node[nid], 4713 nid, h->surplus_huge_pages_node[nid]); 4714 } 4715 4716 void hugetlb_show_meminfo_node(int nid) 4717 { 4718 struct hstate *h; 4719 4720 if (!hugepages_supported()) 4721 return; 4722 4723 for_each_hstate(h) 4724 printk("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n", 4725 nid, 4726 h->nr_huge_pages_node[nid], 4727 h->free_huge_pages_node[nid], 4728 h->surplus_huge_pages_node[nid], 4729 huge_page_size(h) / SZ_1K); 4730 } 4731 4732 void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm) 4733 { 4734 seq_printf(m, "HugetlbPages:\t%8lu kB\n", 4735 K(atomic_long_read(&mm->hugetlb_usage))); 4736 } 4737 4738 /* Return the number pages of memory we physically have, in PAGE_SIZE units. */ 4739 unsigned long hugetlb_total_pages(void) 4740 { 4741 struct hstate *h; 4742 unsigned long nr_total_pages = 0; 4743 4744 for_each_hstate(h) 4745 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h); 4746 return nr_total_pages; 4747 } 4748 4749 static int hugetlb_acct_memory(struct hstate *h, long delta) 4750 { 4751 int ret = -ENOMEM; 4752 4753 if (!delta) 4754 return 0; 4755 4756 spin_lock_irq(&hugetlb_lock); 4757 /* 4758 * When cpuset is configured, it breaks the strict hugetlb page 4759 * reservation as the accounting is done on a global variable. Such 4760 * reservation is completely rubbish in the presence of cpuset because 4761 * the reservation is not checked against page availability for the 4762 * current cpuset. Application can still potentially OOM'ed by kernel 4763 * with lack of free htlb page in cpuset that the task is in. 4764 * Attempt to enforce strict accounting with cpuset is almost 4765 * impossible (or too ugly) because cpuset is too fluid that 4766 * task or memory node can be dynamically moved between cpusets. 4767 * 4768 * The change of semantics for shared hugetlb mapping with cpuset is 4769 * undesirable. However, in order to preserve some of the semantics, 4770 * we fall back to check against current free page availability as 4771 * a best attempt and hopefully to minimize the impact of changing 4772 * semantics that cpuset has. 4773 * 4774 * Apart from cpuset, we also have memory policy mechanism that 4775 * also determines from which node the kernel will allocate memory 4776 * in a NUMA system. So similar to cpuset, we also should consider 4777 * the memory policy of the current task. Similar to the description 4778 * above. 4779 */ 4780 if (delta > 0) { 4781 if (gather_surplus_pages(h, delta) < 0) 4782 goto out; 4783 4784 if (delta > allowed_mems_nr(h)) { 4785 return_unused_surplus_pages(h, delta); 4786 goto out; 4787 } 4788 } 4789 4790 ret = 0; 4791 if (delta < 0) 4792 return_unused_surplus_pages(h, (unsigned long) -delta); 4793 4794 out: 4795 spin_unlock_irq(&hugetlb_lock); 4796 return ret; 4797 } 4798 4799 static void hugetlb_vm_op_open(struct vm_area_struct *vma) 4800 { 4801 struct resv_map *resv = vma_resv_map(vma); 4802 4803 /* 4804 * HPAGE_RESV_OWNER indicates a private mapping. 
4805 * This new VMA should share its siblings reservation map if present. 4806 * The VMA will only ever have a valid reservation map pointer where 4807 * it is being copied for another still existing VMA. As that VMA 4808 * has a reference to the reservation map it cannot disappear until 4809 * after this open call completes. It is therefore safe to take a 4810 * new reference here without additional locking. 4811 */ 4812 if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { 4813 resv_map_dup_hugetlb_cgroup_uncharge_info(resv); 4814 kref_get(&resv->refs); 4815 } 4816 4817 /* 4818 * vma_lock structure for sharable mappings is vma specific. 4819 * Clear old pointer (if copied via vm_area_dup) and allocate 4820 * new structure. Before clearing, make sure vma_lock is not 4821 * for this vma. 4822 */ 4823 if (vma->vm_flags & VM_MAYSHARE) { 4824 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; 4825 4826 if (vma_lock) { 4827 if (vma_lock->vma != vma) { 4828 vma->vm_private_data = NULL; 4829 hugetlb_vma_lock_alloc(vma); 4830 } else 4831 pr_warn("HugeTLB: vma_lock already exists in %s.\n", __func__); 4832 } else 4833 hugetlb_vma_lock_alloc(vma); 4834 } 4835 } 4836 4837 static void hugetlb_vm_op_close(struct vm_area_struct *vma) 4838 { 4839 struct hstate *h = hstate_vma(vma); 4840 struct resv_map *resv; 4841 struct hugepage_subpool *spool = subpool_vma(vma); 4842 unsigned long reserve, start, end; 4843 long gbl_reserve; 4844 4845 hugetlb_vma_lock_free(vma); 4846 4847 resv = vma_resv_map(vma); 4848 if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER)) 4849 return; 4850 4851 start = vma_hugecache_offset(h, vma, vma->vm_start); 4852 end = vma_hugecache_offset(h, vma, vma->vm_end); 4853 4854 reserve = (end - start) - region_count(resv, start, end); 4855 hugetlb_cgroup_uncharge_counter(resv, start, end); 4856 if (reserve) { 4857 /* 4858 * Decrement reserve counts. The global reserve count may be 4859 * adjusted if the subpool has a minimum size. 4860 */ 4861 gbl_reserve = hugepage_subpool_put_pages(spool, reserve); 4862 hugetlb_acct_memory(h, -gbl_reserve); 4863 } 4864 4865 kref_put(&resv->refs, resv_map_release); 4866 } 4867 4868 static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr) 4869 { 4870 if (addr & ~(huge_page_mask(hstate_vma(vma)))) 4871 return -EINVAL; 4872 4873 /* 4874 * PMD sharing is only possible for PUD_SIZE-aligned address ranges 4875 * in HugeTLB VMAs. If we will lose PUD_SIZE alignment due to this 4876 * split, unshare PMDs in the PUD_SIZE interval surrounding addr now. 4877 */ 4878 if (addr & ~PUD_MASK) { 4879 /* 4880 * hugetlb_vm_op_split is called right before we attempt to 4881 * split the VMA. We will need to unshare PMDs in the old and 4882 * new VMAs, so let's unshare before we split. 4883 */ 4884 unsigned long floor = addr & PUD_MASK; 4885 unsigned long ceil = floor + PUD_SIZE; 4886 4887 if (floor >= vma->vm_start && ceil <= vma->vm_end) 4888 hugetlb_unshare_pmds(vma, floor, ceil); 4889 } 4890 4891 return 0; 4892 } 4893 4894 static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma) 4895 { 4896 return huge_page_size(hstate_vma(vma)); 4897 } 4898 4899 /* 4900 * We cannot handle pagefaults against hugetlb pages at all. They cause 4901 * handle_mm_fault() to try to instantiate regular-sized pages in the 4902 * hugepage VMA. do_page_fault() is supposed to trap this, so BUG is we get 4903 * this far. 
4904 */ 4905 static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf) 4906 { 4907 BUG(); 4908 return 0; 4909 } 4910 4911 /* 4912 * When a new function is introduced to vm_operations_struct and added 4913 * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops. 4914 * This is because under System V memory model, mappings created via 4915 * shmget/shmat with "huge page" specified are backed by hugetlbfs files, 4916 * their original vm_ops are overwritten with shm_vm_ops. 4917 */ 4918 const struct vm_operations_struct hugetlb_vm_ops = { 4919 .fault = hugetlb_vm_op_fault, 4920 .open = hugetlb_vm_op_open, 4921 .close = hugetlb_vm_op_close, 4922 .may_split = hugetlb_vm_op_split, 4923 .pagesize = hugetlb_vm_op_pagesize, 4924 }; 4925 4926 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page, 4927 int writable) 4928 { 4929 pte_t entry; 4930 unsigned int shift = huge_page_shift(hstate_vma(vma)); 4931 4932 if (writable) { 4933 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page, 4934 vma->vm_page_prot))); 4935 } else { 4936 entry = huge_pte_wrprotect(mk_huge_pte(page, 4937 vma->vm_page_prot)); 4938 } 4939 entry = pte_mkyoung(entry); 4940 entry = arch_make_huge_pte(entry, shift, vma->vm_flags); 4941 4942 return entry; 4943 } 4944 4945 static void set_huge_ptep_writable(struct vm_area_struct *vma, 4946 unsigned long address, pte_t *ptep) 4947 { 4948 pte_t entry; 4949 4950 entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep))); 4951 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) 4952 update_mmu_cache(vma, address, ptep); 4953 } 4954 4955 bool is_hugetlb_entry_migration(pte_t pte) 4956 { 4957 swp_entry_t swp; 4958 4959 if (huge_pte_none(pte) || pte_present(pte)) 4960 return false; 4961 swp = pte_to_swp_entry(pte); 4962 if (is_migration_entry(swp)) 4963 return true; 4964 else 4965 return false; 4966 } 4967 4968 static bool is_hugetlb_entry_hwpoisoned(pte_t pte) 4969 { 4970 swp_entry_t swp; 4971 4972 if (huge_pte_none(pte) || pte_present(pte)) 4973 return false; 4974 swp = pte_to_swp_entry(pte); 4975 if (is_hwpoison_entry(swp)) 4976 return true; 4977 else 4978 return false; 4979 } 4980 4981 static void 4982 hugetlb_install_folio(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr, 4983 struct folio *new_folio, pte_t old, unsigned long sz) 4984 { 4985 pte_t newpte = make_huge_pte(vma, &new_folio->page, 1); 4986 4987 __folio_mark_uptodate(new_folio); 4988 hugepage_add_new_anon_rmap(new_folio, vma, addr); 4989 if (userfaultfd_wp(vma) && huge_pte_uffd_wp(old)) 4990 newpte = huge_pte_mkuffd_wp(newpte); 4991 set_huge_pte_at(vma->vm_mm, addr, ptep, newpte, sz); 4992 hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm); 4993 folio_set_hugetlb_migratable(new_folio); 4994 } 4995 4996 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, 4997 struct vm_area_struct *dst_vma, 4998 struct vm_area_struct *src_vma) 4999 { 5000 pte_t *src_pte, *dst_pte, entry; 5001 struct folio *pte_folio; 5002 unsigned long addr; 5003 bool cow = is_cow_mapping(src_vma->vm_flags); 5004 struct hstate *h = hstate_vma(src_vma); 5005 unsigned long sz = huge_page_size(h); 5006 unsigned long npages = pages_per_huge_page(h); 5007 struct mmu_notifier_range range; 5008 unsigned long last_addr_mask; 5009 int ret = 0; 5010 5011 if (cow) { 5012 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, src, 5013 src_vma->vm_start, 5014 src_vma->vm_end); 5015 mmu_notifier_invalidate_range_start(&range); 5016 vma_assert_write_locked(src_vma); 5017 
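/*
 * Taking write_protect_seq here lets lockless GUP-fast notice that the
 * parent's PTEs are being write-protected for COW and fall back to the
 * slow path; see the seqcount checks in mm/gup.c.
 */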
raw_write_seqcount_begin(&src->write_protect_seq); 5018 } else { 5019 /* 5020 * For shared mappings the vma lock must be held before 5021 * calling hugetlb_walk() in the src vma. Otherwise, the 5022 * returned ptep could go away if part of a shared pmd and 5023 * another thread calls huge_pmd_unshare. 5024 */ 5025 hugetlb_vma_lock_read(src_vma); 5026 } 5027 5028 last_addr_mask = hugetlb_mask_last_page(h); 5029 for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) { 5030 spinlock_t *src_ptl, *dst_ptl; 5031 src_pte = hugetlb_walk(src_vma, addr, sz); 5032 if (!src_pte) { 5033 addr |= last_addr_mask; 5034 continue; 5035 } 5036 dst_pte = huge_pte_alloc(dst, dst_vma, addr, sz); 5037 if (!dst_pte) { 5038 ret = -ENOMEM; 5039 break; 5040 } 5041 5042 /* 5043 * If the pagetables are shared don't copy or take references. 5044 * 5045 * dst_pte == src_pte is the common case of src/dest sharing. 5046 * However, src could have 'unshared' and dst shares with 5047 * another vma. So page_count of ptep page is checked instead 5048 * to reliably determine whether pte is shared. 5049 */ 5050 if (page_count(virt_to_page(dst_pte)) > 1) { 5051 addr |= last_addr_mask; 5052 continue; 5053 } 5054 5055 dst_ptl = huge_pte_lock(h, dst, dst_pte); 5056 src_ptl = huge_pte_lockptr(h, src, src_pte); 5057 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 5058 entry = huge_ptep_get(src_pte); 5059 again: 5060 if (huge_pte_none(entry)) { 5061 /* 5062 * Skip if src entry none. 5063 */ 5064 ; 5065 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) { 5066 if (!userfaultfd_wp(dst_vma)) 5067 entry = huge_pte_clear_uffd_wp(entry); 5068 set_huge_pte_at(dst, addr, dst_pte, entry, sz); 5069 } else if (unlikely(is_hugetlb_entry_migration(entry))) { 5070 swp_entry_t swp_entry = pte_to_swp_entry(entry); 5071 bool uffd_wp = pte_swp_uffd_wp(entry); 5072 5073 if (!is_readable_migration_entry(swp_entry) && cow) { 5074 /* 5075 * COW mappings require pages in both 5076 * parent and child to be set to read. 5077 */ 5078 swp_entry = make_readable_migration_entry( 5079 swp_offset(swp_entry)); 5080 entry = swp_entry_to_pte(swp_entry); 5081 if (userfaultfd_wp(src_vma) && uffd_wp) 5082 entry = pte_swp_mkuffd_wp(entry); 5083 set_huge_pte_at(src, addr, src_pte, entry, sz); 5084 } 5085 if (!userfaultfd_wp(dst_vma)) 5086 entry = huge_pte_clear_uffd_wp(entry); 5087 set_huge_pte_at(dst, addr, dst_pte, entry, sz); 5088 } else if (unlikely(is_pte_marker(entry))) { 5089 pte_marker marker = copy_pte_marker( 5090 pte_to_swp_entry(entry), dst_vma); 5091 5092 if (marker) 5093 set_huge_pte_at(dst, addr, dst_pte, 5094 make_pte_marker(marker), sz); 5095 } else { 5096 entry = huge_ptep_get(src_pte); 5097 pte_folio = page_folio(pte_page(entry)); 5098 folio_get(pte_folio); 5099 5100 /* 5101 * Failing to duplicate the anon rmap is a rare case 5102 * where we see pinned hugetlb pages while they're 5103 * prone to COW. We need to do the COW earlier during 5104 * fork. 5105 * 5106 * When pre-allocating the page or copying data, we 5107 * need to be without the pgtable locks since we could 5108 * sleep during the process. 
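 * (This mirrors the regular fork path, where copy_present_pte() in
 * mm/memory.c likewise falls back to allocating and copying a fresh page
 * when the anon rmap of a pinned page cannot be duplicated.)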
5109 */ 5110 if (!folio_test_anon(pte_folio)) { 5111 page_dup_file_rmap(&pte_folio->page, true); 5112 } else if (page_try_dup_anon_rmap(&pte_folio->page, 5113 true, src_vma)) { 5114 pte_t src_pte_old = entry; 5115 struct folio *new_folio; 5116 5117 spin_unlock(src_ptl); 5118 spin_unlock(dst_ptl); 5119 /* Do not use reserve as it's private owned */ 5120 new_folio = alloc_hugetlb_folio(dst_vma, addr, 1); 5121 if (IS_ERR(new_folio)) { 5122 folio_put(pte_folio); 5123 ret = PTR_ERR(new_folio); 5124 break; 5125 } 5126 ret = copy_user_large_folio(new_folio, 5127 pte_folio, 5128 addr, dst_vma); 5129 folio_put(pte_folio); 5130 if (ret) { 5131 folio_put(new_folio); 5132 break; 5133 } 5134 5135 /* Install the new hugetlb folio if src pte stable */ 5136 dst_ptl = huge_pte_lock(h, dst, dst_pte); 5137 src_ptl = huge_pte_lockptr(h, src, src_pte); 5138 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 5139 entry = huge_ptep_get(src_pte); 5140 if (!pte_same(src_pte_old, entry)) { 5141 restore_reserve_on_error(h, dst_vma, addr, 5142 new_folio); 5143 folio_put(new_folio); 5144 /* huge_ptep of dst_pte won't change as in child */ 5145 goto again; 5146 } 5147 hugetlb_install_folio(dst_vma, dst_pte, addr, 5148 new_folio, src_pte_old, sz); 5149 spin_unlock(src_ptl); 5150 spin_unlock(dst_ptl); 5151 continue; 5152 } 5153 5154 if (cow) { 5155 /* 5156 * No need to notify as we are downgrading page 5157 * table protection not changing it to point 5158 * to a new page. 5159 * 5160 * See Documentation/mm/mmu_notifier.rst 5161 */ 5162 huge_ptep_set_wrprotect(src, addr, src_pte); 5163 entry = huge_pte_wrprotect(entry); 5164 } 5165 5166 if (!userfaultfd_wp(dst_vma)) 5167 entry = huge_pte_clear_uffd_wp(entry); 5168 5169 set_huge_pte_at(dst, addr, dst_pte, entry, sz); 5170 hugetlb_count_add(npages, dst); 5171 } 5172 spin_unlock(src_ptl); 5173 spin_unlock(dst_ptl); 5174 } 5175 5176 if (cow) { 5177 raw_write_seqcount_end(&src->write_protect_seq); 5178 mmu_notifier_invalidate_range_end(&range); 5179 } else { 5180 hugetlb_vma_unlock_read(src_vma); 5181 } 5182 5183 return ret; 5184 } 5185 5186 static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr, 5187 unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte, 5188 unsigned long sz) 5189 { 5190 struct hstate *h = hstate_vma(vma); 5191 struct mm_struct *mm = vma->vm_mm; 5192 spinlock_t *src_ptl, *dst_ptl; 5193 pte_t pte; 5194 5195 dst_ptl = huge_pte_lock(h, mm, dst_pte); 5196 src_ptl = huge_pte_lockptr(h, mm, src_pte); 5197 5198 /* 5199 * We don't have to worry about the ordering of src and dst ptlocks 5200 * because exclusive mmap_lock (or the i_mmap_lock) prevents deadlock. 
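 * (The caller, move_hugetlb_page_tables(), holds the hugetlb vma_lock and
 * i_mmap_rwsem in write mode for the duration of the walk.)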
5201 */ 5202 if (src_ptl != dst_ptl) 5203 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 5204 5205 pte = huge_ptep_get_and_clear(mm, old_addr, src_pte); 5206 set_huge_pte_at(mm, new_addr, dst_pte, pte, sz); 5207 5208 if (src_ptl != dst_ptl) 5209 spin_unlock(src_ptl); 5210 spin_unlock(dst_ptl); 5211 } 5212 5213 int move_hugetlb_page_tables(struct vm_area_struct *vma, 5214 struct vm_area_struct *new_vma, 5215 unsigned long old_addr, unsigned long new_addr, 5216 unsigned long len) 5217 { 5218 struct hstate *h = hstate_vma(vma); 5219 struct address_space *mapping = vma->vm_file->f_mapping; 5220 unsigned long sz = huge_page_size(h); 5221 struct mm_struct *mm = vma->vm_mm; 5222 unsigned long old_end = old_addr + len; 5223 unsigned long last_addr_mask; 5224 pte_t *src_pte, *dst_pte; 5225 struct mmu_notifier_range range; 5226 bool shared_pmd = false; 5227 5228 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, old_addr, 5229 old_end); 5230 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); 5231 /* 5232 * In case of shared PMDs, we should cover the maximum possible 5233 * range. 5234 */ 5235 flush_cache_range(vma, range.start, range.end); 5236 5237 mmu_notifier_invalidate_range_start(&range); 5238 last_addr_mask = hugetlb_mask_last_page(h); 5239 /* Prevent race with file truncation */ 5240 hugetlb_vma_lock_write(vma); 5241 i_mmap_lock_write(mapping); 5242 for (; old_addr < old_end; old_addr += sz, new_addr += sz) { 5243 src_pte = hugetlb_walk(vma, old_addr, sz); 5244 if (!src_pte) { 5245 old_addr |= last_addr_mask; 5246 new_addr |= last_addr_mask; 5247 continue; 5248 } 5249 if (huge_pte_none(huge_ptep_get(src_pte))) 5250 continue; 5251 5252 if (huge_pmd_unshare(mm, vma, old_addr, src_pte)) { 5253 shared_pmd = true; 5254 old_addr |= last_addr_mask; 5255 new_addr |= last_addr_mask; 5256 continue; 5257 } 5258 5259 dst_pte = huge_pte_alloc(mm, new_vma, new_addr, sz); 5260 if (!dst_pte) 5261 break; 5262 5263 move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte, sz); 5264 } 5265 5266 if (shared_pmd) 5267 flush_hugetlb_tlb_range(vma, range.start, range.end); 5268 else 5269 flush_hugetlb_tlb_range(vma, old_end - len, old_end); 5270 mmu_notifier_invalidate_range_end(&range); 5271 i_mmap_unlock_write(mapping); 5272 hugetlb_vma_unlock_write(vma); 5273 5274 return len + old_addr - old_end; 5275 } 5276 5277 static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, 5278 unsigned long start, unsigned long end, 5279 struct page *ref_page, zap_flags_t zap_flags) 5280 { 5281 struct mm_struct *mm = vma->vm_mm; 5282 unsigned long address; 5283 pte_t *ptep; 5284 pte_t pte; 5285 spinlock_t *ptl; 5286 struct page *page; 5287 struct hstate *h = hstate_vma(vma); 5288 unsigned long sz = huge_page_size(h); 5289 unsigned long last_addr_mask; 5290 bool force_flush = false; 5291 5292 WARN_ON(!is_vm_hugetlb_page(vma)); 5293 BUG_ON(start & ~huge_page_mask(h)); 5294 BUG_ON(end & ~huge_page_mask(h)); 5295 5296 /* 5297 * This is a hugetlb vma, all the pte entries should point 5298 * to huge page. 
5299 */ 5300 tlb_change_page_size(tlb, sz); 5301 tlb_start_vma(tlb, vma); 5302 5303 last_addr_mask = hugetlb_mask_last_page(h); 5304 address = start; 5305 for (; address < end; address += sz) { 5306 ptep = hugetlb_walk(vma, address, sz); 5307 if (!ptep) { 5308 address |= last_addr_mask; 5309 continue; 5310 } 5311 5312 ptl = huge_pte_lock(h, mm, ptep); 5313 if (huge_pmd_unshare(mm, vma, address, ptep)) { 5314 spin_unlock(ptl); 5315 tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE); 5316 force_flush = true; 5317 address |= last_addr_mask; 5318 continue; 5319 } 5320 5321 pte = huge_ptep_get(ptep); 5322 if (huge_pte_none(pte)) { 5323 spin_unlock(ptl); 5324 continue; 5325 } 5326 5327 /* 5328 * Migrating hugepage or HWPoisoned hugepage is already 5329 * unmapped and its refcount is dropped, so just clear pte here. 5330 */ 5331 if (unlikely(!pte_present(pte))) { 5332 /* 5333 * If the pte was wr-protected by uffd-wp in any of the 5334 * swap forms, meanwhile the caller does not want to 5335 * drop the uffd-wp bit in this zap, then replace the 5336 * pte with a marker. 5337 */ 5338 if (pte_swp_uffd_wp_any(pte) && 5339 !(zap_flags & ZAP_FLAG_DROP_MARKER)) 5340 set_huge_pte_at(mm, address, ptep, 5341 make_pte_marker(PTE_MARKER_UFFD_WP), 5342 sz); 5343 else 5344 huge_pte_clear(mm, address, ptep, sz); 5345 spin_unlock(ptl); 5346 continue; 5347 } 5348 5349 page = pte_page(pte); 5350 /* 5351 * If a reference page is supplied, it is because a specific 5352 * page is being unmapped, not a range. Ensure the page we 5353 * are about to unmap is the actual page of interest. 5354 */ 5355 if (ref_page) { 5356 if (page != ref_page) { 5357 spin_unlock(ptl); 5358 continue; 5359 } 5360 /* 5361 * Mark the VMA as having unmapped its page so that 5362 * future faults in this VMA will fail rather than 5363 * looking like data was lost 5364 */ 5365 set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED); 5366 } 5367 5368 pte = huge_ptep_get_and_clear(mm, address, ptep); 5369 tlb_remove_huge_tlb_entry(h, tlb, ptep, address); 5370 if (huge_pte_dirty(pte)) 5371 set_page_dirty(page); 5372 /* Leave a uffd-wp pte marker if needed */ 5373 if (huge_pte_uffd_wp(pte) && 5374 !(zap_flags & ZAP_FLAG_DROP_MARKER)) 5375 set_huge_pte_at(mm, address, ptep, 5376 make_pte_marker(PTE_MARKER_UFFD_WP), 5377 sz); 5378 hugetlb_count_sub(pages_per_huge_page(h), mm); 5379 page_remove_rmap(page, vma, true); 5380 5381 spin_unlock(ptl); 5382 tlb_remove_page_size(tlb, page, huge_page_size(h)); 5383 /* 5384 * Bail out after unmapping reference page if supplied 5385 */ 5386 if (ref_page) 5387 break; 5388 } 5389 tlb_end_vma(tlb, vma); 5390 5391 /* 5392 * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We 5393 * could defer the flush until now, since by holding i_mmap_rwsem we 5394 * guaranteed that the last refernece would not be dropped. But we must 5395 * do the flushing before we return, as otherwise i_mmap_rwsem will be 5396 * dropped and the last reference to the shared PMDs page might be 5397 * dropped as well. 5398 * 5399 * In theory we could defer the freeing of the PMD pages as well, but 5400 * huge_pmd_unshare() relies on the exact page_count for the PMD page to 5401 * detect sharing, so we cannot defer the release of the page either. 5402 * Instead, do flush now. 
5403 */ 5404 if (force_flush) 5405 tlb_flush_mmu_tlbonly(tlb); 5406 } 5407 5408 void __unmap_hugepage_range_final(struct mmu_gather *tlb, 5409 struct vm_area_struct *vma, unsigned long start, 5410 unsigned long end, struct page *ref_page, 5411 zap_flags_t zap_flags) 5412 { 5413 hugetlb_vma_lock_write(vma); 5414 i_mmap_lock_write(vma->vm_file->f_mapping); 5415 5416 /* mmu notification performed in caller */ 5417 __unmap_hugepage_range(tlb, vma, start, end, ref_page, zap_flags); 5418 5419 if (zap_flags & ZAP_FLAG_UNMAP) { /* final unmap */ 5420 /* 5421 * Unlock and free the vma lock before releasing i_mmap_rwsem. 5422 * When the vma_lock is freed, this makes the vma ineligible 5423 * for pmd sharing. And, i_mmap_rwsem is required to set up 5424 * pmd sharing. This is important as page tables for this 5425 * unmapped range will be asynchrously deleted. If the page 5426 * tables are shared, there will be issues when accessed by 5427 * someone else. 5428 */ 5429 __hugetlb_vma_unlock_write_free(vma); 5430 i_mmap_unlock_write(vma->vm_file->f_mapping); 5431 } else { 5432 i_mmap_unlock_write(vma->vm_file->f_mapping); 5433 hugetlb_vma_unlock_write(vma); 5434 } 5435 } 5436 5437 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, 5438 unsigned long end, struct page *ref_page, 5439 zap_flags_t zap_flags) 5440 { 5441 struct mmu_notifier_range range; 5442 struct mmu_gather tlb; 5443 5444 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, 5445 start, end); 5446 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); 5447 mmu_notifier_invalidate_range_start(&range); 5448 tlb_gather_mmu(&tlb, vma->vm_mm); 5449 5450 __unmap_hugepage_range(&tlb, vma, start, end, ref_page, zap_flags); 5451 5452 mmu_notifier_invalidate_range_end(&range); 5453 tlb_finish_mmu(&tlb); 5454 } 5455 5456 /* 5457 * This is called when the original mapper is failing to COW a MAP_PRIVATE 5458 * mapping it owns the reserve page for. The intention is to unmap the page 5459 * from other VMAs and let the children be SIGKILLed if they are faulting the 5460 * same region. 5461 */ 5462 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, 5463 struct page *page, unsigned long address) 5464 { 5465 struct hstate *h = hstate_vma(vma); 5466 struct vm_area_struct *iter_vma; 5467 struct address_space *mapping; 5468 pgoff_t pgoff; 5469 5470 /* 5471 * vm_pgoff is in PAGE_SIZE units, hence the different calculation 5472 * from page cache lookup which is in HPAGE_SIZE units. 5473 */ 5474 address = address & huge_page_mask(h); 5475 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + 5476 vma->vm_pgoff; 5477 mapping = vma->vm_file->f_mapping; 5478 5479 /* 5480 * Take the mapping lock for the duration of the table walk. As 5481 * this mapping should be shared between all the VMAs, 5482 * __unmap_hugepage_range() is called as the lock is already held 5483 */ 5484 i_mmap_lock_write(mapping); 5485 vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) { 5486 /* Do not unmap the current VMA */ 5487 if (iter_vma == vma) 5488 continue; 5489 5490 /* 5491 * Shared VMAs have their own reserves and do not affect 5492 * MAP_PRIVATE accounting but it is possible that a shared 5493 * VMA is using the same page so check and skip such VMAs. 5494 */ 5495 if (iter_vma->vm_flags & VM_MAYSHARE) 5496 continue; 5497 5498 /* 5499 * Unmap the page from other VMAs without their own reserves. 5500 * They get marked to be SIGKILLed if they fault in these 5501 * areas. 
This is because a future no-page fault on this VMA 5502 * could insert a zeroed page instead of the data existing 5503 * from the time of fork. This would look like data corruption 5504 */ 5505 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER)) 5506 unmap_hugepage_range(iter_vma, address, 5507 address + huge_page_size(h), page, 0); 5508 } 5509 i_mmap_unlock_write(mapping); 5510 } 5511 5512 /* 5513 * hugetlb_wp() should be called with page lock of the original hugepage held. 5514 * Called with hugetlb_fault_mutex_table held and pte_page locked so we 5515 * cannot race with other handlers or page migration. 5516 * Keep the pte_same checks anyway to make transition from the mutex easier. 5517 */ 5518 static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma, 5519 unsigned long address, pte_t *ptep, unsigned int flags, 5520 struct folio *pagecache_folio, spinlock_t *ptl) 5521 { 5522 const bool unshare = flags & FAULT_FLAG_UNSHARE; 5523 pte_t pte = huge_ptep_get(ptep); 5524 struct hstate *h = hstate_vma(vma); 5525 struct folio *old_folio; 5526 struct folio *new_folio; 5527 int outside_reserve = 0; 5528 vm_fault_t ret = 0; 5529 unsigned long haddr = address & huge_page_mask(h); 5530 struct mmu_notifier_range range; 5531 5532 /* 5533 * Never handle CoW for uffd-wp protected pages. It should be only 5534 * handled when the uffd-wp protection is removed. 5535 * 5536 * Note that only the CoW optimization path (in hugetlb_no_page()) 5537 * can trigger this, because hugetlb_fault() will always resolve 5538 * uffd-wp bit first. 5539 */ 5540 if (!unshare && huge_pte_uffd_wp(pte)) 5541 return 0; 5542 5543 /* 5544 * hugetlb does not support FOLL_FORCE-style write faults that keep the 5545 * PTE mapped R/O such as maybe_mkwrite() would do. 5546 */ 5547 if (WARN_ON_ONCE(!unshare && !(vma->vm_flags & VM_WRITE))) 5548 return VM_FAULT_SIGSEGV; 5549 5550 /* Let's take out MAP_SHARED mappings first. */ 5551 if (vma->vm_flags & VM_MAYSHARE) { 5552 set_huge_ptep_writable(vma, haddr, ptep); 5553 return 0; 5554 } 5555 5556 old_folio = page_folio(pte_page(pte)); 5557 5558 delayacct_wpcopy_start(); 5559 5560 retry_avoidcopy: 5561 /* 5562 * If no-one else is actually using this page, we're the exclusive 5563 * owner and can reuse this page. 5564 */ 5565 if (folio_mapcount(old_folio) == 1 && folio_test_anon(old_folio)) { 5566 if (!PageAnonExclusive(&old_folio->page)) 5567 page_move_anon_rmap(&old_folio->page, vma); 5568 if (likely(!unshare)) 5569 set_huge_ptep_writable(vma, haddr, ptep); 5570 5571 delayacct_wpcopy_end(); 5572 return 0; 5573 } 5574 VM_BUG_ON_PAGE(folio_test_anon(old_folio) && 5575 PageAnonExclusive(&old_folio->page), &old_folio->page); 5576 5577 /* 5578 * If the process that created a MAP_PRIVATE mapping is about to 5579 * perform a COW due to a shared page count, attempt to satisfy 5580 * the allocation without using the existing reserves. The pagecache 5581 * page is used to determine if the reserve at this address was 5582 * consumed or not. If reserves were used, a partial faulted mapping 5583 * at the time of fork() could consume its reserves on COW instead 5584 * of the full address range. 5585 */ 5586 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && 5587 old_folio != pagecache_folio) 5588 outside_reserve = 1; 5589 5590 folio_get(old_folio); 5591 5592 /* 5593 * Drop page table lock as buddy allocator may be called. It will 5594 * be acquired again before returning to the caller, as expected. 
5595 */ 5596 spin_unlock(ptl); 5597 new_folio = alloc_hugetlb_folio(vma, haddr, outside_reserve); 5598 5599 if (IS_ERR(new_folio)) { 5600 /* 5601 * If a process owning a MAP_PRIVATE mapping fails to COW, 5602 * it is due to references held by a child and an insufficient 5603 * huge page pool. To guarantee the original mapper's 5604 * reliability, unmap the page from child processes. The child 5605 * may get SIGKILLed if it later faults. 5606 */ 5607 if (outside_reserve) { 5608 struct address_space *mapping = vma->vm_file->f_mapping; 5609 pgoff_t idx; 5610 u32 hash; 5611 5612 folio_put(old_folio); 5613 /* 5614 * Drop hugetlb_fault_mutex and vma_lock before 5615 * unmapping. unmapping needs to hold vma_lock 5616 * in write mode. Dropping vma_lock in read mode 5617 * here is OK as COW mappings do not interact with 5618 * PMD sharing. 5619 * 5620 * Reacquire both after unmap operation. 5621 */ 5622 idx = vma_hugecache_offset(h, vma, haddr); 5623 hash = hugetlb_fault_mutex_hash(mapping, idx); 5624 hugetlb_vma_unlock_read(vma); 5625 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 5626 5627 unmap_ref_private(mm, vma, &old_folio->page, haddr); 5628 5629 mutex_lock(&hugetlb_fault_mutex_table[hash]); 5630 hugetlb_vma_lock_read(vma); 5631 spin_lock(ptl); 5632 ptep = hugetlb_walk(vma, haddr, huge_page_size(h)); 5633 if (likely(ptep && 5634 pte_same(huge_ptep_get(ptep), pte))) 5635 goto retry_avoidcopy; 5636 /* 5637 * A race occurred while re-acquiring the page table 5638 * lock; our job is done. 5639 */ 5640 delayacct_wpcopy_end(); 5641 return 0; 5642 } 5643 5644 ret = vmf_error(PTR_ERR(new_folio)); 5645 goto out_release_old; 5646 } 5647 5648 /* 5649 * When the original hugepage is a shared one, it does not have 5650 * an anon_vma prepared. 5651 */ 5652 if (unlikely(anon_vma_prepare(vma))) { 5653 ret = VM_FAULT_OOM; 5654 goto out_release_all; 5655 } 5656 5657 if (copy_user_large_folio(new_folio, old_folio, address, vma)) { 5658 ret = VM_FAULT_HWPOISON_LARGE; 5659 goto out_release_all; 5660 } 5661 __folio_mark_uptodate(new_folio); 5662 5663 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, haddr, 5664 haddr + huge_page_size(h)); 5665 mmu_notifier_invalidate_range_start(&range); 5666 5667 /* 5668 * Retake the page table lock to check for racing updates 5669 * before the page tables are altered. 5670 */ 5671 spin_lock(ptl); 5672 ptep = hugetlb_walk(vma, haddr, huge_page_size(h)); 5673 if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) { 5674 pte_t newpte = make_huge_pte(vma, &new_folio->page, !unshare); 5675 5676 /* Break COW or unshare */ 5677 huge_ptep_clear_flush(vma, haddr, ptep); 5678 page_remove_rmap(&old_folio->page, vma, true); 5679 hugepage_add_new_anon_rmap(new_folio, vma, haddr); 5680 if (huge_pte_uffd_wp(pte)) 5681 newpte = huge_pte_mkuffd_wp(newpte); 5682 set_huge_pte_at(mm, haddr, ptep, newpte, huge_page_size(h)); 5683 folio_set_hugetlb_migratable(new_folio); 5684 /* Make the old page be freed below */ 5685 new_folio = old_folio; 5686 } 5687 spin_unlock(ptl); 5688 mmu_notifier_invalidate_range_end(&range); 5689 out_release_all: 5690 /* 5691 * No restore in case of successful pagetable update (Break COW or 5692 * unshare) 5693 */ 5694 if (new_folio != old_folio) 5695 restore_reserve_on_error(h, vma, haddr, new_folio); 5696 folio_put(new_folio); 5697 out_release_old: 5698 folio_put(old_folio); 5699 5700 spin_lock(ptl); /* Caller expects lock to be held */ 5701 5702 delayacct_wpcopy_end(); 5703 return ret; 5704 } 5705 5706 /* 5707 * Return whether there is a pagecache page to back the given
address within VMA. 5708 */ 5709 static bool hugetlbfs_pagecache_present(struct hstate *h, 5710 struct vm_area_struct *vma, unsigned long address) 5711 { 5712 struct address_space *mapping = vma->vm_file->f_mapping; 5713 pgoff_t idx = vma_hugecache_offset(h, vma, address); 5714 struct folio *folio; 5715 5716 folio = filemap_get_folio(mapping, idx); 5717 if (IS_ERR(folio)) 5718 return false; 5719 folio_put(folio); 5720 return true; 5721 } 5722 5723 int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping, 5724 pgoff_t idx) 5725 { 5726 struct inode *inode = mapping->host; 5727 struct hstate *h = hstate_inode(inode); 5728 int err; 5729 5730 __folio_set_locked(folio); 5731 err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL); 5732 5733 if (unlikely(err)) { 5734 __folio_clear_locked(folio); 5735 return err; 5736 } 5737 folio_clear_hugetlb_restore_reserve(folio); 5738 5739 /* 5740 * mark folio dirty so that it will not be removed from cache/file 5741 * by non-hugetlbfs specific code paths. 5742 */ 5743 folio_mark_dirty(folio); 5744 5745 spin_lock(&inode->i_lock); 5746 inode->i_blocks += blocks_per_huge_page(h); 5747 spin_unlock(&inode->i_lock); 5748 return 0; 5749 } 5750 5751 static inline vm_fault_t hugetlb_handle_userfault(struct vm_area_struct *vma, 5752 struct address_space *mapping, 5753 pgoff_t idx, 5754 unsigned int flags, 5755 unsigned long haddr, 5756 unsigned long addr, 5757 unsigned long reason) 5758 { 5759 u32 hash; 5760 struct vm_fault vmf = { 5761 .vma = vma, 5762 .address = haddr, 5763 .real_address = addr, 5764 .flags = flags, 5765 5766 /* 5767 * Hard to debug if it ends up being 5768 * used by a callee that assumes 5769 * something about the other 5770 * uninitialized fields... same as in 5771 * memory.c 5772 */ 5773 }; 5774 5775 /* 5776 * vma_lock and hugetlb_fault_mutex must be dropped before handling 5777 * userfault. Also mmap_lock could be dropped due to handling 5778 * userfault, any vma operation should be careful from here. 5779 */ 5780 hugetlb_vma_unlock_read(vma); 5781 hash = hugetlb_fault_mutex_hash(mapping, idx); 5782 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 5783 return handle_userfault(&vmf, reason); 5784 } 5785 5786 /* 5787 * Recheck pte with pgtable lock. Returns true if pte didn't change, or 5788 * false if pte changed or is changing. 5789 */ 5790 static bool hugetlb_pte_stable(struct hstate *h, struct mm_struct *mm, 5791 pte_t *ptep, pte_t old_pte) 5792 { 5793 spinlock_t *ptl; 5794 bool same; 5795 5796 ptl = huge_pte_lock(h, mm, ptep); 5797 same = pte_same(huge_ptep_get(ptep), old_pte); 5798 spin_unlock(ptl); 5799 5800 return same; 5801 } 5802 5803 static vm_fault_t hugetlb_no_page(struct mm_struct *mm, 5804 struct vm_area_struct *vma, 5805 struct address_space *mapping, pgoff_t idx, 5806 unsigned long address, pte_t *ptep, 5807 pte_t old_pte, unsigned int flags) 5808 { 5809 struct hstate *h = hstate_vma(vma); 5810 vm_fault_t ret = VM_FAULT_SIGBUS; 5811 int anon_rmap = 0; 5812 unsigned long size; 5813 struct folio *folio; 5814 pte_t new_pte; 5815 spinlock_t *ptl; 5816 unsigned long haddr = address & huge_page_mask(h); 5817 bool new_folio, new_pagecache_folio = false; 5818 u32 hash = hugetlb_fault_mutex_hash(mapping, idx); 5819 5820 /* 5821 * Currently, we are forced to kill the process in the event the 5822 * original mapper has unmapped pages from the child due to a failed 5823 * COW/unsharing. Warn that such a situation has occurred as it may not 5824 * be obvious. 
5825 */ 5826 if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) { 5827 pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n", 5828 current->pid); 5829 goto out; 5830 } 5831 5832 /* 5833 * Use page lock to guard against racing truncation 5834 * before we get page_table_lock. 5835 */ 5836 new_folio = false; 5837 folio = filemap_lock_folio(mapping, idx); 5838 if (IS_ERR(folio)) { 5839 size = i_size_read(mapping->host) >> huge_page_shift(h); 5840 if (idx >= size) 5841 goto out; 5842 /* Check for page in userfault range */ 5843 if (userfaultfd_missing(vma)) { 5844 /* 5845 * Since hugetlb_no_page() was examining pte 5846 * without pgtable lock, we need to re-test under 5847 * lock because the pte may not be stable and could 5848 * have changed from under us. Try to detect 5849 * either changed or still-changing ptes and retry 5850 * properly when needed. 5851 * 5852 * Note that userfaultfd is actually fine with 5853 * false positives (e.g. caused by a changed pte), 5854 * but not wrong logical events (e.g. caused by 5855 * reading a pte while it is changing). The latter can 5856 * confuse userspace, so the strictness is very 5857 * much preferred. E.g., a MISSING event should 5858 * never happen on the page after UFFDIO_COPY has 5859 * correctly installed the page and returned. 5860 */ 5861 if (!hugetlb_pte_stable(h, mm, ptep, old_pte)) { 5862 ret = 0; 5863 goto out; 5864 } 5865 5866 return hugetlb_handle_userfault(vma, mapping, idx, flags, 5867 haddr, address, 5868 VM_UFFD_MISSING); 5869 } 5870 5871 folio = alloc_hugetlb_folio(vma, haddr, 0); 5872 if (IS_ERR(folio)) { 5873 /* 5874 * Returning an error will result in the faulting task being 5875 * sent SIGBUS. The hugetlb fault mutex prevents two 5876 * tasks from racing to fault in the same page, which 5877 * could result in spurious "unable to allocate" errors. 5878 * Page migration does not take the fault mutex, but 5879 * does a clear then write of ptes under the page table 5880 * lock. Page fault code could race with migration, 5881 * notice the cleared pte and try to allocate a page 5882 * here. Before returning an error, get the ptl and make 5883 * sure there really is no pte entry. 5884 */ 5885 if (hugetlb_pte_stable(h, mm, ptep, old_pte)) 5886 ret = vmf_error(PTR_ERR(folio)); 5887 else 5888 ret = 0; 5889 goto out; 5890 } 5891 clear_huge_page(&folio->page, address, pages_per_huge_page(h)); 5892 __folio_mark_uptodate(folio); 5893 new_folio = true; 5894 5895 if (vma->vm_flags & VM_MAYSHARE) { 5896 int err = hugetlb_add_to_page_cache(folio, mapping, idx); 5897 if (err) { 5898 /* 5899 * err can't be -EEXIST, which would imply someone 5900 * else consumed the reservation, since the hugetlb 5901 * fault mutex is held when adding a hugetlb page 5902 * to the page cache. So it's safe to call 5903 * restore_reserve_on_error() here. 5904 */ 5905 restore_reserve_on_error(h, vma, haddr, folio); 5906 folio_put(folio); 5907 goto out; 5908 } 5909 new_pagecache_folio = true; 5910 } else { 5911 folio_lock(folio); 5912 if (unlikely(anon_vma_prepare(vma))) { 5913 ret = VM_FAULT_OOM; 5914 goto backout_unlocked; 5915 } 5916 anon_rmap = 1; 5917 } 5918 } else { 5919 /* 5920 * If a memory error occurs between mmap() and fault, some processes 5921 * won't have a hwpoisoned swap entry for the errored virtual address. 5922 * So we need to block the hugepage fault with a PG_hwpoison bit check. 5923 */ 5924 if (unlikely(folio_test_hwpoison(folio))) { 5925 ret = VM_FAULT_HWPOISON_LARGE | 5926 VM_FAULT_SET_HINDEX(hstate_index(h)); 5927 goto backout_unlocked; 5928 } 5929 5930 /* Check for page in userfault range.
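A minor fault here means the folio is already present in the page cache but no pte has been installed for it yet; userspace is expected to resolve it with UFFDIO_CONTINUE.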
*/ 5931 if (userfaultfd_minor(vma)) { 5932 folio_unlock(folio); 5933 folio_put(folio); 5934 /* See comment in userfaultfd_missing() block above */ 5935 if (!hugetlb_pte_stable(h, mm, ptep, old_pte)) { 5936 ret = 0; 5937 goto out; 5938 } 5939 return hugetlb_handle_userfault(vma, mapping, idx, flags, 5940 haddr, address, 5941 VM_UFFD_MINOR); 5942 } 5943 } 5944 5945 /* 5946 * If we are going to COW a private mapping later, we examine the 5947 * pending reservations for this page now. This will ensure that 5948 * any allocations necessary to record that reservation occur outside 5949 * the spinlock. 5950 */ 5951 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { 5952 if (vma_needs_reservation(h, vma, haddr) < 0) { 5953 ret = VM_FAULT_OOM; 5954 goto backout_unlocked; 5955 } 5956 /* Just decrements count, does not deallocate */ 5957 vma_end_reservation(h, vma, haddr); 5958 } 5959 5960 ptl = huge_pte_lock(h, mm, ptep); 5961 ret = 0; 5962 /* If pte changed from under us, retry */ 5963 if (!pte_same(huge_ptep_get(ptep), old_pte)) 5964 goto backout; 5965 5966 if (anon_rmap) 5967 hugepage_add_new_anon_rmap(folio, vma, haddr); 5968 else 5969 page_dup_file_rmap(&folio->page, true); 5970 new_pte = make_huge_pte(vma, &folio->page, ((vma->vm_flags & VM_WRITE) 5971 && (vma->vm_flags & VM_SHARED))); 5972 /* 5973 * If this pte was previously wr-protected, keep it wr-protected even 5974 * if populated. 5975 */ 5976 if (unlikely(pte_marker_uffd_wp(old_pte))) 5977 new_pte = huge_pte_mkuffd_wp(new_pte); 5978 set_huge_pte_at(mm, haddr, ptep, new_pte, huge_page_size(h)); 5979 5980 hugetlb_count_add(pages_per_huge_page(h), mm); 5981 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { 5982 /* Optimization, do the COW without a second fault */ 5983 ret = hugetlb_wp(mm, vma, address, ptep, flags, folio, ptl); 5984 } 5985 5986 spin_unlock(ptl); 5987 5988 /* 5989 * Only set hugetlb_migratable in newly allocated pages. Existing pages 5990 * found in the pagecache may not have hugetlb_migratable if they have 5991 * been isolated for migration. 5992 */ 5993 if (new_folio) 5994 folio_set_hugetlb_migratable(folio); 5995 5996 folio_unlock(folio); 5997 out: 5998 hugetlb_vma_unlock_read(vma); 5999 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 6000 return ret; 6001 6002 backout: 6003 spin_unlock(ptl); 6004 backout_unlocked: 6005 if (new_folio && !new_pagecache_folio) 6006 restore_reserve_on_error(h, vma, haddr, folio); 6007 6008 folio_unlock(folio); 6009 folio_put(folio); 6010 goto out; 6011 } 6012 6013 #ifdef CONFIG_SMP 6014 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx) 6015 { 6016 unsigned long key[2]; 6017 u32 hash; 6018 6019 key[0] = (unsigned long) mapping; 6020 key[1] = idx; 6021 6022 hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0); 6023 6024 return hash & (num_fault_mutexes - 1); 6025 } 6026 #else 6027 /* 6028 * For uniprocessor systems we always use a single mutex, so just 6029 * return 0 and avoid the hashing overhead. 
6030 */ 6031 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx) 6032 { 6033 return 0; 6034 } 6035 #endif 6036 6037 vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, 6038 unsigned long address, unsigned int flags) 6039 { 6040 pte_t *ptep, entry; 6041 spinlock_t *ptl; 6042 vm_fault_t ret; 6043 u32 hash; 6044 pgoff_t idx; 6045 struct folio *folio = NULL; 6046 struct folio *pagecache_folio = NULL; 6047 struct hstate *h = hstate_vma(vma); 6048 struct address_space *mapping; 6049 int need_wait_lock = 0; 6050 unsigned long haddr = address & huge_page_mask(h); 6051 6052 /* TODO: Handle faults under the VMA lock */ 6053 if (flags & FAULT_FLAG_VMA_LOCK) { 6054 vma_end_read(vma); 6055 return VM_FAULT_RETRY; 6056 } 6057 6058 /* 6059 * Serialize hugepage allocation and instantiation, so that we don't 6060 * get spurious allocation failures if two CPUs race to instantiate 6061 * the same page in the page cache. 6062 */ 6063 mapping = vma->vm_file->f_mapping; 6064 idx = vma_hugecache_offset(h, vma, haddr); 6065 hash = hugetlb_fault_mutex_hash(mapping, idx); 6066 mutex_lock(&hugetlb_fault_mutex_table[hash]); 6067 6068 /* 6069 * Acquire the vma lock before calling huge_pte_alloc and hold 6070 * it until finished with ptep. This prevents huge_pmd_unshare from 6071 * being called elsewhere and making the ptep no longer valid. 6072 */ 6073 hugetlb_vma_lock_read(vma); 6074 ptep = huge_pte_alloc(mm, vma, haddr, huge_page_size(h)); 6075 if (!ptep) { 6076 hugetlb_vma_unlock_read(vma); 6077 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 6078 return VM_FAULT_OOM; 6079 } 6080 6081 entry = huge_ptep_get(ptep); 6082 if (huge_pte_none_mostly(entry)) { 6083 if (is_pte_marker(entry)) { 6084 pte_marker marker = 6085 pte_marker_get(pte_to_swp_entry(entry)); 6086 6087 if (marker & PTE_MARKER_POISONED) { 6088 ret = VM_FAULT_HWPOISON_LARGE; 6089 goto out_mutex; 6090 } 6091 } 6092 6093 /* 6094 * Other PTE markers should be handled the same way as a none PTE. 6095 * 6096 * hugetlb_no_page will drop the vma lock and hugetlb fault 6097 * mutex internally, which makes us return immediately. 6098 */ 6099 return hugetlb_no_page(mm, vma, mapping, idx, address, ptep, 6100 entry, flags); 6101 } 6102 6103 ret = 0; 6104 6105 /* 6106 * entry could be a migration/hwpoison entry at this point, so this 6107 * check prevents the kernel from going further below assuming that we 6108 * have an active hugepage in the pagecache. This goto expects the 2nd 6109 * page fault, and the is_hugetlb_entry_(migration|hwpoisoned) check 6110 * will properly handle it. 6111 */ 6112 if (!pte_present(entry)) { 6113 if (unlikely(is_hugetlb_entry_migration(entry))) { 6114 /* 6115 * Release the hugetlb fault lock now, but retain 6116 * the vma lock, because it is needed to guard the 6117 * huge_pte_lockptr() later in 6118 * migration_entry_wait_huge(). The vma lock will 6119 * be released there. 6120 */ 6121 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 6122 migration_entry_wait_huge(vma, ptep); 6123 return 0; 6124 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) 6125 ret = VM_FAULT_HWPOISON_LARGE | 6126 VM_FAULT_SET_HINDEX(hstate_index(h)); 6127 goto out_mutex; 6128 } 6129 6130 /* 6131 * If we are going to COW/unshare the mapping later, we examine the 6132 * pending reservations for this page now. This will ensure that any 6133 * allocations necessary to record that reservation occur outside the 6134 * spinlock. Also lookup the pagecache page now as it is used to 6135 * determine if a reservation has been consumed.
6136 */ 6137 if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) && 6138 !(vma->vm_flags & VM_MAYSHARE) && !huge_pte_write(entry)) { 6139 if (vma_needs_reservation(h, vma, haddr) < 0) { 6140 ret = VM_FAULT_OOM; 6141 goto out_mutex; 6142 } 6143 /* Just decrements count, does not deallocate */ 6144 vma_end_reservation(h, vma, haddr); 6145 6146 pagecache_folio = filemap_lock_folio(mapping, idx); 6147 if (IS_ERR(pagecache_folio)) 6148 pagecache_folio = NULL; 6149 } 6150 6151 ptl = huge_pte_lock(h, mm, ptep); 6152 6153 /* Check for a racing update before calling hugetlb_wp() */ 6154 if (unlikely(!pte_same(entry, huge_ptep_get(ptep)))) 6155 goto out_ptl; 6156 6157 /* Handle userfault-wp first, before trying to lock more pages */ 6158 if (userfaultfd_wp(vma) && huge_pte_uffd_wp(huge_ptep_get(ptep)) && 6159 (flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) { 6160 struct vm_fault vmf = { 6161 .vma = vma, 6162 .address = haddr, 6163 .real_address = address, 6164 .flags = flags, 6165 }; 6166 6167 spin_unlock(ptl); 6168 if (pagecache_folio) { 6169 folio_unlock(pagecache_folio); 6170 folio_put(pagecache_folio); 6171 } 6172 hugetlb_vma_unlock_read(vma); 6173 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 6174 return handle_userfault(&vmf, VM_UFFD_WP); 6175 } 6176 6177 /* 6178 * hugetlb_wp() requires page locks of pte_page(entry) and 6179 * pagecache_folio, so here we need to take the former 6180 * when folio != pagecache_folio or !pagecache_folio. 6181 */ 6182 folio = page_folio(pte_page(entry)); 6183 if (folio != pagecache_folio) 6184 if (!folio_trylock(folio)) { 6185 need_wait_lock = 1; 6186 goto out_ptl; 6187 } 6188 6189 folio_get(folio); 6190 6191 if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) { 6192 if (!huge_pte_write(entry)) { 6193 ret = hugetlb_wp(mm, vma, address, ptep, flags, 6194 pagecache_folio, ptl); 6195 goto out_put_page; 6196 } else if (likely(flags & FAULT_FLAG_WRITE)) { 6197 entry = huge_pte_mkdirty(entry); 6198 } 6199 } 6200 entry = pte_mkyoung(entry); 6201 if (huge_ptep_set_access_flags(vma, haddr, ptep, entry, 6202 flags & FAULT_FLAG_WRITE)) 6203 update_mmu_cache(vma, haddr, ptep); 6204 out_put_page: 6205 if (folio != pagecache_folio) 6206 folio_unlock(folio); 6207 folio_put(folio); 6208 out_ptl: 6209 spin_unlock(ptl); 6210 6211 if (pagecache_folio) { 6212 folio_unlock(pagecache_folio); 6213 folio_put(pagecache_folio); 6214 } 6215 out_mutex: 6216 hugetlb_vma_unlock_read(vma); 6217 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 6218 /* 6219 * Generally it is only safe to wait on a page lock while holding a 6220 * refcount on the page. Here, however, we wait only to defer the next 6221 * page fault and avoid a busy loop; the page is not touched after it is 6222 * unlocked and before we return from the current page fault. So we are 6223 * safe from accessing a freed page, even though we wait here without taking a refcount. 6224 */ 6225 if (need_wait_lock) 6226 folio_wait_locked(folio); 6227 return ret; 6228 } 6229 6230 #ifdef CONFIG_USERFAULTFD 6231 /* 6232 * Used by userfaultfd UFFDIO_* ioctls. Based on userfaultfd's mfill_atomic_pte 6233 * with modifications for hugetlb pages.
6234 */ 6235 int hugetlb_mfill_atomic_pte(pte_t *dst_pte, 6236 struct vm_area_struct *dst_vma, 6237 unsigned long dst_addr, 6238 unsigned long src_addr, 6239 uffd_flags_t flags, 6240 struct folio **foliop) 6241 { 6242 struct mm_struct *dst_mm = dst_vma->vm_mm; 6243 bool is_continue = uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE); 6244 bool wp_enabled = (flags & MFILL_ATOMIC_WP); 6245 struct hstate *h = hstate_vma(dst_vma); 6246 struct address_space *mapping = dst_vma->vm_file->f_mapping; 6247 pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr); 6248 unsigned long size; 6249 int vm_shared = dst_vma->vm_flags & VM_SHARED; 6250 pte_t _dst_pte; 6251 spinlock_t *ptl; 6252 int ret = -ENOMEM; 6253 struct folio *folio; 6254 int writable; 6255 bool folio_in_pagecache = false; 6256 6257 if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) { 6258 ptl = huge_pte_lock(h, dst_mm, dst_pte); 6259 6260 /* Don't overwrite any existing PTEs (even markers) */ 6261 if (!huge_pte_none(huge_ptep_get(dst_pte))) { 6262 spin_unlock(ptl); 6263 return -EEXIST; 6264 } 6265 6266 _dst_pte = make_pte_marker(PTE_MARKER_POISONED); 6267 set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, 6268 huge_page_size(h)); 6269 6270 /* No need to invalidate - it was non-present before */ 6271 update_mmu_cache(dst_vma, dst_addr, dst_pte); 6272 6273 spin_unlock(ptl); 6274 return 0; 6275 } 6276 6277 if (is_continue) { 6278 ret = -EFAULT; 6279 folio = filemap_lock_folio(mapping, idx); 6280 if (IS_ERR(folio)) 6281 goto out; 6282 folio_in_pagecache = true; 6283 } else if (!*foliop) { 6284 /* If a folio already exists, then it's UFFDIO_COPY for 6285 * a non-missing case. Return -EEXIST. 6286 */ 6287 if (vm_shared && 6288 hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) { 6289 ret = -EEXIST; 6290 goto out; 6291 } 6292 6293 folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0); 6294 if (IS_ERR(folio)) { 6295 ret = -ENOMEM; 6296 goto out; 6297 } 6298 6299 ret = copy_folio_from_user(folio, (const void __user *) src_addr, 6300 false); 6301 6302 /* fallback to copy_from_user outside mmap_lock */ 6303 if (unlikely(ret)) { 6304 ret = -ENOENT; 6305 /* Free the allocated folio which may have 6306 * consumed a reservation. 6307 */ 6308 restore_reserve_on_error(h, dst_vma, dst_addr, folio); 6309 folio_put(folio); 6310 6311 /* Allocate a temporary folio to hold the copied 6312 * contents. 6313 */ 6314 folio = alloc_hugetlb_folio_vma(h, dst_vma, dst_addr); 6315 if (!folio) { 6316 ret = -ENOMEM; 6317 goto out; 6318 } 6319 *foliop = folio; 6320 /* Set the outparam foliop and return to the caller to 6321 * copy the contents outside the lock. Don't free the 6322 * folio. 6323 */ 6324 goto out; 6325 } 6326 } else { 6327 if (vm_shared && 6328 hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) { 6329 folio_put(*foliop); 6330 ret = -EEXIST; 6331 *foliop = NULL; 6332 goto out; 6333 } 6334 6335 folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0); 6336 if (IS_ERR(folio)) { 6337 folio_put(*foliop); 6338 ret = -ENOMEM; 6339 *foliop = NULL; 6340 goto out; 6341 } 6342 ret = copy_user_large_folio(folio, *foliop, dst_addr, dst_vma); 6343 folio_put(*foliop); 6344 *foliop = NULL; 6345 if (ret) { 6346 folio_put(folio); 6347 goto out; 6348 } 6349 } 6350 6351 /* 6352 * The memory barrier inside __folio_mark_uptodate makes sure that 6353 * preceding stores to the page contents become visible before 6354 * the set_pte_at() write. 6355 */ 6356 __folio_mark_uptodate(folio); 6357 6358 /* Add shared, newly allocated pages to the page cache. 
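For MFILL_ATOMIC_CONTINUE the folio was already looked up in the page cache above, so this step is skipped.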
*/ 6359 if (vm_shared && !is_continue) { 6360 size = i_size_read(mapping->host) >> huge_page_shift(h); 6361 ret = -EFAULT; 6362 if (idx >= size) 6363 goto out_release_nounlock; 6364 6365 /* 6366 * Serialization between remove_inode_hugepages() and 6367 * hugetlb_add_to_page_cache() below happens through the 6368 * hugetlb_fault_mutex_table, which must be held by 6369 * the caller here. 6370 */ 6371 ret = hugetlb_add_to_page_cache(folio, mapping, idx); 6372 if (ret) 6373 goto out_release_nounlock; 6374 folio_in_pagecache = true; 6375 } 6376 6377 ptl = huge_pte_lock(h, dst_mm, dst_pte); 6378 6379 ret = -EIO; 6380 if (folio_test_hwpoison(folio)) 6381 goto out_release_unlock; 6382 6383 /* 6384 * We allow overwriting a pte marker: consider the case where both 6385 * MISSING|WP are registered; we first wr-protect a none pte which has no 6386 * page cache page backing it, and then access the page. 6387 */ 6388 ret = -EEXIST; 6389 if (!huge_pte_none_mostly(huge_ptep_get(dst_pte))) 6390 goto out_release_unlock; 6391 6392 if (folio_in_pagecache) 6393 page_dup_file_rmap(&folio->page, true); 6394 else 6395 hugepage_add_new_anon_rmap(folio, dst_vma, dst_addr); 6396 6397 /* 6398 * For either: (1) CONTINUE on a non-shared VMA, or (2) UFFDIO_COPY 6399 * with the wp flag set, don't set the pte write bit. 6400 */ 6401 if (wp_enabled || (is_continue && !vm_shared)) 6402 writable = 0; 6403 else 6404 writable = dst_vma->vm_flags & VM_WRITE; 6405 6406 _dst_pte = make_huge_pte(dst_vma, &folio->page, writable); 6407 /* 6408 * Always mark UFFDIO_COPY pages dirty; note that this may not be 6409 * extremely important for hugetlbfs for now since swapping is not 6410 * supported, but we should still make it clear that this page cannot be 6411 * thrown away at will, even if the write bit is not set. 6412 */ 6413 _dst_pte = huge_pte_mkdirty(_dst_pte); 6414 _dst_pte = pte_mkyoung(_dst_pte); 6415 6416 if (wp_enabled) 6417 _dst_pte = huge_pte_mkuffd_wp(_dst_pte); 6418 6419 set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, huge_page_size(h)); 6420 6421 hugetlb_count_add(pages_per_huge_page(h), dst_mm); 6422 6423 /* No need to invalidate - it was non-present before */ 6424 update_mmu_cache(dst_vma, dst_addr, dst_pte); 6425 6426 spin_unlock(ptl); 6427 if (!is_continue) 6428 folio_set_hugetlb_migratable(folio); 6429 if (vm_shared || is_continue) 6430 folio_unlock(folio); 6431 ret = 0; 6432 out: 6433 return ret; 6434 out_release_unlock: 6435 spin_unlock(ptl); 6436 if (vm_shared || is_continue) 6437 folio_unlock(folio); 6438 out_release_nounlock: 6439 if (!folio_in_pagecache) 6440 restore_reserve_on_error(h, dst_vma, dst_addr, folio); 6441 folio_put(folio); 6442 goto out; 6443 } 6444 #endif /* CONFIG_USERFAULTFD */ 6445 6446 struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma, 6447 unsigned long address, unsigned int flags, 6448 unsigned int *page_mask) 6449 { 6450 struct hstate *h = hstate_vma(vma); 6451 struct mm_struct *mm = vma->vm_mm; 6452 unsigned long haddr = address & huge_page_mask(h); 6453 struct page *page = NULL; 6454 spinlock_t *ptl; 6455 pte_t *pte, entry; 6456 int ret; 6457 6458 hugetlb_vma_lock_read(vma); 6459 pte = hugetlb_walk(vma, haddr, huge_page_size(h)); 6460 if (!pte) 6461 goto out_unlock; 6462 6463 ptl = huge_pte_lock(h, mm, pte); 6464 entry = huge_ptep_get(pte); 6465 if (pte_present(entry)) { 6466 page = pte_page(entry); 6467 6468 if (!huge_pte_write(entry)) { 6469 if (flags & FOLL_WRITE) { 6470 page = NULL; 6471 goto out; 6472 } 6473 6474 if (gup_must_unshare(vma, flags, page)) { 6475 /* Tell the caller to do unsharing */
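/* -EMLINK is not propagated to userspace; the GUP core is expected to retry the fault with FAULT_FLAG_UNSHARE (see faultin_page() in mm/gup.c) so the page is unshared before it can be pinned. */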
6476 page = ERR_PTR(-EMLINK); 6477 goto out; 6478 } 6479 } 6480 6481 page += ((address & ~huge_page_mask(h)) >> PAGE_SHIFT); 6482 6483 /* 6484 * Note that page may be a sub-page, and with vmemmap 6485 * optimizations the page struct may be read only. 6486 * try_grab_page() will increase the ref count on the 6487 * head page, so this will be OK. 6488 * 6489 * try_grab_page() should always be able to get the page here, 6490 * because we hold the ptl lock and have verified pte_present(). 6491 */ 6492 ret = try_grab_page(page, flags); 6493 6494 if (WARN_ON_ONCE(ret)) { 6495 page = ERR_PTR(ret); 6496 goto out; 6497 } 6498 6499 *page_mask = (1U << huge_page_order(h)) - 1; 6500 } 6501 out: 6502 spin_unlock(ptl); 6503 out_unlock: 6504 hugetlb_vma_unlock_read(vma); 6505 6506 /* 6507 * Fixup retval for dump requests: if pagecache doesn't exist, 6508 * don't try to allocate a new page but just skip it. 6509 */ 6510 if (!page && (flags & FOLL_DUMP) && 6511 !hugetlbfs_pagecache_present(h, vma, address)) 6512 page = ERR_PTR(-EFAULT); 6513 6514 return page; 6515 } 6516 6517 long hugetlb_change_protection(struct vm_area_struct *vma, 6518 unsigned long address, unsigned long end, 6519 pgprot_t newprot, unsigned long cp_flags) 6520 { 6521 struct mm_struct *mm = vma->vm_mm; 6522 unsigned long start = address; 6523 pte_t *ptep; 6524 pte_t pte; 6525 struct hstate *h = hstate_vma(vma); 6526 long pages = 0, psize = huge_page_size(h); 6527 bool shared_pmd = false; 6528 struct mmu_notifier_range range; 6529 unsigned long last_addr_mask; 6530 bool uffd_wp = cp_flags & MM_CP_UFFD_WP; 6531 bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE; 6532 6533 /* 6534 * In the case of shared PMDs, the area to flush could be beyond 6535 * start/end. Set range.start/range.end to cover the maximum possible 6536 * range if PMD sharing is possible. 6537 */ 6538 mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA, 6539 0, mm, start, end); 6540 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); 6541 6542 BUG_ON(address >= end); 6543 flush_cache_range(vma, range.start, range.end); 6544 6545 mmu_notifier_invalidate_range_start(&range); 6546 hugetlb_vma_lock_write(vma); 6547 i_mmap_lock_write(vma->vm_file->f_mapping); 6548 last_addr_mask = hugetlb_mask_last_page(h); 6549 for (; address < end; address += psize) { 6550 spinlock_t *ptl; 6551 ptep = hugetlb_walk(vma, address, psize); 6552 if (!ptep) { 6553 if (!uffd_wp) { 6554 address |= last_addr_mask; 6555 continue; 6556 } 6557 /* 6558 * Userfaultfd wr-protect requires pgtable 6559 * pre-allocations to install pte markers. 6560 */ 6561 ptep = huge_pte_alloc(mm, vma, address, psize); 6562 if (!ptep) { 6563 pages = -ENOMEM; 6564 break; 6565 } 6566 } 6567 ptl = huge_pte_lock(h, mm, ptep); 6568 if (huge_pmd_unshare(mm, vma, address, ptep)) { 6569 /* 6570 * When uffd-wp is enabled on the vma, unshare 6571 * shouldn't happen at all. Warn about it if it 6572 * happened due to some reason. 6573 */ 6574 WARN_ON_ONCE(uffd_wp || uffd_wp_resolve); 6575 pages++; 6576 spin_unlock(ptl); 6577 shared_pmd = true; 6578 address |= last_addr_mask; 6579 continue; 6580 } 6581 pte = huge_ptep_get(ptep); 6582 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) { 6583 /* Nothing to do. 
*/ 6584 } else if (unlikely(is_hugetlb_entry_migration(pte))) { 6585 swp_entry_t entry = pte_to_swp_entry(pte); 6586 struct page *page = pfn_swap_entry_to_page(entry); 6587 pte_t newpte = pte; 6588 6589 if (is_writable_migration_entry(entry)) { 6590 if (PageAnon(page)) 6591 entry = make_readable_exclusive_migration_entry( 6592 swp_offset(entry)); 6593 else 6594 entry = make_readable_migration_entry( 6595 swp_offset(entry)); 6596 newpte = swp_entry_to_pte(entry); 6597 pages++; 6598 } 6599 6600 if (uffd_wp) 6601 newpte = pte_swp_mkuffd_wp(newpte); 6602 else if (uffd_wp_resolve) 6603 newpte = pte_swp_clear_uffd_wp(newpte); 6604 if (!pte_same(pte, newpte)) 6605 set_huge_pte_at(mm, address, ptep, newpte, psize); 6606 } else if (unlikely(is_pte_marker(pte))) { 6607 /* No other markers apply for now. */ 6608 WARN_ON_ONCE(!pte_marker_uffd_wp(pte)); 6609 if (uffd_wp_resolve) 6610 /* Safe to modify directly (non-present->none). */ 6611 huge_pte_clear(mm, address, ptep, psize); 6612 } else if (!huge_pte_none(pte)) { 6613 pte_t old_pte; 6614 unsigned int shift = huge_page_shift(hstate_vma(vma)); 6615 6616 old_pte = huge_ptep_modify_prot_start(vma, address, ptep); 6617 pte = huge_pte_modify(old_pte, newprot); 6618 pte = arch_make_huge_pte(pte, shift, vma->vm_flags); 6619 if (uffd_wp) 6620 pte = huge_pte_mkuffd_wp(pte); 6621 else if (uffd_wp_resolve) 6622 pte = huge_pte_clear_uffd_wp(pte); 6623 huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte); 6624 pages++; 6625 } else { 6626 /* None pte */ 6627 if (unlikely(uffd_wp)) 6628 /* Safe to modify directly (none->non-present). */ 6629 set_huge_pte_at(mm, address, ptep, 6630 make_pte_marker(PTE_MARKER_UFFD_WP), 6631 psize); 6632 } 6633 spin_unlock(ptl); 6634 } 6635 /* 6636 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare 6637 * may have cleared our pud entry and done put_page on the page table: 6638 * once we release i_mmap_rwsem, another task can do the final put_page 6639 * and that page table be reused and filled with junk. If we actually 6640 * did unshare a page of pmds, flush the range corresponding to the pud. 6641 */ 6642 if (shared_pmd) 6643 flush_hugetlb_tlb_range(vma, range.start, range.end); 6644 else 6645 flush_hugetlb_tlb_range(vma, start, end); 6646 /* 6647 * No need to call mmu_notifier_arch_invalidate_secondary_tlbs() we are 6648 * downgrading page table protection not changing it to point to a new 6649 * page. 6650 * 6651 * See Documentation/mm/mmu_notifier.rst 6652 */ 6653 i_mmap_unlock_write(vma->vm_file->f_mapping); 6654 hugetlb_vma_unlock_write(vma); 6655 mmu_notifier_invalidate_range_end(&range); 6656 6657 return pages > 0 ? (pages << h->order) : pages; 6658 } 6659 6660 /* Return true if reservation was successful, false otherwise. */ 6661 bool hugetlb_reserve_pages(struct inode *inode, 6662 long from, long to, 6663 struct vm_area_struct *vma, 6664 vm_flags_t vm_flags) 6665 { 6666 long chg = -1, add = -1; 6667 struct hstate *h = hstate_inode(inode); 6668 struct hugepage_subpool *spool = subpool_inode(inode); 6669 struct resv_map *resv_map; 6670 struct hugetlb_cgroup *h_cg = NULL; 6671 long gbl_reserve, regions_needed = 0; 6672 6673 /* This should never happen */ 6674 if (from > to) { 6675 VM_WARN(1, "%s called with a negative range\n", __func__); 6676 return false; 6677 } 6678 6679 /* 6680 * vma specific semaphore used for pmd sharing and fault/truncation 6681 * synchronization 6682 */ 6683 hugetlb_vma_lock_alloc(vma); 6684 6685 /* 6686 * Only apply hugepage reservation if asked. 
At fault time, an 6687 * attempt will be made for VM_NORESERVE to allocate a page 6688 * without using reserves. 6689 */ 6690 if (vm_flags & VM_NORESERVE) 6691 return true; 6692 6693 /* 6694 * Shared mappings base their reservation on the number of pages that 6695 * are already allocated on behalf of the file. Private mappings need 6696 * to reserve the full area even if read-only as mprotect() may be 6697 * called to make the mapping read-write. Assume !vma is a shm mapping. 6698 */ 6699 if (!vma || vma->vm_flags & VM_MAYSHARE) { 6700 /* 6701 * resv_map can not be NULL as hugetlb_reserve_pages is only 6702 * called for inodes for which resv_maps were created (see 6703 * hugetlbfs_get_inode). 6704 */ 6705 resv_map = inode_resv_map(inode); 6706 6707 chg = region_chg(resv_map, from, to, &regions_needed); 6708 } else { 6709 /* Private mapping. */ 6710 resv_map = resv_map_alloc(); 6711 if (!resv_map) 6712 goto out_err; 6713 6714 chg = to - from; 6715 6716 set_vma_resv_map(vma, resv_map); 6717 set_vma_resv_flags(vma, HPAGE_RESV_OWNER); 6718 } 6719 6720 if (chg < 0) 6721 goto out_err; 6722 6723 if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h), 6724 chg * pages_per_huge_page(h), &h_cg) < 0) 6725 goto out_err; 6726 6727 if (vma && !(vma->vm_flags & VM_MAYSHARE) && h_cg) { 6728 /* For private mappings, the hugetlb_cgroup uncharge info hangs 6729 * off the resv_map. 6730 */ 6731 resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h); 6732 } 6733 6734 /* 6735 * There must be enough pages in the subpool for the mapping. If 6736 * the subpool has a minimum size, there may be some global 6737 * reservations already in place (gbl_reserve). 6738 */ 6739 gbl_reserve = hugepage_subpool_get_pages(spool, chg); 6740 if (gbl_reserve < 0) 6741 goto out_uncharge_cgroup; 6742 6743 /* 6744 * Check that enough hugepages are available for the reservation. 6745 * Hand the pages back to the subpool if there are not. 6746 */ 6747 if (hugetlb_acct_memory(h, gbl_reserve) < 0) 6748 goto out_put_pages; 6749 6750 /* 6751 * Account for the reservations made. Shared mappings record regions 6752 * that have reservations as they are shared by multiple VMAs. 6753 * When the last VMA disappears, the region map says how much 6754 * the reservation was and the page cache tells how much of 6755 * the reservation was consumed. Private mappings are per-VMA and 6756 * only the consumed reservations are tracked. When the VMA 6757 * disappears, the original reservation is the VMA size and the 6758 * consumed reservations are stored in the map. Hence, nothing 6759 * else has to be done for private mappings here. 6760 */ 6761 if (!vma || vma->vm_flags & VM_MAYSHARE) { 6762 add = region_add(resv_map, from, to, regions_needed, h, h_cg); 6763 6764 if (unlikely(add < 0)) { 6765 hugetlb_acct_memory(h, -gbl_reserve); 6766 goto out_put_pages; 6767 } else if (unlikely(chg > add)) { 6768 /* 6769 * pages in this range were added to the reserve 6770 * map between region_chg and region_add. This 6771 * indicates a race with alloc_hugetlb_folio. Adjust 6772 * the subpool and reserve counts modified above 6773 * based on the difference. 6774 */ 6775 long rsv_adjust; 6776 6777 /* 6778 * hugetlb_cgroup_uncharge_cgroup_rsvd() will put the 6779 * reference to h_cg->css. See comment below for detail.
6780 */ 6781 hugetlb_cgroup_uncharge_cgroup_rsvd( 6782 hstate_index(h), 6783 (chg - add) * pages_per_huge_page(h), h_cg); 6784 6785 rsv_adjust = hugepage_subpool_put_pages(spool, 6786 chg - add); 6787 hugetlb_acct_memory(h, -rsv_adjust); 6788 } else if (h_cg) { 6789 /* 6790 * The file_regions will hold their own reference to 6791 * h_cg->css. So we should release the reference held 6792 * via hugetlb_cgroup_charge_cgroup_rsvd() when we are 6793 * done. 6794 */ 6795 hugetlb_cgroup_put_rsvd_cgroup(h_cg); 6796 } 6797 } 6798 return true; 6799 6800 out_put_pages: 6801 /* put back original number of pages, chg */ 6802 (void)hugepage_subpool_put_pages(spool, chg); 6803 out_uncharge_cgroup: 6804 hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h), 6805 chg * pages_per_huge_page(h), h_cg); 6806 out_err: 6807 hugetlb_vma_lock_free(vma); 6808 if (!vma || vma->vm_flags & VM_MAYSHARE) 6809 /* Only call region_abort if the region_chg succeeded but the 6810 * region_add failed or didn't run. 6811 */ 6812 if (chg >= 0 && add < 0) 6813 region_abort(resv_map, from, to, regions_needed); 6814 if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) 6815 kref_put(&resv_map->refs, resv_map_release); 6816 return false; 6817 } 6818 6819 long hugetlb_unreserve_pages(struct inode *inode, long start, long end, 6820 long freed) 6821 { 6822 struct hstate *h = hstate_inode(inode); 6823 struct resv_map *resv_map = inode_resv_map(inode); 6824 long chg = 0; 6825 struct hugepage_subpool *spool = subpool_inode(inode); 6826 long gbl_reserve; 6827 6828 /* 6829 * Since this routine can be called in the evict inode path for all 6830 * hugetlbfs inodes, resv_map could be NULL. 6831 */ 6832 if (resv_map) { 6833 chg = region_del(resv_map, start, end); 6834 /* 6835 * region_del() can fail in the rare case where a region 6836 * must be split and another region descriptor can not be 6837 * allocated. If end == LONG_MAX, it will not fail. 6838 */ 6839 if (chg < 0) 6840 return chg; 6841 } 6842 6843 spin_lock(&inode->i_lock); 6844 inode->i_blocks -= (blocks_per_huge_page(h) * freed); 6845 spin_unlock(&inode->i_lock); 6846 6847 /* 6848 * If the subpool has a minimum size, the number of global 6849 * reservations to be released may be adjusted. 6850 * 6851 * Note that !resv_map implies freed == 0. So (chg - freed) 6852 * won't go negative. 6853 */ 6854 gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed)); 6855 hugetlb_acct_memory(h, -gbl_reserve); 6856 6857 return 0; 6858 } 6859 6860 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE 6861 static unsigned long page_table_shareable(struct vm_area_struct *svma, 6862 struct vm_area_struct *vma, 6863 unsigned long addr, pgoff_t idx) 6864 { 6865 unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) + 6866 svma->vm_start; 6867 unsigned long sbase = saddr & PUD_MASK; 6868 unsigned long s_end = sbase + PUD_SIZE; 6869 6870 /* Allow segments to share if only one is marked locked */ 6871 unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED_MASK; 6872 unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED_MASK; 6873 6874 /* 6875 * match the virtual addresses, permission and the alignment of the 6876 * page table page. 6877 * 6878 * Also, vma_lock (vm_private_data) is required for sharing. 
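 * In practice this requires both VMAs to map the same file offset at the same offset within a PUD-sized region, with matching flags, and the candidate VMA must cover the whole PUD-aligned range and have its vma_lock allocated.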
6879 */ 6880 if (pmd_index(addr) != pmd_index(saddr) || 6881 vm_flags != svm_flags || 6882 !range_in_vma(svma, sbase, s_end) || 6883 !svma->vm_private_data) 6884 return 0; 6885 6886 return saddr; 6887 } 6888 6889 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr) 6890 { 6891 unsigned long start = addr & PUD_MASK; 6892 unsigned long end = start + PUD_SIZE; 6893 6894 #ifdef CONFIG_USERFAULTFD 6895 if (uffd_disable_huge_pmd_share(vma)) 6896 return false; 6897 #endif 6898 /* 6899 * check on proper vm_flags and page table alignment 6900 */ 6901 if (!(vma->vm_flags & VM_MAYSHARE)) 6902 return false; 6903 if (!vma->vm_private_data) /* vma lock required for sharing */ 6904 return false; 6905 if (!range_in_vma(vma, start, end)) 6906 return false; 6907 return true; 6908 } 6909 6910 /* 6911 * Determine if start,end range within vma could be mapped by shared pmd. 6912 * If yes, adjust start and end to cover range associated with possible 6913 * shared pmd mappings. 6914 */ 6915 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, 6916 unsigned long *start, unsigned long *end) 6917 { 6918 unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE), 6919 v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE); 6920 6921 /* 6922 * vma needs to span at least one aligned PUD size, and the range 6923 * must be at least partially within in. 6924 */ 6925 if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) || 6926 (*end <= v_start) || (*start >= v_end)) 6927 return; 6928 6929 /* Extend the range to be PUD aligned for a worst case scenario */ 6930 if (*start > v_start) 6931 *start = ALIGN_DOWN(*start, PUD_SIZE); 6932 6933 if (*end < v_end) 6934 *end = ALIGN(*end, PUD_SIZE); 6935 } 6936 6937 /* 6938 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc() 6939 * and returns the corresponding pte. While this is not necessary for the 6940 * !shared pmd case because we can allocate the pmd later as well, it makes the 6941 * code much cleaner. pmd allocation is essential for the shared case because 6942 * pud has to be populated inside the same i_mmap_rwsem section - otherwise 6943 * racing tasks could either miss the sharing (see huge_pte_offset) or select a 6944 * bad pmd for sharing. 6945 */ 6946 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma, 6947 unsigned long addr, pud_t *pud) 6948 { 6949 struct address_space *mapping = vma->vm_file->f_mapping; 6950 pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + 6951 vma->vm_pgoff; 6952 struct vm_area_struct *svma; 6953 unsigned long saddr; 6954 pte_t *spte = NULL; 6955 pte_t *pte; 6956 6957 i_mmap_lock_read(mapping); 6958 vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) { 6959 if (svma == vma) 6960 continue; 6961 6962 saddr = page_table_shareable(svma, vma, addr, idx); 6963 if (saddr) { 6964 spte = hugetlb_walk(svma, saddr, 6965 vma_mmu_pagesize(svma)); 6966 if (spte) { 6967 get_page(virt_to_page(spte)); 6968 break; 6969 } 6970 } 6971 } 6972 6973 if (!spte) 6974 goto out; 6975 6976 spin_lock(&mm->page_table_lock); 6977 if (pud_none(*pud)) { 6978 pud_populate(mm, pud, 6979 (pmd_t *)((unsigned long)spte & PAGE_MASK)); 6980 mm_inc_nr_pmds(mm); 6981 } else { 6982 put_page(virt_to_page(spte)); 6983 } 6984 spin_unlock(&mm->page_table_lock); 6985 out: 6986 pte = (pte_t *)pmd_alloc(mm, pud, addr); 6987 i_mmap_unlock_read(mapping); 6988 return pte; 6989 } 6990 6991 /* 6992 * unmap huge page backed by shared pte. 6993 * 6994 * Hugetlb pte page is ref counted at the time of mapping. 
If pte is shared 6995 * indicated by page_count > 1, unmap is achieved by clearing pud and 6996 * decrementing the ref count. If count == 1, the pte page is not shared. 6997 * 6998 * Called with page table lock held. 6999 * 7000 * returns: 1 successfully unmapped a shared pte page 7001 * 0 the underlying pte page is not shared, or it is the last user 7002 */ 7003 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, 7004 unsigned long addr, pte_t *ptep) 7005 { 7006 pgd_t *pgd = pgd_offset(mm, addr); 7007 p4d_t *p4d = p4d_offset(pgd, addr); 7008 pud_t *pud = pud_offset(p4d, addr); 7009 7010 i_mmap_assert_write_locked(vma->vm_file->f_mapping); 7011 hugetlb_vma_assert_locked(vma); 7012 BUG_ON(page_count(virt_to_page(ptep)) == 0); 7013 if (page_count(virt_to_page(ptep)) == 1) 7014 return 0; 7015 7016 pud_clear(pud); 7017 put_page(virt_to_page(ptep)); 7018 mm_dec_nr_pmds(mm); 7019 return 1; 7020 } 7021 7022 #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */ 7023 7024 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma, 7025 unsigned long addr, pud_t *pud) 7026 { 7027 return NULL; 7028 } 7029 7030 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, 7031 unsigned long addr, pte_t *ptep) 7032 { 7033 return 0; 7034 } 7035 7036 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, 7037 unsigned long *start, unsigned long *end) 7038 { 7039 } 7040 7041 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr) 7042 { 7043 return false; 7044 } 7045 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */ 7046 7047 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB 7048 pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, 7049 unsigned long addr, unsigned long sz) 7050 { 7051 pgd_t *pgd; 7052 p4d_t *p4d; 7053 pud_t *pud; 7054 pte_t *pte = NULL; 7055 7056 pgd = pgd_offset(mm, addr); 7057 p4d = p4d_alloc(mm, pgd, addr); 7058 if (!p4d) 7059 return NULL; 7060 pud = pud_alloc(mm, p4d, addr); 7061 if (pud) { 7062 if (sz == PUD_SIZE) { 7063 pte = (pte_t *)pud; 7064 } else { 7065 BUG_ON(sz != PMD_SIZE); 7066 if (want_pmd_share(vma, addr) && pud_none(*pud)) 7067 pte = huge_pmd_share(mm, vma, addr, pud); 7068 else 7069 pte = (pte_t *)pmd_alloc(mm, pud, addr); 7070 } 7071 } 7072 7073 if (pte) { 7074 pte_t pteval = ptep_get_lockless(pte); 7075 7076 BUG_ON(pte_present(pteval) && !pte_huge(pteval)); 7077 } 7078 7079 return pte; 7080 } 7081 7082 /* 7083 * huge_pte_offset() - Walk the page table to resolve the hugepage 7084 * entry at address @addr 7085 * 7086 * Return: Pointer to page table entry (PUD or PMD) for 7087 * address @addr, or NULL if a !p*d_present() entry is encountered and the 7088 * size @sz doesn't match the hugepage size at this level of the page 7089 * table. 
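 *
 * The returned entry may be freed by a concurrent huge_pmd_unshare(); as
 * noted in hugetlb_fault(), callers are expected to hold the hugetlb
 * vma_lock (or i_mmap_rwsem) across the walk to keep the pointer valid.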
7090 */ 7091 pte_t *huge_pte_offset(struct mm_struct *mm, 7092 unsigned long addr, unsigned long sz) 7093 { 7094 pgd_t *pgd; 7095 p4d_t *p4d; 7096 pud_t *pud; 7097 pmd_t *pmd; 7098 7099 pgd = pgd_offset(mm, addr); 7100 if (!pgd_present(*pgd)) 7101 return NULL; 7102 p4d = p4d_offset(pgd, addr); 7103 if (!p4d_present(*p4d)) 7104 return NULL; 7105 7106 pud = pud_offset(p4d, addr); 7107 if (sz == PUD_SIZE) 7108 /* must be pud huge, non-present or none */ 7109 return (pte_t *)pud; 7110 if (!pud_present(*pud)) 7111 return NULL; 7112 /* must have a valid entry and size to go further */ 7113 7114 pmd = pmd_offset(pud, addr); 7115 /* must be pmd huge, non-present or none */ 7116 return (pte_t *)pmd; 7117 } 7118 7119 /* 7120 * Return a mask that can be used to update an address to the last huge 7121 * page in a page table page mapping size. Used to skip non-present 7122 * page table entries when linearly scanning address ranges. Architectures 7123 * with unique huge page to page table relationships can define their own 7124 * version of this routine. 7125 */ 7126 unsigned long hugetlb_mask_last_page(struct hstate *h) 7127 { 7128 unsigned long hp_size = huge_page_size(h); 7129 7130 if (hp_size == PUD_SIZE) 7131 return P4D_SIZE - PUD_SIZE; 7132 else if (hp_size == PMD_SIZE) 7133 return PUD_SIZE - PMD_SIZE; 7134 else 7135 return 0UL; 7136 } 7137 7138 #else 7139 7140 /* See description above. Architectures can provide their own version. */ 7141 __weak unsigned long hugetlb_mask_last_page(struct hstate *h) 7142 { 7143 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE 7144 if (huge_page_size(h) == PMD_SIZE) 7145 return PUD_SIZE - PMD_SIZE; 7146 #endif 7147 return 0UL; 7148 } 7149 7150 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */ 7151 7152 /* 7153 * These functions are overwritable if your architecture needs its own 7154 * behavior. 
7155 */ 7156 bool isolate_hugetlb(struct folio *folio, struct list_head *list) 7157 { 7158 bool ret = true; 7159 7160 spin_lock_irq(&hugetlb_lock); 7161 if (!folio_test_hugetlb(folio) || 7162 !folio_test_hugetlb_migratable(folio) || 7163 !folio_try_get(folio)) { 7164 ret = false; 7165 goto unlock; 7166 } 7167 folio_clear_hugetlb_migratable(folio); 7168 list_move_tail(&folio->lru, list); 7169 unlock: 7170 spin_unlock_irq(&hugetlb_lock); 7171 return ret; 7172 } 7173 7174 int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison) 7175 { 7176 int ret = 0; 7177 7178 *hugetlb = false; 7179 spin_lock_irq(&hugetlb_lock); 7180 if (folio_test_hugetlb(folio)) { 7181 *hugetlb = true; 7182 if (folio_test_hugetlb_freed(folio)) 7183 ret = 0; 7184 else if (folio_test_hugetlb_migratable(folio) || unpoison) 7185 ret = folio_try_get(folio); 7186 else 7187 ret = -EBUSY; 7188 } 7189 spin_unlock_irq(&hugetlb_lock); 7190 return ret; 7191 } 7192 7193 int get_huge_page_for_hwpoison(unsigned long pfn, int flags, 7194 bool *migratable_cleared) 7195 { 7196 int ret; 7197 7198 spin_lock_irq(&hugetlb_lock); 7199 ret = __get_huge_page_for_hwpoison(pfn, flags, migratable_cleared); 7200 spin_unlock_irq(&hugetlb_lock); 7201 return ret; 7202 } 7203 7204 void folio_putback_active_hugetlb(struct folio *folio) 7205 { 7206 spin_lock_irq(&hugetlb_lock); 7207 folio_set_hugetlb_migratable(folio); 7208 list_move_tail(&folio->lru, &(folio_hstate(folio))->hugepage_activelist); 7209 spin_unlock_irq(&hugetlb_lock); 7210 folio_put(folio); 7211 } 7212 7213 void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason) 7214 { 7215 struct hstate *h = folio_hstate(old_folio); 7216 7217 hugetlb_cgroup_migrate(old_folio, new_folio); 7218 set_page_owner_migrate_reason(&new_folio->page, reason); 7219 7220 /* 7221 * transfer temporary state of the new hugetlb folio. This is 7222 * reverse to other transitions because the newpage is going to 7223 * be final while the old one will be freed so it takes over 7224 * the temporary status. 7225 * 7226 * Also note that we have to transfer the per-node surplus state 7227 * here as well otherwise the global surplus count will not match 7228 * the per-node's. 7229 */ 7230 if (folio_test_hugetlb_temporary(new_folio)) { 7231 int old_nid = folio_nid(old_folio); 7232 int new_nid = folio_nid(new_folio); 7233 7234 folio_set_hugetlb_temporary(old_folio); 7235 folio_clear_hugetlb_temporary(new_folio); 7236 7237 7238 /* 7239 * There is no need to transfer the per-node surplus state 7240 * when we do not cross the node. 7241 */ 7242 if (new_nid == old_nid) 7243 return; 7244 spin_lock_irq(&hugetlb_lock); 7245 if (h->surplus_huge_pages_node[old_nid]) { 7246 h->surplus_huge_pages_node[old_nid]--; 7247 h->surplus_huge_pages_node[new_nid]++; 7248 } 7249 spin_unlock_irq(&hugetlb_lock); 7250 } 7251 } 7252 7253 static void hugetlb_unshare_pmds(struct vm_area_struct *vma, 7254 unsigned long start, 7255 unsigned long end) 7256 { 7257 struct hstate *h = hstate_vma(vma); 7258 unsigned long sz = huge_page_size(h); 7259 struct mm_struct *mm = vma->vm_mm; 7260 struct mmu_notifier_range range; 7261 unsigned long address; 7262 spinlock_t *ptl; 7263 pte_t *ptep; 7264 7265 if (!(vma->vm_flags & VM_MAYSHARE)) 7266 return; 7267 7268 if (start >= end) 7269 return; 7270 7271 flush_cache_range(vma, start, end); 7272 /* 7273 * No need to call adjust_range_if_pmd_sharing_possible(), because 7274 * we have already done the PUD_SIZE alignment. 
7275 */ 7276 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, 7277 start, end); 7278 mmu_notifier_invalidate_range_start(&range); 7279 hugetlb_vma_lock_write(vma); 7280 i_mmap_lock_write(vma->vm_file->f_mapping); 7281 for (address = start; address < end; address += PUD_SIZE) { 7282 ptep = hugetlb_walk(vma, address, sz); 7283 if (!ptep) 7284 continue; 7285 ptl = huge_pte_lock(h, mm, ptep); 7286 huge_pmd_unshare(mm, vma, address, ptep); 7287 spin_unlock(ptl); 7288 } 7289 flush_hugetlb_tlb_range(vma, start, end); 7290 i_mmap_unlock_write(vma->vm_file->f_mapping); 7291 hugetlb_vma_unlock_write(vma); 7292 /* 7293 * No need to call mmu_notifier_arch_invalidate_secondary_tlbs(), see 7294 * Documentation/mm/mmu_notifier.rst. 7295 */ 7296 mmu_notifier_invalidate_range_end(&range); 7297 } 7298 7299 /* 7300 * This function will unconditionally remove all the shared pmd pgtable entries 7301 * within the specific vma for a hugetlbfs memory range. 7302 */ 7303 void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) 7304 { 7305 hugetlb_unshare_pmds(vma, ALIGN(vma->vm_start, PUD_SIZE), 7306 ALIGN_DOWN(vma->vm_end, PUD_SIZE)); 7307 } 7308 7309 #ifdef CONFIG_CMA 7310 static bool cma_reserve_called __initdata; 7311 7312 static int __init cmdline_parse_hugetlb_cma(char *p) 7313 { 7314 int nid, count = 0; 7315 unsigned long tmp; 7316 char *s = p; 7317 7318 while (*s) { 7319 if (sscanf(s, "%lu%n", &tmp, &count) != 1) 7320 break; 7321 7322 if (s[count] == ':') { 7323 if (tmp >= MAX_NUMNODES) 7324 break; 7325 nid = array_index_nospec(tmp, MAX_NUMNODES); 7326 7327 s += count + 1; 7328 tmp = memparse(s, &s); 7329 hugetlb_cma_size_in_node[nid] = tmp; 7330 hugetlb_cma_size += tmp; 7331 7332 /* 7333 * Skip the separator if have one, otherwise 7334 * break the parsing. 7335 */ 7336 if (*s == ',') 7337 s++; 7338 else 7339 break; 7340 } else { 7341 hugetlb_cma_size = memparse(p, &p); 7342 break; 7343 } 7344 } 7345 7346 return 0; 7347 } 7348 7349 early_param("hugetlb_cma", cmdline_parse_hugetlb_cma); 7350 7351 void __init hugetlb_cma_reserve(int order) 7352 { 7353 unsigned long size, reserved, per_node; 7354 bool node_specific_cma_alloc = false; 7355 int nid; 7356 7357 cma_reserve_called = true; 7358 7359 if (!hugetlb_cma_size) 7360 return; 7361 7362 for (nid = 0; nid < MAX_NUMNODES; nid++) { 7363 if (hugetlb_cma_size_in_node[nid] == 0) 7364 continue; 7365 7366 if (!node_online(nid)) { 7367 pr_warn("hugetlb_cma: invalid node %d specified\n", nid); 7368 hugetlb_cma_size -= hugetlb_cma_size_in_node[nid]; 7369 hugetlb_cma_size_in_node[nid] = 0; 7370 continue; 7371 } 7372 7373 if (hugetlb_cma_size_in_node[nid] < (PAGE_SIZE << order)) { 7374 pr_warn("hugetlb_cma: cma area of node %d should be at least %lu MiB\n", 7375 nid, (PAGE_SIZE << order) / SZ_1M); 7376 hugetlb_cma_size -= hugetlb_cma_size_in_node[nid]; 7377 hugetlb_cma_size_in_node[nid] = 0; 7378 } else { 7379 node_specific_cma_alloc = true; 7380 } 7381 } 7382 7383 /* Validate the CMA size again in case some invalid nodes specified. */ 7384 if (!hugetlb_cma_size) 7385 return; 7386 7387 if (hugetlb_cma_size < (PAGE_SIZE << order)) { 7388 pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n", 7389 (PAGE_SIZE << order) / SZ_1M); 7390 hugetlb_cma_size = 0; 7391 return; 7392 } 7393 7394 if (!node_specific_cma_alloc) { 7395 /* 7396 * If 3 GB area is requested on a machine with 4 numa nodes, 7397 * let's allocate 1 GB on first three nodes and ignore the last one. 
7398 */ 7399 per_node = DIV_ROUND_UP(hugetlb_cma_size, nr_online_nodes); 7400 pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n", 7401 hugetlb_cma_size / SZ_1M, per_node / SZ_1M); 7402 } 7403 7404 reserved = 0; 7405 for_each_online_node(nid) { 7406 int res; 7407 char name[CMA_MAX_NAME]; 7408 7409 if (node_specific_cma_alloc) { 7410 if (hugetlb_cma_size_in_node[nid] == 0) 7411 continue; 7412 7413 size = hugetlb_cma_size_in_node[nid]; 7414 } else { 7415 size = min(per_node, hugetlb_cma_size - reserved); 7416 } 7417 7418 size = round_up(size, PAGE_SIZE << order); 7419 7420 snprintf(name, sizeof(name), "hugetlb%d", nid); 7421 /* 7422 * Note that 'order per bit' is based on smallest size that 7423 * may be returned to CMA allocator in the case of 7424 * huge page demotion. 7425 */ 7426 res = cma_declare_contiguous_nid(0, size, 0, 7427 PAGE_SIZE << HUGETLB_PAGE_ORDER, 7428 0, false, name, 7429 &hugetlb_cma[nid], nid); 7430 if (res) { 7431 pr_warn("hugetlb_cma: reservation failed: err %d, node %d", 7432 res, nid); 7433 continue; 7434 } 7435 7436 reserved += size; 7437 pr_info("hugetlb_cma: reserved %lu MiB on node %d\n", 7438 size / SZ_1M, nid); 7439 7440 if (reserved >= hugetlb_cma_size) 7441 break; 7442 } 7443 7444 if (!reserved) 7445 /* 7446 * hugetlb_cma_size is used to determine if allocations from 7447 * cma are possible. Set to zero if no cma regions are set up. 7448 */ 7449 hugetlb_cma_size = 0; 7450 } 7451 7452 static void __init hugetlb_cma_check(void) 7453 { 7454 if (!hugetlb_cma_size || cma_reserve_called) 7455 return; 7456 7457 pr_warn("hugetlb_cma: the option isn't supported by current arch\n"); 7458 } 7459 7460 #endif /* CONFIG_CMA */ 7461
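/*
 * Illustrative examples of the hugetlb_cma= syntax accepted by
 * cmdline_parse_hugetlb_cma() above (the sizes are made up for
 * illustration; memparse() accepts the usual K/M/G suffixes):
 *
 *   hugetlb_cma=4G           one global area, spread over the online nodes
 *   hugetlb_cma=0:1G,2:2G    1 GiB reserved on node 0 and 2 GiB on node 2
 */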
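/*
 * Worked example for adjust_range_if_pmd_sharing_possible(), assuming
 * x86-64 where PUD_SIZE is 1 GiB: for a MAP_SHARED hugetlb VMA spanning
 * [0x40000000, 0xc0000000) and an unmap range of [0x40200000, 0x40400000),
 * the range is widened to [ALIGN_DOWN(0x40200000, 1G), ALIGN(0x40400000, 1G))
 * = [0x40000000, 0x80000000), so that any page tables shared at PUD
 * granularity are covered by the mmu notifier range and the later TLB flush.
 */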